from original import *
import shutil, glob
from easyfuncs import download_from_url, CachedModels
import os
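# Default folder for training audio; the Train tab's dataset path points here by default.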
os.makedirs("dataset", exist_ok=True)
model_library = CachedModels()
# Helper moved outside to avoid lambda issues in UI definition
def get_audio_paths(path):
if not os.path.exists(path):
return []
return [os.path.abspath(os.path.join(path, f)) for f in os.listdir(path) if os.path.splitext(f)[1].lower() in ('.mp3', '.wav', '.flac', '.ogg')]
with gr.Blocks(title="🔊", theme=gr.themes.Base(primary_hue="blue", neutral_hue="zinc")) as app:
with gr.Tabs():
with gr.Tab("Inference"):
with gr.Row():
# Get initial model choices from original.py
initial_model_choices = sorted(names) if names else []
voice_model = gr.Dropdown(
label="Model Voice",
choices=initial_model_choices,
value=initial_model_choices[0] if initial_model_choices else None,
interactive=True
)
refresh_button = gr.Button("Refresh", variant="primary")
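# Hidden speaker-ID slider, kept at 0 since multi-speaker selection is not exposed in this UI.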
spk_item = gr.Slider(
minimum=0,
maximum=2333,
step=1,
label="Speaker ID",
value=0,
visible=False,
interactive=True,
)
vc_transform0 = gr.Number(
label="Pitch",
value=0
)
but0 = gr.Button(value="Convert", variant="primary")
with gr.Row():
with gr.Column():
with gr.Row():
dropbox = gr.Audio(label="Drop your audio here & hit the Refresh button.", type="filepath")
with gr.Row():
record_button = gr.Audio(sources=["microphone"], label="OR Record audio.", type="filepath")
with gr.Row():
input_audio0 = gr.Dropdown(
label="Input Path",
value=None,
choices=[],
allow_custom_value=True
)
with gr.Row():
audio_player = gr.Audio()
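# Keep the preview player in sync with the Input Path dropdown.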
def update_audio_player(path):
if path and os.path.exists(path):
return path
return None
input_audio0.change(
fn=update_audio_player,
inputs=[input_audio0],
outputs=[audio_player]
)
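# Recorded or dropped audio arrives as a temp file path and is placed into the Input Path dropdown.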
def handle_record(audio):
if audio:
return audio
return None
record_button.change(
fn=handle_record,
inputs=[record_button],
outputs=[input_audio0]
)
def handle_upload(audio):
if audio:
return audio
return None
dropbox.change(
fn=handle_upload,
inputs=[dropbox],
outputs=[input_audio0]
)
with gr.Column():
with gr.Accordion("Change Index", open=False):
file_index2 = gr.Dropdown(
label="Change Index",
choices=[],
interactive=True,
value=None
)
index_rate1 = gr.Slider(
minimum=0,
maximum=1,
label="Index Strength",
value=0.5,
interactive=True,
)
vc_output2 = gr.Audio(label="Output")
with gr.Accordion("General Settings", open=False):
f0method0 = gr.Radio(
label="Method",
choices=["pm", "harvest", "crepe", "rmvpe"]
if not config.dml
else ["pm", "harvest", "rmvpe"],
value="rmvpe",
interactive=True,
)
filter_radius0 = gr.Slider(
minimum=0,
maximum=7,
label="Breathiness Reduction (Harvest only)",
value=3,
step=1,
interactive=True,
)
resample_sr0 = gr.Slider(
minimum=0,
maximum=48000,
label="Resample",
value=0,
step=1,
interactive=True,
visible=False
)
rms_mix_rate0 = gr.Slider(
minimum=0,
maximum=1,
label="Volume Normalization",
value=0,
interactive=True,
)
protect0 = gr.Slider(
minimum=0,
maximum=0.5,
label="Breathiness Protection (0 is enabled, 0.5 is disabled)",
value=0.33,
step=0.01,
interactive=True,
)
file_index1 = gr.Textbox(
label="Index Path",
interactive=True,
visible=False
)
# Consolidated refresh logic
def refresh_ui(current_model, current_index, current_audio):
# Get updated lists from change_choices which returns dictionaries
try:
model_result, index_result = change_choices()
model_choices = model_result["choices"]
index_choices = index_result["choices"]
except Exception as e:
print(f"Error in change_choices: {e}")
model_choices = []
index_choices = []
audio_paths = get_audio_paths('audios')
# Current selections arrive as event inputs (a component's .value only holds its initial value)
# Set defaults with fallback logic
default_model = (current_model if current_model in model_choices
else (model_choices[0] if model_choices else None))
default_index = (current_index if current_index in index_choices
else (index_choices[0] if index_choices else None))
default_audio = (current_audio if current_audio in audio_paths
else (audio_paths[0] if audio_paths else None))
return (
gr.update(choices=model_choices, value=default_model), # voice_model
gr.update(choices=index_choices, value=default_index), # file_index2
gr.update(choices=audio_paths, value=default_audio) # input_audio0
)
refresh_button.click(
fn=refresh_ui,
inputs=[voice_model, file_index2, input_audio0],
outputs=[voice_model, file_index2, input_audio0],
api_name="infer_refresh",
)
with gr.Row():
f0_file = gr.File(label="F0 Path", visible=False)
with gr.Row():
vc_output1 = gr.Textbox(label="Information", placeholder="Welcome!", visible=False)
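# Positional arguments below must line up with vc.vc_single's signature in original.py.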
but0.click(
vc.vc_single,
[
spk_item,
input_audio0,
vc_transform0,
f0_file,
f0method0,
file_index1,
file_index2,
index_rate1,
filter_radius0,
resample_sr0,
rms_mix_rate0,
protect0,
],
[vc_output1, vc_output2],
api_name="infer_convert",
)
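# vc.get_vc returns five updates (speaker ID, two protect values, two index fields); this UI reuses protect0 and file_index2 for the duplicate slots.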
voice_model.change(
fn=vc.get_vc,
inputs=[voice_model, protect0, protect0],
outputs=[spk_item, protect0, protect0, file_index2, file_index2],
api_name="infer_change_voice",
)
with gr.Tab("Download Models"):
with gr.Row():
url_input = gr.Textbox(label="URL to model", value="", placeholder="https://...", scale=6)
name_output = gr.Textbox(label="Save as", value="", placeholder="MyModel", scale=2)
url_download = gr.Button(value="Download Model", scale=2)
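# Whatever download_from_url(url, name) returns is written back into the URL box.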
url_download.click(
inputs=[url_input, name_output],
outputs=[url_input],
fn=download_from_url,
)
with gr.Row():
model_browser = gr.Dropdown(choices=list(model_library.models.keys()), label="OR Search Models (Quality UNKNOWN)", scale=5)
download_from_browser = gr.Button(value="Get", scale=2)
download_from_browser.click(
inputs=[model_browser],
outputs=[model_browser],
fn=lambda model: download_from_url(model_library.models[model], model),
)
with gr.Tab("Train"):
with gr.Row():
with gr.Column():
training_name = gr.Textbox(label="Name your model", value="My-Voice", placeholder="My-Voice")
np7 = gr.Slider(
minimum=0,
maximum=config.n_cpu,
step=1,
label="Number of CPU processes used to extract pitch features",
value=int(np.ceil(config.n_cpu / 1.5)),
interactive=True,
)
sr2 = gr.Radio(
label="Sampling Rate",
choices=["40k", "32k"],
value="32k",
interactive=True,
visible=False
)
if_f0_3 = gr.Radio(
label="Will your model be used for singing? If not, you can ignore this.",
choices=[True, False],
value=True,
interactive=True,
visible=False
)
version19 = gr.Radio(
label="Version",
choices=["v1", "v2"],
value="v2",
interactive=True,
visible=False,
)
dataset_folder = gr.Textbox(
label="dataset folder", value='dataset'
)
easy_uploader = gr.File(label="Drop your audio files here", file_count="multiple", file_types=["audio"])
but1 = gr.Button("1. Process", variant="primary")
info1 = gr.Textbox(label="Information", value="", visible=True)
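# Copy uploaded clips into the dataset folder so "1. Process" can pick them up.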
def handle_file_upload(files, folder):
if not folder or folder.strip() == "":
gr.Warning('Please enter a folder name for your dataset')
return "No dataset folder set; nothing was saved."
if not os.path.exists(folder):
os.makedirs(folder, exist_ok=True)
saved_files = []
for file_obj in files:
if hasattr(file_obj, 'name'): # Handle Gradio file object
filename = os.path.basename(file_obj.name)
dest_path = os.path.join(folder, filename)
shutil.copy2(file_obj.name, dest_path)
saved_files.append(dest_path)
elif isinstance(file_obj, str): # Handle string path
filename = os.path.basename(file_obj)
dest_path = os.path.join(folder, filename)
shutil.copy2(file_obj, dest_path)
saved_files.append(dest_path)
return f"Saved {len(saved_files)} file(s) to {folder}"
easy_uploader.upload(
fn=handle_file_upload,
inputs=[easy_uploader, dataset_folder],
outputs=[info1]
)
gpus6 = gr.Textbox(
label="Enter the GPU numbers to use separated by -, (e.g. 0-1-2)",
value=gpus,
interactive=True,
visible=F0GPUVisible,
)
gpu_info9 = gr.Textbox(
label="GPU Info", value=gpu_info, visible=F0GPUVisible
)
spk_id5 = gr.Slider(
minimum=0,
maximum=4,
step=1,
label="Speaker ID",
value=0,
interactive=True,
visible=False
)
but1.click(
preprocess_dataset,
[dataset_folder, training_name, sr2, np7],
[info1],
api_name="train_preprocess",
)
with gr.Column():
f0method8 = gr.Radio(
label="F0 extraction method",
choices=["pm", "harvest", "dio", "rmvpe", "rmvpe_gpu"],
value="rmvpe_gpu",
interactive=True,
)
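# "rmvpe_gpu" runs RMVPE pitch extraction on the GPUs listed in the box below; the box is only shown when F0GPUVisible is True.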
gpus_rmvpe = gr.Textbox(
label="GPU numbers to use separated by -, (e.g. 0-1-2)",
value="%s-%s" % (gpus, gpus),
interactive=True,
visible=F0GPUVisible,
)
but2 = gr.Button("2. Extract Features", variant="primary")
info2 = gr.Textbox(label="Information", value="", max_lines=8)
f0method8.change(
fn=change_f0_method,
inputs=[f0method8],
outputs=[gpus_rmvpe],
)
but2.click(
extract_f0_feature,
[
gpus6,
np7,
f0method8,
if_f0_3,
training_name,
version19,
gpus_rmvpe,
],
[info2],
api_name="train_extract_f0_feature",
)
with gr.Column():
total_epoch11 = gr.Slider(
minimum=2,
maximum=1000,
step=1,
label="Epochs (more epochs may improve quality but takes longer)",
value=150,
interactive=True,
)
but4 = gr.Button("3. Train Index", variant="primary")
but3 = gr.Button("4. Train Model", variant="primary")
info3 = gr.Textbox(label="Information", value="", max_lines=10)
with gr.Accordion(label="General Settings", open=False):
gpus16 = gr.Textbox(
label="GPUs separated by -, (e.g. 0-1-2)",
value="0",
interactive=True,
visible=True
)
save_epoch10 = gr.Slider(
minimum=1,
maximum=50,
step=1,
label="Weight Saving Frequency",
value=25,
interactive=True,
)
batch_size12 = gr.Slider(
minimum=1,
maximum=40,
step=1,
label="Batch Size",
value=default_batch_size,
interactive=True,
)
if_save_latest13 = gr.Radio(
label="Only save the latest model",
choices=["yes", "no"],
value="yes",
interactive=True,
visible=False
)
if_cache_gpu17 = gr.Radio(
label="If your dataset is UNDER 10 minutes, cache it to train faster",
choices=["yes", "no"],
value="no",
interactive=True,
)
if_save_every_weights18 = gr.Radio(
label="Save small model at every save point",
choices=["yes", "no"],
value="yes",
interactive=True,
)
with gr.Accordion(label="Change pretrains", open=False):
def get_pretrained_choices(sr, if_f0, version):
# Use the original functions from original.py
if version == "v1":
path_str = ""
else:
path_str = "_v2"
if if_f0:
f0_str = "f0"
else:
f0_str = ""
pretrained_G, pretrained_D = get_pretrained_models(path_str, f0_str, sr)
return [pretrained_G] if pretrained_G else [], [pretrained_D] if pretrained_D else []
pretrained_G14 = gr.Dropdown(
label="pretrained G",
choices=[],
value="",
interactive=True,
visible=True,
allow_custom_value=True
)
pretrained_D15 = gr.Dropdown(
label="pretrained D",
choices=[],
value="",
visible=True,
interactive=True,
allow_custom_value=True
)
def update_pretrained_dropdowns(sr, if_f0, ver):
sr_str = sr if isinstance(sr, str) else str(sr)
g_choices, d_choices = get_pretrained_choices(sr_str, if_f0, ver)
return (
gr.update(choices=g_choices, value=g_choices[0] if g_choices else ""),
gr.update(choices=d_choices, value=d_choices[0] if d_choices else "")
)
# Bind update function to changes
sr2.change(fn=update_pretrained_dropdowns, inputs=[sr2, if_f0_3, version19], outputs=[pretrained_G14, pretrained_D15])
version19.change(fn=update_pretrained_dropdowns, inputs=[sr2, if_f0_3, version19], outputs=[pretrained_G14, pretrained_D15])
if_f0_3.change(fn=update_pretrained_dropdowns, inputs=[sr2, if_f0_3, version19], outputs=[pretrained_G14, pretrained_D15])
with gr.Row():
download_model = gr.Button('5. Download Model')
with gr.Row():
model_files = gr.File(label='Your Model and Index file can be downloaded here:', file_count="multiple")
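# Collect the model's .pth files and added_*.index from logs/<name> for download.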
def download_model_files(name):
if not name or name.strip() == "":
return [], "Please enter a model name"
model_path = f'logs/{name}'
index_pattern = f'logs/{name}/added_*.index'
files = []
if os.path.exists(model_path):
files.extend([os.path.join(model_path, f) for f in os.listdir(model_path) if f.endswith('.pth')])
files.extend(glob.glob(index_pattern))
return files, f"Found {len(files)} files"
download_model.click(
fn=download_model_files,
inputs=[training_name],
outputs=[model_files, info3]
)
if_f0_3.change(
fn=change_f0,
inputs=[if_f0_3, sr2, version19],
outputs=[f0method8, pretrained_G14, pretrained_D15],
)
but5 = gr.Button("1 Click Training", variant="primary", visible=False)
but3.click(
click_train,
[
training_name,
sr2,
if_f0_3,
spk_id5,
save_epoch10,
total_epoch11,
batch_size12,
if_save_latest13,
pretrained_G14,
pretrained_D15,
gpus16,
if_cache_gpu17,
if_save_every_weights18,
version19,
],
info3,
api_name="train_start",
)
but4.click(train_index, [training_name, version19], info3)
but5.click(
train1key,
[
training_name,
sr2,
if_f0_3,
dataset_folder,
spk_id5,
np7,
f0method8,
save_epoch10,
total_epoch11,
batch_size12,
if_save_latest13,
pretrained_G14,
pretrained_D15,
gpus16,
if_cache_gpu17,
if_save_every_weights18,
version19,
gpus_rmvpe,
],
info3,
api_name="train_start_all",
)
# Populate UI on load
def on_load():
# Initial refresh
model_result, index_result = change_choices()
audio_paths = get_audio_paths('audios')
default_model = model_result["choices"][0] if model_result["choices"] else None
default_index = index_result["choices"][0] if index_result["choices"] else None
default_audio = audio_paths[0] if audio_paths else None
return (
gr.update(choices=model_result["choices"], value=default_model), # voice_model
gr.update(choices=index_result["choices"], value=default_index), # file_index2
gr.update(choices=audio_paths, value=default_audio) # input_audio0
)
app.load(
fn=on_load,
inputs=[],
outputs=[voice_model, file_index2, input_audio0]
)
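# On Colab, expose a public share link; otherwise bind to all interfaces on the configured port.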
if config.iscolab:
app.launch(share=True, quiet=False)
else:
app.launch(
server_name="0.0.0.0",
inbrowser=not config.noautoopen,
server_port=config.listen_port,
quiet=True,
)