Upload 9 files
- tts/vocoder_models--en--ljspeech--hifigan_v2/config.json +158 -0
- tts/vocoder_models--en--ljspeech--hifigan_v2/model_file.pth +3 -0
- tts/vocoder_models--en--ljspeech--multiband-melgan/config.json +197 -0
- tts/vocoder_models--en--ljspeech--multiband-melgan/model_file.pth +3 -0
- tts/vocoder_models--en--ljspeech--multiband-melgan/scale_stats.npy +3 -0
- tts/vocoder_models--en--ljspeech--univnet/config.json +165 -0
- tts/vocoder_models--en--ljspeech--univnet/model_file.pth +3 -0
- tts/vocoder_models--en--ljspeech--univnet/scale_stats.npy +3 -0
- tts/wavlm/WavLM-Large.pt +3 -0
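The directory names mirror Coqui TTS's local model cache layout (`<category>--<language>--<dataset>--<model>`), which is also how the `stats_path` entries inside the configs below refer to them. As a minimal sketch, individual files could be pulled from the repository with `huggingface_hub`; the repo id below is a placeholder, since the commit above does not name the repository:

from huggingface_hub import hf_hub_download

# "your-username/tts-models" is hypothetical -- substitute the repository this commit belongs to.
REPO_ID = "your-username/tts-models"

config_path = hf_hub_download(
    repo_id=REPO_ID,
    filename="tts/vocoder_models--en--ljspeech--hifigan_v2/config.json",
)
model_path = hf_hub_download(
    repo_id=REPO_ID,
    filename="tts/vocoder_models--en--ljspeech--hifigan_v2/model_file.pth",
)
print(config_path, model_path)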
tts/vocoder_models--en--ljspeech--hifigan_v2/config.json
ADDED
@@ -0,0 +1,158 @@
+{
+    "run_name": "hifigan",
+    "run_description": "universal hifigan trained on LibriTTS with no spectrogram normalization and using log() for scaling instead of log10()",
+
+
+    // AUDIO PARAMETERS
+    "audio":{
+        "fft_size": 1024,         // number of stft frequency levels. Size of the linear spectogram frame.
+        "win_length": 1024,       // stft window length in ms.
+        "hop_length": 256,        // stft window hop-lengh in ms.
+        "frame_length_ms": null,  // stft window length in ms.If null, 'win_length' is used.
+        "frame_shift_ms": null,   // stft window hop-lengh in ms. If null, 'hop_length' is used.
+
+        // Audio processing parameters
+        "sample_rate": 22050,   // DATASET-RELATED: wav sample-rate. If different than the original data, it is resampled.
+        "preemphasis": 0.0,     // pre-emphasis to reduce spec noise and make it more structured. If 0.0, no -pre-emphasis.
+        "ref_level_db": 20,     // reference level db, theoretically 20db is the sound of air.
+        "log_func": "np.log",
+
+        // Silence trimming
+        "do_trim_silence": false,// enable trimming of slience of audio as you load it. LJspeech (false), TWEB (false), Nancy (true)
+        "trim_db": 60,          // threshold for timming silence. Set this according to your dataset.
+
+        // MelSpectrogram parameters
+        "num_mels": 80,         // size of the mel spec frame.
+        "mel_fmin": 0.0,        // minimum freq level for mel-spec. ~50 for male and ~95 for female voices. Tune for dataset!!
+        "mel_fmax": 8000.0,     // maximum freq level for mel-spec. Tune for dataset!!
+        "spec_gain": 1.0,       // scaler value appplied after log transform of spectrogram.
+
+        // Normalization parameters
+        "signal_norm": false,   // normalize spec values. Mean-Var normalization if 'stats_path' is defined otherwise range normalization defined by the other params.
+        "min_level_db": -100,   // lower bound for normalization
+        "symmetric_norm": true, // move normalization to range [-1, 1]
+        "max_norm": 4.0,        // scale normalization to range [-max_norm, max_norm] or [0, max_norm]
+        "clip_norm": true,      // clip normalized values into the range.
+        "stats_path": null      // DO NOT USE WITH MULTI_SPEAKER MODEL. scaler stats file computed by 'compute_statistics.py'. If it is defined, mean-std based notmalization is used and other normalization params are ignored
+    },
+
+    // DISTRIBUTED TRAINING
+    "distributed":{
+        "backend": "nccl",
+        "url": "tcp:\/\/localhost:54324"
+    },
+
+    // MODEL PARAMETERS
+    "use_pqmf": false,
+
+    // LOSS PARAMETERS
+    "use_stft_loss": false,
+    "use_subband_stft_loss": false,
+    "use_mse_gan_loss": true,
+    "use_hinge_gan_loss": false,
+    "use_feat_match_loss": true,  // use only with melgan discriminators
+    "use_l1_spec_loss": true,
+
+    // loss weights
+    "stft_loss_weight": 0,
+    "subband_stft_loss_weight": 0,
+    "mse_G_loss_weight": 1,
+    "hinge_G_loss_weight": 0,
+    "feat_match_loss_weight": 10,
+    "l1_spec_loss_weight": 45,
+
+    // multiscale stft loss parameters
+    // "stft_loss_params": {
+    //     "n_ffts": [1024, 2048, 512],
+    //     "hop_lengths": [120, 240, 50],
+    //     "win_lengths": [600, 1200, 240]
+    // },
+
+    "l1_spec_loss_params": {
+        "use_mel": true,
+        "sample_rate": 16000,
+        "n_fft": 1024,
+        "hop_length": 256,
+        "win_length": 1024,
+        "n_mels": 80,
+        "mel_fmin": 0.0,
+        "mel_fmax": null
+    },
+
+    "target_loss": "avg_G_loss",  // loss value to pick the best model to save after each epoch
+
+    // DISCRIMINATOR
+    "discriminator_model": "hifigan_discriminator",
+    //"discriminator_model_params":{
+    //    "peroids": [2, 3, 5, 7, 11],
+    //    "base_channels": 16,
+    //    "max_channels":512,
+    //    "downsample_factors":[4, 4, 4]
+    //},
+    "steps_to_start_discriminator": 0,  // steps required to start GAN trainining.1
+
+    // GENERATOR
+    "generator_model": "hifigan_generator",
+    "generator_model_params": {
+        "resblock_type": "1",
+        "upsample_factors": [8,8,2,2],
+        "upsample_kernel_sizes": [16,16,4,4],
+        "upsample_initial_channel": 128,
+        "resblock_kernel_sizes": [3,7,11],
+        "resblock_dilation_sizes": [[1,3,5], [1,3,5], [1,3,5]]
+    },
+
+    // DATASET
+    "data_path": "/home/erogol/gdrive/Datasets/non-binary-voice-files/vo_voice_quality_transformation/",
+    "feature_path": null,
+    // "feature_path": "/home/erogol/gdrive/Datasets/non-binary-voice-files/tacotron-DCA/",
+    "seq_len": 8192,
+    "pad_short": 2000,
+    "conv_pad": 0,
+    "use_noise_augment": false,
+    "use_cache": true,
+    "reinit_layers": [],  // give a list of layer names to restore from the given checkpoint. If not defined, it reloads all heuristically matching layers.
+
+    // TRAINING
+    "batch_size": 16,  // Batch size for training. Lower values than 32 might cause hard to learn attention. It is overwritten by 'gradual_training'.
+
+    // VALIDATION
+    "run_eval": true,
+    "test_delay_epochs": 10,  //Until attention is aligned, testing only wastes computation time.
+    "test_sentences_file": null,  // set a file to load sentences to be used for testing. If it is null then we use default english sentences.
+
+    // OPTIMIZER
+    "epochs": 10000,       // total number of epochs to train.
+    "wd": 0.0,             // Weight decay weight.
+    "gen_clip_grad": -1,   // Generator gradient clipping threshold. Apply gradient clipping if > 0
+    "disc_clip_grad": -1,  // Discriminator gradient clipping threshold.
+    // "lr_scheduler_gen": "ExponentialLR",  // one of the schedulers from https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
+    // "lr_scheduler_gen_params": {
+    //     "gamma": 0.999,
+    //     "last_epoch": -1
+    // },
+    // "lr_scheduler_disc": "ExponentialLR",  // one of the schedulers from https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate
+    // "lr_scheduler_disc_params": {
+    //     "gamma": 0.999,
+    //     "last_epoch": -1
+    // },
+    "lr_gen": 0.00001,  // Initial learning rate. If Noam decay is active, maximum learning rate.
+    "lr_disc": 0.00001,
+
+    // TENSORBOARD and LOGGING
+    "print_step": 25,     // Number of steps to log traning on console.
+    "print_eval": false,  // If True, it prints loss values for each step in eval run.
+    "save_step": 25000,   // Number of training steps expected to plot training stats on TB and save model checkpoints.
+    "checkpoint": true,   // If true, it saves checkpoints per "save_step"
+    "tb_model_param_stats": false,  // true, plots param stats per layer on tensorboard. Might be memory consuming, but good for debugging.
+
+    // DATA LOADING
+    "num_loader_workers": 8,      // number of training data loader processes. Don't set it too big. 4-8 are good values.
+    "num_val_loader_workers": 4,  // number of evaluation data loader processes.
+    "eval_split_size": 10,
+
+    // PATHS
+    "output_path": "/home/erogol/gdrive/Trainings/sam/"
+}
+
+
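Note that this older-style Coqui config contains `//` comments, so it is not strict JSON and `json.load` will reject it as-is; Coqui TTS reads it with its own config loader. As a minimal stdlib-only sketch, the comments can be stripped before parsing -- a simplification that happens to be safe for this particular file, where no un-commented string value contains a literal `//`:

import json
import re

def load_commented_json(path):
    """Parse a Coqui-style config that allows '//' line comments."""
    with open(path, encoding="utf-8") as f:
        text = f.read()
    # Drop everything from '//' to the end of each line before handing it to json.
    text = re.sub(r"//.*", "", text)
    return json.loads(text)

cfg = load_commented_json("tts/vocoder_models--en--ljspeech--hifigan_v2/config.json")
print(cfg["audio"]["sample_rate"])                        # 22050
print(cfg["generator_model_params"]["upsample_factors"])  # [8, 8, 2, 2]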
tts/vocoder_models--en--ljspeech--hifigan_v2/model_file.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4047e93886faa1aba11948efa71f59dcb0ec9117e286660e59b91892ef98d129
+size 3794153
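The `.pth`, `.npy`, and `.pt` entries in this commit are Git LFS pointer files: three lines recording the hash and size of the real payload, which is fetched separately (for example with `git lfs pull`). A small sketch for verifying a downloaded checkpoint against the pointer above:

import hashlib
import os

def sha256_of(path, chunk_size=1 << 20):
    """Stream a large file through SHA-256 without loading it all into memory."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Values copied from the pointer file above; the path assumes the LFS payload has been pulled.
path = "tts/vocoder_models--en--ljspeech--hifigan_v2/model_file.pth"
expected_oid = "4047e93886faa1aba11948efa71f59dcb0ec9117e286660e59b91892ef98d129"
expected_size = 3794153

assert os.path.getsize(path) == expected_size
assert sha256_of(path) == expected_oid
print("model_file.pth matches its LFS pointer")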
tts/vocoder_models--en--ljspeech--multiband-melgan/config.json
ADDED
@@ -0,0 +1,197 @@
+{
+    "output_path": "/home/erogol/Models/LJSpeech/",
+    "logger_uri": null,
+    "run_name": "multiband-melgan",
+    "project_name": null,
+    "run_description": "multiband melgan mean-var scaling",
+    "print_step": 25,
+    "plot_step": 100,
+    "model_param_stats": false,
+    "wandb_entity": null,
+    "dashboard_logger": "tensorboard",
+    "log_model_step": null,
+    "save_step": 25000,
+    "save_n_checkpoints": 5,
+    "save_checkpoints": true,
+    "save_all_best": false,
+    "save_best_after": 10000,
+    "target_loss": "avg_G_loss",
+    "print_eval": false,
+    "test_delay_epochs": 10,
+    "run_eval": true,
+    "run_eval_steps": null,
+    "distributed_backend": "nccl",
+    "distributed_url": "tcp://localhost:54321",
+    "mixed_precision": false,
+    "epochs": 10000,
+    "batch_size": 64,
+    "eval_batch_size": 16,
+    "grad_clip": null,
+    "scheduler_after_epoch": true,
+    "lr": 0.001,
+    "optimizer": "AdamW",
+    "optimizer_params": {
+        "betas": [
+            0.8,
+            0.99
+        ],
+        "weight_decay": 0.0
+    },
+    "lr_scheduler": null,
+    "lr_scheduler_params": {},
+    "use_grad_scaler": false,
+    "cudnn_enable": true,
+    "cudnn_deterministic": false,
+    "cudnn_benchmark": true,
+    "training_seed": 54321,
+    "model": "multiband_melgan",
+    "num_loader_workers": 4,
+    "num_eval_loader_workers": 0,
+    "use_noise_augment": false,
+    "audio": {
+        "fft_size": 1024,
+        "win_length": 1024,
+        "hop_length": 256,
+        "frame_shift_ms": null,
+        "frame_length_ms": null,
+        "stft_pad_mode": "reflect",
+        "sample_rate": 22050,
+        "resample": false,
+        "preemphasis": 0.0,
+        "ref_level_db": 0,
+        "do_sound_norm": false,
+        "log_func": "np.log10",
+        "do_trim_silence": true,
+        "trim_db": 60,
+        "do_rms_norm": false,
+        "db_level": null,
+        "power": 1.5,
+        "griffin_lim_iters": 60,
+        "num_mels": 80,
+        "mel_fmin": 50.0,
+        "mel_fmax": 7600.0,
+        "spec_gain": 1,
+        "do_amp_to_db_linear": true,
+        "do_amp_to_db_mel": true,
+        "pitch_fmax": 640.0,
+        "pitch_fmin": 0.0,
+        "signal_norm": true,
+        "min_level_db": -100,
+        "symmetric_norm": true,
+        "max_norm": 4.0,
+        "clip_norm": true,
+        "stats_path": "C:/Users/Torch/AppData/Local\\tts\\vocoder_models--en--ljspeech--multiband-melgan\\scale_stats.npy"
+    },
+    "eval_split_size": 10,
+    "data_path": "/home/erogol/Data/LJSpeech-1.1/wavs/",
+    "feature_path": null,
+    "seq_len": 16384,
+    "pad_short": 2000,
+    "conv_pad": 0,
+    "use_cache": true,
+    "wd": 0.0,
+    "use_stft_loss": true,
+    "use_subband_stft_loss": true,
+    "use_mse_gan_loss": true,
+    "use_hinge_gan_loss": false,
+    "use_feat_match_loss": false,
+    "use_l1_spec_loss": false,
+    "stft_loss_weight": 0.5,
+    "subband_stft_loss_weight": 0.5,
+    "mse_G_loss_weight": 2.5,
+    "hinge_G_loss_weight": 2.5,
+    "feat_match_loss_weight": 25.0,
+    "l1_spec_loss_weight": 0.0,
+    "stft_loss_params": {
+        "n_ffts": [
+            1024,
+            2048,
+            512
+        ],
+        "hop_lengths": [
+            120,
+            240,
+            50
+        ],
+        "win_lengths": [
+            600,
+            1200,
+            240
+        ]
+    },
+    "l1_spec_loss_params": {
+        "use_mel": true,
+        "sample_rate": 22050,
+        "n_fft": 1024,
+        "hop_length": 256,
+        "win_length": 1024,
+        "n_mels": 80,
+        "mel_fmin": 0.0,
+        "mel_fmax": null
+    },
+    "lr_gen": 0.0001,
+    "lr_disc": 0.0001,
+    "lr_scheduler_gen": "MultiStepLR",
+    "lr_scheduler_gen_params": {
+        "gamma": 0.5,
+        "milestones": [
+            100000,
+            200000,
+            300000,
+            400000,
+            500000,
+            600000
+        ]
+    },
+    "lr_scheduler_disc": "MultiStepLR",
+    "lr_scheduler_disc_params": {
+        "gamma": 0.5,
+        "milestones": [
+            100000,
+            200000,
+            300000,
+            400000,
+            500000,
+            600000
+        ]
+    },
+    "use_pqmf": true,
+    "diff_samples_for_G_and_D": false,
+    "discriminator_model": "melgan_multiscale_discriminator",
+    "discriminator_model_params": {
+        "base_channels": 16,
+        "max_channels": 512,
+        "downsample_factors": [
+            4,
+            4,
+            4
+        ]
+    },
+    "generator_model": "multiband_melgan_generator",
+    "generator_model_params": {
+        "upsample_factors": [
+            8,
+            4,
+            2
+        ],
+        "num_res_blocks": 4
+    },
+    "steps_to_start_discriminator": true,
+    "subband_stft_loss_params": {
+        "n_ffts": [
+            384,
+            683,
+            171
+        ],
+        "hop_lengths": [
+            30,
+            60,
+            10
+        ],
+        "win_lengths": [
+            150,
+            300,
+            60
+        ]
+    }
+}
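This config asks for `MultiStepLR` schedules on both generator and discriminator: starting from `lr_gen`/`lr_disc` = 1e-4 and halving (`gamma` 0.5) at every 100k-step milestone up to 600k. A small sketch of the resulting generator schedule, using the closed form rather than stepping a real optimizer:

import bisect

lr_gen = 1e-4
gamma = 0.5
milestones = [100_000, 200_000, 300_000, 400_000, 500_000, 600_000]

def lr_at(step):
    """Generator learning rate after `step` scheduler steps under MultiStepLR."""
    return lr_gen * gamma ** bisect.bisect_right(milestones, step)

for step in (0, 100_000, 250_000, 600_000):
    print(step, lr_at(step))  # 1e-4, 5e-5, 2.5e-5, 1.5625e-6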
tts/vocoder_models--en--ljspeech--multiband-melgan/model_file.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56f16cee42bef70a2d75b08f9b9ea952c9ee0ccf76dd88a91d51e3ca4c11b449
+size 82831385
tts/vocoder_models--en--ljspeech--multiband-melgan/scale_stats.npy
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8c4a45b935563157509ddbff09f59e4ffea35e1d07f3bbf87ec21484cb275c4a
+size 10491
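The `signal_norm`/`stats_path` settings in the multiband-melgan config above mean this vocoder expects mean-variance normalized mel spectrograms, with the statistics coming from this `scale_stats.npy` (produced by Coqui's `compute_statistics.py`). A sketch of applying them by hand; the `mel_mean`/`mel_std` key names are what that script typically stores and should be treated as an assumption -- inspect `stats.keys()` if they differ:

import numpy as np

stats = np.load(
    "tts/vocoder_models--en--ljspeech--multiband-melgan/scale_stats.npy",
    allow_pickle=True,
).item()  # the .npy holds a pickled dict of per-bin statistics

mel_mean, mel_std = stats["mel_mean"], stats["mel_std"]  # assumed key names

def normalize_mel(mel_db):
    """Mean-variance normalize a (num_mels, T) log-mel spectrogram per mel bin."""
    return (mel_db - mel_mean[:, None]) / (mel_std[:, None] + 1e-8)

def denormalize_mel(mel_norm):
    """Invert the normalization, e.g. to inspect the raw vocoder input."""
    return mel_norm * mel_std[:, None] + mel_mean[:, None]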
tts/vocoder_models--en--ljspeech--univnet/config.json
ADDED
@@ -0,0 +1,165 @@
+{
+    "output_path": "/home/ubuntu/TTS/recipes/ljspeech/univnet",
+    "logger_uri": null,
+    "run_name": "",
+    "project_name": null,
+    "run_description": "",
+    "print_step": 25,
+    "plot_step": 100,
+    "model_param_stats": false,
+    "wandb_entity": null,
+    "dashboard_logger": "tensorboard",
+    "log_model_step": null,
+    "save_step": 10000,
+    "save_n_checkpoints": 5,
+    "save_checkpoints": true,
+    "save_all_best": false,
+    "save_best_after": 10000,
+    "target_loss": "loss_0",
+    "print_eval": false,
+    "test_delay_epochs": -1,
+    "run_eval": true,
+    "run_eval_steps": null,
+    "distributed_backend": "nccl",
+    "distributed_url": "tcp://localhost:54321",
+    "mixed_precision": false,
+    "epochs": 1000,
+    "batch_size": 64,
+    "eval_batch_size": 16,
+    "grad_clip": null,
+    "scheduler_after_epoch": false,
+    "lr": 0.001,
+    "optimizer": "AdamW",
+    "optimizer_params": {
+        "betas": [
+            0.5,
+            0.9
+        ],
+        "weight_decay": 0.0
+    },
+    "lr_scheduler": null,
+    "lr_scheduler_params": {},
+    "use_grad_scaler": false,
+    "cudnn_enable": true,
+    "cudnn_deterministic": false,
+    "cudnn_benchmark": false,
+    "training_seed": 54321,
+    "model": "univnet",
+    "num_loader_workers": 4,
+    "num_eval_loader_workers": 4,
+    "use_noise_augment": true,
+    "audio": {
+        "fft_size": 1024,
+        "win_length": 1024,
+        "hop_length": 256,
+        "frame_shift_ms": null,
+        "frame_length_ms": null,
+        "stft_pad_mode": "reflect",
+        "sample_rate": 22050,
+        "resample": false,
+        "preemphasis": 0.0,
+        "ref_level_db": 20,
+        "do_sound_norm": false,
+        "log_func": "np.log10",
+        "do_trim_silence": true,
+        "trim_db": 60,
+        "do_rms_norm": false,
+        "db_level": null,
+        "power": 1.5,
+        "griffin_lim_iters": 60,
+        "num_mels": 80,
+        "mel_fmin": 50.0,
+        "mel_fmax": 7600.0,
+        "spec_gain": 1,
+        "do_amp_to_db_linear": true,
+        "do_amp_to_db_mel": true,
+        "pitch_fmax": 640.0,
+        "pitch_fmin": 1.0,
+        "signal_norm": true,
+        "min_level_db": -100,
+        "symmetric_norm": true,
+        "max_norm": 4.0,
+        "clip_norm": true,
+        "stats_path": "C:/Users/Torch/AppData/Local\\tts\\vocoder_models--en--ljspeech--univnet\\scale_stats.npy"
+    },
+    "eval_split_size": 10,
+    "data_path": "/home/ubuntu/TTS/recipes/ljspeech/univnet/../LJSpeech-1.1/wavs/",
+    "feature_path": "/home/ubuntu/TTS/recipes/ljspeech/univnet/../LJSpeech-1.1/specs/mel/",
+    "seq_len": 8192,
+    "pad_short": 2000,
+    "conv_pad": 0,
+    "use_cache": false,
+    "wd": 0.0,
+    "use_stft_loss": true,
+    "use_subband_stft_loss": false,
+    "use_mse_gan_loss": true,
+    "use_hinge_gan_loss": false,
+    "use_feat_match_loss": false,
+    "use_l1_spec_loss": false,
+    "stft_loss_weight": 2.5,
+    "subband_stft_loss_weight": 0.0,
+    "mse_G_loss_weight": 1.0,
+    "hinge_G_loss_weight": 0.0,
+    "feat_match_loss_weight": 0.0,
+    "l1_spec_loss_weight": 0.0,
+    "stft_loss_params": {
+        "n_ffts": [
+            1024,
+            2048,
+            512
+        ],
+        "hop_lengths": [
+            120,
+            240,
+            50
+        ],
+        "win_lengths": [
+            600,
+            1200,
+            240
+        ]
+    },
+    "l1_spec_loss_params": {
+        "use_mel": true,
+        "sample_rate": 22050,
+        "n_fft": 1024,
+        "hop_length": 256,
+        "win_length": 1024,
+        "n_mels": 80,
+        "mel_fmin": 0.0,
+        "mel_fmax": null
+    },
+    "lr_gen": 0.0001,
+    "lr_disc": 0.0001,
+    "lr_scheduler_gen": null,
+    "lr_scheduler_gen_params": {
+        "gamma": 0.999,
+        "last_epoch": -1
+    },
+    "lr_scheduler_disc": null,
+    "lr_scheduler_disc_params": {
+        "gamma": 0.999,
+        "last_epoch": -1
+    },
+    "use_pqmf": false,
+    "diff_samples_for_G_and_D": false,
+    "discriminator_model": "univnet_discriminator",
+    "generator_model": "univnet_generator",
+    "generator_model_params": {
+        "in_channels": 64,
+        "out_channels": 1,
+        "hidden_channels": 32,
+        "cond_channels": 80,
+        "upsample_factors": [
+            8,
+            8,
+            4
+        ],
+        "lvc_layers_each_block": 4,
+        "lvc_kernel_size": 3,
+        "kpnet_hidden_channels": 64,
+        "kpnet_conv_size": 3,
+        "dropout": 0.0
+    },
+    "steps_to_start_discriminator": 100000
+}
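One internal consistency worth noting in this config: the UnivNet generator's `upsample_factors` [8, 8, 4] multiply to 256, matching `audio.hop_length`, so each conditioning mel frame expands to exactly one hop of waveform samples. Since this file is strict JSON (no `//` comments), a quick sanity check is just:

import json
import math

with open("tts/vocoder_models--en--ljspeech--univnet/config.json", encoding="utf-8") as f:
    cfg = json.load(f)

hop = cfg["audio"]["hop_length"]
factors = cfg["generator_model_params"]["upsample_factors"]

# 8 * 8 * 4 == 256: one mel frame per hop of audio.
assert math.prod(factors) == hop, (factors, hop)
print("total upsampling", math.prod(factors), "== hop_length", hop)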
tts/vocoder_models--en--ljspeech--univnet/model_file.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7350b5a0527507d97fafcf155e25c9b56adee4bfeb5d37d445cb45fdc48f3ca4
+size 675105017
tts/vocoder_models--en--ljspeech--univnet/scale_stats.npy
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e20389c9a2e54ec45baab12a8e4d12b65e1eb24d055b50214db6cd45f779be18
+size 10700
tts/wavlm/WavLM-Large.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fb4b3c3e6aa567f0a997b30855859cb81528ee8078802af439f7b2da0bf100f
+size 1261965425
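WavLM-Large.pt is the speech representation checkpoint from Microsoft's unilm repository, commonly used as a speaker/feature encoder alongside TTS models; this upload ships only the weights, not the model code. A hedged sketch of loading it, assuming a local copy of the unilm WavLM implementation; the `cfg`/`model` checkpoint keys and the `extract_features` call follow that repository's README and are assumptions here, not something this commit documents:

import torch

# Hypothetical local module: copy WavLM.py (and its dependencies) from
# https://github.com/microsoft/unilm/tree/master/wavlm next to this script.
from WavLM import WavLM, WavLMConfig

ckpt = torch.load("tts/wavlm/WavLM-Large.pt", map_location="cpu")
model = WavLM(WavLMConfig(ckpt["cfg"]))  # assumed checkpoint layout: {"cfg": ..., "model": ...}
model.load_state_dict(ckpt["model"])
model.eval()

# Extract features from one second of dummy 16 kHz audio.
with torch.no_grad():
    features = model.extract_features(torch.randn(1, 16000))[0]
print(features.shape)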