{
  "best_global_step": 780,
  "best_metric": 1.4478473663330078,
  "best_model_checkpoint": "results/models/distilgpt2_split_3/checkpoint-780",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 1560,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1282051282051282,
      "grad_norm": 2.1318576335906982,
      "learning_rate": 4.689102564102564e-05,
      "loss": 1.8462,
      "step": 100
    },
    {
      "epoch": 0.2564102564102564,
      "grad_norm": 1.8293204307556152,
      "learning_rate": 4.368589743589744e-05,
      "loss": 1.5051,
      "step": 200
    },
    {
      "epoch": 0.38461538461538464,
      "grad_norm": 2.407285690307617,
      "learning_rate": 4.0480769230769236e-05,
      "loss": 1.4097,
      "step": 300
    },
    {
      "epoch": 0.5128205128205128,
      "grad_norm": 2.0177223682403564,
      "learning_rate": 3.727564102564103e-05,
      "loss": 1.3136,
      "step": 400
    },
    {
      "epoch": 0.6410256410256411,
      "grad_norm": 1.5830180644989014,
      "learning_rate": 3.4070512820512825e-05,
      "loss": 1.2698,
      "step": 500
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 1.6503092050552368,
      "learning_rate": 3.0865384615384616e-05,
      "loss": 1.2133,
      "step": 600
    },
    {
      "epoch": 0.8974358974358975,
      "grad_norm": 1.608035922050476,
      "learning_rate": 2.7660256410256413e-05,
      "loss": 1.1817,
      "step": 700
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.4478473663330078,
      "eval_runtime": 14.1171,
      "eval_samples_per_second": 370.259,
      "eval_steps_per_second": 23.163,
      "step": 780
    },
    {
      "epoch": 1.0256410256410255,
      "grad_norm": 1.399240493774414,
      "learning_rate": 2.4455128205128204e-05,
      "loss": 1.1556,
      "step": 800
    },
    {
      "epoch": 1.1538461538461537,
      "grad_norm": 1.8416355848312378,
      "learning_rate": 2.125e-05,
      "loss": 1.1005,
      "step": 900
    },
    {
      "epoch": 1.282051282051282,
      "grad_norm": 1.4166646003723145,
      "learning_rate": 1.8044871794871796e-05,
      "loss": 1.0782,
      "step": 1000
    },
    {
      "epoch": 1.4102564102564101,
      "grad_norm": 1.3811384439468384,
      "learning_rate": 1.483974358974359e-05,
      "loss": 1.0568,
      "step": 1100
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 1.5326135158538818,
      "learning_rate": 1.1634615384615386e-05,
      "loss": 1.0446,
      "step": 1200
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 1.2560216188430786,
      "learning_rate": 8.42948717948718e-06,
      "loss": 1.0604,
      "step": 1300
    },
    {
      "epoch": 1.7948717948717947,
      "grad_norm": 1.4168336391448975,
      "learning_rate": 5.224358974358975e-06,
      "loss": 1.0371,
      "step": 1400
    },
    {
      "epoch": 1.9230769230769231,
      "grad_norm": 1.3679345846176147,
      "learning_rate": 2.0192307692307692e-06,
      "loss": 1.0274,
      "step": 1500
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.4688283205032349,
      "eval_runtime": 14.0926,
      "eval_samples_per_second": 370.904,
      "eval_steps_per_second": 23.204,
      "step": 1560
    }
  ],
  "logging_steps": 100,
  "max_steps": 1560,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6521966894776320.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}