Invalid JSON: Unexpected token 'N', ..."on_loss": NaN,
"... is not valid JSON
| { | |
| "best_metric": null, | |
| "best_model_checkpoint": null, | |
| "epoch": 0.9999742221534813, | |
| "eval_steps": 500, | |
| "global_step": 19396, | |
| "is_hyper_param_search": false, | |
| "is_local_process_zero": true, | |
| "is_world_process_zero": true, | |
| "log_history": [ | |
| { | |
| "decision_loss": 17.2946, | |
| "encoder_mlm_loss": 1.3309, | |
| "epoch": 0.0, | |
| "grad_norm": 506.80633544921875, | |
| "learning_rate": 4.4e-07, | |
| "loss": 37.9076, | |
| "reason_loss": NaN, | |
| "step": 50 | |
| }, | |
| { | |
| "decision_loss": 15.157, | |
| "encoder_mlm_loss": 1.3116, | |
| "epoch": 0.01, | |
| "grad_norm": 324.48309326171875, | |
| "learning_rate": 9.400000000000001e-07, | |
| "loss": 33.6669, | |
| "reason_loss": NaN, | |
| "step": 100 | |
| }, | |
| { | |
| "decision_loss": 11.6826, | |
| "encoder_mlm_loss": 1.2069, | |
| "epoch": 0.01, | |
| "grad_norm": 171.52377319335938, | |
| "learning_rate": 1.44e-06, | |
| "loss": 26.0507, | |
| "reason_loss": NaN, | |
| "step": 150 | |
| }, | |
| { | |
| "decision_loss": 8.4601, | |
| "encoder_mlm_loss": 1.1887, | |
| "epoch": 0.01, | |
| "grad_norm": 69.40190887451172, | |
| "learning_rate": 1.93e-06, | |
| "loss": 19.9829, | |
| "reason_loss": NaN, | |
| "step": 200 | |
| }, | |
| { | |
| "decision_loss": 6.6507, | |
| "encoder_mlm_loss": 1.0606, | |
| "epoch": 0.01, | |
| "grad_norm": 32.03302764892578, | |
| "learning_rate": 2.43e-06, | |
| "loss": 16.3406, | |
| "reason_loss": NaN, | |
| "step": 250 | |
| }, | |
| { | |
| "decision_loss": 5.7539, | |
| "encoder_mlm_loss": 0.9811, | |
| "epoch": 0.02, | |
| "grad_norm": 21.778579711914062, | |
| "learning_rate": 2.93e-06, | |
| "loss": 14.2326, | |
| "reason_loss": NaN, | |
| "step": 300 | |
| }, | |
| { | |
| "decision_loss": 5.3877, | |
| "encoder_mlm_loss": 0.9975, | |
| "epoch": 0.02, | |
| "grad_norm": 18.334007263183594, | |
| "learning_rate": 3.4200000000000003e-06, | |
| "loss": 13.4659, | |
| "reason_loss": NaN, | |
| "step": 350 | |
| }, | |
| { | |
| "decision_loss": 5.1198, | |
| "encoder_mlm_loss": 0.9698, | |
| "epoch": 0.02, | |
| "grad_norm": 20.7861270904541, | |
| "learning_rate": 3.92e-06, | |
| "loss": 12.9997, | |
| "reason_loss": NaN, | |
| "step": 400 | |
| }, | |
| { | |
| "decision_loss": 4.9401, | |
| "encoder_mlm_loss": 0.8893, | |
| "epoch": 0.02, | |
| "grad_norm": 16.364755630493164, | |
| "learning_rate": 4.420000000000001e-06, | |
| "loss": 12.7823, | |
| "reason_loss": NaN, | |
| "step": 450 | |
| }, | |
| { | |
| "decision_loss": 4.8397, | |
| "encoder_mlm_loss": 0.8811, | |
| "epoch": 0.03, | |
| "grad_norm": 15.240585327148438, | |
| "learning_rate": 4.92e-06, | |
| "loss": 12.3987, | |
| "reason_loss": NaN, | |
| "step": 500 | |
| }, | |
| { | |
| "decision_loss": 4.8283, | |
| "encoder_mlm_loss": 0.8613, | |
| "epoch": 0.03, | |
| "grad_norm": 16.48607063293457, | |
| "learning_rate": 5.42e-06, | |
| "loss": 12.1919, | |
| "reason_loss": NaN, | |
| "step": 550 | |
| }, | |
| { | |
| "decision_loss": 4.7769, | |
| "encoder_mlm_loss": 0.8291, | |
| "epoch": 0.03, | |
| "grad_norm": 14.643627166748047, | |
| "learning_rate": 5.920000000000001e-06, | |
| "loss": 12.0182, | |
| "reason_loss": NaN, | |
| "step": 600 | |
| }, | |
| { | |
| "decision_loss": 4.8057, | |
| "encoder_mlm_loss": 0.8424, | |
| "epoch": 0.03, | |
| "grad_norm": 14.431436538696289, | |
| "learning_rate": 6.4199999999999995e-06, | |
| "loss": 11.9925, | |
| "reason_loss": NaN, | |
| "step": 650 | |
| }, | |
| { | |
| "decision_loss": 4.6025, | |
| "encoder_mlm_loss": 0.8223, | |
| "epoch": 0.04, | |
| "grad_norm": 27.42824935913086, | |
| "learning_rate": 6.92e-06, | |
| "loss": 11.7964, | |
| "reason_loss": NaN, | |
| "step": 700 | |
| }, | |
| { | |
| "decision_loss": 4.5554, | |
| "encoder_mlm_loss": 0.8215, | |
| "epoch": 0.04, | |
| "grad_norm": 15.171280860900879, | |
| "learning_rate": 7.420000000000001e-06, | |
| "loss": 11.8384, | |
| "reason_loss": NaN, | |
| "step": 750 | |
| }, | |
| { | |
| "decision_loss": 4.4705, | |
| "encoder_mlm_loss": 0.7881, | |
| "epoch": 0.04, | |
| "grad_norm": 15.859951972961426, | |
| "learning_rate": 7.92e-06, | |
| "loss": 11.9251, | |
| "reason_loss": NaN, | |
| "step": 800 | |
| }, | |
| { | |
| "decision_loss": 4.502, | |
| "encoder_mlm_loss": 0.7824, | |
| "epoch": 0.04, | |
| "grad_norm": 13.420965194702148, | |
| "learning_rate": 8.42e-06, | |
| "loss": 11.589, | |
| "reason_loss": NaN, | |
| "step": 850 | |
| }, | |
| { | |
| "decision_loss": 4.4354, | |
| "encoder_mlm_loss": 0.7933, | |
| "epoch": 0.05, | |
| "grad_norm": 20.237834930419922, | |
| "learning_rate": 8.920000000000001e-06, | |
| "loss": 11.4423, | |
| "reason_loss": NaN, | |
| "step": 900 | |
| }, | |
| { | |
| "decision_loss": 4.4066, | |
| "encoder_mlm_loss": 0.8067, | |
| "epoch": 0.05, | |
| "grad_norm": 14.367069244384766, | |
| "learning_rate": 9.420000000000001e-06, | |
| "loss": 11.5077, | |
| "reason_loss": NaN, | |
| "step": 950 | |
| }, | |
| { | |
| "decision_loss": 4.4373, | |
| "encoder_mlm_loss": 0.7737, | |
| "epoch": 0.05, | |
| "grad_norm": 14.129799842834473, | |
| "learning_rate": 9.92e-06, | |
| "loss": 11.2995, | |
| "reason_loss": NaN, | |
| "step": 1000 | |
| }, | |
| { | |
| "decision_loss": 4.374, | |
| "encoder_mlm_loss": 0.7633, | |
| "epoch": 0.05, | |
| "grad_norm": 14.180242538452148, | |
| "learning_rate": 1.042e-05, | |
| "loss": 11.1966, | |
| "reason_loss": 5.5783, | |
| "step": 1050 | |
| }, | |
| { | |
| "decision_loss": 4.4494, | |
| "encoder_mlm_loss": 0.7522, | |
| "epoch": 0.06, | |
| "grad_norm": 11.483891487121582, | |
| "learning_rate": 1.092e-05, | |
| "loss": 11.5075, | |
| "reason_loss": NaN, | |
| "step": 1100 | |
| }, | |
| { | |
| "decision_loss": 4.3591, | |
| "encoder_mlm_loss": 0.7654, | |
| "epoch": 0.06, | |
| "grad_norm": 12.64001178741455, | |
| "learning_rate": 1.142e-05, | |
| "loss": 11.0272, | |
| "reason_loss": NaN, | |
| "step": 1150 | |
| }, | |
| { | |
| "decision_loss": 4.3741, | |
| "encoder_mlm_loss": 0.7449, | |
| "epoch": 0.06, | |
| "grad_norm": 14.490357398986816, | |
| "learning_rate": 1.1920000000000001e-05, | |
| "loss": 11.2744, | |
| "reason_loss": NaN, | |
| "step": 1200 | |
| }, | |
| { | |
| "decision_loss": 4.3079, | |
| "encoder_mlm_loss": 0.724, | |
| "epoch": 0.06, | |
| "grad_norm": 12.611394882202148, | |
| "learning_rate": 1.2420000000000001e-05, | |
| "loss": 11.3553, | |
| "reason_loss": NaN, | |
| "step": 1250 | |
| }, | |
| { | |
| "decision_loss": 4.331, | |
| "encoder_mlm_loss": 0.7394, | |
| "epoch": 0.07, | |
| "grad_norm": 23.937461853027344, | |
| "learning_rate": 1.2920000000000002e-05, | |
| "loss": 11.4936, | |
| "reason_loss": NaN, | |
| "step": 1300 | |
| }, | |
| { | |
| "decision_loss": 4.3113, | |
| "encoder_mlm_loss": 0.7483, | |
| "epoch": 0.07, | |
| "grad_norm": 13.525325775146484, | |
| "learning_rate": 1.341e-05, | |
| "loss": 11.2692, | |
| "reason_loss": NaN, | |
| "step": 1350 | |
| }, | |
| { | |
| "decision_loss": 4.4514, | |
| "encoder_mlm_loss": 0.7134, | |
| "epoch": 0.07, | |
| "grad_norm": 12.281783103942871, | |
| "learning_rate": 1.391e-05, | |
| "loss": 11.3637, | |
| "reason_loss": NaN, | |
| "step": 1400 | |
| }, | |
| { | |
| "decision_loss": 4.2842, | |
| "encoder_mlm_loss": 0.707, | |
| "epoch": 0.07, | |
| "grad_norm": 13.143014907836914, | |
| "learning_rate": 1.4410000000000001e-05, | |
| "loss": 11.083, | |
| "reason_loss": NaN, | |
| "step": 1450 | |
| }, | |
| { | |
| "decision_loss": 4.1891, | |
| "encoder_mlm_loss": 0.7466, | |
| "epoch": 0.08, | |
| "grad_norm": 15.713329315185547, | |
| "learning_rate": 1.4910000000000001e-05, | |
| "loss": 11.0169, | |
| "reason_loss": NaN, | |
| "step": 1500 | |
| }, | |
| { | |
| "decision_loss": 4.2626, | |
| "encoder_mlm_loss": 0.7105, | |
| "epoch": 0.08, | |
| "grad_norm": 12.946088790893555, | |
| "learning_rate": 1.541e-05, | |
| "loss": 10.9707, | |
| "reason_loss": NaN, | |
| "step": 1550 | |
| }, | |
| { | |
| "decision_loss": 4.2396, | |
| "encoder_mlm_loss": 0.7035, | |
| "epoch": 0.08, | |
| "grad_norm": 14.146540641784668, | |
| "learning_rate": 1.591e-05, | |
| "loss": 10.8574, | |
| "reason_loss": NaN, | |
| "step": 1600 | |
| }, | |
| { | |
| "decision_loss": 4.2699, | |
| "encoder_mlm_loss": 0.6882, | |
| "epoch": 0.09, | |
| "grad_norm": 13.31047534942627, | |
| "learning_rate": 1.641e-05, | |
| "loss": 11.0437, | |
| "reason_loss": NaN, | |
| "step": 1650 | |
| }, | |
| { | |
| "decision_loss": 4.2453, | |
| "encoder_mlm_loss": 0.6995, | |
| "epoch": 0.09, | |
| "grad_norm": 12.856541633605957, | |
| "learning_rate": 1.6910000000000002e-05, | |
| "loss": 10.9505, | |
| "reason_loss": NaN, | |
| "step": 1700 | |
| }, | |
| { | |
| "decision_loss": 4.3523, | |
| "encoder_mlm_loss": 0.6896, | |
| "epoch": 0.09, | |
| "grad_norm": 13.268596649169922, | |
| "learning_rate": 1.741e-05, | |
| "loss": 10.9844, | |
| "reason_loss": NaN, | |
| "step": 1750 | |
| }, | |
| { | |
| "decision_loss": 4.262, | |
| "encoder_mlm_loss": 0.6877, | |
| "epoch": 0.09, | |
| "grad_norm": 13.404997825622559, | |
| "learning_rate": 1.7910000000000003e-05, | |
| "loss": 10.9554, | |
| "reason_loss": NaN, | |
| "step": 1800 | |
| }, | |
| { | |
| "decision_loss": 4.2655, | |
| "encoder_mlm_loss": 0.6793, | |
| "epoch": 0.1, | |
| "grad_norm": 12.608678817749023, | |
| "learning_rate": 1.841e-05, | |
| "loss": 10.9086, | |
| "reason_loss": NaN, | |
| "step": 1850 | |
| }, | |
| { | |
| "decision_loss": 4.2445, | |
| "encoder_mlm_loss": 0.6692, | |
| "epoch": 0.1, | |
| "grad_norm": 11.864866256713867, | |
| "learning_rate": 1.891e-05, | |
| "loss": 11.0079, | |
| "reason_loss": NaN, | |
| "step": 1900 | |
| }, | |
| { | |
| "decision_loss": 4.2461, | |
| "encoder_mlm_loss": 0.7055, | |
| "epoch": 0.1, | |
| "grad_norm": 12.110335350036621, | |
| "learning_rate": 1.941e-05, | |
| "loss": 10.9009, | |
| "reason_loss": NaN, | |
| "step": 1950 | |
| }, | |
| { | |
| "decision_loss": 4.1508, | |
| "encoder_mlm_loss": 0.6584, | |
| "epoch": 0.1, | |
| "grad_norm": 13.818020820617676, | |
| "learning_rate": 1.991e-05, | |
| "loss": 10.9391, | |
| "reason_loss": NaN, | |
| "step": 2000 | |
| }, | |
| { | |
| "decision_loss": 4.1503, | |
| "encoder_mlm_loss": 0.6471, | |
| "epoch": 0.11, | |
| "grad_norm": 11.85276985168457, | |
| "learning_rate": 2.0410000000000003e-05, | |
| "loss": 10.703, | |
| "reason_loss": NaN, | |
| "step": 2050 | |
| }, | |
| { | |
| "decision_loss": 4.1422, | |
| "encoder_mlm_loss": 0.6624, | |
| "epoch": 0.11, | |
| "grad_norm": 11.124549865722656, | |
| "learning_rate": 2.091e-05, | |
| "loss": 10.7727, | |
| "reason_loss": NaN, | |
| "step": 2100 | |
| }, | |
| { | |
| "decision_loss": 4.1424, | |
| "encoder_mlm_loss": 0.6796, | |
| "epoch": 0.11, | |
| "grad_norm": 12.21742057800293, | |
| "learning_rate": 2.1410000000000003e-05, | |
| "loss": 10.6846, | |
| "reason_loss": NaN, | |
| "step": 2150 | |
| }, | |
| { | |
| "decision_loss": 4.0898, | |
| "encoder_mlm_loss": 0.6717, | |
| "epoch": 0.11, | |
| "grad_norm": 11.365450859069824, | |
| "learning_rate": 2.191e-05, | |
| "loss": 10.662, | |
| "reason_loss": NaN, | |
| "step": 2200 | |
| }, | |
| { | |
| "decision_loss": 4.0713, | |
| "encoder_mlm_loss": 0.6881, | |
| "epoch": 0.12, | |
| "grad_norm": 12.824188232421875, | |
| "learning_rate": 2.241e-05, | |
| "loss": 10.8777, | |
| "reason_loss": NaN, | |
| "step": 2250 | |
| }, | |
| { | |
| "decision_loss": 4.1075, | |
| "encoder_mlm_loss": 0.6395, | |
| "epoch": 0.12, | |
| "grad_norm": 12.549530982971191, | |
| "learning_rate": 2.2910000000000003e-05, | |
| "loss": 10.6139, | |
| "reason_loss": NaN, | |
| "step": 2300 | |
| }, | |
| { | |
| "decision_loss": 4.1325, | |
| "encoder_mlm_loss": 0.6705, | |
| "epoch": 0.12, | |
| "grad_norm": 11.592597007751465, | |
| "learning_rate": 2.341e-05, | |
| "loss": 10.6118, | |
| "reason_loss": NaN, | |
| "step": 2350 | |
| }, | |
| { | |
| "decision_loss": 4.0534, | |
| "encoder_mlm_loss": 0.6424, | |
| "epoch": 0.12, | |
| "grad_norm": 12.411306381225586, | |
| "learning_rate": 2.3910000000000003e-05, | |
| "loss": 10.586, | |
| "reason_loss": NaN, | |
| "step": 2400 | |
| }, | |
| { | |
| "decision_loss": 4.0439, | |
| "encoder_mlm_loss": 0.6486, | |
| "epoch": 0.13, | |
| "grad_norm": 12.624602317810059, | |
| "learning_rate": 2.4410000000000002e-05, | |
| "loss": 10.5166, | |
| "reason_loss": NaN, | |
| "step": 2450 | |
| }, | |
| { | |
| "decision_loss": 4.1069, | |
| "encoder_mlm_loss": 0.6389, | |
| "epoch": 0.13, | |
| "grad_norm": 13.636293411254883, | |
| "learning_rate": 2.491e-05, | |
| "loss": 10.6555, | |
| "reason_loss": NaN, | |
| "step": 2500 | |
| }, | |
| { | |
| "decision_loss": 4.0365, | |
| "encoder_mlm_loss": 0.6552, | |
| "epoch": 0.13, | |
| "grad_norm": 11.545199394226074, | |
| "learning_rate": 2.541e-05, | |
| "loss": 10.4713, | |
| "reason_loss": NaN, | |
| "step": 2550 | |
| }, | |
| { | |
| "decision_loss": 4.0198, | |
| "encoder_mlm_loss": 0.6485, | |
| "epoch": 0.13, | |
| "grad_norm": 11.711776733398438, | |
| "learning_rate": 2.591e-05, | |
| "loss": 10.5863, | |
| "reason_loss": NaN, | |
| "step": 2600 | |
| }, | |
| { | |
| "decision_loss": 3.9609, | |
| "encoder_mlm_loss": 0.6039, | |
| "epoch": 0.14, | |
| "grad_norm": 13.17541217803955, | |
| "learning_rate": 2.6410000000000003e-05, | |
| "loss": 10.3168, | |
| "reason_loss": NaN, | |
| "step": 2650 | |
| }, | |
| { | |
| "decision_loss": 4.0235, | |
| "encoder_mlm_loss": 0.6256, | |
| "epoch": 0.14, | |
| "grad_norm": 11.82180404663086, | |
| "learning_rate": 2.691e-05, | |
| "loss": 10.5624, | |
| "reason_loss": NaN, | |
| "step": 2700 | |
| }, | |
| { | |
| "decision_loss": 3.839, | |
| "encoder_mlm_loss": 0.6519, | |
| "epoch": 0.14, | |
| "grad_norm": 12.721589088439941, | |
| "learning_rate": 2.7410000000000004e-05, | |
| "loss": 10.4343, | |
| "reason_loss": NaN, | |
| "step": 2750 | |
| }, | |
| { | |
| "decision_loss": 3.9386, | |
| "encoder_mlm_loss": 0.6115, | |
| "epoch": 0.14, | |
| "grad_norm": 12.1082763671875, | |
| "learning_rate": 2.7910000000000002e-05, | |
| "loss": 10.3518, | |
| "reason_loss": NaN, | |
| "step": 2800 | |
| }, | |
| { | |
| "decision_loss": 3.8515, | |
| "encoder_mlm_loss": 0.6094, | |
| "epoch": 0.15, | |
| "grad_norm": 12.88183307647705, | |
| "learning_rate": 2.8410000000000004e-05, | |
| "loss": 10.2022, | |
| "reason_loss": NaN, | |
| "step": 2850 | |
| }, | |
| { | |
| "decision_loss": 3.7709, | |
| "encoder_mlm_loss": 0.6229, | |
| "epoch": 0.15, | |
| "grad_norm": 11.533916473388672, | |
| "learning_rate": 2.8899999999999998e-05, | |
| "loss": 10.3799, | |
| "reason_loss": NaN, | |
| "step": 2900 | |
| }, | |
| { | |
| "decision_loss": 3.8497, | |
| "encoder_mlm_loss": 0.6279, | |
| "epoch": 0.15, | |
| "grad_norm": 12.61514663696289, | |
| "learning_rate": 2.94e-05, | |
| "loss": 10.1719, | |
| "reason_loss": NaN, | |
| "step": 2950 | |
| }, | |
| { | |
| "decision_loss": 3.8989, | |
| "encoder_mlm_loss": 0.6137, | |
| "epoch": 0.15, | |
| "grad_norm": 10.787571907043457, | |
| "learning_rate": 2.9900000000000002e-05, | |
| "loss": 10.3554, | |
| "reason_loss": NaN, | |
| "step": 3000 | |
| }, | |
| { | |
| "decision_loss": 3.7403, | |
| "encoder_mlm_loss": 0.6325, | |
| "epoch": 0.16, | |
| "grad_norm": 11.576037406921387, | |
| "learning_rate": 3.04e-05, | |
| "loss": 10.1061, | |
| "reason_loss": NaN, | |
| "step": 3050 | |
| }, | |
| { | |
| "decision_loss": 3.8257, | |
| "encoder_mlm_loss": 0.6188, | |
| "epoch": 0.16, | |
| "grad_norm": 11.931550025939941, | |
| "learning_rate": 3.09e-05, | |
| "loss": 10.0557, | |
| "reason_loss": NaN, | |
| "step": 3100 | |
| }, | |
| { | |
| "decision_loss": 3.7653, | |
| "encoder_mlm_loss": 0.6151, | |
| "epoch": 0.16, | |
| "grad_norm": 13.052062034606934, | |
| "learning_rate": 3.1400000000000004e-05, | |
| "loss": 9.9838, | |
| "reason_loss": NaN, | |
| "step": 3150 | |
| }, | |
| { | |
| "decision_loss": 3.7544, | |
| "encoder_mlm_loss": 0.6237, | |
| "epoch": 0.16, | |
| "grad_norm": 14.21654224395752, | |
| "learning_rate": 3.19e-05, | |
| "loss": 9.8657, | |
| "reason_loss": NaN, | |
| "step": 3200 | |
| }, | |
| { | |
| "decision_loss": 3.6772, | |
| "encoder_mlm_loss": 0.61, | |
| "epoch": 0.17, | |
| "grad_norm": 14.428752899169922, | |
| "learning_rate": 3.24e-05, | |
| "loss": 9.6336, | |
| "reason_loss": NaN, | |
| "step": 3250 | |
| }, | |
| { | |
| "decision_loss": 3.6212, | |
| "encoder_mlm_loss": 0.6203, | |
| "epoch": 0.17, | |
| "grad_norm": 15.353618621826172, | |
| "learning_rate": 3.29e-05, | |
| "loss": 9.4442, | |
| "reason_loss": NaN, | |
| "step": 3300 | |
| }, | |
| { | |
| "decision_loss": 3.6407, | |
| "encoder_mlm_loss": 0.6454, | |
| "epoch": 0.17, | |
| "grad_norm": 12.828969955444336, | |
| "learning_rate": 3.3400000000000005e-05, | |
| "loss": 9.3448, | |
| "reason_loss": NaN, | |
| "step": 3350 | |
| }, | |
| { | |
| "decision_loss": 3.5022, | |
| "encoder_mlm_loss": 0.6361, | |
| "epoch": 0.18, | |
| "grad_norm": 13.936616897583008, | |
| "learning_rate": 3.3900000000000004e-05, | |
| "loss": 9.354, | |
| "reason_loss": NaN, | |
| "step": 3400 | |
| }, | |
| { | |
| "decision_loss": 3.5961, | |
| "encoder_mlm_loss": 0.6405, | |
| "epoch": 0.18, | |
| "grad_norm": 13.668755531311035, | |
| "learning_rate": 3.4399999999999996e-05, | |
| "loss": 9.0563, | |
| "reason_loss": 4.3796, | |
| "step": 3450 | |
| }, | |
| { | |
| "decision_loss": 3.5257, | |
| "encoder_mlm_loss": 0.591, | |
| "epoch": 0.18, | |
| "grad_norm": 12.743650436401367, | |
| "learning_rate": 3.49e-05, | |
| "loss": 9.089, | |
| "reason_loss": NaN, | |
| "step": 3500 | |
| }, | |
| { | |
| "decision_loss": 3.688, | |
| "encoder_mlm_loss": 0.6529, | |
| "epoch": 0.18, | |
| "grad_norm": 13.899826049804688, | |
| "learning_rate": 3.54e-05, | |
| "loss": 8.7378, | |
| "reason_loss": NaN, | |
| "step": 3550 | |
| }, | |
| { | |
| "decision_loss": 3.5177, | |
| "encoder_mlm_loss": 0.6223, | |
| "epoch": 0.19, | |
| "grad_norm": 16.01102066040039, | |
| "learning_rate": 3.59e-05, | |
| "loss": 8.6978, | |
| "reason_loss": NaN, | |
| "step": 3600 | |
| }, | |
| { | |
| "decision_loss": 3.5152, | |
| "encoder_mlm_loss": 0.6192, | |
| "epoch": 0.19, | |
| "grad_norm": 14.593777656555176, | |
| "learning_rate": 3.6400000000000004e-05, | |
| "loss": 8.3674, | |
| "reason_loss": NaN, | |
| "step": 3650 | |
| }, | |
| { | |
| "decision_loss": 3.4812, | |
| "encoder_mlm_loss": 0.6087, | |
| "epoch": 0.19, | |
| "grad_norm": 20.128116607666016, | |
| "learning_rate": 3.69e-05, | |
| "loss": 8.2724, | |
| "reason_loss": NaN, | |
| "step": 3700 | |
| }, | |
| { | |
| "decision_loss": 3.5961, | |
| "encoder_mlm_loss": 0.596, | |
| "epoch": 0.19, | |
| "grad_norm": 15.367920875549316, | |
| "learning_rate": 3.74e-05, | |
| "loss": 8.1541, | |
| "reason_loss": NaN, | |
| "step": 3750 | |
| }, | |
| { | |
| "decision_loss": 3.438, | |
| "encoder_mlm_loss": 0.6147, | |
| "epoch": 0.2, | |
| "grad_norm": 13.092328071594238, | |
| "learning_rate": 3.79e-05, | |
| "loss": 8.0694, | |
| "reason_loss": NaN, | |
| "step": 3800 | |
| }, | |
| { | |
| "decision_loss": 3.3987, | |
| "encoder_mlm_loss": 0.6024, | |
| "epoch": 0.2, | |
| "grad_norm": 14.642455101013184, | |
| "learning_rate": 3.8400000000000005e-05, | |
| "loss": 7.8633, | |
| "reason_loss": NaN, | |
| "step": 3850 | |
| }, | |
| { | |
| "decision_loss": 3.3564, | |
| "encoder_mlm_loss": 0.5611, | |
| "epoch": 0.2, | |
| "grad_norm": 14.328669548034668, | |
| "learning_rate": 3.8900000000000004e-05, | |
| "loss": 7.4975, | |
| "reason_loss": NaN, | |
| "step": 3900 | |
| }, | |
| { | |
| "decision_loss": 3.4611, | |
| "encoder_mlm_loss": 0.6012, | |
| "epoch": 0.2, | |
| "grad_norm": 13.032312393188477, | |
| "learning_rate": 3.94e-05, | |
| "loss": 7.4354, | |
| "reason_loss": NaN, | |
| "step": 3950 | |
| }, | |
| { | |
| "decision_loss": 3.3266, | |
| "encoder_mlm_loss": 0.6198, | |
| "epoch": 0.21, | |
| "grad_norm": 13.335681915283203, | |
| "learning_rate": 3.99e-05, | |
| "loss": 7.3149, | |
| "reason_loss": NaN, | |
| "step": 4000 | |
| }, | |
| { | |
| "decision_loss": 3.3579, | |
| "encoder_mlm_loss": 0.589, | |
| "epoch": 0.21, | |
| "grad_norm": 13.334540367126465, | |
| "learning_rate": 4.0400000000000006e-05, | |
| "loss": 7.2804, | |
| "reason_loss": NaN, | |
| "step": 4050 | |
| }, | |
| { | |
| "decision_loss": 3.4332, | |
| "encoder_mlm_loss": 0.5642, | |
| "epoch": 0.21, | |
| "grad_norm": 13.725900650024414, | |
| "learning_rate": 4.09e-05, | |
| "loss": 7.2422, | |
| "reason_loss": NaN, | |
| "step": 4100 | |
| }, | |
| { | |
| "decision_loss": 3.3579, | |
| "encoder_mlm_loss": 0.638, | |
| "epoch": 0.21, | |
| "grad_norm": 12.601729393005371, | |
| "learning_rate": 4.14e-05, | |
| "loss": 7.2021, | |
| "reason_loss": NaN, | |
| "step": 4150 | |
| }, | |
| { | |
| "decision_loss": 3.3385, | |
| "encoder_mlm_loss": 0.6249, | |
| "epoch": 0.22, | |
| "grad_norm": 14.782654762268066, | |
| "learning_rate": 4.19e-05, | |
| "loss": 7.1519, | |
| "reason_loss": NaN, | |
| "step": 4200 | |
| }, | |
| { | |
| "decision_loss": 3.2423, | |
| "encoder_mlm_loss": 0.572, | |
| "epoch": 0.22, | |
| "grad_norm": 10.786784172058105, | |
| "learning_rate": 4.24e-05, | |
| "loss": 7.0259, | |
| "reason_loss": NaN, | |
| "step": 4250 | |
| }, | |
| { | |
| "decision_loss": 3.4216, | |
| "encoder_mlm_loss": 0.6259, | |
| "epoch": 0.22, | |
| "grad_norm": 14.012487411499023, | |
| "learning_rate": 4.29e-05, | |
| "loss": 6.8396, | |
| "reason_loss": NaN, | |
| "step": 4300 | |
| }, | |
| { | |
| "decision_loss": 3.294, | |
| "encoder_mlm_loss": 0.6021, | |
| "epoch": 0.22, | |
| "grad_norm": 13.132121086120605, | |
| "learning_rate": 4.3400000000000005e-05, | |
| "loss": 6.7559, | |
| "reason_loss": NaN, | |
| "step": 4350 | |
| }, | |
| { | |
| "decision_loss": 3.2271, | |
| "encoder_mlm_loss": 0.6381, | |
| "epoch": 0.23, | |
| "grad_norm": 14.553518295288086, | |
| "learning_rate": 4.39e-05, | |
| "loss": 6.8517, | |
| "reason_loss": 2.5116, | |
| "step": 4400 | |
| }, | |
| { | |
| "decision_loss": 3.1722, | |
| "encoder_mlm_loss": 0.5848, | |
| "epoch": 0.23, | |
| "grad_norm": 13.60247802734375, | |
| "learning_rate": 4.44e-05, | |
| "loss": 6.6558, | |
| "reason_loss": NaN, | |
| "step": 4450 | |
| }, | |
| { | |
| "decision_loss": 3.3701, | |
| "encoder_mlm_loss": 0.6151, | |
| "epoch": 0.23, | |
| "grad_norm": 14.23380184173584, | |
| "learning_rate": 4.49e-05, | |
| "loss": 6.6485, | |
| "reason_loss": NaN, | |
| "step": 4500 | |
| }, | |
| { | |
| "decision_loss": 3.214, | |
| "encoder_mlm_loss": 0.5903, | |
| "epoch": 0.23, | |
| "grad_norm": 12.637818336486816, | |
| "learning_rate": 4.5400000000000006e-05, | |
| "loss": 6.6434, | |
| "reason_loss": NaN, | |
| "step": 4550 | |
| }, | |
| { | |
| "decision_loss": 3.4377, | |
| "encoder_mlm_loss": 0.5688, | |
| "epoch": 0.24, | |
| "grad_norm": 12.161514282226562, | |
| "learning_rate": 4.5900000000000004e-05, | |
| "loss": 6.558, | |
| "reason_loss": NaN, | |
| "step": 4600 | |
| }, | |
| { | |
| "decision_loss": 3.2337, | |
| "encoder_mlm_loss": 0.5745, | |
| "epoch": 0.24, | |
| "grad_norm": 10.774749755859375, | |
| "learning_rate": 4.64e-05, | |
| "loss": 6.4569, | |
| "reason_loss": NaN, | |
| "step": 4650 | |
| }, | |
| { | |
| "decision_loss": 3.1934, | |
| "encoder_mlm_loss": 0.6216, | |
| "epoch": 0.24, | |
| "grad_norm": 12.889678001403809, | |
| "learning_rate": 4.69e-05, | |
| "loss": 6.588, | |
| "reason_loss": NaN, | |
| "step": 4700 | |
| }, | |
| { | |
| "decision_loss": 3.2473, | |
| "encoder_mlm_loss": 0.598, | |
| "epoch": 0.24, | |
| "grad_norm": 12.259153366088867, | |
| "learning_rate": 4.74e-05, | |
| "loss": 6.506, | |
| "reason_loss": NaN, | |
| "step": 4750 | |
| }, | |
| { | |
| "decision_loss": 3.3514, | |
| "encoder_mlm_loss": 0.5984, | |
| "epoch": 0.25, | |
| "grad_norm": 13.196783065795898, | |
| "learning_rate": 4.79e-05, | |
| "loss": 6.4249, | |
| "reason_loss": NaN, | |
| "step": 4800 | |
| }, | |
| { | |
| "decision_loss": 3.125, | |
| "encoder_mlm_loss": 0.5777, | |
| "epoch": 0.25, | |
| "grad_norm": 12.129148483276367, | |
| "learning_rate": 4.8400000000000004e-05, | |
| "loss": 6.1404, | |
| "reason_loss": NaN, | |
| "step": 4850 | |
| }, | |
| { | |
| "decision_loss": 3.184, | |
| "encoder_mlm_loss": 0.5889, | |
| "epoch": 0.25, | |
| "grad_norm": 12.139684677124023, | |
| "learning_rate": 4.89e-05, | |
| "loss": 6.1487, | |
| "reason_loss": NaN, | |
| "step": 4900 | |
| }, | |
| { | |
| "decision_loss": 3.2688, | |
| "encoder_mlm_loss": 0.583, | |
| "epoch": 0.26, | |
| "grad_norm": 12.38558578491211, | |
| "learning_rate": 4.94e-05, | |
| "loss": 6.2462, | |
| "reason_loss": NaN, | |
| "step": 4950 | |
| }, | |
| { | |
| "decision_loss": 3.0885, | |
| "encoder_mlm_loss": 0.5828, | |
| "epoch": 0.26, | |
| "grad_norm": 12.759172439575195, | |
| "learning_rate": 4.99e-05, | |
| "loss": 6.2834, | |
| "reason_loss": NaN, | |
| "step": 5000 | |
| }, | |
| { | |
| "decision_loss": 3.1414, | |
| "encoder_mlm_loss": 0.5995, | |
| "epoch": 0.26, | |
| "grad_norm": 13.554661750793457, | |
| "learning_rate": 4.986107252014449e-05, | |
| "loss": 6.158, | |
| "reason_loss": NaN, | |
| "step": 5050 | |
| }, | |
| { | |
| "decision_loss": 3.0832, | |
| "encoder_mlm_loss": 0.5885, | |
| "epoch": 0.26, | |
| "grad_norm": 14.811614990234375, | |
| "learning_rate": 4.9687413170325095e-05, | |
| "loss": 6.0866, | |
| "reason_loss": NaN, | |
| "step": 5100 | |
| }, | |
| { | |
| "decision_loss": 3.0845, | |
| "encoder_mlm_loss": 0.5737, | |
| "epoch": 0.27, | |
| "grad_norm": 11.42420482635498, | |
| "learning_rate": 4.9513753820505696e-05, | |
| "loss": 6.0264, | |
| "reason_loss": NaN, | |
| "step": 5150 | |
| }, | |
| { | |
| "decision_loss": 3.0805, | |
| "encoder_mlm_loss": 0.583, | |
| "epoch": 0.27, | |
| "grad_norm": 12.164910316467285, | |
| "learning_rate": 4.9340094470686304e-05, | |
| "loss": 6.0071, | |
| "reason_loss": NaN, | |
| "step": 5200 | |
| }, | |
| { | |
| "decision_loss": 3.0178, | |
| "encoder_mlm_loss": 0.5798, | |
| "epoch": 0.27, | |
| "grad_norm": 12.894425392150879, | |
| "learning_rate": 4.916643512086691e-05, | |
| "loss": 5.9767, | |
| "reason_loss": NaN, | |
| "step": 5250 | |
| }, | |
| { | |
| "decision_loss": 2.9613, | |
| "encoder_mlm_loss": 0.5629, | |
| "epoch": 0.27, | |
| "grad_norm": 12.356279373168945, | |
| "learning_rate": 4.899277577104751e-05, | |
| "loss": 5.9532, | |
| "reason_loss": NaN, | |
| "step": 5300 | |
| }, | |
| { | |
| "decision_loss": 2.9668, | |
| "encoder_mlm_loss": 0.5662, | |
| "epoch": 0.28, | |
| "grad_norm": 12.371698379516602, | |
| "learning_rate": 4.881911642122812e-05, | |
| "loss": 5.9129, | |
| "reason_loss": NaN, | |
| "step": 5350 | |
| }, | |
| { | |
| "decision_loss": 2.8377, | |
| "encoder_mlm_loss": 0.5764, | |
| "epoch": 0.28, | |
| "grad_norm": 11.876270294189453, | |
| "learning_rate": 4.864545707140873e-05, | |
| "loss": 5.7633, | |
| "reason_loss": NaN, | |
| "step": 5400 | |
| }, | |
| { | |
| "decision_loss": 3.0811, | |
| "encoder_mlm_loss": 0.5976, | |
| "epoch": 0.28, | |
| "grad_norm": 11.529359817504883, | |
| "learning_rate": 4.847179772158934e-05, | |
| "loss": 5.9185, | |
| "reason_loss": NaN, | |
| "step": 5450 | |
| }, | |
| { | |
| "decision_loss": 2.931, | |
| "encoder_mlm_loss": 0.5827, | |
| "epoch": 0.28, | |
| "grad_norm": 12.832273483276367, | |
| "learning_rate": 4.829813837176994e-05, | |
| "loss": 5.9475, | |
| "reason_loss": NaN, | |
| "step": 5500 | |
| }, | |
| { | |
| "decision_loss": 2.9643, | |
| "encoder_mlm_loss": 0.5972, | |
| "epoch": 0.29, | |
| "grad_norm": 12.522998809814453, | |
| "learning_rate": 4.812447902195054e-05, | |
| "loss": 5.9335, | |
| "reason_loss": NaN, | |
| "step": 5550 | |
| }, | |
| { | |
| "decision_loss": 2.9406, | |
| "encoder_mlm_loss": 0.5756, | |
| "epoch": 0.29, | |
| "grad_norm": 11.039345741271973, | |
| "learning_rate": 4.795081967213115e-05, | |
| "loss": 5.7175, | |
| "reason_loss": NaN, | |
| "step": 5600 | |
| }, | |
| { | |
| "decision_loss": 2.8629, | |
| "encoder_mlm_loss": 0.5527, | |
| "epoch": 0.29, | |
| "grad_norm": 12.582688331604004, | |
| "learning_rate": 4.7777160322311755e-05, | |
| "loss": 5.6501, | |
| "reason_loss": NaN, | |
| "step": 5650 | |
| }, | |
| { | |
| "decision_loss": 2.8334, | |
| "encoder_mlm_loss": 0.5859, | |
| "epoch": 0.29, | |
| "grad_norm": 12.082906723022461, | |
| "learning_rate": 4.760350097249236e-05, | |
| "loss": 5.7244, | |
| "reason_loss": NaN, | |
| "step": 5700 | |
| }, | |
| { | |
| "decision_loss": 2.8741, | |
| "encoder_mlm_loss": 0.5826, | |
| "epoch": 0.3, | |
| "grad_norm": 11.20120620727539, | |
| "learning_rate": 4.7429841622672964e-05, | |
| "loss": 5.6161, | |
| "reason_loss": NaN, | |
| "step": 5750 | |
| }, | |
| { | |
| "decision_loss": 2.9188, | |
| "encoder_mlm_loss": 0.5634, | |
| "epoch": 0.3, | |
| "grad_norm": 13.118001937866211, | |
| "learning_rate": 4.725618227285357e-05, | |
| "loss": 5.6669, | |
| "reason_loss": NaN, | |
| "step": 5800 | |
| }, | |
| { | |
| "decision_loss": 2.9045, | |
| "encoder_mlm_loss": 0.5613, | |
| "epoch": 0.3, | |
| "grad_norm": 11.6439208984375, | |
| "learning_rate": 4.708252292303418e-05, | |
| "loss": 5.6609, | |
| "reason_loss": NaN, | |
| "step": 5850 | |
| }, | |
| { | |
| "decision_loss": 2.9099, | |
| "encoder_mlm_loss": 0.5681, | |
| "epoch": 0.3, | |
| "grad_norm": 11.001469612121582, | |
| "learning_rate": 4.690886357321478e-05, | |
| "loss": 5.5608, | |
| "reason_loss": NaN, | |
| "step": 5900 | |
| }, | |
| { | |
| "decision_loss": 2.8081, | |
| "encoder_mlm_loss": 0.5748, | |
| "epoch": 0.31, | |
| "grad_norm": 11.262368202209473, | |
| "learning_rate": 4.673520422339539e-05, | |
| "loss": 5.6866, | |
| "reason_loss": NaN, | |
| "step": 5950 | |
| }, | |
| { | |
| "decision_loss": 2.9234, | |
| "encoder_mlm_loss": 0.5752, | |
| "epoch": 0.31, | |
| "grad_norm": 15.921095848083496, | |
| "learning_rate": 4.6561544873576e-05, | |
| "loss": 5.4638, | |
| "reason_loss": NaN, | |
| "step": 6000 | |
| }, | |
| { | |
| "decision_loss": 2.8734, | |
| "encoder_mlm_loss": 0.5436, | |
| "epoch": 0.31, | |
| "grad_norm": 11.366347312927246, | |
| "learning_rate": 4.6387885523756605e-05, | |
| "loss": 5.4932, | |
| "reason_loss": NaN, | |
| "step": 6050 | |
| }, | |
| { | |
| "decision_loss": 2.8849, | |
| "encoder_mlm_loss": 0.5758, | |
| "epoch": 0.31, | |
| "grad_norm": 13.834676742553711, | |
| "learning_rate": 4.6214226173937206e-05, | |
| "loss": 5.5872, | |
| "reason_loss": NaN, | |
| "step": 6100 | |
| }, | |
| { | |
| "decision_loss": 2.8112, | |
| "encoder_mlm_loss": 0.5829, | |
| "epoch": 0.32, | |
| "grad_norm": 12.526769638061523, | |
| "learning_rate": 4.6040566824117814e-05, | |
| "loss": 5.4441, | |
| "reason_loss": NaN, | |
| "step": 6150 | |
| }, | |
| { | |
| "decision_loss": 2.7713, | |
| "encoder_mlm_loss": 0.5631, | |
| "epoch": 0.32, | |
| "grad_norm": 11.834070205688477, | |
| "learning_rate": 4.586690747429842e-05, | |
| "loss": 5.4597, | |
| "reason_loss": NaN, | |
| "step": 6200 | |
| }, | |
| { | |
| "decision_loss": 2.8737, | |
| "encoder_mlm_loss": 0.5454, | |
| "epoch": 0.32, | |
| "grad_norm": 10.592991828918457, | |
| "learning_rate": 4.569324812447903e-05, | |
| "loss": 5.4016, | |
| "reason_loss": NaN, | |
| "step": 6250 | |
| }, | |
| { | |
| "decision_loss": 2.7021, | |
| "encoder_mlm_loss": 0.5667, | |
| "epoch": 0.32, | |
| "grad_norm": 11.339573860168457, | |
| "learning_rate": 4.5519588774659624e-05, | |
| "loss": 5.5297, | |
| "reason_loss": NaN, | |
| "step": 6300 | |
| }, | |
| { | |
| "decision_loss": 2.8387, | |
| "encoder_mlm_loss": 0.5498, | |
| "epoch": 0.33, | |
| "grad_norm": 14.197671890258789, | |
| "learning_rate": 4.534592942484023e-05, | |
| "loss": 5.3895, | |
| "reason_loss": 1.7433, | |
| "step": 6350 | |
| }, | |
| { | |
| "decision_loss": 2.6152, | |
| "encoder_mlm_loss": 0.555, | |
| "epoch": 0.33, | |
| "grad_norm": 12.190983772277832, | |
| "learning_rate": 4.517227007502084e-05, | |
| "loss": 5.342, | |
"reason_loss": null,
| "step": 6400 | |
| }, | |
| { | |
| "decision_loss": 2.8126, | |
| "encoder_mlm_loss": 0.5843, | |
| "epoch": 0.33, | |
| "grad_norm": 20.80158805847168, | |
| "learning_rate": 4.499861072520145e-05, | |
| "loss": 5.4744, | |
"reason_loss": null,
| "step": 6450 | |
| }, | |
| { | |
| "decision_loss": 2.8011, | |
| "encoder_mlm_loss": 0.5559, | |
| "epoch": 0.34, | |
| "grad_norm": 10.541352272033691, | |
| "learning_rate": 4.482495137538205e-05, | |
| "loss": 5.3561, | |
"reason_loss": null,
| "step": 6500 | |
| }, | |
| { | |
| "decision_loss": 2.7874, | |
| "encoder_mlm_loss": 0.5694, | |
| "epoch": 0.34, | |
| "grad_norm": 15.321596145629883, | |
| "learning_rate": 4.465129202556266e-05, | |
| "loss": 5.3279, | |
"reason_loss": null,
| "step": 6550 | |
| }, | |
| { | |
| "decision_loss": 2.7497, | |
| "encoder_mlm_loss": 0.5539, | |
| "epoch": 0.34, | |
| "grad_norm": 12.009075164794922, | |
| "learning_rate": 4.4477632675743265e-05, | |
| "loss": 5.3333, | |
"reason_loss": null,
| "step": 6600 | |
| }, | |
| { | |
| "decision_loss": 2.641, | |
| "encoder_mlm_loss": 0.5661, | |
| "epoch": 0.34, | |
| "grad_norm": 19.22854995727539, | |
| "learning_rate": 4.430397332592387e-05, | |
| "loss": 5.3703, | |
"reason_loss": null,
| "step": 6650 | |
| }, | |
| { | |
| "decision_loss": 2.6307, | |
| "encoder_mlm_loss": 0.541, | |
| "epoch": 0.35, | |
| "grad_norm": 11.551212310791016, | |
| "learning_rate": 4.4130313976104474e-05, | |
| "loss": 5.3032, | |
"reason_loss": null,
| "step": 6700 | |
| }, | |
| { | |
| "decision_loss": 2.852, | |
| "encoder_mlm_loss": 0.5668, | |
| "epoch": 0.35, | |
| "grad_norm": 11.5441312789917, | |
| "learning_rate": 4.395665462628508e-05, | |
| "loss": 5.38, | |
"reason_loss": null,
| "step": 6750 | |
| }, | |
| { | |
| "decision_loss": 2.7921, | |
| "encoder_mlm_loss": 0.5515, | |
| "epoch": 0.35, | |
| "grad_norm": 10.488402366638184, | |
| "learning_rate": 4.378299527646569e-05, | |
| "loss": 5.211, | |
"reason_loss": null,
| "step": 6800 | |
| }, | |
| { | |
| "decision_loss": 2.7793, | |
| "encoder_mlm_loss": 0.5353, | |
| "epoch": 0.35, | |
| "grad_norm": 11.62839412689209, | |
| "learning_rate": 4.36093359266463e-05, | |
| "loss": 5.2583, | |
"reason_loss": null,
| "step": 6850 | |
| }, | |
| { | |
| "decision_loss": 2.5967, | |
| "encoder_mlm_loss": 0.5202, | |
| "epoch": 0.36, | |
| "grad_norm": 11.641283988952637, | |
| "learning_rate": 4.34356765768269e-05, | |
| "loss": 5.1396, | |
"reason_loss": null,
| "step": 6900 | |
| }, | |
| { | |
| "decision_loss": 2.6503, | |
| "encoder_mlm_loss": 0.5485, | |
| "epoch": 0.36, | |
| "grad_norm": 12.255797386169434, | |
| "learning_rate": 4.3262017227007506e-05, | |
| "loss": 5.2417, | |
"reason_loss": null,
| "step": 6950 | |
| }, | |
| { | |
| "decision_loss": 2.6204, | |
| "encoder_mlm_loss": 0.5421, | |
| "epoch": 0.36, | |
| "grad_norm": 10.042210578918457, | |
| "learning_rate": 4.3088357877188114e-05, | |
| "loss": 5.1232, | |
"reason_loss": null,
| "step": 7000 | |
| }, | |
| { | |
| "decision_loss": 2.7291, | |
| "encoder_mlm_loss": 0.5607, | |
| "epoch": 0.36, | |
| "grad_norm": 10.926862716674805, | |
| "learning_rate": 4.2914698527368716e-05, | |
| "loss": 5.1705, | |
"reason_loss": null,
| "step": 7050 | |
| }, | |
| { | |
| "decision_loss": 2.7328, | |
| "encoder_mlm_loss": 0.5431, | |
| "epoch": 0.37, | |
| "grad_norm": 11.532885551452637, | |
| "learning_rate": 4.274103917754932e-05, | |
| "loss": 5.0906, | |
"reason_loss": null,
| "step": 7100 | |
| }, | |
| { | |
| "decision_loss": 2.5302, | |
| "encoder_mlm_loss": 0.531, | |
| "epoch": 0.37, | |
| "grad_norm": 11.01016902923584, | |
| "learning_rate": 4.2567379827729925e-05, | |
| "loss": 4.9902, | |
"reason_loss": null,
| "step": 7150 | |
| }, | |
| { | |
| "decision_loss": 2.7506, | |
| "encoder_mlm_loss": 0.5675, | |
| "epoch": 0.37, | |
| "grad_norm": 11.220144271850586, | |
| "learning_rate": 4.239372047791053e-05, | |
| "loss": 5.2234, | |
"reason_loss": null,
| "step": 7200 | |
| }, | |
| { | |
| "decision_loss": 2.6282, | |
| "encoder_mlm_loss": 0.5864, | |
| "epoch": 0.37, | |
| "grad_norm": 10.498862266540527, | |
| "learning_rate": 4.222006112809114e-05, | |
| "loss": 5.1675, | |
"reason_loss": null,
| "step": 7250 | |
| }, | |
| { | |
| "decision_loss": 2.5942, | |
| "encoder_mlm_loss": 0.5078, | |
| "epoch": 0.38, | |
| "grad_norm": 11.325239181518555, | |
| "learning_rate": 4.204640177827174e-05, | |
| "loss": 5.093, | |
"reason_loss": null,
| "step": 7300 | |
| }, | |
| { | |
| "decision_loss": 2.6822, | |
| "encoder_mlm_loss": 0.509, | |
| "epoch": 0.38, | |
| "grad_norm": 10.894235610961914, | |
| "learning_rate": 4.187274242845235e-05, | |
| "loss": 5.1333, | |
"reason_loss": null,
| "step": 7350 | |
| }, | |
| { | |
| "decision_loss": 2.6609, | |
| "encoder_mlm_loss": 0.5532, | |
| "epoch": 0.38, | |
| "grad_norm": 10.849638938903809, | |
| "learning_rate": 4.169908307863296e-05, | |
| "loss": 5.0219, | |
"reason_loss": null,
| "step": 7400 | |
| }, | |
| { | |
| "decision_loss": 2.7642, | |
| "encoder_mlm_loss": 0.5628, | |
| "epoch": 0.38, | |
| "grad_norm": 9.63953685760498, | |
| "learning_rate": 4.152542372881356e-05, | |
| "loss": 5.0, | |
"reason_loss": null,
| "step": 7450 | |
| }, | |
| { | |
| "decision_loss": 2.629, | |
| "encoder_mlm_loss": 0.5054, | |
| "epoch": 0.39, | |
| "grad_norm": 12.991473197937012, | |
| "learning_rate": 4.1351764378994166e-05, | |
| "loss": 5.0013, | |
"reason_loss": null,
| "step": 7500 | |
| }, | |
| { | |
| "decision_loss": 2.6034, | |
| "encoder_mlm_loss": 0.5576, | |
| "epoch": 0.39, | |
| "grad_norm": 11.331198692321777, | |
| "learning_rate": 4.1178105029174774e-05, | |
| "loss": 5.1522, | |
"reason_loss": null,
| "step": 7550 | |
| }, | |
| { | |
| "decision_loss": 2.5915, | |
| "encoder_mlm_loss": 0.5671, | |
| "epoch": 0.39, | |
| "grad_norm": 10.665949821472168, | |
| "learning_rate": 4.100444567935538e-05, | |
| "loss": 4.9226, | |
"reason_loss": null,
| "step": 7600 | |
| }, | |
| { | |
| "decision_loss": 2.6619, | |
| "encoder_mlm_loss": 0.5314, | |
| "epoch": 0.39, | |
| "grad_norm": 9.840173721313477, | |
| "learning_rate": 4.083078632953598e-05, | |
| "loss": 4.98, | |
"reason_loss": null,
| "step": 7650 | |
| }, | |
| { | |
| "decision_loss": 2.7066, | |
| "encoder_mlm_loss": 0.5427, | |
| "epoch": 0.4, | |
| "grad_norm": 10.543719291687012, | |
| "learning_rate": 4.065712697971659e-05, | |
| "loss": 4.9124, | |
"reason_loss": null,
| "step": 7700 | |
| }, | |
| { | |
| "decision_loss": 2.5665, | |
| "encoder_mlm_loss": 0.5278, | |
| "epoch": 0.4, | |
| "grad_norm": 11.529766082763672, | |
| "learning_rate": 4.04834676298972e-05, | |
| "loss": 4.9287, | |
"reason_loss": null,
| "step": 7750 | |
| }, | |
| { | |
| "decision_loss": 2.5556, | |
| "encoder_mlm_loss": 0.5677, | |
| "epoch": 0.4, | |
| "grad_norm": 11.010191917419434, | |
| "learning_rate": 4.03098082800778e-05, | |
| "loss": 4.9821, | |
"reason_loss": null,
| "step": 7800 | |
| }, | |
| { | |
| "decision_loss": 2.5638, | |
| "encoder_mlm_loss": 0.5572, | |
| "epoch": 0.4, | |
| "grad_norm": 11.854859352111816, | |
| "learning_rate": 4.013614893025841e-05, | |
| "loss": 4.8495, | |
"reason_loss": null,
| "step": 7850 | |
| }, | |
| { | |
| "decision_loss": 2.5787, | |
| "encoder_mlm_loss": 0.524, | |
| "epoch": 0.41, | |
| "grad_norm": 15.032380104064941, | |
| "learning_rate": 3.996248958043901e-05, | |
| "loss": 4.9092, | |
"reason_loss": null,
| "step": 7900 | |
| }, | |
| { | |
| "decision_loss": 2.6208, | |
| "encoder_mlm_loss": 0.5749, | |
| "epoch": 0.41, | |
| "grad_norm": 10.260398864746094, | |
| "learning_rate": 3.978883023061962e-05, | |
| "loss": 4.96, | |
"reason_loss": null,
| "step": 7950 | |
| }, | |
| { | |
| "decision_loss": 2.5406, | |
| "encoder_mlm_loss": 0.5159, | |
| "epoch": 0.41, | |
| "grad_norm": 10.527961730957031, | |
| "learning_rate": 3.9615170880800225e-05, | |
| "loss": 5.0116, | |
"reason_loss": null,
| "step": 8000 | |
| }, | |
| { | |
| "decision_loss": 2.5387, | |
| "encoder_mlm_loss": 0.56, | |
| "epoch": 0.42, | |
| "grad_norm": 10.906159400939941, | |
| "learning_rate": 3.9441511530980826e-05, | |
| "loss": 4.9037, | |
"reason_loss": null,
| "step": 8050 | |
| }, | |
| { | |
| "decision_loss": 2.5531, | |
| "encoder_mlm_loss": 0.5345, | |
| "epoch": 0.42, | |
| "grad_norm": 11.020978927612305, | |
| "learning_rate": 3.9267852181161434e-05, | |
| "loss": 4.7901, | |
"reason_loss": null,
| "step": 8100 | |
| }, | |
| { | |
| "decision_loss": 2.3603, | |
| "encoder_mlm_loss": 0.5403, | |
| "epoch": 0.42, | |
| "grad_norm": 9.852022171020508, | |
| "learning_rate": 3.909419283134204e-05, | |
| "loss": 4.8887, | |
"reason_loss": null,
| "step": 8150 | |
| }, | |
| { | |
| "decision_loss": 2.5552, | |
| "encoder_mlm_loss": 0.5263, | |
| "epoch": 0.42, | |
| "grad_norm": 10.380234718322754, | |
| "learning_rate": 3.892053348152265e-05, | |
| "loss": 4.823, | |
"reason_loss": null,
| "step": 8200 | |
| }, | |
| { | |
| "decision_loss": 2.483, | |
| "encoder_mlm_loss": 0.5314, | |
| "epoch": 0.43, | |
| "grad_norm": 15.00808334350586, | |
| "learning_rate": 3.874687413170325e-05, | |
| "loss": 4.8393, | |
"reason_loss": null,
| "step": 8250 | |
| }, | |
| { | |
| "decision_loss": 2.5897, | |
| "encoder_mlm_loss": 0.5297, | |
| "epoch": 0.43, | |
| "grad_norm": 11.162062644958496, | |
| "learning_rate": 3.857321478188386e-05, | |
| "loss": 4.8635, | |
"reason_loss": null,
| "step": 8300 | |
| }, | |
| { | |
| "decision_loss": 2.5224, | |
| "encoder_mlm_loss": 0.5486, | |
| "epoch": 0.43, | |
| "grad_norm": 10.835860252380371, | |
| "learning_rate": 3.8403028619060855e-05, | |
| "loss": 4.7197, | |
"reason_loss": null,
| "step": 8350 | |
| }, | |
| { | |
| "decision_loss": 2.5729, | |
| "encoder_mlm_loss": 0.5287, | |
| "epoch": 0.43, | |
| "grad_norm": 10.745417594909668, | |
| "learning_rate": 3.8229369269241456e-05, | |
| "loss": 4.863, | |
"reason_loss": null,
| "step": 8400 | |
| }, | |
| { | |
| "decision_loss": 2.5483, | |
| "encoder_mlm_loss": 0.4999, | |
| "epoch": 0.44, | |
| "grad_norm": 11.162773132324219, | |
| "learning_rate": 3.8055709919422064e-05, | |
| "loss": 4.7945, | |
"reason_loss": null,
| "step": 8450 | |
| }, | |
| { | |
| "decision_loss": 2.4687, | |
| "encoder_mlm_loss": 0.5278, | |
| "epoch": 0.44, | |
| "grad_norm": 9.60230541229248, | |
| "learning_rate": 3.7882050569602665e-05, | |
| "loss": 4.9069, | |
"reason_loss": null,
| "step": 8500 | |
| }, | |
| { | |
| "decision_loss": 2.5931, | |
| "encoder_mlm_loss": 0.5477, | |
| "epoch": 0.44, | |
| "grad_norm": 10.537424087524414, | |
| "learning_rate": 3.770839121978327e-05, | |
| "loss": 4.81, | |
"reason_loss": null,
| "step": 8550 | |
| }, | |
| { | |
| "decision_loss": 2.5087, | |
| "encoder_mlm_loss": 0.5229, | |
| "epoch": 0.44, | |
| "grad_norm": 10.352331161499023, | |
| "learning_rate": 3.753473186996388e-05, | |
| "loss": 4.7803, | |
"reason_loss": null,
| "step": 8600 | |
| }, | |
| { | |
| "decision_loss": 2.5849, | |
| "encoder_mlm_loss": 0.5264, | |
| "epoch": 0.45, | |
| "grad_norm": 11.812186241149902, | |
| "learning_rate": 3.736107252014449e-05, | |
| "loss": 4.8995, | |
"reason_loss": null,
| "step": 8650 | |
| }, | |
| { | |
| "decision_loss": 2.5873, | |
| "encoder_mlm_loss": 0.5366, | |
| "epoch": 0.45, | |
| "grad_norm": 10.932312965393066, | |
| "learning_rate": 3.718741317032509e-05, | |
| "loss": 4.9043, | |
"reason_loss": null,
| "step": 8700 | |
| }, | |
| { | |
| "decision_loss": 2.5046, | |
| "encoder_mlm_loss": 0.5356, | |
| "epoch": 0.45, | |
| "grad_norm": 12.284061431884766, | |
| "learning_rate": 3.70137538205057e-05, | |
| "loss": 4.79, | |
"reason_loss": null,
| "step": 8750 | |
| }, | |
| { | |
| "decision_loss": 2.4967, | |
| "encoder_mlm_loss": 0.5345, | |
| "epoch": 0.45, | |
| "grad_norm": 11.007719993591309, | |
| "learning_rate": 3.6840094470686305e-05, | |
| "loss": 4.7234, | |
"reason_loss": null,
| "step": 8800 | |
| }, | |
| { | |
| "decision_loss": 2.4628, | |
| "encoder_mlm_loss": 0.5338, | |
| "epoch": 0.46, | |
| "grad_norm": 10.874073028564453, | |
| "learning_rate": 3.6666435120866907e-05, | |
| "loss": 4.7151, | |
"reason_loss": null,
| "step": 8850 | |
| }, | |
| { | |
| "decision_loss": 2.4878, | |
| "encoder_mlm_loss": 0.5206, | |
| "epoch": 0.46, | |
| "grad_norm": 10.251517295837402, | |
| "learning_rate": 3.6492775771047514e-05, | |
| "loss": 4.7856, | |
"reason_loss": null,
| "step": 8900 | |
| }, | |
| { | |
| "decision_loss": 2.3386, | |
| "encoder_mlm_loss": 0.5355, | |
| "epoch": 0.46, | |
| "grad_norm": 12.211389541625977, | |
| "learning_rate": 3.631911642122812e-05, | |
| "loss": 4.6161, | |
"reason_loss": null,
| "step": 8950 | |
| }, | |
| { | |
| "decision_loss": 2.385, | |
| "encoder_mlm_loss": 0.5152, | |
| "epoch": 0.46, | |
| "grad_norm": 10.806251525878906, | |
| "learning_rate": 3.614545707140873e-05, | |
| "loss": 4.7703, | |
"reason_loss": null,
| "step": 9000 | |
| }, | |
| { | |
| "decision_loss": 2.4064, | |
| "encoder_mlm_loss": 0.5589, | |
| "epoch": 0.47, | |
| "grad_norm": 10.69713020324707, | |
| "learning_rate": 3.597179772158933e-05, | |
| "loss": 4.6229, | |
"reason_loss": null,
| "step": 9050 | |
| }, | |
| { | |
| "decision_loss": 2.5413, | |
| "encoder_mlm_loss": 0.5165, | |
| "epoch": 0.47, | |
| "grad_norm": 10.956335067749023, | |
| "learning_rate": 3.579813837176994e-05, | |
| "loss": 4.8993, | |
"reason_loss": null,
| "step": 9100 | |
| }, | |
| { | |
| "decision_loss": 2.4698, | |
| "encoder_mlm_loss": 0.5077, | |
| "epoch": 0.47, | |
| "grad_norm": 10.868124961853027, | |
| "learning_rate": 3.562447902195054e-05, | |
| "loss": 4.6673, | |
"reason_loss": null,
| "step": 9150 | |
| }, | |
| { | |
| "decision_loss": 2.4918, | |
| "encoder_mlm_loss": 0.5106, | |
| "epoch": 0.47, | |
| "grad_norm": 10.498440742492676, | |
| "learning_rate": 3.545081967213115e-05, | |
| "loss": 4.7566, | |
"reason_loss": null,
| "step": 9200 | |
| }, | |
| { | |
| "decision_loss": 2.4684, | |
| "encoder_mlm_loss": 0.5478, | |
| "epoch": 0.48, | |
| "grad_norm": 9.486736297607422, | |
| "learning_rate": 3.5277160322311756e-05, | |
| "loss": 4.7814, | |
"reason_loss": null,
| "step": 9250 | |
| }, | |
| { | |
| "decision_loss": 2.4031, | |
| "encoder_mlm_loss": 0.5311, | |
| "epoch": 0.48, | |
| "grad_norm": 10.80704116821289, | |
| "learning_rate": 3.510350097249236e-05, | |
| "loss": 4.6901, | |
"reason_loss": null,
| "step": 9300 | |
| }, | |
| { | |
| "decision_loss": 2.4332, | |
| "encoder_mlm_loss": 0.5448, | |
| "epoch": 0.48, | |
| "grad_norm": 13.817115783691406, | |
| "learning_rate": 3.4929841622672965e-05, | |
| "loss": 4.5975, | |
"reason_loss": null,
| "step": 9350 | |
| }, | |
| { | |
| "decision_loss": 2.5099, | |
| "encoder_mlm_loss": 0.5268, | |
| "epoch": 0.48, | |
| "grad_norm": 11.08284854888916, | |
| "learning_rate": 3.475618227285357e-05, | |
| "loss": 4.7289, | |
"reason_loss": null,
| "step": 9400 | |
| }, | |
| { | |
| "decision_loss": 2.3983, | |
| "encoder_mlm_loss": 0.5269, | |
| "epoch": 0.49, | |
| "grad_norm": 10.758065223693848, | |
| "learning_rate": 3.4582522923034174e-05, | |
| "loss": 4.5931, | |
"reason_loss": null,
| "step": 9450 | |
| }, | |
| { | |
| "decision_loss": 2.4453, | |
| "encoder_mlm_loss": 0.539, | |
| "epoch": 0.49, | |
| "grad_norm": 12.486990928649902, | |
| "learning_rate": 3.440886357321478e-05, | |
| "loss": 4.7225, | |
"reason_loss": null,
| "step": 9500 | |
| }, | |
| { | |
| "decision_loss": 2.4529, | |
| "encoder_mlm_loss": 0.5225, | |
| "epoch": 0.49, | |
| "grad_norm": 10.317234992980957, | |
| "learning_rate": 3.423520422339539e-05, | |
| "loss": 4.6519, | |
"reason_loss": null,
| "step": 9550 | |
| }, | |
| { | |
| "decision_loss": 2.5537, | |
| "encoder_mlm_loss": 0.5198, | |
| "epoch": 0.49, | |
| "grad_norm": 10.231575012207031, | |
| "learning_rate": 3.4061544873576e-05, | |
| "loss": 4.6996, | |
"reason_loss": null,
| "step": 9600 | |
| }, | |
| { | |
| "decision_loss": 2.4312, | |
| "encoder_mlm_loss": 0.5115, | |
| "epoch": 0.5, | |
| "grad_norm": 11.516695976257324, | |
| "learning_rate": 3.38878855237566e-05, | |
| "loss": 4.7901, | |
"reason_loss": null,
| "step": 9650 | |
| }, | |
| { | |
| "decision_loss": 2.3535, | |
| "encoder_mlm_loss": 0.5464, | |
| "epoch": 0.5, | |
| "grad_norm": 10.423235893249512, | |
| "learning_rate": 3.371422617393721e-05, | |
| "loss": 4.67, | |
"reason_loss": null,
| "step": 9700 | |
| }, | |
| { | |
| "decision_loss": 2.3405, | |
| "encoder_mlm_loss": 0.5209, | |
| "epoch": 0.5, | |
| "grad_norm": 10.108226776123047, | |
| "learning_rate": 3.3540566824117815e-05, | |
| "loss": 4.6348, | |
"reason_loss": null,
| "step": 9750 | |
| }, | |
| { | |
| "decision_loss": 2.4064, | |
| "encoder_mlm_loss": 0.5045, | |
| "epoch": 0.51, | |
| "grad_norm": 12.5283203125, | |
| "learning_rate": 3.336690747429842e-05, | |
| "loss": 4.6198, | |
"reason_loss": null,
| "step": 9800 | |
| }, | |
| { | |
| "decision_loss": 2.3878, | |
| "encoder_mlm_loss": 0.4968, | |
| "epoch": 0.51, | |
| "grad_norm": 10.635274887084961, | |
| "learning_rate": 3.3193248124479024e-05, | |
| "loss": 4.6017, | |
"reason_loss": null,
| "step": 9850 | |
| }, | |
| { | |
| "decision_loss": 2.4227, | |
| "encoder_mlm_loss": 0.5368, | |
| "epoch": 0.51, | |
| "grad_norm": 11.512763977050781, | |
| "learning_rate": 3.3019588774659625e-05, | |
| "loss": 4.5901, | |
"reason_loss": null,
| "step": 9900 | |
| }, | |
| { | |
| "decision_loss": 2.5284, | |
| "encoder_mlm_loss": 0.5259, | |
| "epoch": 0.51, | |
| "grad_norm": 9.88230037689209, | |
| "learning_rate": 3.284592942484023e-05, | |
| "loss": 4.5671, | |
"reason_loss": null,
| "step": 9950 | |
| }, | |
| { | |
| "decision_loss": 2.3601, | |
| "encoder_mlm_loss": 0.5258, | |
| "epoch": 0.52, | |
| "grad_norm": 11.324311256408691, | |
| "learning_rate": 3.267227007502084e-05, | |
| "loss": 4.5307, | |
"reason_loss": null,
| "step": 10000 | |
| }, | |
| { | |
| "decision_loss": 2.2871, | |
| "encoder_mlm_loss": 0.5113, | |
| "epoch": 0.52, | |
| "grad_norm": 11.137092590332031, | |
| "learning_rate": 3.249861072520144e-05, | |
| "loss": 4.6016, | |
"reason_loss": null,
| "step": 10050 | |
| }, | |
| { | |
| "decision_loss": 2.4347, | |
| "encoder_mlm_loss": 0.5186, | |
| "epoch": 0.52, | |
| "grad_norm": 12.242900848388672, | |
| "learning_rate": 3.232495137538205e-05, | |
| "loss": 4.697, | |
"reason_loss": null,
| "step": 10100 | |
| }, | |
| { | |
| "decision_loss": 2.3998, | |
| "encoder_mlm_loss": 0.5311, | |
| "epoch": 0.52, | |
| "grad_norm": 12.156739234924316, | |
| "learning_rate": 3.215129202556266e-05, | |
| "loss": 4.5704, | |
"reason_loss": null,
| "step": 10150 | |
| }, | |
| { | |
| "decision_loss": 2.5238, | |
| "encoder_mlm_loss": 0.5182, | |
| "epoch": 0.53, | |
| "grad_norm": 9.229127883911133, | |
| "learning_rate": 3.1977632675743266e-05, | |
| "loss": 4.5861, | |
"reason_loss": null,
| "step": 10200 | |
| }, | |
| { | |
| "decision_loss": 2.378, | |
| "encoder_mlm_loss": 0.5352, | |
| "epoch": 0.53, | |
| "grad_norm": 10.308478355407715, | |
| "learning_rate": 3.180397332592387e-05, | |
| "loss": 4.5989, | |
"reason_loss": null,
| "step": 10250 | |
| }, | |
| { | |
| "decision_loss": 2.4153, | |
| "encoder_mlm_loss": 0.4822, | |
| "epoch": 0.53, | |
| "grad_norm": 10.68014144897461, | |
| "learning_rate": 3.1630313976104475e-05, | |
| "loss": 4.5126, | |
"reason_loss": null,
| "step": 10300 | |
| }, | |
| { | |
| "decision_loss": 2.4028, | |
| "encoder_mlm_loss": 0.5217, | |
| "epoch": 0.53, | |
| "grad_norm": 10.47494125366211, | |
| "learning_rate": 3.145665462628508e-05, | |
| "loss": 4.5665, | |
"reason_loss": null,
| "step": 10350 | |
| }, | |
| { | |
| "decision_loss": 2.3192, | |
| "encoder_mlm_loss": 0.5054, | |
| "epoch": 0.54, | |
| "grad_norm": 12.187615394592285, | |
| "learning_rate": 3.128299527646569e-05, | |
| "loss": 4.4394, | |
"reason_loss": null,
| "step": 10400 | |
| }, | |
| { | |
| "decision_loss": 2.3345, | |
| "encoder_mlm_loss": 0.514, | |
| "epoch": 0.54, | |
| "grad_norm": 10.271819114685059, | |
| "learning_rate": 3.110933592664629e-05, | |
| "loss": 4.574, | |
"reason_loss": null,
| "step": 10450 | |
| }, | |
| { | |
| "decision_loss": 2.1869, | |
| "encoder_mlm_loss": 0.49, | |
| "epoch": 0.54, | |
| "grad_norm": 10.244134902954102, | |
| "learning_rate": 3.09356765768269e-05, | |
| "loss": 4.3925, | |
"reason_loss": null,
| "step": 10500 | |
| }, | |
| { | |
| "decision_loss": 2.5201, | |
| "encoder_mlm_loss": 0.4986, | |
| "epoch": 0.54, | |
| "grad_norm": 9.698922157287598, | |
| "learning_rate": 3.076201722700751e-05, | |
| "loss": 4.5421, | |
"reason_loss": null,
| "step": 10550 | |
| }, | |
| { | |
| "decision_loss": 2.3565, | |
| "encoder_mlm_loss": 0.4935, | |
| "epoch": 0.55, | |
| "grad_norm": 10.40183162689209, | |
| "learning_rate": 3.058835787718811e-05, | |
| "loss": 4.5004, | |
"reason_loss": null,
| "step": 10600 | |
| }, | |
| { | |
| "decision_loss": 2.2853, | |
| "encoder_mlm_loss": 0.4831, | |
| "epoch": 0.55, | |
| "grad_norm": 10.580133438110352, | |
| "learning_rate": 3.0414698527368713e-05, | |
| "loss": 4.5389, | |
"reason_loss": null,
| "step": 10650 | |
| }, | |
| { | |
| "decision_loss": 2.3804, | |
| "encoder_mlm_loss": 0.4932, | |
| "epoch": 0.55, | |
| "grad_norm": 10.064751625061035, | |
| "learning_rate": 3.0241039177549318e-05, | |
| "loss": 4.5354, | |
"reason_loss": null,
| "step": 10700 | |
| }, | |
| { | |
| "decision_loss": 2.4321, | |
| "encoder_mlm_loss": 0.4953, | |
| "epoch": 0.55, | |
| "grad_norm": 11.207749366760254, | |
| "learning_rate": 3.0067379827729926e-05, | |
| "loss": 4.5193, | |
"reason_loss": null,
| "step": 10750 | |
| }, | |
| { | |
| "decision_loss": 2.4432, | |
| "encoder_mlm_loss": 0.5103, | |
| "epoch": 0.56, | |
| "grad_norm": 9.562568664550781, | |
| "learning_rate": 2.989372047791053e-05, | |
| "loss": 4.4666, | |
"reason_loss": null,
| "step": 10800 | |
| }, | |
| { | |
| "decision_loss": 2.3398, | |
| "encoder_mlm_loss": 0.4874, | |
| "epoch": 0.56, | |
| "grad_norm": 10.37126350402832, | |
| "learning_rate": 2.9720061128091138e-05, | |
| "loss": 4.401, | |
| "reason_loss": 1.3739, | |
| "step": 10850 | |
| }, | |
| { | |
| "decision_loss": 2.2467, | |
| "encoder_mlm_loss": 0.4961, | |
| "epoch": 0.56, | |
| "grad_norm": 10.654061317443848, | |
| "learning_rate": 2.9546401778271743e-05, | |
| "loss": 4.5209, | |
"reason_loss": null,
| "step": 10900 | |
| }, | |
| { | |
| "decision_loss": 2.3339, | |
| "encoder_mlm_loss": 0.5158, | |
| "epoch": 0.56, | |
| "grad_norm": 9.291821479797363, | |
| "learning_rate": 2.937274242845235e-05, | |
| "loss": 4.5287, | |
"reason_loss": null,
| "step": 10950 | |
| }, | |
| { | |
| "decision_loss": 2.4592, | |
| "encoder_mlm_loss": 0.504, | |
| "epoch": 0.57, | |
| "grad_norm": 21.14087677001953, | |
| "learning_rate": 2.9199083078632955e-05, | |
| "loss": 4.4862, | |
"reason_loss": null,
| "step": 11000 | |
| }, | |
| { | |
| "decision_loss": 2.4528, | |
| "encoder_mlm_loss": 0.5224, | |
| "epoch": 0.57, | |
| "grad_norm": 14.19636058807373, | |
| "learning_rate": 2.902542372881356e-05, | |
| "loss": 4.5173, | |
"reason_loss": null,
| "step": 11050 | |
| }, | |
| { | |
| "decision_loss": 2.227, | |
| "encoder_mlm_loss": 0.5062, | |
| "epoch": 0.57, | |
| "grad_norm": 10.437397956848145, | |
| "learning_rate": 2.8851764378994167e-05, | |
| "loss": 4.416, | |
"reason_loss": null,
| "step": 11100 | |
| }, | |
| { | |
| "decision_loss": 2.3802, | |
| "encoder_mlm_loss": 0.4667, | |
| "epoch": 0.57, | |
| "grad_norm": 10.666945457458496, | |
| "learning_rate": 2.8678105029174772e-05, | |
| "loss": 4.5593, | |
"reason_loss": null,
| "step": 11150 | |
| }, | |
| { | |
| "decision_loss": 2.4548, | |
| "encoder_mlm_loss": 0.5221, | |
| "epoch": 0.58, | |
| "grad_norm": 10.959080696105957, | |
| "learning_rate": 2.850444567935538e-05, | |
| "loss": 4.363, | |
"reason_loss": null,
| "step": 11200 | |
| }, | |
| { | |
| "decision_loss": 2.3382, | |
| "encoder_mlm_loss": 0.4761, | |
| "epoch": 0.58, | |
| "grad_norm": 9.787275314331055, | |
| "learning_rate": 2.8330786329535984e-05, | |
| "loss": 4.5643, | |
"reason_loss": null,
| "step": 11250 | |
| }, | |
| { | |
| "decision_loss": 2.3277, | |
| "encoder_mlm_loss": 0.4821, | |
| "epoch": 0.58, | |
| "grad_norm": 9.431136131286621, | |
| "learning_rate": 2.8157126979716592e-05, | |
| "loss": 4.4429, | |
"reason_loss": null,
| "step": 11300 | |
| }, | |
| { | |
| "decision_loss": 2.3421, | |
| "encoder_mlm_loss": 0.4861, | |
| "epoch": 0.59, | |
| "grad_norm": 9.479231834411621, | |
| "learning_rate": 2.7983467629897197e-05, | |
| "loss": 4.4545, | |
"reason_loss": null,
| "step": 11350 | |
| }, | |
| { | |
| "decision_loss": 2.2753, | |
| "encoder_mlm_loss": 0.5042, | |
| "epoch": 0.59, | |
| "grad_norm": 11.047704696655273, | |
| "learning_rate": 2.7813281467074186e-05, | |
| "loss": 4.3748, | |
"reason_loss": null,
| "step": 11400 | |
| }, | |
| { | |
| "decision_loss": 2.2527, | |
| "encoder_mlm_loss": 0.5045, | |
| "epoch": 0.59, | |
| "grad_norm": 11.358301162719727, | |
| "learning_rate": 2.7639622117254794e-05, | |
| "loss": 4.5086, | |
"reason_loss": null,
| "step": 11450 | |
| }, | |
| { | |
| "decision_loss": 2.3986, | |
| "encoder_mlm_loss": 0.5054, | |
| "epoch": 0.59, | |
| "grad_norm": 12.649896621704102, | |
| "learning_rate": 2.7465962767435398e-05, | |
| "loss": 4.394, | |
"reason_loss": null,
| "step": 11500 | |
| }, | |
| { | |
| "decision_loss": 2.3123, | |
| "encoder_mlm_loss": 0.5175, | |
| "epoch": 0.6, | |
| "grad_norm": 17.416446685791016, | |
| "learning_rate": 2.7292303417616006e-05, | |
| "loss": 4.4608, | |
"reason_loss": null,
| "step": 11550 | |
| }, | |
| { | |
| "decision_loss": 2.3739, | |
| "encoder_mlm_loss": 0.4938, | |
| "epoch": 0.6, | |
| "grad_norm": 10.390993118286133, | |
| "learning_rate": 2.711864406779661e-05, | |
| "loss": 4.3458, | |
"reason_loss": null,
| "step": 11600 | |
| }, | |
| { | |
| "decision_loss": 2.3226, | |
| "encoder_mlm_loss": 0.4896, | |
| "epoch": 0.6, | |
| "grad_norm": 11.228699684143066, | |
| "learning_rate": 2.694498471797722e-05, | |
| "loss": 4.505, | |
"reason_loss": null,
| "step": 11650 | |
| }, | |
| { | |
| "decision_loss": 2.2692, | |
| "encoder_mlm_loss": 0.502, | |
| "epoch": 0.6, | |
| "grad_norm": 10.9651517868042, | |
| "learning_rate": 2.6771325368157823e-05, | |
| "loss": 4.3374, | |
"reason_loss": null,
| "step": 11700 | |
| }, | |
| { | |
| "decision_loss": 2.252, | |
| "encoder_mlm_loss": 0.4922, | |
| "epoch": 0.61, | |
| "grad_norm": 16.753633499145508, | |
| "learning_rate": 2.659766601833843e-05, | |
| "loss": 4.332, | |
"reason_loss": null,
| "step": 11750 | |
| }, | |
| { | |
| "decision_loss": 2.3924, | |
| "encoder_mlm_loss": 0.5105, | |
| "epoch": 0.61, | |
| "grad_norm": 11.315303802490234, | |
| "learning_rate": 2.6424006668519035e-05, | |
| "loss": 4.3855, | |
"reason_loss": null,
| "step": 11800 | |
| }, | |
| { | |
| "decision_loss": 2.2546, | |
| "encoder_mlm_loss": 0.5138, | |
| "epoch": 0.61, | |
| "grad_norm": 10.216447830200195, | |
| "learning_rate": 2.6250347318699643e-05, | |
| "loss": 4.4484, | |
"reason_loss": null,
| "step": 11850 | |
| }, | |
| { | |
| "decision_loss": 2.4346, | |
| "encoder_mlm_loss": 0.524, | |
| "epoch": 0.61, | |
| "grad_norm": 10.381539344787598, | |
| "learning_rate": 2.6076687968880248e-05, | |
| "loss": 4.4378, | |
"reason_loss": null,
| "step": 11900 | |
| }, | |
| { | |
| "decision_loss": 2.4519, | |
| "encoder_mlm_loss": 0.5054, | |
| "epoch": 0.62, | |
| "grad_norm": 15.222419738769531, | |
| "learning_rate": 2.5903028619060856e-05, | |
| "loss": 4.5089, | |
"reason_loss": null,
| "step": 11950 | |
| }, | |
| { | |
| "decision_loss": 2.3408, | |
| "encoder_mlm_loss": 0.5097, | |
| "epoch": 0.62, | |
| "grad_norm": 10.49439525604248, | |
| "learning_rate": 2.5729369269241453e-05, | |
| "loss": 4.3041, | |
"reason_loss": null,
| "step": 12000 | |
| }, | |
| { | |
| "decision_loss": 2.3097, | |
| "encoder_mlm_loss": 0.486, | |
| "epoch": 0.62, | |
| "grad_norm": 10.5269136428833, | |
| "learning_rate": 2.555570991942206e-05, | |
| "loss": 4.3974, | |
"reason_loss": null,
| "step": 12050 | |
| }, | |
| { | |
| "decision_loss": 2.3363, | |
| "encoder_mlm_loss": 0.517, | |
| "epoch": 0.62, | |
| "grad_norm": 10.9442138671875, | |
| "learning_rate": 2.5382050569602666e-05, | |
| "loss": 4.3362, | |
"reason_loss": null,
| "step": 12100 | |
| }, | |
| { | |
| "decision_loss": 2.2017, | |
| "encoder_mlm_loss": 0.5125, | |
| "epoch": 0.63, | |
| "grad_norm": 10.069433212280273, | |
| "learning_rate": 2.5208391219783274e-05, | |
| "loss": 4.4084, | |
"reason_loss": null,
| "step": 12150 | |
| }, | |
| { | |
| "decision_loss": 2.3537, | |
| "encoder_mlm_loss": 0.5052, | |
| "epoch": 0.63, | |
| "grad_norm": 10.322944641113281, | |
| "learning_rate": 2.5034731869963878e-05, | |
| "loss": 4.4116, | |
"reason_loss": null,
| "step": 12200 | |
| }, | |
| { | |
| "decision_loss": 2.4167, | |
| "encoder_mlm_loss": 0.5104, | |
| "epoch": 0.63, | |
| "grad_norm": 9.33908462524414, | |
| "learning_rate": 2.4861072520144486e-05, | |
| "loss": 4.4243, | |
"reason_loss": null,
| "step": 12250 | |
| }, | |
| { | |
| "decision_loss": 2.3596, | |
| "encoder_mlm_loss": 0.4972, | |
| "epoch": 0.63, | |
| "grad_norm": 21.888282775878906, | |
| "learning_rate": 2.468741317032509e-05, | |
| "loss": 4.3607, | |
"reason_loss": null,
| "step": 12300 | |
| }, | |
| { | |
| "decision_loss": 2.3476, | |
| "encoder_mlm_loss": 0.4891, | |
| "epoch": 0.64, | |
| "grad_norm": 10.44153118133545, | |
| "learning_rate": 2.45137538205057e-05, | |
| "loss": 4.4001, | |
"reason_loss": null,
| "step": 12350 | |
| }, | |
| { | |
| "decision_loss": 2.2677, | |
| "encoder_mlm_loss": 0.4686, | |
| "epoch": 0.64, | |
| "grad_norm": 10.878878593444824, | |
| "learning_rate": 2.4340094470686303e-05, | |
| "loss": 4.2944, | |
"reason_loss": null,
| "step": 12400 | |
| }, | |
| { | |
| "decision_loss": 2.3666, | |
| "encoder_mlm_loss": 0.495, | |
| "epoch": 0.64, | |
| "grad_norm": 10.5595121383667, | |
| "learning_rate": 2.4166435120866908e-05, | |
| "loss": 4.243, | |
"reason_loss": null,
| "step": 12450 | |
| }, | |
| { | |
| "decision_loss": 2.2113, | |
| "encoder_mlm_loss": 0.4877, | |
| "epoch": 0.64, | |
| "grad_norm": 10.240670204162598, | |
| "learning_rate": 2.3992775771047516e-05, | |
| "loss": 4.321, | |
"reason_loss": null,
| "step": 12500 | |
| }, | |
| { | |
| "decision_loss": 2.2091, | |
| "encoder_mlm_loss": 0.5136, | |
| "epoch": 0.65, | |
| "grad_norm": 10.264183044433594, | |
| "learning_rate": 2.381911642122812e-05, | |
| "loss": 4.5137, | |
"reason_loss": null,
| "step": 12550 | |
| }, | |
| { | |
| "decision_loss": 2.3554, | |
| "encoder_mlm_loss": 0.4939, | |
| "epoch": 0.65, | |
| "grad_norm": 9.599092483520508, | |
| "learning_rate": 2.3645457071408725e-05, | |
| "loss": 4.1951, | |
"reason_loss": null,
| "step": 12600 | |
| }, | |
| { | |
| "decision_loss": 2.3426, | |
| "encoder_mlm_loss": 0.5021, | |
| "epoch": 0.65, | |
| "grad_norm": 10.048467636108398, | |
| "learning_rate": 2.3471797721589332e-05, | |
| "loss": 4.3319, | |
| "reason_loss": NaN, | |
| "step": 12650 | |
| }, | |
| { | |
| "decision_loss": 2.4104, | |
| "encoder_mlm_loss": 0.5072, | |
| "epoch": 0.65, | |
| "grad_norm": 9.640985488891602, | |
| "learning_rate": 2.3298138371769937e-05, | |
| "loss": 4.4168, | |
| "reason_loss": NaN, | |
| "step": 12700 | |
| }, | |
| { | |
| "decision_loss": 2.3776, | |
| "encoder_mlm_loss": 0.4877, | |
| "epoch": 0.66, | |
| "grad_norm": 11.355530738830566, | |
| "learning_rate": 2.312447902195054e-05, | |
| "loss": 4.4094, | |
| "reason_loss": NaN, | |
| "step": 12750 | |
| }, | |
| { | |
| "decision_loss": 2.2999, | |
| "encoder_mlm_loss": 0.506, | |
| "epoch": 0.66, | |
| "grad_norm": 9.723401069641113, | |
| "learning_rate": 2.295081967213115e-05, | |
| "loss": 4.3139, | |
| "reason_loss": NaN, | |
| "step": 12800 | |
| }, | |
| { | |
| "decision_loss": 2.1432, | |
| "encoder_mlm_loss": 0.4898, | |
| "epoch": 0.66, | |
| "grad_norm": 9.709599494934082, | |
| "learning_rate": 2.2777160322311754e-05, | |
| "loss": 4.3763, | |
| "reason_loss": NaN, | |
| "step": 12850 | |
| }, | |
| { | |
| "decision_loss": 2.2003, | |
| "encoder_mlm_loss": 0.4835, | |
| "epoch": 0.67, | |
| "grad_norm": 10.115660667419434, | |
| "learning_rate": 2.2603500972492362e-05, | |
| "loss": 4.262, | |
| "reason_loss": NaN, | |
| "step": 12900 | |
| }, | |
| { | |
| "decision_loss": 2.2885, | |
| "encoder_mlm_loss": 0.5165, | |
| "epoch": 0.67, | |
| "grad_norm": 9.499692916870117, | |
| "learning_rate": 2.2429841622672963e-05, | |
| "loss": 4.3976, | |
| "reason_loss": NaN, | |
| "step": 12950 | |
| }, | |
| { | |
| "decision_loss": 2.3993, | |
| "encoder_mlm_loss": 0.49, | |
| "epoch": 0.67, | |
| "grad_norm": 10.184179306030273, | |
| "learning_rate": 2.225618227285357e-05, | |
| "loss": 4.3492, | |
| "reason_loss": NaN, | |
| "step": 13000 | |
| }, | |
| { | |
| "decision_loss": 2.3194, | |
| "encoder_mlm_loss": 0.5021, | |
| "epoch": 0.67, | |
| "grad_norm": 10.164281845092773, | |
| "learning_rate": 2.2082522923034175e-05, | |
| "loss": 4.2254, | |
| "reason_loss": NaN, | |
| "step": 13050 | |
| }, | |
| { | |
| "decision_loss": 2.202, | |
| "encoder_mlm_loss": 0.4979, | |
| "epoch": 0.68, | |
| "grad_norm": 10.495577812194824, | |
| "learning_rate": 2.1908863573214783e-05, | |
| "loss": 4.2881, | |
| "reason_loss": NaN, | |
| "step": 13100 | |
| }, | |
| { | |
| "decision_loss": 2.3032, | |
| "encoder_mlm_loss": 0.5051, | |
| "epoch": 0.68, | |
| "grad_norm": 11.644469261169434, | |
| "learning_rate": 2.1735204223395388e-05, | |
| "loss": 4.2034, | |
| "reason_loss": NaN, | |
| "step": 13150 | |
| }, | |
| { | |
| "decision_loss": 2.2369, | |
| "encoder_mlm_loss": 0.494, | |
| "epoch": 0.68, | |
| "grad_norm": 10.125290870666504, | |
| "learning_rate": 2.1561544873575996e-05, | |
| "loss": 4.2488, | |
| "reason_loss": NaN, | |
| "step": 13200 | |
| }, | |
| { | |
| "decision_loss": 2.2961, | |
| "encoder_mlm_loss": 0.4633, | |
| "epoch": 0.68, | |
| "grad_norm": 9.7282075881958, | |
| "learning_rate": 2.13878855237566e-05, | |
| "loss": 4.3103, | |
| "reason_loss": NaN, | |
| "step": 13250 | |
| }, | |
| { | |
| "decision_loss": 2.2269, | |
| "encoder_mlm_loss": 0.5088, | |
| "epoch": 0.69, | |
| "grad_norm": 10.021200180053711, | |
| "learning_rate": 2.1214226173937205e-05, | |
| "loss": 4.2561, | |
| "reason_loss": NaN, | |
| "step": 13300 | |
| }, | |
| { | |
| "decision_loss": 2.2716, | |
| "encoder_mlm_loss": 0.4972, | |
| "epoch": 0.69, | |
| "grad_norm": 9.810166358947754, | |
| "learning_rate": 2.104056682411781e-05, | |
| "loss": 4.2693, | |
| "reason_loss": NaN, | |
| "step": 13350 | |
| }, | |
| { | |
| "decision_loss": 2.4064, | |
| "encoder_mlm_loss": 0.5313, | |
| "epoch": 0.69, | |
| "grad_norm": 9.662689208984375, | |
| "learning_rate": 2.0866907474298417e-05, | |
| "loss": 4.2489, | |
| "reason_loss": NaN, | |
| "step": 13400 | |
| }, | |
| { | |
| "decision_loss": 2.2747, | |
| "encoder_mlm_loss": 0.4795, | |
| "epoch": 0.69, | |
| "grad_norm": 9.8899507522583, | |
| "learning_rate": 2.0693248124479022e-05, | |
| "loss": 4.3083, | |
| "reason_loss": NaN, | |
| "step": 13450 | |
| }, | |
| { | |
| "decision_loss": 2.1515, | |
| "encoder_mlm_loss": 0.4694, | |
| "epoch": 0.7, | |
| "grad_norm": 10.865926742553711, | |
| "learning_rate": 2.051958877465963e-05, | |
| "loss": 4.2286, | |
| "reason_loss": NaN, | |
| "step": 13500 | |
| }, | |
| { | |
| "decision_loss": 2.2069, | |
| "encoder_mlm_loss": 0.4688, | |
| "epoch": 0.7, | |
| "grad_norm": 11.5147123336792, | |
| "learning_rate": 2.0345929424840234e-05, | |
| "loss": 4.2459, | |
| "reason_loss": NaN, | |
| "step": 13550 | |
| }, | |
| { | |
| "decision_loss": 2.196, | |
| "encoder_mlm_loss": 0.4853, | |
| "epoch": 0.7, | |
| "grad_norm": 10.14575481414795, | |
| "learning_rate": 2.0172270075020842e-05, | |
| "loss": 4.2824, | |
| "reason_loss": NaN, | |
| "step": 13600 | |
| }, | |
| { | |
| "decision_loss": 2.0801, | |
| "encoder_mlm_loss": 0.4571, | |
| "epoch": 0.7, | |
| "grad_norm": 10.563467979431152, | |
| "learning_rate": 1.9998610725201447e-05, | |
| "loss": 4.2221, | |
| "reason_loss": NaN, | |
| "step": 13650 | |
| }, | |
| { | |
| "decision_loss": 2.2988, | |
| "encoder_mlm_loss": 0.5028, | |
| "epoch": 0.71, | |
| "grad_norm": 11.101655006408691, | |
| "learning_rate": 1.982495137538205e-05, | |
| "loss": 4.3191, | |
| "reason_loss": NaN, | |
| "step": 13700 | |
| }, | |
| { | |
| "decision_loss": 2.1795, | |
| "encoder_mlm_loss": 0.4518, | |
| "epoch": 0.71, | |
| "grad_norm": 9.590519905090332, | |
| "learning_rate": 1.9651292025562656e-05, | |
| "loss": 4.2817, | |
| "reason_loss": NaN, | |
| "step": 13750 | |
| }, | |
| { | |
| "decision_loss": 2.2984, | |
| "encoder_mlm_loss": 0.4861, | |
| "epoch": 0.71, | |
| "grad_norm": 9.86621379852295, | |
| "learning_rate": 1.9477632675743263e-05, | |
| "loss": 4.3165, | |
| "reason_loss": NaN, | |
| "step": 13800 | |
| }, | |
| { | |
| "decision_loss": 2.36, | |
| "encoder_mlm_loss": 0.4777, | |
| "epoch": 0.71, | |
| "grad_norm": 10.772181510925293, | |
| "learning_rate": 1.9303973325923868e-05, | |
| "loss": 4.2868, | |
| "reason_loss": NaN, | |
| "step": 13850 | |
| }, | |
| { | |
| "decision_loss": 2.3243, | |
| "encoder_mlm_loss": 0.4844, | |
| "epoch": 0.72, | |
| "grad_norm": 11.583396911621094, | |
| "learning_rate": 1.9130313976104476e-05, | |
| "loss": 4.2421, | |
| "reason_loss": NaN, | |
| "step": 13900 | |
| }, | |
| { | |
| "decision_loss": 2.1823, | |
| "encoder_mlm_loss": 0.4749, | |
| "epoch": 0.72, | |
| "grad_norm": 10.409939765930176, | |
| "learning_rate": 1.895665462628508e-05, | |
| "loss": 4.2622, | |
| "reason_loss": NaN, | |
| "step": 13950 | |
| }, | |
| { | |
| "decision_loss": 2.1868, | |
| "encoder_mlm_loss": 0.5075, | |
| "epoch": 0.72, | |
| "grad_norm": 9.551398277282715, | |
| "learning_rate": 1.878299527646569e-05, | |
| "loss": 4.2983, | |
| "reason_loss": NaN, | |
| "step": 14000 | |
| }, | |
| { | |
| "decision_loss": 2.1898, | |
| "encoder_mlm_loss": 0.4868, | |
| "epoch": 0.72, | |
| "grad_norm": 10.858856201171875, | |
| "learning_rate": 1.860933592664629e-05, | |
| "loss": 4.2047, | |
| "reason_loss": NaN, | |
| "step": 14050 | |
| }, | |
| { | |
| "decision_loss": 2.3408, | |
| "encoder_mlm_loss": 0.4726, | |
| "epoch": 0.73, | |
| "grad_norm": 9.92735481262207, | |
| "learning_rate": 1.8435676576826897e-05, | |
| "loss": 4.3045, | |
| "reason_loss": NaN, | |
| "step": 14100 | |
| }, | |
| { | |
| "decision_loss": 2.1705, | |
| "encoder_mlm_loss": 0.4829, | |
| "epoch": 0.73, | |
| "grad_norm": 9.894718170166016, | |
| "learning_rate": 1.8262017227007502e-05, | |
| "loss": 4.1323, | |
| "reason_loss": NaN, | |
| "step": 14150 | |
| }, | |
| { | |
| "decision_loss": 2.2165, | |
| "encoder_mlm_loss": 0.4986, | |
| "epoch": 0.73, | |
| "grad_norm": 9.643003463745117, | |
| "learning_rate": 1.808835787718811e-05, | |
| "loss": 4.1831, | |
| "reason_loss": NaN, | |
| "step": 14200 | |
| }, | |
| { | |
| "decision_loss": 2.219, | |
| "encoder_mlm_loss": 0.5092, | |
| "epoch": 0.73, | |
| "grad_norm": 10.380711555480957, | |
| "learning_rate": 1.7914698527368714e-05, | |
| "loss": 4.2177, | |
| "reason_loss": NaN, | |
| "step": 14250 | |
| }, | |
| { | |
| "decision_loss": 2.2572, | |
| "encoder_mlm_loss": 0.4603, | |
| "epoch": 0.74, | |
| "grad_norm": 10.050085067749023, | |
| "learning_rate": 1.7741039177549322e-05, | |
| "loss": 4.2955, | |
| "reason_loss": NaN, | |
| "step": 14300 | |
| }, | |
| { | |
| "decision_loss": 2.3477, | |
| "encoder_mlm_loss": 0.4771, | |
| "epoch": 0.74, | |
| "grad_norm": 11.214825630187988, | |
| "learning_rate": 1.7567379827729927e-05, | |
| "loss": 4.1521, | |
| "reason_loss": NaN, | |
| "step": 14350 | |
| }, | |
| { | |
| "decision_loss": 2.1234, | |
| "encoder_mlm_loss": 0.4793, | |
| "epoch": 0.74, | |
| "grad_norm": 10.145916938781738, | |
| "learning_rate": 1.739372047791053e-05, | |
| "loss": 4.1726, | |
| "reason_loss": NaN, | |
| "step": 14400 | |
| }, | |
| { | |
| "decision_loss": 2.2479, | |
| "encoder_mlm_loss": 0.4941, | |
| "epoch": 0.74, | |
| "grad_norm": 11.086326599121094, | |
| "learning_rate": 1.7220061128091136e-05, | |
| "loss": 4.2883, | |
| "reason_loss": NaN, | |
| "step": 14450 | |
| }, | |
| { | |
| "decision_loss": 2.0809, | |
| "encoder_mlm_loss": 0.4895, | |
| "epoch": 0.75, | |
| "grad_norm": 9.647579193115234, | |
| "learning_rate": 1.7046401778271744e-05, | |
| "loss": 4.0984, | |
| "reason_loss": NaN, | |
| "step": 14500 | |
| }, | |
| { | |
| "decision_loss": 2.2474, | |
| "encoder_mlm_loss": 0.4863, | |
| "epoch": 0.75, | |
| "grad_norm": 9.199592590332031, | |
| "learning_rate": 1.6872742428452348e-05, | |
| "loss": 4.145, | |
| "reason_loss": NaN, | |
| "step": 14550 | |
| }, | |
| { | |
| "decision_loss": 2.2259, | |
| "encoder_mlm_loss": 0.467, | |
| "epoch": 0.75, | |
| "grad_norm": 10.98985767364502, | |
| "learning_rate": 1.6699083078632953e-05, | |
| "loss": 4.2657, | |
| "reason_loss": NaN, | |
| "step": 14600 | |
| }, | |
| { | |
| "decision_loss": 2.3778, | |
| "encoder_mlm_loss": 0.4754, | |
| "epoch": 0.76, | |
| "grad_norm": 12.066682815551758, | |
| "learning_rate": 1.652542372881356e-05, | |
| "loss": 4.2173, | |
| "reason_loss": NaN, | |
| "step": 14650 | |
| }, | |
| { | |
| "decision_loss": 2.292, | |
| "encoder_mlm_loss": 0.4871, | |
| "epoch": 0.76, | |
| "grad_norm": 11.109633445739746, | |
| "learning_rate": 1.6351764378994165e-05, | |
| "loss": 4.2824, | |
| "reason_loss": NaN, | |
| "step": 14700 | |
| }, | |
| { | |
| "decision_loss": 2.3616, | |
| "encoder_mlm_loss": 0.4859, | |
| "epoch": 0.76, | |
| "grad_norm": 10.090713500976562, | |
| "learning_rate": 1.6178105029174773e-05, | |
| "loss": 4.2862, | |
| "reason_loss": NaN, | |
| "step": 14750 | |
| }, | |
| { | |
| "decision_loss": 2.23, | |
| "encoder_mlm_loss": 0.488, | |
| "epoch": 0.76, | |
| "grad_norm": 12.84520149230957, | |
| "learning_rate": 1.6004445679355378e-05, | |
| "loss": 4.3239, | |
| "reason_loss": NaN, | |
| "step": 14800 | |
| }, | |
| { | |
| "decision_loss": 2.199, | |
| "encoder_mlm_loss": 0.4561, | |
| "epoch": 0.77, | |
| "grad_norm": 13.205440521240234, | |
| "learning_rate": 1.5830786329535982e-05, | |
| "loss": 4.2294, | |
| "reason_loss": NaN, | |
| "step": 14850 | |
| }, | |
| { | |
| "decision_loss": 2.2446, | |
| "encoder_mlm_loss": 0.4848, | |
| "epoch": 0.77, | |
| "grad_norm": 10.617562294006348, | |
| "learning_rate": 1.5657126979716587e-05, | |
| "loss": 4.1444, | |
| "reason_loss": NaN, | |
| "step": 14900 | |
| }, | |
| { | |
| "decision_loss": 2.2661, | |
| "encoder_mlm_loss": 0.4872, | |
| "epoch": 0.77, | |
| "grad_norm": 10.414337158203125, | |
| "learning_rate": 1.5483467629897195e-05, | |
| "loss": 4.0975, | |
| "reason_loss": NaN, | |
| "step": 14950 | |
| }, | |
| { | |
| "decision_loss": 2.1838, | |
| "encoder_mlm_loss": 0.4605, | |
| "epoch": 0.77, | |
| "grad_norm": 9.546090126037598, | |
| "learning_rate": 1.53098082800778e-05, | |
| "loss": 4.2057, | |
| "reason_loss": NaN, | |
| "step": 15000 | |
| }, | |
| { | |
| "decision_loss": 2.284, | |
| "encoder_mlm_loss": 0.4937, | |
| "epoch": 0.78, | |
| "grad_norm": 10.734001159667969, | |
| "learning_rate": 1.5136148930258407e-05, | |
| "loss": 4.204, | |
| "reason_loss": NaN, | |
| "step": 15050 | |
| }, | |
| { | |
| "decision_loss": 2.2667, | |
| "encoder_mlm_loss": 0.4813, | |
| "epoch": 0.78, | |
| "grad_norm": 11.810020446777344, | |
| "learning_rate": 1.4962489580439013e-05, | |
| "loss": 4.3455, | |
| "reason_loss": NaN, | |
| "step": 15100 | |
| }, | |
| { | |
| "decision_loss": 2.272, | |
| "encoder_mlm_loss": 0.4816, | |
| "epoch": 0.78, | |
| "grad_norm": 9.174554824829102, | |
| "learning_rate": 1.478883023061962e-05, | |
| "loss": 4.3054, | |
| "reason_loss": NaN, | |
| "step": 15150 | |
| }, | |
| { | |
| "decision_loss": 2.2562, | |
| "encoder_mlm_loss": 0.4649, | |
| "epoch": 0.78, | |
| "grad_norm": 10.114463806152344, | |
| "learning_rate": 1.4615170880800222e-05, | |
| "loss": 4.1423, | |
| "reason_loss": NaN, | |
| "step": 15200 | |
| }, | |
| { | |
| "decision_loss": 2.2579, | |
| "encoder_mlm_loss": 0.4779, | |
| "epoch": 0.79, | |
| "grad_norm": 9.515824317932129, | |
| "learning_rate": 1.4441511530980828e-05, | |
| "loss": 4.2965, | |
| "reason_loss": NaN, | |
| "step": 15250 | |
| }, | |
| { | |
| "decision_loss": 2.2857, | |
| "encoder_mlm_loss": 0.456, | |
| "epoch": 0.79, | |
| "grad_norm": 10.084831237792969, | |
| "learning_rate": 1.4267852181161435e-05, | |
| "loss": 4.1492, | |
| "reason_loss": NaN, | |
| "step": 15300 | |
| }, | |
| { | |
| "decision_loss": 2.1108, | |
| "encoder_mlm_loss": 0.4592, | |
| "epoch": 0.79, | |
| "grad_norm": 10.310980796813965, | |
| "learning_rate": 1.409419283134204e-05, | |
| "loss": 4.255, | |
| "reason_loss": NaN, | |
| "step": 15350 | |
| }, | |
| { | |
| "decision_loss": 2.3223, | |
| "encoder_mlm_loss": 0.4853, | |
| "epoch": 0.79, | |
| "grad_norm": 10.398266792297363, | |
| "learning_rate": 1.3920533481522647e-05, | |
| "loss": 4.2794, | |
| "reason_loss": NaN, | |
| "step": 15400 | |
| }, | |
| { | |
| "decision_loss": 2.1705, | |
| "encoder_mlm_loss": 0.484, | |
| "epoch": 0.8, | |
| "grad_norm": 9.45158576965332, | |
| "learning_rate": 1.3746874131703252e-05, | |
| "loss": 4.1371, | |
| "reason_loss": NaN, | |
| "step": 15450 | |
| }, | |
| { | |
| "decision_loss": 2.2301, | |
| "encoder_mlm_loss": 0.4763, | |
| "epoch": 0.8, | |
| "grad_norm": 10.345832824707031, | |
| "learning_rate": 1.3573214781883858e-05, | |
| "loss": 4.1141, | |
| "reason_loss": NaN, | |
| "step": 15500 | |
| }, | |
| { | |
| "decision_loss": 2.1053, | |
| "encoder_mlm_loss": 0.4808, | |
| "epoch": 0.8, | |
| "grad_norm": 10.891772270202637, | |
| "learning_rate": 1.3399555432064462e-05, | |
| "loss": 4.0825, | |
| "reason_loss": NaN, | |
| "step": 15550 | |
| }, | |
| { | |
| "decision_loss": 2.1519, | |
| "encoder_mlm_loss": 0.4767, | |
| "epoch": 0.8, | |
| "grad_norm": 9.94306755065918, | |
| "learning_rate": 1.3225896082245068e-05, | |
| "loss": 4.0361, | |
| "reason_loss": NaN, | |
| "step": 15600 | |
| }, | |
| { | |
| "decision_loss": 2.4022, | |
| "encoder_mlm_loss": 0.4708, | |
| "epoch": 0.81, | |
| "grad_norm": 10.663613319396973, | |
| "learning_rate": 1.3052236732425675e-05, | |
| "loss": 4.2401, | |
| "reason_loss": NaN, | |
| "step": 15650 | |
| }, | |
| { | |
| "decision_loss": 2.1628, | |
| "encoder_mlm_loss": 0.4709, | |
| "epoch": 0.81, | |
| "grad_norm": 10.070218086242676, | |
| "learning_rate": 1.287857738260628e-05, | |
| "loss": 4.098, | |
| "reason_loss": NaN, | |
| "step": 15700 | |
| }, | |
| { | |
| "decision_loss": 2.1725, | |
| "encoder_mlm_loss": 0.4752, | |
| "epoch": 0.81, | |
| "grad_norm": 10.272323608398438, | |
| "learning_rate": 1.2704918032786885e-05, | |
| "loss": 4.1846, | |
| "reason_loss": NaN, | |
| "step": 15750 | |
| }, | |
| { | |
| "decision_loss": 2.1207, | |
| "encoder_mlm_loss": 0.4822, | |
| "epoch": 0.81, | |
| "grad_norm": 10.824825286865234, | |
| "learning_rate": 1.2531258682967492e-05, | |
| "loss": 4.0792, | |
| "reason_loss": NaN, | |
| "step": 15800 | |
| }, | |
| { | |
| "decision_loss": 2.1834, | |
| "encoder_mlm_loss": 0.4447, | |
| "epoch": 0.82, | |
| "grad_norm": 10.454733848571777, | |
| "learning_rate": 1.2357599333148096e-05, | |
| "loss": 4.2325, | |
| "reason_loss": NaN, | |
| "step": 15850 | |
| }, | |
| { | |
| "decision_loss": 2.164, | |
| "encoder_mlm_loss": 0.4736, | |
| "epoch": 0.82, | |
| "grad_norm": 9.149787902832031, | |
| "learning_rate": 1.2183939983328702e-05, | |
| "loss": 4.1617, | |
| "reason_loss": NaN, | |
| "step": 15900 | |
| }, | |
| { | |
| "decision_loss": 2.2075, | |
| "encoder_mlm_loss": 0.4746, | |
| "epoch": 0.82, | |
| "grad_norm": 10.008898735046387, | |
| "learning_rate": 1.2010280633509309e-05, | |
| "loss": 4.0822, | |
| "reason_loss": NaN, | |
| "step": 15950 | |
| }, | |
| { | |
| "decision_loss": 2.1779, | |
| "encoder_mlm_loss": 0.4737, | |
| "epoch": 0.82, | |
| "grad_norm": 9.24558162689209, | |
| "learning_rate": 1.1836621283689913e-05, | |
| "loss": 4.104, | |
| "reason_loss": NaN, | |
| "step": 16000 | |
| }, | |
| { | |
| "decision_loss": 2.1552, | |
| "encoder_mlm_loss": 0.4443, | |
| "epoch": 0.83, | |
| "grad_norm": 10.271194458007812, | |
| "learning_rate": 1.166296193387052e-05, | |
| "loss": 4.1249, | |
| "reason_loss": NaN, | |
| "step": 16050 | |
| }, | |
| { | |
| "decision_loss": 2.2418, | |
| "encoder_mlm_loss": 0.476, | |
| "epoch": 0.83, | |
| "grad_norm": 11.877102851867676, | |
| "learning_rate": 1.1489302584051126e-05, | |
| "loss": 4.1547, | |
| "reason_loss": NaN, | |
| "step": 16100 | |
| }, | |
| { | |
| "decision_loss": 2.0779, | |
| "encoder_mlm_loss": 0.4786, | |
| "epoch": 0.83, | |
| "grad_norm": 11.429306983947754, | |
| "learning_rate": 1.1315643234231732e-05, | |
| "loss": 4.0876, | |
| "reason_loss": NaN, | |
| "step": 16150 | |
| }, | |
| { | |
| "decision_loss": 2.178, | |
| "encoder_mlm_loss": 0.4587, | |
| "epoch": 0.84, | |
| "grad_norm": 9.763303756713867, | |
| "learning_rate": 1.1141983884412336e-05, | |
| "loss": 4.1172, | |
| "reason_loss": NaN, | |
| "step": 16200 | |
| }, | |
| { | |
| "decision_loss": 2.1244, | |
| "encoder_mlm_loss": 0.4733, | |
| "epoch": 0.84, | |
| "grad_norm": 9.711063385009766, | |
| "learning_rate": 1.0968324534592942e-05, | |
| "loss": 4.2619, | |
| "reason_loss": NaN, | |
| "step": 16250 | |
| }, | |
| { | |
| "decision_loss": 2.2836, | |
| "encoder_mlm_loss": 0.4826, | |
| "epoch": 0.84, | |
| "grad_norm": 9.272294044494629, | |
| "learning_rate": 1.0794665184773549e-05, | |
| "loss": 4.1551, | |
| "reason_loss": NaN, | |
| "step": 16300 | |
| }, | |
| { | |
| "decision_loss": 2.1646, | |
| "encoder_mlm_loss": 0.4682, | |
| "epoch": 0.84, | |
| "grad_norm": 13.107208251953125, | |
| "learning_rate": 1.0621005834954155e-05, | |
| "loss": 4.1731, | |
| "reason_loss": NaN, | |
| "step": 16350 | |
| }, | |
| { | |
| "decision_loss": 2.3081, | |
| "encoder_mlm_loss": 0.4661, | |
| "epoch": 0.85, | |
| "grad_norm": 9.991193771362305, | |
| "learning_rate": 1.044734648513476e-05, | |
| "loss": 4.225, | |
| "reason_loss": NaN, | |
| "step": 16400 | |
| }, | |
| { | |
| "decision_loss": 2.3434, | |
| "encoder_mlm_loss": 0.4999, | |
| "epoch": 0.85, | |
| "grad_norm": 10.09374713897705, | |
| "learning_rate": 1.0273687135315366e-05, | |
| "loss": 4.1445, | |
| "reason_loss": NaN, | |
| "step": 16450 | |
| }, | |
| { | |
| "decision_loss": 2.1455, | |
| "encoder_mlm_loss": 0.4582, | |
| "epoch": 0.85, | |
| "grad_norm": 11.907501220703125, | |
| "learning_rate": 1.0100027785495972e-05, | |
| "loss": 4.0988, | |
| "reason_loss": NaN, | |
| "step": 16500 | |
| }, | |
| { | |
| "decision_loss": 2.1118, | |
| "encoder_mlm_loss": 0.456, | |
| "epoch": 0.85, | |
| "grad_norm": 10.339010238647461, | |
| "learning_rate": 9.926368435676578e-06, | |
| "loss": 4.0563, | |
| "reason_loss": NaN, | |
| "step": 16550 | |
| }, | |
| { | |
| "decision_loss": 2.2369, | |
| "encoder_mlm_loss": 0.4785, | |
| "epoch": 0.86, | |
| "grad_norm": 11.294181823730469, | |
| "learning_rate": 9.752709085857183e-06, | |
| "loss": 4.0886, | |
| "reason_loss": NaN, | |
| "step": 16600 | |
| }, | |
| { | |
| "decision_loss": 2.1362, | |
| "encoder_mlm_loss": 0.4651, | |
| "epoch": 0.86, | |
| "grad_norm": 9.752805709838867, | |
| "learning_rate": 9.579049736037789e-06, | |
| "loss": 4.2024, | |
| "reason_loss": NaN, | |
| "step": 16650 | |
| }, | |
| { | |
| "decision_loss": 2.1695, | |
| "encoder_mlm_loss": 0.4615, | |
| "epoch": 0.86, | |
| "grad_norm": 9.915719985961914, | |
| "learning_rate": 9.405390386218395e-06, | |
| "loss": 4.1516, | |
| "reason_loss": NaN, | |
| "step": 16700 | |
| }, | |
| { | |
| "decision_loss": 2.1229, | |
| "encoder_mlm_loss": 0.4496, | |
| "epoch": 0.86, | |
| "grad_norm": 10.295191764831543, | |
| "learning_rate": 9.231731036399e-06, | |
| "loss": 4.0744, | |
| "reason_loss": NaN, | |
| "step": 16750 | |
| }, | |
| { | |
| "decision_loss": 2.0994, | |
| "encoder_mlm_loss": 0.4744, | |
| "epoch": 0.87, | |
| "grad_norm": 9.025927543640137, | |
| "learning_rate": 9.058071686579606e-06, | |
| "loss": 4.1056, | |
| "reason_loss": NaN, | |
| "step": 16800 | |
| }, | |
| { | |
| "decision_loss": 2.3615, | |
| "encoder_mlm_loss": 0.4828, | |
| "epoch": 0.87, | |
| "grad_norm": 10.910390853881836, | |
| "learning_rate": 8.884412336760212e-06, | |
| "loss": 4.098, | |
| "reason_loss": NaN, | |
| "step": 16850 | |
| }, | |
| { | |
| "decision_loss": 2.2499, | |
| "encoder_mlm_loss": 0.4482, | |
| "epoch": 0.87, | |
| "grad_norm": 10.158785820007324, | |
| "learning_rate": 8.710752986940818e-06, | |
| "loss": 4.0801, | |
| "reason_loss": NaN, | |
| "step": 16900 | |
| }, | |
| { | |
| "decision_loss": 2.259, | |
| "encoder_mlm_loss": 0.4799, | |
| "epoch": 0.87, | |
| "grad_norm": 10.046515464782715, | |
| "learning_rate": 8.537093637121423e-06, | |
| "loss": 4.0939, | |
| "reason_loss": NaN, | |
| "step": 16950 | |
| }, | |
| { | |
| "decision_loss": 2.311, | |
| "encoder_mlm_loss": 0.4741, | |
| "epoch": 0.88, | |
| "grad_norm": 11.217142105102539, | |
| "learning_rate": 8.363434287302029e-06, | |
| "loss": 4.211, | |
| "reason_loss": NaN, | |
| "step": 17000 | |
| }, | |
| { | |
| "decision_loss": 2.1602, | |
| "encoder_mlm_loss": 0.483, | |
| "epoch": 0.88, | |
| "grad_norm": 10.031744003295898, | |
| "learning_rate": 8.189774937482635e-06, | |
| "loss": 4.2076, | |
| "reason_loss": NaN, | |
| "step": 17050 | |
| }, | |
| { | |
| "decision_loss": 2.1981, | |
| "encoder_mlm_loss": 0.4738, | |
| "epoch": 0.88, | |
| "grad_norm": 9.74100399017334, | |
| "learning_rate": 8.016115587663241e-06, | |
| "loss": 4.1243, | |
| "reason_loss": NaN, | |
| "step": 17100 | |
| }, | |
| { | |
| "decision_loss": 2.0827, | |
| "encoder_mlm_loss": 0.4343, | |
| "epoch": 0.88, | |
| "grad_norm": 10.940787315368652, | |
| "learning_rate": 7.842456237843846e-06, | |
| "loss": 4.1619, | |
| "reason_loss": NaN, | |
| "step": 17150 | |
| }, | |
| { | |
| "decision_loss": 2.1925, | |
| "encoder_mlm_loss": 0.4737, | |
| "epoch": 0.89, | |
| "grad_norm": 9.71260929107666, | |
| "learning_rate": 7.668796888024452e-06, | |
| "loss": 4.1807, | |
| "reason_loss": NaN, | |
| "step": 17200 | |
| }, | |
| { | |
| "decision_loss": 2.013, | |
| "encoder_mlm_loss": 0.4542, | |
| "epoch": 0.89, | |
| "grad_norm": 14.651382446289062, | |
| "learning_rate": 7.495137538205057e-06, | |
| "loss": 4.0187, | |
| "reason_loss": NaN, | |
| "step": 17250 | |
| }, | |
| { | |
| "decision_loss": 2.1625, | |
| "encoder_mlm_loss": 0.4688, | |
| "epoch": 0.89, | |
| "grad_norm": 9.133673667907715, | |
| "learning_rate": 7.321478188385664e-06, | |
| "loss": 3.9995, | |
| "reason_loss": NaN, | |
| "step": 17300 | |
| }, | |
| { | |
| "decision_loss": 2.2081, | |
| "encoder_mlm_loss": 0.4961, | |
| "epoch": 0.89, | |
| "grad_norm": 10.863176345825195, | |
| "learning_rate": 7.147818838566268e-06, | |
| "loss": 4.127, | |
| "reason_loss": NaN, | |
| "step": 17350 | |
| }, | |
| { | |
| "decision_loss": 2.2125, | |
| "encoder_mlm_loss": 0.4484, | |
| "epoch": 0.9, | |
| "grad_norm": 9.56596851348877, | |
| "learning_rate": 6.974159488746874e-06, | |
| "loss": 4.1691, | |
| "reason_loss": NaN, | |
| "step": 17400 | |
| }, | |
| { | |
| "decision_loss": 1.9912, | |
| "encoder_mlm_loss": 0.4631, | |
| "epoch": 0.9, | |
| "grad_norm": 9.751823425292969, | |
| "learning_rate": 6.8005001389274806e-06, | |
| "loss": 4.0431, | |
| "reason_loss": NaN, | |
| "step": 17450 | |
| }, | |
| { | |
| "decision_loss": 2.1434, | |
| "encoder_mlm_loss": 0.4535, | |
| "epoch": 0.9, | |
| "grad_norm": 11.732806205749512, | |
| "learning_rate": 6.626840789108085e-06, | |
| "loss": 4.1323, | |
| "reason_loss": NaN, | |
| "step": 17500 | |
| }, | |
| { | |
| "decision_loss": 2.1993, | |
| "encoder_mlm_loss": 0.447, | |
| "epoch": 0.9, | |
| "grad_norm": 9.667863845825195, | |
| "learning_rate": 6.453181439288691e-06, | |
| "loss": 4.1652, | |
| "reason_loss": NaN, | |
| "step": 17550 | |
| }, | |
| { | |
| "decision_loss": 2.1847, | |
| "encoder_mlm_loss": 0.4521, | |
| "epoch": 0.91, | |
| "grad_norm": 10.698508262634277, | |
| "learning_rate": 6.2795220894692975e-06, | |
| "loss": 3.9859, | |
| "reason_loss": NaN, | |
| "step": 17600 | |
| }, | |
| { | |
| "decision_loss": 2.2392, | |
| "encoder_mlm_loss": 0.4419, | |
| "epoch": 0.91, | |
| "grad_norm": 10.42066764831543, | |
| "learning_rate": 6.105862739649903e-06, | |
| "loss": 4.1219, | |
| "reason_loss": 1.1305, | |
| "step": 17650 | |
| }, | |
| { | |
| "decision_loss": 2.1109, | |
| "encoder_mlm_loss": 0.4651, | |
| "epoch": 0.91, | |
| "grad_norm": 10.604533195495605, | |
| "learning_rate": 5.932203389830509e-06, | |
| "loss": 4.0701, | |
| "reason_loss": NaN, | |
| "step": 17700 | |
| }, | |
| { | |
| "decision_loss": 1.9828, | |
| "encoder_mlm_loss": 0.4467, | |
| "epoch": 0.92, | |
| "grad_norm": 11.114700317382812, | |
| "learning_rate": 5.7585440400111144e-06, | |
| "loss": 4.0831, | |
| "reason_loss": NaN, | |
| "step": 17750 | |
| }, | |
| { | |
| "decision_loss": 2.1991, | |
| "encoder_mlm_loss": 0.4913, | |
| "epoch": 0.92, | |
| "grad_norm": 11.457265853881836, | |
| "learning_rate": 5.584884690191721e-06, | |
| "loss": 4.2505, | |
| "reason_loss": NaN, | |
| "step": 17800 | |
| }, | |
| { | |
| "decision_loss": 2.1172, | |
| "encoder_mlm_loss": 0.4694, | |
| "epoch": 0.92, | |
| "grad_norm": 10.09403133392334, | |
| "learning_rate": 5.411225340372326e-06, | |
| "loss": 4.0314, | |
| "reason_loss": NaN, | |
| "step": 17850 | |
| }, | |
| { | |
| "decision_loss": 2.1877, | |
| "encoder_mlm_loss": 0.4449, | |
| "epoch": 0.92, | |
| "grad_norm": 10.144293785095215, | |
| "learning_rate": 5.237565990552932e-06, | |
| "loss": 4.2075, | |
| "reason_loss": NaN, | |
| "step": 17900 | |
| }, | |
| { | |
| "decision_loss": 2.2545, | |
| "encoder_mlm_loss": 0.4551, | |
| "epoch": 0.93, | |
| "grad_norm": 10.994632720947266, | |
| "learning_rate": 5.063906640733538e-06, | |
| "loss": 4.2875, | |
| "reason_loss": NaN, | |
| "step": 17950 | |
| }, | |
| { | |
| "decision_loss": 2.181, | |
| "encoder_mlm_loss": 0.4575, | |
| "epoch": 0.93, | |
| "grad_norm": 13.253012657165527, | |
| "learning_rate": 4.890247290914143e-06, | |
| "loss": 4.1868, | |
| "reason_loss": NaN, | |
| "step": 18000 | |
| }, | |
| { | |
| "decision_loss": 2.1486, | |
| "encoder_mlm_loss": 0.457, | |
| "epoch": 0.93, | |
| "grad_norm": 10.766643524169922, | |
| "learning_rate": 4.716587941094748e-06, | |
| "loss": 4.0513, | |
| "reason_loss": NaN, | |
| "step": 18050 | |
| }, | |
| { | |
| "decision_loss": 2.1202, | |
| "encoder_mlm_loss": 0.4788, | |
| "epoch": 0.93, | |
| "grad_norm": 9.282386779785156, | |
| "learning_rate": 4.5429285912753545e-06, | |
| "loss": 4.0717, | |
| "reason_loss": NaN, | |
| "step": 18100 | |
| }, | |
| { | |
| "decision_loss": 2.2307, | |
| "encoder_mlm_loss": 0.4714, | |
| "epoch": 0.94, | |
| "grad_norm": 11.088687896728516, | |
| "learning_rate": 4.36926924145596e-06, | |
| "loss": 4.0528, | |
| "reason_loss": NaN, | |
| "step": 18150 | |
| }, | |
| { | |
| "decision_loss": 2.1483, | |
| "encoder_mlm_loss": 0.4543, | |
| "epoch": 0.94, | |
| "grad_norm": 10.724303245544434, | |
| "learning_rate": 4.195609891636565e-06, | |
| "loss": 4.1388, | |
| "reason_loss": NaN, | |
| "step": 18200 | |
| }, | |
| { | |
| "decision_loss": 2.212, | |
| "encoder_mlm_loss": 0.4605, | |
| "epoch": 0.94, | |
| "grad_norm": 9.722859382629395, | |
| "learning_rate": 4.0219505418171715e-06, | |
| "loss": 4.0295, | |
| "reason_loss": NaN, | |
| "step": 18250 | |
| }, | |
| { | |
| "decision_loss": 2.2006, | |
| "encoder_mlm_loss": 0.445, | |
| "epoch": 0.94, | |
| "grad_norm": 9.893216133117676, | |
| "learning_rate": 3.848291191997777e-06, | |
| "loss": 4.1058, | |
| "reason_loss": NaN, | |
| "step": 18300 | |
| }, | |
| { | |
| "decision_loss": 2.1725, | |
| "encoder_mlm_loss": 0.483, | |
| "epoch": 0.95, | |
| "grad_norm": 10.373659133911133, | |
| "learning_rate": 3.674631842178383e-06, | |
| "loss": 4.0683, | |
| "reason_loss": NaN, | |
| "step": 18350 | |
| }, | |
| { | |
| "decision_loss": 2.3838, | |
| "encoder_mlm_loss": 0.485, | |
| "epoch": 0.95, | |
| "grad_norm": 9.621594429016113, | |
| "learning_rate": 3.5009724923589884e-06, | |
| "loss": 4.1663, | |
| "reason_loss": NaN, | |
| "step": 18400 | |
| }, | |
| { | |
| "decision_loss": 2.1828, | |
| "encoder_mlm_loss": 0.456, | |
| "epoch": 0.95, | |
| "grad_norm": 10.567208290100098, | |
| "learning_rate": 3.3273131425395946e-06, | |
| "loss": 4.1053, | |
| "reason_loss": NaN, | |
| "step": 18450 | |
| }, | |
| { | |
| "decision_loss": 2.3296, | |
| "encoder_mlm_loss": 0.4781, | |
| "epoch": 0.95, | |
| "grad_norm": 9.499934196472168, | |
| "learning_rate": 3.1536537927202e-06, | |
| "loss": 4.0483, | |
| "reason_loss": NaN, | |
| "step": 18500 | |
| }, | |
| { | |
| "decision_loss": 1.9933, | |
| "encoder_mlm_loss": 0.4588, | |
| "epoch": 0.96, | |
| "grad_norm": 9.636274337768555, | |
| "learning_rate": 2.9799944429008058e-06, | |
| "loss": 3.9718, | |
| "reason_loss": NaN, | |
| "step": 18550 | |
| }, | |
| { | |
| "decision_loss": 2.167, | |
| "encoder_mlm_loss": 0.4472, | |
| "epoch": 0.96, | |
| "grad_norm": 9.905817985534668, | |
| "learning_rate": 2.8063350930814116e-06, | |
| "loss": 4.0502, | |
| "reason_loss": NaN, | |
| "step": 18600 | |
| }, | |
| { | |
| "decision_loss": 2.2164, | |
| "encoder_mlm_loss": 0.4483, | |
| "epoch": 0.96, | |
| "grad_norm": 9.581639289855957, | |
| "learning_rate": 2.636148930258405e-06, | |
| "loss": 4.0439, | |
| "reason_loss": NaN, | |
| "step": 18650 | |
| }, | |
| { | |
| "decision_loss": 2.2307, | |
| "encoder_mlm_loss": 0.466, | |
| "epoch": 0.96, | |
| "grad_norm": 10.538454055786133, | |
| "learning_rate": 2.462489580439011e-06, | |
| "loss": 4.079, | |
| "reason_loss": NaN, | |
| "step": 18700 | |
| }, | |
| { | |
| "decision_loss": 2.1576, | |
| "encoder_mlm_loss": 0.4452, | |
| "epoch": 0.97, | |
| "grad_norm": 10.088532447814941, | |
| "learning_rate": 2.2888302306196166e-06, | |
| "loss": 4.0738, | |
| "reason_loss": NaN, | |
| "step": 18750 | |
| }, | |
| { | |
| "decision_loss": 2.1127, | |
| "encoder_mlm_loss": 0.4568, | |
| "epoch": 0.97, | |
| "grad_norm": 9.137553215026855, | |
| "learning_rate": 2.1151708808002224e-06, | |
| "loss": 4.0466, | |
| "reason_loss": NaN, | |
| "step": 18800 | |
| }, | |
| { | |
| "decision_loss": 2.1179, | |
| "encoder_mlm_loss": 0.4432, | |
| "epoch": 0.97, | |
| "grad_norm": 9.486440658569336, | |
| "learning_rate": 1.9415115309808282e-06, | |
| "loss": 4.1585, | |
| "reason_loss": NaN, | |
| "step": 18850 | |
| }, | |
| { | |
| "decision_loss": 2.1924, | |
| "encoder_mlm_loss": 0.4469, | |
| "epoch": 0.97, | |
| "grad_norm": 9.941400527954102, | |
| "learning_rate": 1.7678521811614338e-06, | |
| "loss": 4.0744, | |
| "reason_loss": NaN, | |
| "step": 18900 | |
| }, | |
| { | |
| "decision_loss": 2.1206, | |
| "encoder_mlm_loss": 0.4569, | |
| "epoch": 0.98, | |
| "grad_norm": 9.793261528015137, | |
| "learning_rate": 1.5941928313420396e-06, | |
| "loss": 4.1067, | |
| "reason_loss": NaN, | |
| "step": 18950 | |
| }, | |
| { | |
| "decision_loss": 2.0999, | |
| "encoder_mlm_loss": 0.4246, | |
| "epoch": 0.98, | |
| "grad_norm": 9.890857696533203, | |
| "learning_rate": 1.4205334815226454e-06, | |
| "loss": 4.1567, | |
| "reason_loss": NaN, | |
| "step": 19000 | |
| }, | |
| { | |
| "decision_loss": 2.1566, | |
| "encoder_mlm_loss": 0.4574, | |
| "epoch": 0.98, | |
| "grad_norm": 10.175832748413086, | |
| "learning_rate": 1.246874131703251e-06, | |
| "loss": 4.0561, | |
| "reason_loss": NaN, | |
| "step": 19050 | |
| }, | |
| { | |
| "decision_loss": 2.3035, | |
| "encoder_mlm_loss": 0.4631, | |
| "epoch": 0.98, | |
| "grad_norm": 10.100351333618164, | |
| "learning_rate": 1.0732147818838565e-06, | |
| "loss": 4.1019, | |
| "reason_loss": NaN, | |
| "step": 19100 | |
| }, | |
| { | |
| "decision_loss": 2.0727, | |
| "encoder_mlm_loss": 0.4451, | |
| "epoch": 0.99, | |
| "grad_norm": 10.576125144958496, | |
| "learning_rate": 8.995554320644623e-07, | |
| "loss": 4.151, | |
| "reason_loss": NaN, | |
| "step": 19150 | |
| }, | |
| { | |
| "decision_loss": 2.2445, | |
| "encoder_mlm_loss": 0.4302, | |
| "epoch": 0.99, | |
| "grad_norm": 9.79544448852539, | |
| "learning_rate": 7.258960822450681e-07, | |
| "loss": 4.1227, | |
| "reason_loss": NaN, | |
| "step": 19200 | |
| }, | |
| { | |
| "decision_loss": 2.2291, | |
| "encoder_mlm_loss": 0.4474, | |
| "epoch": 0.99, | |
| "grad_norm": 10.398290634155273, | |
| "learning_rate": 5.522367324256738e-07, | |
| "loss": 3.9649, | |
| "reason_loss": NaN, | |
| "step": 19250 | |
| }, | |
| { | |
| "decision_loss": 2.2452, | |
| "encoder_mlm_loss": 0.4784, | |
| "epoch": 1.0, | |
| "grad_norm": 11.05727481842041, | |
| "learning_rate": 3.7857738260627953e-07, | |
| "loss": 4.0789, | |
| "reason_loss": NaN, | |
| "step": 19300 | |
| }, | |
| { | |
| "decision_loss": 2.2477, | |
| "encoder_mlm_loss": 0.4668, | |
| "epoch": 1.0, | |
| "grad_norm": 10.685857772827148, | |
| "learning_rate": 2.0491803278688526e-07, | |
| "loss": 4.0863, | |
| "reason_loss": NaN, | |
| "step": 19350 | |
| } | |
| ], | |
| "logging_steps": 50, | |
| "max_steps": 19396, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 1, | |
| "save_steps": 5000, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 1.0030959011014115e+19, | |
| "train_batch_size": 2, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |