{
  "best_global_step": 780,
  "best_metric": 1.4478473663330078,
  "best_model_checkpoint": "results/models/distilgpt2_split_3/checkpoint-780",
  "epoch": 2.0,
  "eval_steps": 500,
  "global_step": 1560,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.1282051282051282,
      "grad_norm": 2.1318576335906982,
      "learning_rate": 4.689102564102564e-05,
      "loss": 1.8462,
      "step": 100
    },
    {
      "epoch": 0.2564102564102564,
      "grad_norm": 1.8293204307556152,
      "learning_rate": 4.368589743589744e-05,
      "loss": 1.5051,
      "step": 200
    },
    {
      "epoch": 0.38461538461538464,
      "grad_norm": 2.407285690307617,
      "learning_rate": 4.0480769230769236e-05,
      "loss": 1.4097,
      "step": 300
    },
    {
      "epoch": 0.5128205128205128,
      "grad_norm": 2.0177223682403564,
      "learning_rate": 3.727564102564103e-05,
      "loss": 1.3136,
      "step": 400
    },
    {
      "epoch": 0.6410256410256411,
      "grad_norm": 1.5830180644989014,
      "learning_rate": 3.4070512820512825e-05,
      "loss": 1.2698,
      "step": 500
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 1.6503092050552368,
      "learning_rate": 3.0865384615384616e-05,
      "loss": 1.2133,
      "step": 600
    },
    {
      "epoch": 0.8974358974358975,
      "grad_norm": 1.608035922050476,
      "learning_rate": 2.7660256410256413e-05,
      "loss": 1.1817,
      "step": 700
    },
    {
      "epoch": 1.0,
      "eval_loss": 1.4478473663330078,
      "eval_runtime": 14.1171,
      "eval_samples_per_second": 370.259,
      "eval_steps_per_second": 23.163,
      "step": 780
    },
    {
      "epoch": 1.0256410256410255,
      "grad_norm": 1.399240493774414,
      "learning_rate": 2.4455128205128204e-05,
      "loss": 1.1556,
      "step": 800
    },
    {
      "epoch": 1.1538461538461537,
      "grad_norm": 1.8416355848312378,
      "learning_rate": 2.125e-05,
      "loss": 1.1005,
      "step": 900
    },
    {
      "epoch": 1.282051282051282,
      "grad_norm": 1.4166646003723145,
      "learning_rate": 1.8044871794871796e-05,
      "loss": 1.0782,
      "step": 1000
    },
    {
      "epoch": 1.4102564102564101,
      "grad_norm": 1.3811384439468384,
      "learning_rate": 1.483974358974359e-05,
      "loss": 1.0568,
      "step": 1100
    },
    {
      "epoch": 1.5384615384615383,
      "grad_norm": 1.5326135158538818,
      "learning_rate": 1.1634615384615386e-05,
      "loss": 1.0446,
      "step": 1200
    },
    {
      "epoch": 1.6666666666666665,
      "grad_norm": 1.2560216188430786,
      "learning_rate": 8.42948717948718e-06,
      "loss": 1.0604,
      "step": 1300
    },
    {
      "epoch": 1.7948717948717947,
      "grad_norm": 1.4168336391448975,
      "learning_rate": 5.224358974358975e-06,
      "loss": 1.0371,
      "step": 1400
    },
    {
      "epoch": 1.9230769230769231,
      "grad_norm": 1.3679345846176147,
      "learning_rate": 2.0192307692307692e-06,
      "loss": 1.0274,
      "step": 1500
    },
    {
      "epoch": 2.0,
      "eval_loss": 1.4688283205032349,
      "eval_runtime": 14.0926,
      "eval_samples_per_second": 370.904,
      "eval_steps_per_second": 23.204,
      "step": 1560
    }
  ],
  "logging_steps": 100,
  "max_steps": 1560,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 2,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": true
      },
      "attributes": {}
    }
  },
  "total_flos": 6521966894776320.0,
  "train_batch_size": 4,
  "trial_name": null,
  "trial_params": null
}