Pooya-Fallah committed
Commit b1b6460 (verified) · 1 Parent(s): 798344d

End of training

.ipynb_checkpoints/trainer_state-checkpoint.json ADDED
@@ -0,0 +1,442 @@
README.md ADDED
@@ -0,0 +1,54 @@
+ ---
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: wave2vec2-xlsr-Persian
+ results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # wave2vec2-xlsr-Persian
+
+ This model was trained from scratch on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - epoch: 2.9410
+ - eval_loss: 0.2426
+ - eval_runtime: 548.9964
+ - eval_samples_per_second: 16.299
+ - eval_steps_per_second: 2.038
+ - eval_wer: 0.2340
+ - step: 250000
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 3.0
+
+ ### Framework versions
+
+ - Transformers 4.44.0
+ - Pytorch 2.4.0.dev20240522
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
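
The card above lists evaluation numbers but no usage snippet. Below is a minimal inference sketch, not part of this commit: it assumes the hypothetical Hub id `Pooya-Fallah/wave2vec2-xlsr-Persian` (a local path to the checkpoint would work the same way), that processor/tokenizer files are available alongside the weights added in this commit, and 16 kHz mono input, which is typical for XLSR checkpoints but not stated in config.json.

```python
# A minimal sketch, not the author's documented usage. Assumptions:
#  - the hypothetical repo id below resolves to this checkpoint (or use a local path),
#  - processor/tokenizer files accompany the weights,
#  - audio is a 1-D float waveform at 16 kHz (typical for XLSR models).
import torch
from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor

model_id = "Pooya-Fallah/wave2vec2-xlsr-Persian"  # hypothetical repo id
processor = Wav2Vec2Processor.from_pretrained(model_id)
model = Wav2Vec2ForCTC.from_pretrained(model_id)
model.eval()

def transcribe(audio, sampling_rate=16_000):
    """Return the greedy CTC transcription of a 1-D float waveform."""
    inputs = processor(audio, sampling_rate=sampling_rate, return_tensors="pt")
    with torch.no_grad():
        logits = model(inputs.input_values).logits
    pred_ids = torch.argmax(logits, dim=-1)
    return processor.batch_decode(pred_ids)[0]
```

Greedy argmax decoding is the simplest way to read out the CTC head; an external language model could be layered on top for lower WER.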
config.json ADDED
@@ -0,0 +1,117 @@
+ {
+ "_name_or_path": "./wave2vec2-xlsr-Persian/checkpoint-255012",
+ "activation_dropout": 0.0,
+ "adapter_attn_dim": null,
+ "adapter_kernel_size": 3,
+ "adapter_stride": 2,
+ "add_adapter": false,
+ "apply_spec_augment": true,
+ "architectures": [
+ "Wav2Vec2ForCTC"
+ ],
+ "attention_dropout": 0.1,
+ "bos_token_id": 1,
+ "classifier_proj_size": 256,
+ "codevector_dim": 768,
+ "contrastive_logits_temperature": 0.1,
+ "conv_bias": true,
+ "conv_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512,
+ 512
+ ],
+ "conv_kernel": [
+ 10,
+ 3,
+ 3,
+ 3,
+ 3,
+ 2,
+ 2
+ ],
+ "conv_stride": [
+ 5,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2,
+ 2
+ ],
+ "ctc_loss_reduction": "mean",
+ "ctc_zero_infinity": false,
+ "diversity_loss_weight": 0.1,
+ "do_stable_layer_norm": true,
+ "eos_token_id": 2,
+ "feat_extract_activation": "gelu",
+ "feat_extract_dropout": 0.0,
+ "feat_extract_norm": "layer",
+ "feat_proj_dropout": 0.1,
+ "feat_quantizer_dropout": 0.0,
+ "final_dropout": 0.0,
+ "gradient_checkpointing": false,
+ "hidden_act": "gelu",
+ "hidden_dropout": 0.1,
+ "hidden_size": 1024,
+ "initializer_range": 0.02,
+ "intermediate_size": 4096,
+ "layer_norm_eps": 1e-05,
+ "layerdrop": 0.1,
+ "mask_channel_length": 10,
+ "mask_channel_min_space": 1,
+ "mask_channel_other": 0.0,
+ "mask_channel_prob": 0.0,
+ "mask_channel_selection": "static",
+ "mask_feature_length": 10,
+ "mask_feature_min_masks": 0,
+ "mask_feature_prob": 0.0,
+ "mask_time_length": 10,
+ "mask_time_min_masks": 2,
+ "mask_time_min_space": 1,
+ "mask_time_other": 0.0,
+ "mask_time_prob": 0.075,
+ "mask_time_selection": "static",
+ "model_type": "wav2vec2",
+ "num_adapter_layers": 3,
+ "num_attention_heads": 16,
+ "num_codevector_groups": 2,
+ "num_codevectors_per_group": 320,
+ "num_conv_pos_embedding_groups": 16,
+ "num_conv_pos_embeddings": 128,
+ "num_feat_extract_layers": 7,
+ "num_hidden_layers": 24,
+ "num_negatives": 100,
+ "output_hidden_size": 1024,
+ "pad_token_id": 0,
+ "proj_codevector_dim": 768,
+ "tdnn_dilation": [
+ 1,
+ 2,
+ 3,
+ 1,
+ 1
+ ],
+ "tdnn_dim": [
+ 512,
+ 512,
+ 512,
+ 512,
+ 1500
+ ],
+ "tdnn_kernel": [
+ 5,
+ 3,
+ 3,
+ 1,
+ 1
+ ],
+ "torch_dtype": "float32",
+ "transformers_version": "4.44.0",
+ "use_weighted_layer_sum": false,
+ "vocab_size": 40,
+ "xvector_output_dim": 512
+ }
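
The conv_kernel and conv_stride lists above describe the seven-layer convolutional feature encoder. A small illustrative calculation (derived only from the conv_stride values shown, not code from this repository) of the resulting downsampling factor:

```python
# Derived only from the conv_stride values in the config.json above.
from math import prod

conv_stride = [5, 2, 2, 2, 2, 2, 2]
downsampling = prod(conv_stride)        # 5 * 2**6 = 320 input samples per encoder frame
frame_rate_hz = 16_000 / downsampling   # = 50.0, assuming 16 kHz audio (not stated in the config)
print(downsampling, frame_rate_hz)      # 320 50.0
```

So each encoder output frame advances by 320 input samples, i.e. a 20 ms hop at a 16 kHz sampling rate.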
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83745a27f317bf60c598c88317bf8f940a3731cc01f0ce362e86ca7c2ce2047b
+ size 1261971480
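
The three lines above are a Git LFS pointer, not the weights themselves; the actual ~1.26 GB safetensors blob is stored via LFS. A minimal sketch, assuming the real file has been fetched locally (e.g. after an LFS checkout), for listing the tensors it contains:

```python
# Sketch only; assumes "model.safetensors" is the resolved ~1.26 GB file, not the LFS pointer text.
from safetensors import safe_open

with safe_open("model.safetensors", framework="pt", device="cpu") as f:
    for name in f.keys():
        print(name, tuple(f.get_tensor(name).shape))
```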
trainer_state.json ADDED
@@ -0,0 +1,442 @@
+ {
+ "best_metric": 0.24260137975215912,
+ "best_model_checkpoint": "wave2vec2-xlsr-Persian/checkpoint-240000",
+ "epoch": 3.0,
+ "eval_steps": 10000,
+ "global_step": 255012,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.1176415227518705,
+ "grad_norm": 1.416466236114502,
+ "learning_rate": 9.646079712769477e-06,
+ "loss": 3.7691,
+ "step": 10000
+ },
+ {
+ "epoch": 0.1176415227518705,
+ "eval_loss": 0.7940966486930847,
+ "eval_runtime": 534.4277,
+ "eval_samples_per_second": 16.743,
+ "eval_steps_per_second": 2.094,
+ "eval_wer": 0.6079105283797882,
+ "step": 10000
+ },
+ {
+ "epoch": 0.235283045503741,
+ "grad_norm": 2.3572049140930176,
+ "learning_rate": 9.252436892745226e-06,
+ "loss": 0.8658,
+ "step": 20000
+ },
+ {
+ "epoch": 0.235283045503741,
+ "eval_loss": 0.5118501782417297,
+ "eval_runtime": 536.8454,
+ "eval_samples_per_second": 16.668,
+ "eval_steps_per_second": 2.084,
+ "eval_wer": 0.4575246579700923,
+ "step": 20000
+ },
+ {
+ "epoch": 0.3529245682556115,
+ "grad_norm": 2.209596872329712,
+ "learning_rate": 8.858872809158622e-06,
+ "loss": 0.6829,
+ "step": 30000
+ },
+ {
+ "epoch": 0.3529245682556115,
+ "eval_loss": 0.4285117983818054,
+ "eval_runtime": 539.6896,
+ "eval_samples_per_second": 16.58,
+ "eval_steps_per_second": 2.073,
+ "eval_wer": 0.40392825696083584,
+ "step": 30000
+ },
+ {
+ "epoch": 0.470566091007482,
+ "grad_norm": 2.2252326011657715,
+ "learning_rate": 8.465348093790845e-06,
+ "loss": 0.6078,
+ "step": 40000
+ },
+ {
+ "epoch": 0.470566091007482,
+ "eval_loss": 0.38793477416038513,
+ "eval_runtime": 547.0046,
+ "eval_samples_per_second": 16.358,
+ "eval_steps_per_second": 2.046,
+ "eval_wer": 0.3664437028760849,
+ "step": 40000
+ },
+ {
+ "epoch": 0.5882076137593525,
+ "grad_norm": 2.7398128509521484,
+ "learning_rate": 8.071784010204244e-06,
+ "loss": 0.5561,
+ "step": 50000
+ },
+ {
+ "epoch": 0.5882076137593525,
+ "eval_loss": 0.3593791723251343,
+ "eval_runtime": 551.125,
+ "eval_samples_per_second": 16.236,
+ "eval_steps_per_second": 2.03,
+ "eval_wer": 0.34282162650112097,
+ "step": 50000
+ },
+ {
+ "epoch": 0.705849136511223,
+ "grad_norm": 2.960217237472534,
+ "learning_rate": 7.678259294836465e-06,
+ "loss": 0.5168,
+ "step": 60000
+ },
+ {
+ "epoch": 0.705849136511223,
+ "eval_loss": 0.33377397060394287,
+ "eval_runtime": 546.4831,
+ "eval_samples_per_second": 16.374,
+ "eval_steps_per_second": 2.048,
+ "eval_wer": 0.3202835389089079,
+ "step": 60000
+ },
+ {
+ "epoch": 0.8234906592630935,
+ "grad_norm": 2.531191349029541,
+ "learning_rate": 7.284734579468687e-06,
+ "loss": 0.499,
+ "step": 70000
+ },
+ {
+ "epoch": 0.8234906592630935,
+ "eval_loss": 0.3143016993999481,
+ "eval_runtime": 550.3802,
+ "eval_samples_per_second": 16.258,
+ "eval_steps_per_second": 2.033,
+ "eval_wer": 0.30924387157877603,
+ "step": 70000
+ },
+ {
+ "epoch": 0.941132182014964,
+ "grad_norm": 2.0980894565582275,
+ "learning_rate": 6.891131127663261e-06,
+ "loss": 0.4768,
+ "step": 80000
+ },
+ {
+ "epoch": 0.941132182014964,
+ "eval_loss": 0.3023754954338074,
+ "eval_runtime": 546.9538,
+ "eval_samples_per_second": 16.36,
+ "eval_steps_per_second": 2.046,
+ "eval_wer": 0.29365367113334173,
+ "step": 80000
+ },
+ {
+ "epoch": 1.0587737047668344,
+ "grad_norm": 1.7364046573638916,
+ "learning_rate": 6.497606412295483e-06,
+ "loss": 0.4529,
+ "step": 90000
+ },
+ {
+ "epoch": 1.0587737047668344,
+ "eval_loss": 0.29402047395706177,
+ "eval_runtime": 544.3376,
+ "eval_samples_per_second": 16.438,
+ "eval_steps_per_second": 2.056,
+ "eval_wer": 0.2827878859629002,
+ "step": 90000
+ },
+ {
+ "epoch": 1.176415227518705,
+ "grad_norm": 3.4313299655914307,
+ "learning_rate": 6.1040816969277054e-06,
+ "loss": 0.44,
+ "step": 100000
+ },
+ {
+ "epoch": 1.176415227518705,
+ "eval_loss": 0.2909528613090515,
+ "eval_runtime": 543.2034,
+ "eval_samples_per_second": 16.473,
+ "eval_steps_per_second": 2.06,
+ "eval_wer": 0.27463762217996435,
+ "step": 100000
+ },
+ {
+ "epoch": 1.2940567502705755,
+ "grad_norm": 1.1621043682098389,
+ "learning_rate": 5.710478245122278e-06,
+ "loss": 0.4264,
+ "step": 110000
+ },
+ {
+ "epoch": 1.2940567502705755,
+ "eval_loss": 0.2814837098121643,
+ "eval_runtime": 547.8907,
+ "eval_samples_per_second": 16.332,
+ "eval_steps_per_second": 2.042,
+ "eval_wer": 0.2683001724023115,
+ "step": 110000
+ },
+ {
+ "epoch": 1.4116982730224459,
+ "grad_norm": 2.507716178894043,
+ "learning_rate": 5.316914161535675e-06,
+ "loss": 0.4189,
+ "step": 120000
+ },
+ {
+ "epoch": 1.4116982730224459,
+ "eval_loss": 0.27175650000572205,
+ "eval_runtime": 548.1953,
+ "eval_samples_per_second": 16.323,
+ "eval_steps_per_second": 2.041,
+ "eval_wer": 0.26368304611946813,
+ "step": 120000
+ },
+ {
+ "epoch": 1.5293397957743164,
+ "grad_norm": 2.3356528282165527,
+ "learning_rate": 4.923428814386722e-06,
+ "loss": 0.4052,
+ "step": 130000
+ },
+ {
+ "epoch": 1.5293397957743164,
+ "eval_loss": 0.2673029899597168,
+ "eval_runtime": 544.6822,
+ "eval_samples_per_second": 16.428,
+ "eval_steps_per_second": 2.054,
+ "eval_wer": 0.25851467639420195,
+ "step": 130000
+ },
+ {
+ "epoch": 1.646981318526187,
+ "grad_norm": 2.476557970046997,
+ "learning_rate": 4.529864730800119e-06,
+ "loss": 0.4044,
+ "step": 140000
+ },
+ {
+ "epoch": 1.646981318526187,
+ "eval_loss": 0.26591917872428894,
+ "eval_runtime": 545.1182,
+ "eval_samples_per_second": 16.415,
+ "eval_steps_per_second": 2.053,
+ "eval_wer": 0.2534720937631799,
+ "step": 140000
+ },
+ {
+ "epoch": 1.7646228412780576,
+ "grad_norm": 1.8470633029937744,
+ "learning_rate": 4.136418751869991e-06,
+ "loss": 0.4046,
+ "step": 150000
+ },
+ {
+ "epoch": 1.7646228412780576,
+ "eval_loss": 0.2603091299533844,
+ "eval_runtime": 545.5751,
+ "eval_samples_per_second": 16.401,
+ "eval_steps_per_second": 2.051,
+ "eval_wer": 0.24952459877616556,
+ "step": 150000
+ },
+ {
+ "epoch": 1.8822643640299281,
+ "grad_norm": 2.6165308952331543,
+ "learning_rate": 3.742815300064564e-06,
+ "loss": 0.3944,
+ "step": 160000
+ },
+ {
+ "epoch": 1.8822643640299281,
+ "eval_loss": 0.2575734555721283,
+ "eval_runtime": 545.6155,
+ "eval_samples_per_second": 16.4,
+ "eval_steps_per_second": 2.051,
+ "eval_wer": 0.24606545368445198,
+ "step": 160000
+ },
+ {
+ "epoch": 1.9999058867817985,
+ "grad_norm": 2.0350422859191895,
+ "learning_rate": 3.3492512164779615e-06,
+ "loss": 0.3876,
+ "step": 170000
+ },
+ {
+ "epoch": 1.9999058867817985,
+ "eval_loss": 0.25538763403892517,
+ "eval_runtime": 543.8449,
+ "eval_samples_per_second": 16.453,
+ "eval_steps_per_second": 2.058,
+ "eval_wer": 0.24299106911630866,
+ "step": 170000
+ },
+ {
+ "epoch": 2.117547409533669,
+ "grad_norm": 2.234062433242798,
+ "learning_rate": 2.955726501110184e-06,
+ "loss": 0.3836,
+ "step": 180000
+ },
+ {
+ "epoch": 2.117547409533669,
+ "eval_loss": 0.25172147154808044,
+ "eval_runtime": 543.9926,
+ "eval_samples_per_second": 16.449,
+ "eval_steps_per_second": 2.057,
+ "eval_wer": 0.24226224389377649,
+ "step": 180000
+ },
+ {
+ "epoch": 2.2351889322855394,
+ "grad_norm": 2.7782626152038574,
+ "learning_rate": 2.5621624175235817e-06,
+ "loss": 0.3767,
+ "step": 190000
+ },
+ {
+ "epoch": 2.2351889322855394,
+ "eval_loss": 0.2503082752227783,
+ "eval_runtime": 546.7703,
+ "eval_samples_per_second": 16.365,
+ "eval_steps_per_second": 2.047,
+ "eval_wer": 0.23942833465286462,
+ "step": 190000
+ },
+ {
+ "epoch": 2.35283045503741,
+ "grad_norm": 2.365490198135376,
+ "learning_rate": 2.1686770703746284e-06,
+ "loss": 0.3738,
+ "step": 200000
+ },
+ {
+ "epoch": 2.35283045503741,
+ "eval_loss": 0.24804162979125977,
+ "eval_runtime": 545.3342,
+ "eval_samples_per_second": 16.408,
+ "eval_steps_per_second": 2.052,
+ "eval_wer": 0.23909166919474062,
+ "step": 200000
+ },
+ {
+ "epoch": 2.4704719777892805,
+ "grad_norm": 3.105099678039551,
+ "learning_rate": 1.7751523550068502e-06,
+ "loss": 0.3707,
+ "step": 210000
+ },
+ {
+ "epoch": 2.4704719777892805,
+ "eval_loss": 0.24506914615631104,
+ "eval_runtime": 547.7088,
+ "eval_samples_per_second": 16.337,
+ "eval_steps_per_second": 2.043,
+ "eval_wer": 0.23790409104025928,
+ "step": 210000
+ },
+ {
+ "epoch": 2.588113500541151,
+ "grad_norm": 0.9898041486740112,
+ "learning_rate": 1.3815882714202481e-06,
+ "loss": 0.3649,
+ "step": 220000
+ },
+ {
+ "epoch": 2.588113500541151,
+ "eval_loss": 0.24370211362838745,
+ "eval_runtime": 547.4398,
+ "eval_samples_per_second": 16.345,
+ "eval_steps_per_second": 2.044,
+ "eval_wer": 0.23637614780723498,
+ "step": 220000
+ },
+ {
+ "epoch": 2.7057550232930216,
+ "grad_norm": 4.53593635559082,
+ "learning_rate": 9.880241878336459e-07,
+ "loss": 0.369,
+ "step": 230000
+ },
+ {
+ "epoch": 2.7057550232930216,
+ "eval_loss": 0.24421393871307373,
+ "eval_runtime": 549.6273,
+ "eval_samples_per_second": 16.28,
+ "eval_steps_per_second": 2.036,
+ "eval_wer": 0.23538094991453876,
+ "step": 230000
+ },
+ {
+ "epoch": 2.8233965460448918,
+ "grad_norm": 3.995215654373169,
+ "learning_rate": 5.945388406846922e-07,
+ "loss": 0.3608,
+ "step": 240000
+ },
+ {
+ "epoch": 2.8233965460448918,
+ "eval_loss": 0.24260137975215912,
+ "eval_runtime": 548.4399,
+ "eval_samples_per_second": 16.315,
+ "eval_steps_per_second": 2.04,
+ "eval_wer": 0.23452633759776248,
+ "step": 240000
+ },
+ {
+ "epoch": 2.9410380687967628,
+ "grad_norm": 1.9774836301803589,
+ "learning_rate": 2.0097475709808989e-07,
+ "loss": 0.3644,
+ "step": 250000
+ },
+ {
+ "epoch": 2.9410380687967628,
+ "eval_loss": 0.24260272085666656,
+ "eval_runtime": 548.9964,
+ "eval_samples_per_second": 16.299,
+ "eval_steps_per_second": 2.038,
+ "eval_wer": 0.234019489600367,
+ "step": 250000
+ },
+ {
+ "epoch": 3.0,
+ "step": 255012,
+ "total_flos": 2.7896646019563717e+20,
+ "train_loss": 0.5826510110653187,
+ "train_runtime": 84804.3792,
+ "train_samples_per_second": 6.014,
+ "train_steps_per_second": 3.007
+ }
+ ],
+ "logging_steps": 10000,
+ "max_steps": 255012,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 3,
+ "save_steps": 10000,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 2.7896646019563717e+20,
+ "train_batch_size": 2,
+ "trial_name": null,
+ "trial_params": null
+ }
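
The log_history array above alternates a training entry (loss, grad_norm, learning_rate) with an evaluation entry (eval_loss, eval_wer, ...) every 10,000 steps. A minimal sketch, assuming a local copy of this trainer_state.json, for pulling the eval WER curve out of it:

```python
# Reads a local copy of the trainer_state.json shown above and prints eval WER per step.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

eval_curve = [
    (entry["step"], entry["eval_wer"])
    for entry in state["log_history"]
    if "eval_wer" in entry
]
for step, wer in eval_curve:
    print(f"step {step:>6}: WER {wer:.4f}")
```

Note that best_model_checkpoint follows best_metric, which is the eval_loss at step 240000, while the eval WER itself is still inching down at step 250000 (0.2345 → 0.2340).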
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:865bee174cb34efab19ff1a39c004b8e7705ebe4b7c71fde74c2b8dc1d0e5280
+ size 5176