{
"best_metric": null,
"best_model_checkpoint": null,
"epoch": 5.0,
"global_step": 675,
"is_hyper_param_search": false,
"is_local_process_zero": true,
"is_world_process_zero": true,
"log_history": [
{
"epoch": 0.07,
"learning_rate": 1.9703703703703704e-05,
"loss": 1.0915,
"step": 10
},
{
"epoch": 0.15,
"learning_rate": 1.9407407407407407e-05,
"loss": 1.1,
"step": 20
},
{
"epoch": 0.22,
"learning_rate": 1.9111111111111113e-05,
"loss": 1.0328,
"step": 30
},
{
"epoch": 0.3,
"learning_rate": 1.8814814814814816e-05,
"loss": 0.9874,
"step": 40
},
{
"epoch": 0.37,
"learning_rate": 1.851851851851852e-05,
"loss": 0.824,
"step": 50
},
{
"epoch": 0.44,
"learning_rate": 1.8222222222222224e-05,
"loss": 0.7368,
"step": 60
},
{
"epoch": 0.52,
"learning_rate": 1.7925925925925927e-05,
"loss": 0.6747,
"step": 70
},
{
"epoch": 0.59,
"learning_rate": 1.7629629629629633e-05,
"loss": 0.6592,
"step": 80
},
{
"epoch": 0.67,
"learning_rate": 1.7333333333333336e-05,
"loss": 0.5993,
"step": 90
},
{
"epoch": 0.74,
"learning_rate": 1.7037037037037038e-05,
"loss": 0.4979,
"step": 100
},
{
"epoch": 0.81,
"learning_rate": 1.674074074074074e-05,
"loss": 0.509,
"step": 110
},
{
"epoch": 0.89,
"learning_rate": 1.6444444444444444e-05,
"loss": 0.4791,
"step": 120
},
{
"epoch": 0.96,
"learning_rate": 1.614814814814815e-05,
"loss": 0.4344,
"step": 130
},
{
"epoch": 1.04,
"learning_rate": 1.5851851851851852e-05,
"loss": 0.3372,
"step": 140
},
{
"epoch": 1.11,
"learning_rate": 1.555555555555556e-05,
"loss": 0.3557,
"step": 150
},
{
"epoch": 1.19,
"learning_rate": 1.525925925925926e-05,
"loss": 0.3696,
"step": 160
},
{
"epoch": 1.26,
"learning_rate": 1.4962962962962964e-05,
"loss": 0.3183,
"step": 170
},
{
"epoch": 1.33,
"learning_rate": 1.4666666666666666e-05,
"loss": 0.2915,
"step": 180
},
{
"epoch": 1.41,
"learning_rate": 1.4370370370370372e-05,
"loss": 0.3132,
"step": 190
},
{
"epoch": 1.48,
"learning_rate": 1.4074074074074075e-05,
"loss": 0.3036,
"step": 200
},
{
"epoch": 1.56,
"learning_rate": 1.377777777777778e-05,
"loss": 0.27,
"step": 210
},
{
"epoch": 1.63,
"learning_rate": 1.3481481481481482e-05,
"loss": 0.2615,
"step": 220
},
{
"epoch": 1.7,
"learning_rate": 1.3185185185185185e-05,
"loss": 0.3576,
"step": 230
},
{
"epoch": 1.78,
"learning_rate": 1.288888888888889e-05,
"loss": 0.3569,
"step": 240
},
{
"epoch": 1.85,
"learning_rate": 1.2592592592592593e-05,
"loss": 0.3082,
"step": 250
},
{
"epoch": 1.93,
"learning_rate": 1.2296296296296298e-05,
"loss": 0.3245,
"step": 260
},
{
"epoch": 2.0,
"learning_rate": 1.2e-05,
"loss": 0.3101,
"step": 270
},
{
"epoch": 2.07,
"learning_rate": 1.1703703703703703e-05,
"loss": 0.2436,
"step": 280
},
{
"epoch": 2.15,
"learning_rate": 1.1407407407407409e-05,
"loss": 0.2519,
"step": 290
},
{
"epoch": 2.22,
"learning_rate": 1.1111111111111113e-05,
"loss": 0.1802,
"step": 300
},
{
"epoch": 2.3,
"learning_rate": 1.0814814814814816e-05,
"loss": 0.1915,
"step": 310
},
{
"epoch": 2.37,
"learning_rate": 1.0518518518518519e-05,
"loss": 0.2336,
"step": 320
},
{
"epoch": 2.44,
"learning_rate": 1.0222222222222223e-05,
"loss": 0.2449,
"step": 330
},
{
"epoch": 2.52,
"learning_rate": 9.925925925925927e-06,
"loss": 0.1734,
"step": 340
},
{
"epoch": 2.59,
"learning_rate": 9.62962962962963e-06,
"loss": 0.2296,
"step": 350
},
{
"epoch": 2.67,
"learning_rate": 9.333333333333334e-06,
"loss": 0.1868,
"step": 360
},
{
"epoch": 2.74,
"learning_rate": 9.037037037037037e-06,
"loss": 0.1636,
"step": 370
},
{
"epoch": 2.81,
"learning_rate": 8.740740740740741e-06,
"loss": 0.2715,
"step": 380
},
{
"epoch": 2.89,
"learning_rate": 8.444444444444446e-06,
"loss": 0.2108,
"step": 390
},
{
"epoch": 2.96,
"learning_rate": 8.148148148148148e-06,
"loss": 0.2238,
"step": 400
},
{
"epoch": 3.04,
"learning_rate": 7.851851851851853e-06,
"loss": 0.1861,
"step": 410
},
{
"epoch": 3.11,
"learning_rate": 7.555555555555556e-06,
"loss": 0.2073,
"step": 420
},
{
"epoch": 3.19,
"learning_rate": 7.2592592592592605e-06,
"loss": 0.1836,
"step": 430
},
{
"epoch": 3.26,
"learning_rate": 6.962962962962964e-06,
"loss": 0.1992,
"step": 440
},
{
"epoch": 3.33,
"learning_rate": 6.666666666666667e-06,
"loss": 0.1743,
"step": 450
},
{
"epoch": 3.41,
"learning_rate": 6.370370370370371e-06,
"loss": 0.1875,
"step": 460
},
{
"epoch": 3.48,
"learning_rate": 6.0740740740740745e-06,
"loss": 0.1722,
"step": 470
},
{
"epoch": 3.56,
"learning_rate": 5.777777777777778e-06,
"loss": 0.1797,
"step": 480
},
{
"epoch": 3.63,
"learning_rate": 5.481481481481482e-06,
"loss": 0.0975,
"step": 490
},
{
"epoch": 3.7,
"learning_rate": 5.185185185185185e-06,
"loss": 0.1544,
"step": 500
},
{
"epoch": 3.78,
"learning_rate": 4.888888888888889e-06,
"loss": 0.1597,
"step": 510
},
{
"epoch": 3.85,
"learning_rate": 4.592592592592593e-06,
"loss": 0.111,
"step": 520
},
{
"epoch": 3.93,
"learning_rate": 4.296296296296296e-06,
"loss": 0.1499,
"step": 530
},
{
"epoch": 4.0,
"learning_rate": 4.000000000000001e-06,
"loss": 0.172,
"step": 540
},
{
"epoch": 4.07,
"learning_rate": 3.7037037037037037e-06,
"loss": 0.1167,
"step": 550
},
{
"epoch": 4.15,
"learning_rate": 3.4074074074074077e-06,
"loss": 0.1435,
"step": 560
},
{
"epoch": 4.22,
"learning_rate": 3.1111111111111116e-06,
"loss": 0.1135,
"step": 570
},
{
"epoch": 4.3,
"learning_rate": 2.814814814814815e-06,
"loss": 0.1451,
"step": 580
},
{
"epoch": 4.37,
"learning_rate": 2.5185185185185186e-06,
"loss": 0.0731,
"step": 590
},
{
"epoch": 4.44,
"learning_rate": 2.222222222222222e-06,
"loss": 0.1417,
"step": 600
},
{
"epoch": 4.52,
"learning_rate": 1.925925925925926e-06,
"loss": 0.0895,
"step": 610
},
{
"epoch": 4.59,
"learning_rate": 1.62962962962963e-06,
"loss": 0.2134,
"step": 620
},
{
"epoch": 4.67,
"learning_rate": 1.3333333333333334e-06,
"loss": 0.1301,
"step": 630
},
{
"epoch": 4.74,
"learning_rate": 1.0370370370370371e-06,
"loss": 0.1575,
"step": 640
},
{
"epoch": 4.81,
"learning_rate": 7.407407407407407e-07,
"loss": 0.1265,
"step": 650
},
{
"epoch": 4.89,
"learning_rate": 4.444444444444445e-07,
"loss": 0.1399,
"step": 660
},
{
"epoch": 4.96,
"learning_rate": 1.4814814814814817e-07,
"loss": 0.0575,
"step": 670
},
{
"epoch": 5.0,
"step": 675,
"total_flos": 1420483563959040.0,
"train_loss": 0.31028794209162397,
"train_runtime": 186.9323,
"train_samples_per_second": 115.523,
"train_steps_per_second": 3.611
}
],
"max_steps": 675,
"num_train_epochs": 5,
"total_flos": 1420483563959040.0,
"trial_name": null,
"trial_params": null
}