{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.0,
  "eval_steps": 200,
  "global_step": 921,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.0,
      "learning_rate": 5.3763440860215056e-09,
      "logits/chosen": -1.3876744508743286,
      "logits/rejected": -1.355853796005249,
      "logps/chosen": -672.8331298828125,
      "logps/rejected": -549.942138671875,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.01,
      "learning_rate": 5.3763440860215054e-08,
      "logits/chosen": -1.1944549083709717,
      "logits/rejected": -1.1097763776779175,
      "logps/chosen": -368.380859375,
      "logps/rejected": -353.98101806640625,
      "loss": 0.709,
      "rewards/accuracies": 0.4305555522441864,
      "rewards/chosen": 0.015228834003210068,
      "rewards/margins": 0.0040851375088095665,
      "rewards/rejected": 0.011143695563077927,
      "step": 10
    },
    {
      "epoch": 0.02,
      "learning_rate": 1.0752688172043011e-07,
      "logits/chosen": -1.4370033740997314,
      "logits/rejected": -1.4178457260131836,
      "logps/chosen": -391.20050048828125,
      "logps/rejected": -409.3836364746094,
      "loss": 0.7198,
      "rewards/accuracies": 0.512499988079071,
      "rewards/chosen": -0.015771254897117615,
      "rewards/margins": 0.035423360764980316,
      "rewards/rejected": -0.05119461938738823,
      "step": 20
    },
    {
      "epoch": 0.03,
      "learning_rate": 1.6129032258064515e-07,
      "logits/chosen": -1.416802167892456,
      "logits/rejected": -1.4156275987625122,
      "logps/chosen": -411.7056579589844,
      "logps/rejected": -425.6934509277344,
      "loss": 0.6959,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": -0.15962687134742737,
      "rewards/margins": 0.045185185968875885,
      "rewards/rejected": -0.20481204986572266,
      "step": 30
    },
    {
      "epoch": 0.04,
      "learning_rate": 2.1505376344086022e-07,
      "logits/chosen": -1.2398654222488403,
      "logits/rejected": -1.2564724683761597,
      "logps/chosen": -385.45782470703125,
      "logps/rejected": -452.55535888671875,
      "loss": 0.6365,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.3178340792655945,
      "rewards/margins": 0.27168726921081543,
      "rewards/rejected": -0.5895214080810547,
      "step": 40
    },
    {
      "epoch": 0.05,
      "learning_rate": 2.6881720430107523e-07,
      "logits/chosen": -1.2720705270767212,
      "logits/rejected": -0.9971787333488464,
      "logps/chosen": -339.6200866699219,
      "logps/rejected": -361.6040954589844,
      "loss": 0.5605,
      "rewards/accuracies": 0.6625000238418579,
      "rewards/chosen": -0.790237307548523,
      "rewards/margins": 0.61395663022995,
      "rewards/rejected": -1.4041939973831177,
      "step": 50
    },
    {
      "epoch": 0.07,
      "learning_rate": 3.225806451612903e-07,
      "logits/chosen": -1.350678563117981,
      "logits/rejected": -1.3656706809997559,
      "logps/chosen": -417.0431213378906,
      "logps/rejected": -416.76165771484375,
      "loss": 0.5485,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -1.473722219467163,
      "rewards/margins": 1.0274063348770142,
      "rewards/rejected": -2.5011284351348877,
      "step": 60
    },
    {
      "epoch": 0.08,
      "learning_rate": 3.7634408602150537e-07,
      "logits/chosen": -1.2629830837249756,
      "logits/rejected": -1.415208339691162,
      "logps/chosen": -377.01275634765625,
      "logps/rejected": -445.7503967285156,
      "loss": 0.5203,
      "rewards/accuracies": 0.675000011920929,
      "rewards/chosen": -1.3981420993804932,
      "rewards/margins": 0.6980546116828918,
      "rewards/rejected": -2.0961966514587402,
      "step": 70
    },
    {
      "epoch": 0.09,
      "learning_rate": 4.3010752688172043e-07,
      "logits/chosen": -1.3798706531524658,
      "logits/rejected": -1.3563942909240723,
      "logps/chosen": -411.2007751464844,
      "logps/rejected": -434.94476318359375,
      "loss": 0.5067,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.8918856382369995,
      "rewards/margins": 1.0622823238372803,
      "rewards/rejected": -1.9541680812835693,
      "step": 80
    },
    {
      "epoch": 0.1,
      "learning_rate": 4.838709677419355e-07,
      "logits/chosen": -1.6255207061767578,
      "logits/rejected": -1.610365629196167,
      "logps/chosen": -355.00128173828125,
      "logps/rejected": -383.69866943359375,
      "loss": 0.511,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.8219491839408875,
      "rewards/margins": 0.6381314992904663,
      "rewards/rejected": -1.460080623626709,
      "step": 90
    },
    {
      "epoch": 0.11,
      "learning_rate": 4.957729468599034e-07,
      "logits/chosen": -1.3692975044250488,
      "logits/rejected": -1.3177874088287354,
      "logps/chosen": -378.91265869140625,
      "logps/rejected": -361.2559509277344,
      "loss": 0.4586,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.2890338897705078,
      "rewards/margins": 1.1477439403533936,
      "rewards/rejected": -1.4367780685424805,
      "step": 100
    },
    {
      "epoch": 0.12,
      "learning_rate": 4.897342995169082e-07,
      "logits/chosen": -1.3526300191879272,
      "logits/rejected": -1.2792448997497559,
      "logps/chosen": -378.30126953125,
      "logps/rejected": -415.57464599609375,
      "loss": 0.4725,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.5904244184494019,
      "rewards/margins": 1.3282153606414795,
      "rewards/rejected": -1.9186397790908813,
      "step": 110
    },
    {
      "epoch": 0.13,
      "learning_rate": 4.83695652173913e-07,
      "logits/chosen": -1.4887703657150269,
      "logits/rejected": -1.3336336612701416,
      "logps/chosen": -374.025390625,
      "logps/rejected": -446.5819396972656,
      "loss": 0.4726,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.5127348899841309,
      "rewards/margins": 1.7226130962371826,
      "rewards/rejected": -2.2353477478027344,
      "step": 120
    },
    {
      "epoch": 0.14,
      "learning_rate": 4.776570048309178e-07,
      "logits/chosen": -1.2795524597167969,
      "logits/rejected": -1.3249471187591553,
      "logps/chosen": -375.96710205078125,
      "logps/rejected": -457.1031188964844,
      "loss": 0.4781,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.473715215921402,
      "rewards/margins": 1.3638942241668701,
      "rewards/rejected": -1.8376095294952393,
      "step": 130
    },
    {
      "epoch": 0.15,
      "learning_rate": 4.716183574879227e-07,
      "logits/chosen": -0.8936523199081421,
      "logits/rejected": -0.731735348701477,
      "logps/chosen": -377.02410888671875,
      "logps/rejected": -374.2434387207031,
      "loss": 0.4771,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.4292902946472168,
      "rewards/margins": 1.5224840641021729,
      "rewards/rejected": -1.9517742395401,
      "step": 140
    },
    {
      "epoch": 0.16,
      "learning_rate": 4.655797101449275e-07,
      "logits/chosen": -1.3006255626678467,
      "logits/rejected": -1.1428701877593994,
      "logps/chosen": -422.60986328125,
      "logps/rejected": -394.9052429199219,
      "loss": 0.4141,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.40644779801368713,
      "rewards/margins": 1.319599986076355,
      "rewards/rejected": -1.7260475158691406,
      "step": 150
    },
    {
      "epoch": 0.17,
      "learning_rate": 4.5954106280193235e-07,
      "logits/chosen": -1.3789948225021362,
      "logits/rejected": -1.2863258123397827,
      "logps/chosen": -305.4602355957031,
      "logps/rejected": -374.1790466308594,
      "loss": 0.4965,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.9782305955886841,
      "rewards/margins": 1.459176778793335,
      "rewards/rejected": -2.4374070167541504,
      "step": 160
    },
    {
      "epoch": 0.18,
      "learning_rate": 4.5350241545893717e-07,
      "logits/chosen": -1.4751253128051758,
      "logits/rejected": -1.1761484146118164,
      "logps/chosen": -395.0107727050781,
      "logps/rejected": -390.03076171875,
      "loss": 0.441,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.5482892394065857,
      "rewards/margins": 1.585138201713562,
      "rewards/rejected": -2.133427143096924,
      "step": 170
    },
    {
      "epoch": 0.2,
      "learning_rate": 4.47463768115942e-07,
      "logits/chosen": -1.6447718143463135,
      "logits/rejected": -1.6261088848114014,
      "logps/chosen": -430.76129150390625,
      "logps/rejected": -425.74969482421875,
      "loss": 0.4392,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.5816106796264648,
      "rewards/margins": 1.8813564777374268,
      "rewards/rejected": -2.4629669189453125,
      "step": 180
    },
    {
      "epoch": 0.21,
      "learning_rate": 4.414251207729469e-07,
      "logits/chosen": -1.2417869567871094,
      "logits/rejected": -1.3334053754806519,
      "logps/chosen": -337.415771484375,
      "logps/rejected": -356.4654235839844,
      "loss": 0.4574,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.755621075630188,
      "rewards/margins": 1.5105997323989868,
      "rewards/rejected": -2.266220808029175,
      "step": 190
    },
    {
      "epoch": 0.22,
      "learning_rate": 4.3538647342995165e-07,
      "logits/chosen": -1.5782802104949951,
      "logits/rejected": -1.6571115255355835,
      "logps/chosen": -431.74151611328125,
      "logps/rejected": -431.4637145996094,
      "loss": 0.4384,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.07804969698190689,
      "rewards/margins": 1.6836214065551758,
      "rewards/rejected": -1.7616710662841797,
      "step": 200
    },
    {
      "epoch": 0.22,
      "eval_logits/chosen": -1.4511131048202515,
      "eval_logits/rejected": -1.3157293796539307,
      "eval_logps/chosen": -397.8616638183594,
      "eval_logps/rejected": -405.7994079589844,
      "eval_loss": 0.4556037187576294,
      "eval_rewards/accuracies": 0.7936508059501648,
      "eval_rewards/chosen": -0.3274631202220917,
      "eval_rewards/margins": 1.6173655986785889,
      "eval_rewards/rejected": -1.944828748703003,
      "eval_runtime": 405.1145,
      "eval_samples_per_second": 4.937,
      "eval_steps_per_second": 0.156,
      "step": 200
    },
    {
      "epoch": 0.23,
      "learning_rate": 4.2934782608695653e-07,
      "logits/chosen": -0.9520143270492554,
      "logits/rejected": -1.01302969455719,
      "logps/chosen": -444.7759704589844,
      "logps/rejected": -401.30389404296875,
      "loss": 0.4141,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.38320282101631165,
      "rewards/margins": 1.236628770828247,
      "rewards/rejected": -1.6198316812515259,
      "step": 210
    },
    {
      "epoch": 0.24,
      "learning_rate": 4.2330917874396135e-07,
      "logits/chosen": -1.601231336593628,
      "logits/rejected": -1.4172089099884033,
      "logps/chosen": -431.98272705078125,
      "logps/rejected": -412.7859802246094,
      "loss": 0.4817,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.31900420784950256,
      "rewards/margins": 1.7263078689575195,
      "rewards/rejected": -2.0453121662139893,
      "step": 220
    },
    {
      "epoch": 0.25,
      "learning_rate": 4.172705314009662e-07,
      "logits/chosen": -1.1527206897735596,
      "logits/rejected": -0.8433942794799805,
      "logps/chosen": -383.67559814453125,
      "logps/rejected": -369.19342041015625,
      "loss": 0.45,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.5148889422416687,
      "rewards/margins": 1.6342861652374268,
      "rewards/rejected": -2.1491751670837402,
      "step": 230
    },
    {
      "epoch": 0.26,
      "learning_rate": 4.11231884057971e-07,
      "logits/chosen": -1.3957723379135132,
      "logits/rejected": -1.4669153690338135,
      "logps/chosen": -366.89093017578125,
      "logps/rejected": -381.7056579589844,
      "loss": 0.4573,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.2394193708896637,
      "rewards/margins": 1.0727304220199585,
      "rewards/rejected": -1.3121497631072998,
      "step": 240
    },
    {
      "epoch": 0.27,
      "learning_rate": 4.0519323671497583e-07,
      "logits/chosen": -1.1060564517974854,
      "logits/rejected": -1.020811676979065,
      "logps/chosen": -431.10791015625,
      "logps/rejected": -398.845458984375,
      "loss": 0.4008,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": -0.08395320922136307,
      "rewards/margins": 1.6107537746429443,
      "rewards/rejected": -1.694706916809082,
      "step": 250
    },
    {
      "epoch": 0.28,
      "learning_rate": 3.9915458937198065e-07,
      "logits/chosen": -1.0142405033111572,
      "logits/rejected": -1.0058037042617798,
      "logps/chosen": -404.4456481933594,
      "logps/rejected": -417.4208984375,
      "loss": 0.4342,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.09296660125255585,
      "rewards/margins": 1.8066604137420654,
      "rewards/rejected": -1.713693618774414,
      "step": 260
    },
    {
      "epoch": 0.29,
      "learning_rate": 3.9311594202898553e-07,
      "logits/chosen": -0.816851794719696,
      "logits/rejected": -0.7470394372940063,
      "logps/chosen": -395.8139343261719,
      "logps/rejected": -355.42071533203125,
      "loss": 0.4899,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.38039931654930115,
      "rewards/margins": 1.4586222171783447,
      "rewards/rejected": -1.8390214443206787,
      "step": 270
    },
    {
      "epoch": 0.3,
      "learning_rate": 3.870772946859903e-07,
      "logits/chosen": -1.0235198736190796,
      "logits/rejected": -1.009104609489441,
      "logps/chosen": -419.8531188964844,
      "logps/rejected": -417.8365783691406,
      "loss": 0.4258,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.24367530643939972,
      "rewards/margins": 1.267002820968628,
      "rewards/rejected": -1.5106781721115112,
      "step": 280
    },
    {
      "epoch": 0.31,
      "learning_rate": 3.810386473429952e-07,
      "logits/chosen": -1.1125423908233643,
      "logits/rejected": -1.093120813369751,
      "logps/chosen": -347.3122863769531,
      "logps/rejected": -387.7191467285156,
      "loss": 0.435,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.060557298362255096,
      "rewards/margins": 1.8192393779754639,
      "rewards/rejected": -1.7586820125579834,
      "step": 290
    },
    {
      "epoch": 0.33,
      "learning_rate": 3.75e-07,
      "logits/chosen": -1.1970382928848267,
      "logits/rejected": -1.1414059400558472,
      "logps/chosen": -380.08892822265625,
      "logps/rejected": -393.11126708984375,
      "loss": 0.4137,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.4049530625343323,
      "rewards/margins": 1.5264403820037842,
      "rewards/rejected": -1.9313932657241821,
      "step": 300
    },
    {
      "epoch": 0.34,
      "learning_rate": 3.689613526570048e-07,
      "logits/chosen": -1.4467629194259644,
      "logits/rejected": -1.4420421123504639,
      "logps/chosen": -412.6943359375,
      "logps/rejected": -411.64031982421875,
      "loss": 0.4545,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.44115525484085083,
      "rewards/margins": 1.5014537572860718,
      "rewards/rejected": -1.9426085948944092,
      "step": 310
    },
    {
      "epoch": 0.35,
      "learning_rate": 3.6292270531400966e-07,
      "logits/chosen": -1.576790690422058,
      "logits/rejected": -1.605373740196228,
      "logps/chosen": -442.7998046875,
      "logps/rejected": -471.8130798339844,
      "loss": 0.426,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.1621936857700348,
      "rewards/margins": 1.6487839221954346,
      "rewards/rejected": -1.810977578163147,
      "step": 320
    },
    {
      "epoch": 0.36,
      "learning_rate": 3.5688405797101443e-07,
      "logits/chosen": -1.3066380023956299,
      "logits/rejected": -1.1275885105133057,
      "logps/chosen": -369.5173034667969,
      "logps/rejected": -393.6142883300781,
      "loss": 0.4594,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.3911195993423462,
      "rewards/margins": 1.561603307723999,
      "rewards/rejected": -1.9527229070663452,
      "step": 330
    },
    {
      "epoch": 0.37,
      "learning_rate": 3.508454106280193e-07,
      "logits/chosen": -1.460533857345581,
      "logits/rejected": -1.3913938999176025,
      "logps/chosen": -461.4322814941406,
      "logps/rejected": -443.2294006347656,
      "loss": 0.4343,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.05905343219637871,
      "rewards/margins": 1.777876615524292,
      "rewards/rejected": -1.7188230752944946,
      "step": 340
    },
    {
      "epoch": 0.38,
      "learning_rate": 3.4480676328502414e-07,
      "logits/chosen": -1.0480554103851318,
      "logits/rejected": -0.939221203327179,
      "logps/chosen": -389.5008544921875,
      "logps/rejected": -391.26678466796875,
      "loss": 0.4464,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.5172668099403381,
      "rewards/margins": 1.5226786136627197,
      "rewards/rejected": -2.039945125579834,
      "step": 350
    },
    {
      "epoch": 0.39,
      "learning_rate": 3.3876811594202896e-07,
      "logits/chosen": -1.367060899734497,
      "logits/rejected": -1.250497579574585,
      "logps/chosen": -374.1922302246094,
      "logps/rejected": -377.8554382324219,
      "loss": 0.4265,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.34825998544692993,
      "rewards/margins": 1.6436443328857422,
      "rewards/rejected": -1.9919040203094482,
      "step": 360
    },
    {
      "epoch": 0.4,
      "learning_rate": 3.327294685990338e-07,
      "logits/chosen": -1.4619824886322021,
      "logits/rejected": -1.224457025527954,
      "logps/chosen": -394.55694580078125,
      "logps/rejected": -406.80389404296875,
      "loss": 0.4343,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.11512075364589691,
      "rewards/margins": 1.9784886837005615,
      "rewards/rejected": -1.863368034362793,
      "step": 370
    },
    {
      "epoch": 0.41,
      "learning_rate": 3.266908212560386e-07,
      "logits/chosen": -1.5472246408462524,
      "logits/rejected": -1.3760802745819092,
      "logps/chosen": -370.1455993652344,
      "logps/rejected": -360.59735107421875,
      "loss": 0.4365,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.24435913562774658,
      "rewards/margins": 1.2893067598342896,
      "rewards/rejected": -1.5336658954620361,
      "step": 380
    },
    {
      "epoch": 0.42,
      "learning_rate": 3.2065217391304344e-07,
      "logits/chosen": -1.3668596744537354,
      "logits/rejected": -1.2702538967132568,
      "logps/chosen": -429.017822265625,
      "logps/rejected": -415.56439208984375,
      "loss": 0.4121,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.2810801863670349,
      "rewards/margins": 1.7658510208129883,
      "rewards/rejected": -2.046931028366089,
      "step": 390
    },
    {
      "epoch": 0.43,
      "learning_rate": 3.146135265700483e-07,
      "logits/chosen": -1.3646513223648071,
      "logits/rejected": -1.072363257408142,
      "logps/chosen": -390.0302429199219,
      "logps/rejected": -395.0215148925781,
      "loss": 0.4064,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.38891416788101196,
      "rewards/margins": 1.990821123123169,
      "rewards/rejected": -2.379735231399536,
      "step": 400
    },
    {
      "epoch": 0.43,
      "eval_logits/chosen": -0.6539027094841003,
      "eval_logits/rejected": -0.7660450339317322,
      "eval_logps/chosen": -396.7496032714844,
      "eval_logps/rejected": -408.44091796875,
      "eval_loss": 0.4285692274570465,
      "eval_rewards/accuracies": 0.8253968358039856,
      "eval_rewards/chosen": -0.21625187993049622,
      "eval_rewards/margins": 1.9927276372909546,
      "eval_rewards/rejected": -2.208979606628418,
      "eval_runtime": 409.2634,
      "eval_samples_per_second": 4.887,
      "eval_steps_per_second": 0.154,
      "step": 400
    },
    {
      "epoch": 0.45,
      "learning_rate": 3.085748792270531e-07,
      "logits/chosen": -1.2127052545547485,
      "logits/rejected": -1.2303555011749268,
      "logps/chosen": -399.98858642578125,
      "logps/rejected": -388.2366638183594,
      "loss": 0.4587,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.6597892642021179,
      "rewards/margins": 1.7150468826293945,
      "rewards/rejected": -2.3748364448547363,
      "step": 410
    },
    {
      "epoch": 0.46,
      "learning_rate": 3.0253623188405797e-07,
      "logits/chosen": -1.6032695770263672,
      "logits/rejected": -1.3187893629074097,
      "logps/chosen": -347.73553466796875,
      "logps/rejected": -366.84814453125,
      "loss": 0.4217,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.20497450232505798,
      "rewards/margins": 1.4589314460754395,
      "rewards/rejected": -1.6639057397842407,
      "step": 420
    },
    {
      "epoch": 0.47,
      "learning_rate": 2.964975845410628e-07,
      "logits/chosen": -1.4543535709381104,
      "logits/rejected": -1.2742624282836914,
      "logps/chosen": -423.95111083984375,
      "logps/rejected": -437.7804260253906,
      "loss": 0.4479,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.25907284021377563,
      "rewards/margins": 2.2142040729522705,
      "rewards/rejected": -1.9551312923431396,
      "step": 430
    },
    {
      "epoch": 0.48,
      "learning_rate": 2.904589371980676e-07,
      "logits/chosen": -1.9760487079620361,
      "logits/rejected": -1.7164779901504517,
      "logps/chosen": -386.17193603515625,
      "logps/rejected": -450.7064514160156,
      "loss": 0.4361,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.34018686413764954,
      "rewards/margins": 1.6212705373764038,
      "rewards/rejected": -1.9614572525024414,
      "step": 440
    },
    {
      "epoch": 0.49,
      "learning_rate": 2.8442028985507245e-07,
      "logits/chosen": -1.5040684938430786,
      "logits/rejected": -1.4459855556488037,
      "logps/chosen": -410.3436584472656,
      "logps/rejected": -438.83837890625,
      "loss": 0.4386,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.016602765768766403,
      "rewards/margins": 1.8991400003433228,
      "rewards/rejected": -1.9157428741455078,
      "step": 450
    },
    {
      "epoch": 0.5,
      "learning_rate": 2.7838164251207727e-07,
      "logits/chosen": -1.193297266960144,
      "logits/rejected": -0.9360073804855347,
      "logps/chosen": -415.88568115234375,
      "logps/rejected": -389.2828674316406,
      "loss": 0.4233,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.08352740854024887,
      "rewards/margins": 2.2130849361419678,
      "rewards/rejected": -2.1295576095581055,
      "step": 460
    },
    {
      "epoch": 0.51,
      "learning_rate": 2.723429951690821e-07,
      "logits/chosen": -1.5409491062164307,
      "logits/rejected": -1.7222496271133423,
      "logps/chosen": -426.3505859375,
      "logps/rejected": -415.15753173828125,
      "loss": 0.3939,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.04843442887067795,
      "rewards/margins": 2.11486554145813,
      "rewards/rejected": -2.163300037384033,
      "step": 470
    },
    {
      "epoch": 0.52,
      "learning_rate": 2.66304347826087e-07,
      "logits/chosen": -1.1968003511428833,
      "logits/rejected": -1.0116466283798218,
      "logps/chosen": -381.65277099609375,
      "logps/rejected": -407.2720642089844,
      "loss": 0.4468,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": 0.10518859326839447,
      "rewards/margins": 2.6760292053222656,
      "rewards/rejected": -2.570840358734131,
      "step": 480
    },
    {
      "epoch": 0.53,
      "learning_rate": 2.6026570048309175e-07,
      "logits/chosen": -1.5621907711029053,
      "logits/rejected": -1.4111429452896118,
      "logps/chosen": -331.765380859375,
      "logps/rejected": -345.84417724609375,
      "loss": 0.4083,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.2680264115333557,
      "rewards/margins": 1.9610168933868408,
      "rewards/rejected": -2.229043483734131,
      "step": 490
    },
    {
      "epoch": 0.54,
      "learning_rate": 2.5422705314009663e-07,
      "logits/chosen": -1.137390375137329,
      "logits/rejected": -0.9998054504394531,
      "logps/chosen": -393.7897644042969,
      "logps/rejected": -426.21612548828125,
      "loss": 0.4487,
      "rewards/accuracies": 0.7124999761581421,
      "rewards/chosen": -0.3346656262874603,
      "rewards/margins": 1.5842936038970947,
      "rewards/rejected": -1.9189590215682983,
      "step": 500
    },
    {
      "epoch": 0.55,
      "learning_rate": 2.4818840579710145e-07,
      "logits/chosen": -1.2915681600570679,
      "logits/rejected": -1.1393409967422485,
      "logps/chosen": -476.5746154785156,
      "logps/rejected": -397.6057434082031,
      "loss": 0.4763,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.33603593707084656,
      "rewards/margins": 1.7989603281021118,
      "rewards/rejected": -2.134996175765991,
      "step": 510
    },
    {
      "epoch": 0.56,
      "learning_rate": 2.421497584541063e-07,
      "logits/chosen": -1.4665111303329468,
      "logits/rejected": -1.4302623271942139,
      "logps/chosen": -410.9242248535156,
      "logps/rejected": -435.70721435546875,
      "loss": 0.4378,
      "rewards/accuracies": 0.75,
      "rewards/chosen": -0.36040204763412476,
      "rewards/margins": 1.7777122259140015,
      "rewards/rejected": -2.1381144523620605,
      "step": 520
    },
    {
      "epoch": 0.58,
      "learning_rate": 2.361111111111111e-07,
      "logits/chosen": -1.3417316675186157,
      "logits/rejected": -1.2472248077392578,
      "logps/chosen": -396.3175354003906,
      "logps/rejected": -406.9625244140625,
      "loss": 0.3857,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": 0.186844140291214,
      "rewards/margins": 2.318281650543213,
      "rewards/rejected": -2.131437301635742,
      "step": 530
    },
    {
      "epoch": 0.59,
      "learning_rate": 2.3007246376811593e-07,
      "logits/chosen": -1.5599995851516724,
      "logits/rejected": -1.4291542768478394,
      "logps/chosen": -411.170654296875,
      "logps/rejected": -423.13177490234375,
      "loss": 0.4244,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -0.1071757823228836,
      "rewards/margins": 1.971361517906189,
      "rewards/rejected": -2.0785374641418457,
      "step": 540
    },
    {
      "epoch": 0.6,
      "learning_rate": 2.2403381642512075e-07,
      "logits/chosen": -1.2990379333496094,
      "logits/rejected": -1.1973732709884644,
      "logps/chosen": -418.79437255859375,
      "logps/rejected": -472.189453125,
      "loss": 0.4643,
      "rewards/accuracies": 0.8374999761581421,
      "rewards/chosen": 0.147103950381279,
      "rewards/margins": 2.323150157928467,
      "rewards/rejected": -2.176046371459961,
      "step": 550
    },
    {
      "epoch": 0.61,
      "learning_rate": 2.1799516908212558e-07,
      "logits/chosen": -1.8142582178115845,
      "logits/rejected": -1.725685715675354,
      "logps/chosen": -374.6809387207031,
      "logps/rejected": -410.0592346191406,
      "loss": 0.4145,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.23340897262096405,
      "rewards/margins": 2.3226966857910156,
      "rewards/rejected": -2.556105852127075,
      "step": 560
    },
    {
      "epoch": 0.62,
      "learning_rate": 2.1195652173913043e-07,
      "logits/chosen": -1.4505486488342285,
      "logits/rejected": -1.3042548894882202,
      "logps/chosen": -409.4254150390625,
      "logps/rejected": -374.3670654296875,
      "loss": 0.4178,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": 0.1292637288570404,
      "rewards/margins": 2.543600082397461,
      "rewards/rejected": -2.414336681365967,
      "step": 570
    },
    {
      "epoch": 0.63,
      "learning_rate": 2.0591787439613526e-07,
      "logits/chosen": -0.5316571593284607,
      "logits/rejected": -0.45167016983032227,
      "logps/chosen": -386.24517822265625,
      "logps/rejected": -434.59393310546875,
      "loss": 0.3853,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.4753696322441101,
      "rewards/margins": 1.9324595928192139,
      "rewards/rejected": -2.4078292846679688,
      "step": 580
    },
    {
      "epoch": 0.64,
      "learning_rate": 1.9987922705314008e-07,
      "logits/chosen": -1.3159304857254028,
      "logits/rejected": -1.087425947189331,
      "logps/chosen": -377.9801025390625,
      "logps/rejected": -377.0760803222656,
      "loss": 0.4461,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.38399359583854675,
      "rewards/margins": 2.1969008445739746,
      "rewards/rejected": -2.580894708633423,
      "step": 590
    },
    {
      "epoch": 0.65,
      "learning_rate": 1.938405797101449e-07,
      "logits/chosen": -0.9003721475601196,
      "logits/rejected": -0.6628047227859497,
      "logps/chosen": -435.5120544433594,
      "logps/rejected": -446.45550537109375,
      "loss": 0.3952,
      "rewards/accuracies": 0.875,
      "rewards/chosen": 0.012645396403968334,
      "rewards/margins": 1.9577815532684326,
      "rewards/rejected": -1.9451364278793335,
      "step": 600
    },
    {
      "epoch": 0.65,
      "eval_logits/chosen": -0.7205740809440613,
      "eval_logits/rejected": -0.6783490777015686,
      "eval_logps/chosen": -395.8981628417969,
      "eval_logps/rejected": -407.95367431640625,
      "eval_loss": 0.42750075459480286,
      "eval_rewards/accuracies": 0.8015872836112976,
      "eval_rewards/chosen": -0.13110622763633728,
      "eval_rewards/margins": 2.0291495323181152,
      "eval_rewards/rejected": -2.1602559089660645,
      "eval_runtime": 404.1135,
      "eval_samples_per_second": 4.949,
      "eval_steps_per_second": 0.156,
      "step": 600
    },
    {
      "epoch": 0.66,
      "learning_rate": 1.8780193236714976e-07,
      "logits/chosen": -0.49158763885498047,
      "logits/rejected": -0.30927062034606934,
      "logps/chosen": -421.99005126953125,
      "logps/rejected": -378.0501708984375,
      "loss": 0.4263,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.14008215069770813,
      "rewards/margins": 1.6353435516357422,
      "rewards/rejected": -1.775425672531128,
      "step": 610
    },
    {
      "epoch": 0.67,
      "learning_rate": 1.8176328502415459e-07,
      "logits/chosen": -1.3565137386322021,
      "logits/rejected": -1.1652584075927734,
      "logps/chosen": -450.35540771484375,
      "logps/rejected": -436.74761962890625,
      "loss": 0.4252,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.03007013536989689,
      "rewards/margins": 1.9853531122207642,
      "rewards/rejected": -2.01542329788208,
      "step": 620
    },
    {
      "epoch": 0.68,
      "learning_rate": 1.757246376811594e-07,
      "logits/chosen": -0.015371406450867653,
      "logits/rejected": -0.01580933667719364,
      "logps/chosen": -382.2900390625,
      "logps/rejected": -360.01446533203125,
      "loss": 0.3848,
      "rewards/accuracies": 0.949999988079071,
      "rewards/chosen": 0.17353440821170807,
      "rewards/margins": 2.286402940750122,
      "rewards/rejected": -2.1128687858581543,
      "step": 630
    },
    {
      "epoch": 0.69,
      "learning_rate": 1.6968599033816424e-07,
      "logits/chosen": 0.3802258372306824,
      "logits/rejected": 0.4669532775878906,
      "logps/chosen": -355.05322265625,
      "logps/rejected": -390.06976318359375,
      "loss": 0.4137,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.6333318948745728,
      "rewards/margins": 1.6801398992538452,
      "rewards/rejected": -2.313471794128418,
      "step": 640
    },
    {
      "epoch": 0.71,
      "learning_rate": 1.636473429951691e-07,
      "logits/chosen": 0.4362594485282898,
      "logits/rejected": 0.5603054761886597,
      "logps/chosen": -411.2711486816406,
      "logps/rejected": -398.11346435546875,
      "loss": 0.4552,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.7625397443771362,
      "rewards/margins": 1.8215090036392212,
      "rewards/rejected": -2.5840485095977783,
      "step": 650
    },
    {
      "epoch": 0.72,
      "learning_rate": 1.5760869565217392e-07,
      "logits/chosen": 0.4388657212257385,
      "logits/rejected": 0.8059428334236145,
      "logps/chosen": -406.7152404785156,
      "logps/rejected": -383.4807434082031,
      "loss": 0.4061,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.5559307336807251,
      "rewards/margins": 1.9154608249664307,
      "rewards/rejected": -2.4713916778564453,
      "step": 660
    },
    {
      "epoch": 0.73,
      "learning_rate": 1.5157004830917874e-07,
      "logits/chosen": 0.707136332988739,
      "logits/rejected": 0.8469961881637573,
      "logps/chosen": -373.77032470703125,
      "logps/rejected": -383.32525634765625,
      "loss": 0.4709,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.159020334482193,
      "rewards/margins": 1.6874653100967407,
      "rewards/rejected": -1.8464854955673218,
      "step": 670
    },
    {
      "epoch": 0.74,
      "learning_rate": 1.4553140096618357e-07,
      "logits/chosen": -0.08916174620389938,
      "logits/rejected": -0.07962872833013535,
      "logps/chosen": -403.15765380859375,
      "logps/rejected": -382.7056884765625,
      "loss": 0.4123,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": 0.1752249300479889,
      "rewards/margins": 2.1825637817382812,
      "rewards/rejected": -2.007338762283325,
      "step": 680
    },
    {
      "epoch": 0.75,
      "learning_rate": 1.3949275362318842e-07,
      "logits/chosen": 0.2861272692680359,
      "logits/rejected": 0.5865569114685059,
      "logps/chosen": -438.97808837890625,
      "logps/rejected": -424.977294921875,
      "loss": 0.3888,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.10896565020084381,
      "rewards/margins": 1.5186336040496826,
      "rewards/rejected": -1.6275993585586548,
      "step": 690
    },
    {
      "epoch": 0.76,
      "learning_rate": 1.3345410628019324e-07,
      "logits/chosen": 0.7959792017936707,
      "logits/rejected": 1.0797499418258667,
      "logps/chosen": -399.54302978515625,
      "logps/rejected": -411.88824462890625,
      "loss": 0.4496,
      "rewards/accuracies": 0.737500011920929,
      "rewards/chosen": -0.02021363377571106,
      "rewards/margins": 2.1036648750305176,
      "rewards/rejected": -2.1238787174224854,
      "step": 700
    },
    {
      "epoch": 0.77,
      "learning_rate": 1.2741545893719807e-07,
      "logits/chosen": 0.061588358134031296,
      "logits/rejected": 0.5792427659034729,
      "logps/chosen": -389.0401611328125,
      "logps/rejected": -442.1222229003906,
      "loss": 0.461,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.3942088484764099,
      "rewards/margins": 1.9804880619049072,
      "rewards/rejected": -2.3746962547302246,
      "step": 710
    },
    {
      "epoch": 0.78,
      "learning_rate": 1.213768115942029e-07,
      "logits/chosen": -0.3789304494857788,
      "logits/rejected": -0.5976569056510925,
      "logps/chosen": -383.9620056152344,
      "logps/rejected": -411.39923095703125,
      "loss": 0.3725,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.1539289653301239,
      "rewards/margins": 2.0121874809265137,
      "rewards/rejected": -2.166116237640381,
      "step": 720
    },
    {
      "epoch": 0.79,
      "learning_rate": 1.1533816425120772e-07,
      "logits/chosen": -0.32424452900886536,
      "logits/rejected": 0.054877616465091705,
      "logps/chosen": -399.9856262207031,
      "logps/rejected": -433.40008544921875,
      "loss": 0.4335,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.20857331156730652,
      "rewards/margins": 1.7776143550872803,
      "rewards/rejected": -1.9861876964569092,
      "step": 730
    },
    {
      "epoch": 0.8,
      "learning_rate": 1.0929951690821256e-07,
      "logits/chosen": 0.7587161064147949,
      "logits/rejected": 0.4672706127166748,
      "logps/chosen": -394.1853942871094,
      "logps/rejected": -397.8048095703125,
      "loss": 0.4497,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.04534666985273361,
      "rewards/margins": 2.2218165397644043,
      "rewards/rejected": -2.267162799835205,
      "step": 740
    },
    {
      "epoch": 0.81,
      "learning_rate": 1.0326086956521738e-07,
      "logits/chosen": -0.007064342498779297,
      "logits/rejected": -0.10647717863321304,
      "logps/chosen": -451.4822692871094,
      "logps/rejected": -446.7669372558594,
      "loss": 0.4347,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.011669009923934937,
      "rewards/margins": 1.8043409585952759,
      "rewards/rejected": -1.8160098791122437,
      "step": 750
    },
    {
      "epoch": 0.83,
      "learning_rate": 9.722222222222222e-08,
      "logits/chosen": -0.4753515124320984,
      "logits/rejected": -0.590586245059967,
      "logps/chosen": -471.35137939453125,
      "logps/rejected": -436.88531494140625,
      "loss": 0.386,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": 0.01547972857952118,
      "rewards/margins": 2.1215872764587402,
      "rewards/rejected": -2.106107473373413,
      "step": 760
    },
    {
      "epoch": 0.84,
      "learning_rate": 9.118357487922705e-08,
      "logits/chosen": -0.4006249010562897,
      "logits/rejected": -0.5747838616371155,
      "logps/chosen": -375.1439514160156,
      "logps/rejected": -383.0394287109375,
      "loss": 0.3963,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.3230701684951782,
      "rewards/margins": 2.1252005100250244,
      "rewards/rejected": -2.448270797729492,
      "step": 770
    },
    {
      "epoch": 0.85,
      "learning_rate": 8.514492753623189e-08,
      "logits/chosen": 0.18706265091896057,
      "logits/rejected": 0.14009952545166016,
      "logps/chosen": -381.85443115234375,
      "logps/rejected": -417.75146484375,
      "loss": 0.3904,
      "rewards/accuracies": 0.7875000238418579,
      "rewards/chosen": -0.3124622702598572,
      "rewards/margins": 1.5956885814666748,
      "rewards/rejected": -1.9081509113311768,
      "step": 780
    },
    {
      "epoch": 0.86,
      "learning_rate": 7.910628019323671e-08,
      "logits/chosen": 0.359174907207489,
      "logits/rejected": 0.3385276198387146,
      "logps/chosen": -429.9090270996094,
      "logps/rejected": -362.47857666015625,
      "loss": 0.4715,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.23555055260658264,
      "rewards/margins": 1.2514636516571045,
      "rewards/rejected": -1.4870140552520752,
      "step": 790
    },
    {
      "epoch": 0.87,
      "learning_rate": 7.306763285024155e-08,
      "logits/chosen": -0.1668163537979126,
      "logits/rejected": 0.2065034657716751,
      "logps/chosen": -394.82257080078125,
      "logps/rejected": -394.52142333984375,
      "loss": 0.3909,
      "rewards/accuracies": 0.762499988079071,
      "rewards/chosen": -0.4898454248905182,
      "rewards/margins": 1.7736278772354126,
      "rewards/rejected": -2.2634730339050293,
      "step": 800
    },
    {
      "epoch": 0.87,
      "eval_logits/chosen": -0.773809552192688,
      "eval_logits/rejected": -0.84580397605896,
      "eval_logps/chosen": -396.8601989746094,
      "eval_logps/rejected": -409.49676513671875,
      "eval_loss": 0.41672879457473755,
      "eval_rewards/accuracies": 0.8134920597076416,
      "eval_rewards/chosen": -0.22731684148311615,
      "eval_rewards/margins": 2.0872485637664795,
      "eval_rewards/rejected": -2.3145651817321777,
      "eval_runtime": 391.0713,
      "eval_samples_per_second": 5.114,
      "eval_steps_per_second": 0.161,
      "step": 800
    },
    {
      "epoch": 0.88,
      "learning_rate": 6.702898550724638e-08,
      "logits/chosen": -0.8055841326713562,
      "logits/rejected": -0.8581466674804688,
      "logps/chosen": -434.89300537109375,
      "logps/rejected": -407.73486328125,
      "loss": 0.4191,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.5859915018081665,
      "rewards/margins": 1.8876674175262451,
      "rewards/rejected": -2.473658800125122,
      "step": 810
    },
    {
      "epoch": 0.89,
      "learning_rate": 6.09903381642512e-08,
      "logits/chosen": -0.6149953603744507,
      "logits/rejected": -0.6610177755355835,
      "logps/chosen": -403.4602355957031,
      "logps/rejected": -412.859619140625,
      "loss": 0.3632,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": 0.0069200992584228516,
      "rewards/margins": 2.4245333671569824,
      "rewards/rejected": -2.4176132678985596,
      "step": 820
    },
    {
      "epoch": 0.9,
      "learning_rate": 5.4951690821256036e-08,
      "logits/chosen": -0.7099069356918335,
      "logits/rejected": -0.5322675704956055,
      "logps/chosen": -477.89080810546875,
      "logps/rejected": -509.3751525878906,
      "loss": 0.4242,
      "rewards/accuracies": 0.862500011920929,
      "rewards/chosen": 0.04087377339601517,
      "rewards/margins": 2.5105957984924316,
      "rewards/rejected": -2.469722270965576,
      "step": 830
    },
    {
      "epoch": 0.91,
      "learning_rate": 4.891304347826087e-08,
      "logits/chosen": -0.8323551416397095,
      "logits/rejected": -0.5835217833518982,
      "logps/chosen": -344.49261474609375,
      "logps/rejected": -416.62738037109375,
      "loss": 0.4114,
      "rewards/accuracies": 0.7250000238418579,
      "rewards/chosen": -0.43376049399375916,
      "rewards/margins": 1.836294412612915,
      "rewards/rejected": -2.270055055618286,
      "step": 840
    },
    {
      "epoch": 0.92,
      "learning_rate": 4.28743961352657e-08,
      "logits/chosen": -0.8896607160568237,
      "logits/rejected": -0.9991308450698853,
      "logps/chosen": -429.5093688964844,
      "logps/rejected": -462.163330078125,
      "loss": 0.4213,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": 0.14300577342510223,
      "rewards/margins": 2.3328614234924316,
      "rewards/rejected": -2.1898555755615234,
      "step": 850
    },
    {
      "epoch": 0.93,
      "learning_rate": 3.6835748792270526e-08,
      "logits/chosen": -1.5217692852020264,
      "logits/rejected": -1.4032458066940308,
      "logps/chosen": -445.5403747558594,
      "logps/rejected": -484.6028747558594,
      "loss": 0.4084,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.3494121730327606,
      "rewards/margins": 1.832707166671753,
      "rewards/rejected": -2.182119369506836,
      "step": 860
    },
    {
      "epoch": 0.94,
      "learning_rate": 3.0797101449275364e-08,
      "logits/chosen": -1.3731000423431396,
      "logits/rejected": -1.262548804283142,
      "logps/chosen": -421.68524169921875,
      "logps/rejected": -415.681884765625,
      "loss": 0.4108,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": 0.045568324625492096,
      "rewards/margins": 2.3908283710479736,
      "rewards/rejected": -2.34525990486145,
      "step": 870
    },
    {
      "epoch": 0.96,
      "learning_rate": 2.475845410628019e-08,
      "logits/chosen": -1.434045433998108,
      "logits/rejected": -1.2689034938812256,
      "logps/chosen": -386.4149169921875,
      "logps/rejected": -396.9994812011719,
      "loss": 0.4121,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -0.10745694488286972,
      "rewards/margins": 2.171525478363037,
      "rewards/rejected": -2.278982400894165,
      "step": 880
    },
    {
      "epoch": 0.97,
      "learning_rate": 1.8719806763285022e-08,
      "logits/chosen": -1.6423695087432861,
      "logits/rejected": -1.5149376392364502,
      "logps/chosen": -413.06158447265625,
      "logps/rejected": -437.2244567871094,
      "loss": 0.3935,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -0.15360334515571594,
      "rewards/margins": 2.0728042125701904,
      "rewards/rejected": -2.226407527923584,
      "step": 890
    },
    {
      "epoch": 0.98,
      "learning_rate": 1.2681159420289856e-08,
      "logits/chosen": -0.9922491312026978,
      "logits/rejected": -1.0585081577301025,
      "logps/chosen": -401.11627197265625,
      "logps/rejected": -402.2158508300781,
      "loss": 0.3954,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.1775648146867752,
      "rewards/margins": 2.2243430614471436,
      "rewards/rejected": -2.4019076824188232,
      "step": 900
    },
    {
      "epoch": 0.99,
      "learning_rate": 6.642512077294686e-09,
      "logits/chosen": 0.17671330273151398,
      "logits/rejected": 0.08089754730463028,
      "logps/chosen": -331.13409423828125,
      "logps/rejected": -373.9659729003906,
      "loss": 0.3714,
      "rewards/accuracies": 0.7749999761581421,
      "rewards/chosen": -0.24092864990234375,
      "rewards/margins": 1.8302351236343384,
      "rewards/rejected": -2.0711638927459717,
      "step": 910
    },
    {
      "epoch": 1.0,
      "learning_rate": 6.038647342995168e-10,
      "logits/chosen": -0.6910379528999329,
      "logits/rejected": -0.7387306094169617,
      "logps/chosen": -440.9806213378906,
      "logps/rejected": -403.29083251953125,
      "loss": 0.3947,
      "rewards/accuracies": 0.8500000238418579,
      "rewards/chosen": -0.311662882566452,
      "rewards/margins": 2.3290438652038574,
      "rewards/rejected": -2.640706777572632,
      "step": 920
    },
    {
      "epoch": 1.0,
      "step": 921,
      "total_flos": 0.0,
      "train_loss": 0.4461688995361328,
      "train_runtime": 44067.2139,
      "train_samples_per_second": 1.337,
      "train_steps_per_second": 0.021
    }
  ],
  "logging_steps": 10,
  "max_steps": 921,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
  "save_steps": 500,
  "total_flos": 0.0,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null
}