{
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.0,
  "eval_steps": 1000,
  "global_step": 1638,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "epoch": 0.001221001221001221,
      "grad_norm": 3.104700246084015,
      "learning_rate": 3.048780487804878e-09,
      "logits/chosen": -2.611332893371582,
      "logits/rejected": -2.6034297943115234,
      "logps/chosen": -424.76251220703125,
      "logps/rejected": -401.40936279296875,
      "loss": 0.6931,
      "rewards/accuracies": 0.0,
      "rewards/chosen": 0.0,
      "rewards/margins": 0.0,
      "rewards/rejected": 0.0,
      "step": 1
    },
    {
      "epoch": 0.01221001221001221,
      "grad_norm": 3.069950647256397,
      "learning_rate": 3.048780487804878e-08,
      "logits/chosen": -2.483457326889038,
      "logits/rejected": -2.5166726112365723,
      "logps/chosen": -394.9632873535156,
      "logps/rejected": -370.91741943359375,
      "loss": 0.6932,
      "rewards/accuracies": 0.4444444477558136,
      "rewards/chosen": -0.00035367117379792035,
      "rewards/margins": -0.00030399090610444546,
      "rewards/rejected": -4.968021312379278e-05,
      "step": 10
    },
    {
      "epoch": 0.02442002442002442,
      "grad_norm": 3.064473375199238,
      "learning_rate": 6.097560975609756e-08,
      "logits/chosen": -2.4844443798065186,
      "logits/rejected": -2.5088613033294678,
      "logps/chosen": -393.47589111328125,
      "logps/rejected": -384.7496337890625,
      "loss": 0.6931,
      "rewards/accuracies": 0.5062500238418579,
      "rewards/chosen": -0.0005507160676643252,
      "rewards/margins": -0.0003282243851572275,
      "rewards/rejected": -0.00022249165340326726,
      "step": 20
    },
    {
      "epoch": 0.03663003663003663,
      "grad_norm": 2.881896681708338,
      "learning_rate": 9.146341463414634e-08,
      "logits/chosen": -2.478980779647827,
      "logits/rejected": -2.4746451377868652,
      "logps/chosen": -400.7593994140625,
      "logps/rejected": -374.46099853515625,
      "loss": 0.693,
      "rewards/accuracies": 0.53125,
      "rewards/chosen": 0.0005641734460368752,
      "rewards/margins": 0.00045302818762138486,
      "rewards/rejected": 0.00011114527296740562,
      "step": 30
    },
    {
      "epoch": 0.04884004884004884,
      "grad_norm": 3.0295854115026777,
      "learning_rate": 1.219512195121951e-07,
      "logits/chosen": -2.462890386581421,
      "logits/rejected": -2.483200788497925,
      "logps/chosen": -397.7459411621094,
      "logps/rejected": -385.4634704589844,
      "loss": 0.6922,
      "rewards/accuracies": 0.5625,
      "rewards/chosen": 0.0019774003885686398,
      "rewards/margins": 0.0016252705827355385,
      "rewards/rejected": 0.00035212995135225356,
      "step": 40
    },
    {
      "epoch": 0.06105006105006105,
      "grad_norm": 2.9757197429390847,
      "learning_rate": 1.524390243902439e-07,
      "logits/chosen": -2.4540820121765137,
      "logits/rejected": -2.4638664722442627,
      "logps/chosen": -391.7041931152344,
      "logps/rejected": -370.6739196777344,
      "loss": 0.6906,
      "rewards/accuracies": 0.6656249761581421,
      "rewards/chosen": 0.005458775907754898,
      "rewards/margins": 0.004881708417087793,
      "rewards/rejected": 0.0005770674324594438,
      "step": 50
    },
    {
      "epoch": 0.07326007326007326,
      "grad_norm": 3.0702933396650827,
      "learning_rate": 1.8292682926829268e-07,
      "logits/chosen": -2.453782796859741,
      "logits/rejected": -2.4780030250549316,
      "logps/chosen": -375.0745544433594,
      "logps/rejected": -376.1570739746094,
      "loss": 0.6872,
      "rewards/accuracies": 0.7281249761581421,
      "rewards/chosen": 0.011652452871203423,
      "rewards/margins": 0.010647130198776722,
      "rewards/rejected": 0.0010053229052573442,
      "step": 60
    },
    {
      "epoch": 0.08547008547008547,
      "grad_norm": 2.9490982076861703,
      "learning_rate": 2.1341463414634144e-07,
      "logits/chosen": -2.5242810249328613,
      "logits/rejected": -2.513012170791626,
      "logps/chosen": -402.56036376953125,
      "logps/rejected": -372.23004150390625,
      "loss": 0.6833,
      "rewards/accuracies": 0.7906249761581421,
      "rewards/chosen": 0.01956186816096306,
      "rewards/margins": 0.018751125782728195,
      "rewards/rejected": 0.0008107417961582541,
      "step": 70
    },
    {
      "epoch": 0.09768009768009768,
      "grad_norm": 2.995960567198045,
      "learning_rate": 2.439024390243902e-07,
      "logits/chosen": -2.4664769172668457,
      "logits/rejected": -2.512596607208252,
      "logps/chosen": -388.77276611328125,
      "logps/rejected": -386.55120849609375,
      "loss": 0.6754,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": 0.0328640341758728,
      "rewards/margins": 0.035563308745622635,
      "rewards/rejected": -0.0026992757339030504,
      "step": 80
    },
    {
      "epoch": 0.10989010989010989,
      "grad_norm": 3.0842908329196925,
      "learning_rate": 2.7439024390243906e-07,
      "logits/chosen": -2.583833932876587,
      "logits/rejected": -2.6459927558898926,
      "logps/chosen": -411.39117431640625,
      "logps/rejected": -416.40771484375,
      "loss": 0.6616,
      "rewards/accuracies": 0.809374988079071,
      "rewards/chosen": 0.04710187762975693,
      "rewards/margins": 0.06961944699287415,
      "rewards/rejected": -0.022517573088407516,
      "step": 90
    },
    {
      "epoch": 0.1221001221001221,
      "grad_norm": 2.588661111129042,
      "learning_rate": 3.048780487804878e-07,
      "logits/chosen": -2.5411438941955566,
      "logits/rejected": -2.5637898445129395,
      "logps/chosen": -382.1836853027344,
      "logps/rejected": -380.8400573730469,
      "loss": 0.6506,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": 0.053000353276729584,
      "rewards/margins": 0.09493989497423172,
      "rewards/rejected": -0.04193953052163124,
      "step": 100
    },
    {
      "epoch": 0.1343101343101343,
      "grad_norm": 2.921339715123654,
      "learning_rate": 3.353658536585366e-07,
      "logits/chosen": -2.5204200744628906,
      "logits/rejected": -2.5622482299804688,
      "logps/chosen": -393.4702453613281,
      "logps/rejected": -395.1978454589844,
      "loss": 0.6293,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": 0.03344795107841492,
      "rewards/margins": 0.16028472781181335,
      "rewards/rejected": -0.12683679163455963,
      "step": 110
    },
    {
      "epoch": 0.14652014652014653,
      "grad_norm": 2.963460425484982,
      "learning_rate": 3.6585365853658536e-07,
      "logits/chosen": -2.508514881134033,
      "logits/rejected": -2.5441009998321533,
      "logps/chosen": -383.43133544921875,
      "logps/rejected": -407.09210205078125,
      "loss": 0.5887,
      "rewards/accuracies": 0.809374988079071,
      "rewards/chosen": -0.013098609633743763,
      "rewards/margins": 0.2991253733634949,
      "rewards/rejected": -0.31222397089004517,
      "step": 120
    },
    {
      "epoch": 0.15873015873015872,
      "grad_norm": 3.0254473498968553,
      "learning_rate": 3.9634146341463414e-07,
      "logits/chosen": -2.542419910430908,
      "logits/rejected": -2.5631086826324463,
      "logps/chosen": -407.2526550292969,
      "logps/rejected": -442.8262634277344,
      "loss": 0.5587,
      "rewards/accuracies": 0.800000011920929,
      "rewards/chosen": -0.11188334226608276,
      "rewards/margins": 0.4068102240562439,
      "rewards/rejected": -0.5186935663223267,
      "step": 130
    },
    {
      "epoch": 0.17094017094017094,
      "grad_norm": 3.612015672751943,
      "learning_rate": 4.268292682926829e-07,
      "logits/chosen": -2.5460963249206543,
      "logits/rejected": -2.595196485519409,
      "logps/chosen": -435.4662170410156,
      "logps/rejected": -484.08270263671875,
      "loss": 0.5266,
      "rewards/accuracies": 0.796875,
      "rewards/chosen": -0.2931057810783386,
      "rewards/margins": 0.5774662494659424,
      "rewards/rejected": -0.8705719709396362,
      "step": 140
    },
    {
      "epoch": 0.18315018315018314,
      "grad_norm": 4.357042344454044,
      "learning_rate": 4.573170731707317e-07,
      "logits/chosen": -2.4378104209899902,
      "logits/rejected": -2.4749557971954346,
      "logps/chosen": -473.6615295410156,
      "logps/rejected": -519.7752685546875,
      "loss": 0.5035,
      "rewards/accuracies": 0.8062499761581421,
      "rewards/chosen": -0.6644935607910156,
      "rewards/margins": 0.7163723707199097,
      "rewards/rejected": -1.3808658123016357,
      "step": 150
    },
    {
      "epoch": 0.19536019536019536,
      "grad_norm": 4.657915772314801,
      "learning_rate": 4.878048780487804e-07,
      "logits/chosen": -2.3850626945495605,
      "logits/rejected": -2.404574394226074,
      "logps/chosen": -475.3724060058594,
      "logps/rejected": -554.40576171875,
      "loss": 0.4585,
      "rewards/accuracies": 0.793749988079071,
      "rewards/chosen": -0.9302708506584167,
      "rewards/margins": 0.9146777987480164,
      "rewards/rejected": -1.8449485301971436,
      "step": 160
    },
    {
      "epoch": 0.20757020757020758,
      "grad_norm": 5.145238611541166,
      "learning_rate": 4.999795585653115e-07,
      "logits/chosen": -2.4656128883361816,
      "logits/rejected": -2.5041518211364746,
      "logps/chosen": -500.9999084472656,
      "logps/rejected": -621.8275146484375,
      "loss": 0.4203,
      "rewards/accuracies": 0.846875011920929,
      "rewards/chosen": -1.0742014646530151,
      "rewards/margins": 1.363214135169983,
      "rewards/rejected": -2.437415599822998,
      "step": 170
    },
    {
      "epoch": 0.21978021978021978,
      "grad_norm": 5.498621709371294,
      "learning_rate": 4.998546507921325e-07,
      "logits/chosen": -2.3531060218811035,
      "logits/rejected": -2.3719255924224854,
      "logps/chosen": -516.4880981445312,
      "logps/rejected": -656.3202514648438,
      "loss": 0.3875,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -1.3029719591140747,
      "rewards/margins": 1.5469024181365967,
      "rewards/rejected": -2.8498740196228027,
      "step": 180
    },
    {
      "epoch": 0.231990231990232,
      "grad_norm": 4.777943322815817,
      "learning_rate": 4.996162482680895e-07,
      "logits/chosen": -2.383643388748169,
      "logits/rejected": -2.390869617462158,
      "logps/chosen": -557.1913452148438,
      "logps/rejected": -686.9656372070312,
      "loss": 0.3856,
      "rewards/accuracies": 0.828125,
      "rewards/chosen": -1.459194540977478,
      "rewards/margins": 1.6121686697006226,
      "rewards/rejected": -3.0713634490966797,
      "step": 190
    },
    {
      "epoch": 0.2442002442002442,
      "grad_norm": 5.801515238018521,
      "learning_rate": 4.992644592858842e-07,
      "logits/chosen": -2.3923563957214355,
      "logits/rejected": -2.444206476211548,
      "logps/chosen": -532.6453247070312,
      "logps/rejected": -670.8153076171875,
      "loss": 0.3645,
      "rewards/accuracies": 0.8187500238418579,
      "rewards/chosen": -1.294365406036377,
      "rewards/margins": 1.502074122428894,
      "rewards/rejected": -2.7964394092559814,
      "step": 200
    },
    {
      "epoch": 0.2564102564102564,
      "grad_norm": 7.316959768144989,
      "learning_rate": 4.987994436432335e-07,
      "logits/chosen": -2.342782497406006,
      "logits/rejected": -2.362506151199341,
      "logps/chosen": -557.2277221679688,
      "logps/rejected": -723.668701171875,
      "loss": 0.3664,
      "rewards/accuracies": 0.846875011920929,
      "rewards/chosen": -1.531425952911377,
      "rewards/margins": 1.8236634731292725,
      "rewards/rejected": -3.3550896644592285,
      "step": 210
    },
    {
      "epoch": 0.2686202686202686,
      "grad_norm": 5.84414999103754,
      "learning_rate": 4.982214125702845e-07,
      "logits/chosen": -2.363788366317749,
      "logits/rejected": -2.4076294898986816,
      "logps/chosen": -515.8175659179688,
      "logps/rejected": -681.577880859375,
      "loss": 0.3513,
      "rewards/accuracies": 0.856249988079071,
      "rewards/chosen": -1.2184154987335205,
      "rewards/margins": 1.8515632152557373,
      "rewards/rejected": -3.069979190826416,
      "step": 220
    },
    {
      "epoch": 0.28083028083028083,
      "grad_norm": 5.57281060925085,
      "learning_rate": 4.975306286336627e-07,
      "logits/chosen": -2.390061855316162,
      "logits/rejected": -2.4118847846984863,
      "logps/chosen": -558.8645629882812,
      "logps/rejected": -748.7266845703125,
      "loss": 0.3603,
      "rewards/accuracies": 0.824999988079071,
      "rewards/chosen": -1.5887969732284546,
      "rewards/margins": 1.980838418006897,
      "rewards/rejected": -3.5696358680725098,
      "step": 230
    },
    {
      "epoch": 0.29304029304029305,
      "grad_norm": 5.714762684377855,
      "learning_rate": 4.967274056172044e-07,
      "logits/chosen": -2.3940510749816895,
      "logits/rejected": -2.4176127910614014,
      "logps/chosen": -543.5166015625,
      "logps/rejected": -715.0543212890625,
      "loss": 0.3498,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -1.3804460763931274,
      "rewards/margins": 1.9572741985321045,
      "rewards/rejected": -3.3377201557159424,
      "step": 240
    },
    {
      "epoch": 0.3052503052503053,
      "grad_norm": 5.674577347175447,
      "learning_rate": 4.958121083794216e-07,
      "logits/chosen": -2.360697031021118,
      "logits/rejected": -2.3762993812561035,
      "logps/chosen": -573.5714111328125,
      "logps/rejected": -768.0198364257812,
      "loss": 0.3375,
      "rewards/accuracies": 0.8343750238418579,
      "rewards/chosen": -1.5868339538574219,
      "rewards/margins": 2.1389665603637695,
      "rewards/rejected": -3.7258007526397705,
      "step": 250
    },
    {
      "epoch": 0.31746031746031744,
      "grad_norm": 6.3452788515852365,
      "learning_rate": 4.947851526877681e-07,
      "logits/chosen": -2.3265128135681152,
      "logits/rejected": -2.354276180267334,
      "logps/chosen": -530.7965087890625,
      "logps/rejected": -724.9639282226562,
      "loss": 0.3366,
      "rewards/accuracies": 0.8125,
      "rewards/chosen": -1.4949758052825928,
      "rewards/margins": 2.002520799636841,
      "rewards/rejected": -3.4974968433380127,
      "step": 260
    },
    {
      "epoch": 0.32967032967032966,
      "grad_norm": 6.281289415201168,
      "learning_rate": 4.936470050297798e-07,
      "logits/chosen": -2.301875352859497,
      "logits/rejected": -2.3351759910583496,
      "logps/chosen": -551.602294921875,
      "logps/rejected": -743.7047119140625,
      "loss": 0.3241,
      "rewards/accuracies": 0.8343750238418579,
      "rewards/chosen": -1.5574215650558472,
      "rewards/margins": 2.1133522987365723,
      "rewards/rejected": -3.67077374458313,
      "step": 270
    },
    {
      "epoch": 0.3418803418803419,
      "grad_norm": 6.620858897078139,
      "learning_rate": 4.92398182401176e-07,
      "logits/chosen": -2.3097803592681885,
      "logits/rejected": -2.3185763359069824,
      "logps/chosen": -573.0301513671875,
      "logps/rejected": -778.9459228515625,
      "loss": 0.3358,
      "rewards/accuracies": 0.871874988079071,
      "rewards/chosen": -1.798607587814331,
      "rewards/margins": 2.2159924507141113,
      "rewards/rejected": -4.0146002769470215,
      "step": 280
    },
    {
      "epoch": 0.3540903540903541,
      "grad_norm": 6.930259744533563,
      "learning_rate": 4.910392520710174e-07,
      "logits/chosen": -2.301408529281616,
      "logits/rejected": -2.3531241416931152,
      "logps/chosen": -536.4937133789062,
      "logps/rejected": -733.7653198242188,
      "loss": 0.3233,
      "rewards/accuracies": 0.846875011920929,
      "rewards/chosen": -1.5255537033081055,
      "rewards/margins": 2.0018668174743652,
      "rewards/rejected": -3.52742075920105,
      "step": 290
    },
    {
      "epoch": 0.3663003663003663,
      "grad_norm": 6.863827814496594,
      "learning_rate": 4.895708313240285e-07,
      "logits/chosen": -2.3684794902801514,
      "logits/rejected": -2.3983054161071777,
      "logps/chosen": -560.7042236328125,
      "logps/rejected": -786.914794921875,
      "loss": 0.3143,
      "rewards/accuracies": 0.878125011920929,
      "rewards/chosen": -1.5986013412475586,
      "rewards/margins": 2.3771414756774902,
      "rewards/rejected": -3.9757423400878906,
      "step": 300
    },
    {
      "epoch": 0.3785103785103785,
      "grad_norm": 6.618762045262814,
      "learning_rate": 4.879935871802001e-07,
      "logits/chosen": -2.340313196182251,
      "logits/rejected": -2.348755121231079,
      "logps/chosen": -555.6011962890625,
      "logps/rejected": -804.3768310546875,
      "loss": 0.3199,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -1.6778042316436768,
      "rewards/margins": 2.592184543609619,
      "rewards/rejected": -4.269989013671875,
      "step": 310
    },
    {
      "epoch": 0.3907203907203907,
      "grad_norm": 6.104172520347164,
      "learning_rate": 4.863082360917998e-07,
      "logits/chosen": -2.300529956817627,
      "logits/rejected": -2.3536949157714844,
      "logps/chosen": -565.5293579101562,
      "logps/rejected": -806.9231567382812,
      "loss": 0.3146,
      "rewards/accuracies": 0.856249988079071,
      "rewards/chosen": -1.7859094142913818,
      "rewards/margins": 2.411153793334961,
      "rewards/rejected": -4.19706392288208,
      "step": 320
    },
    {
      "epoch": 0.40293040293040294,
      "grad_norm": 13.362839259142513,
      "learning_rate": 4.845155436179286e-07,
      "logits/chosen": -2.2448084354400635,
      "logits/rejected": -2.265852451324463,
      "logps/chosen": -558.632080078125,
      "logps/rejected": -792.7662963867188,
      "loss": 0.2963,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -1.8193552494049072,
      "rewards/margins": 2.476144790649414,
      "rewards/rejected": -4.2955002784729,
      "step": 330
    },
    {
      "epoch": 0.41514041514041516,
      "grad_norm": 8.193239382667205,
      "learning_rate": 4.826163240767716e-07,
      "logits/chosen": -2.2685139179229736,
      "logits/rejected": -2.291156053543091,
      "logps/chosen": -578.7394409179688,
      "logps/rejected": -828.9251098632812,
      "loss": 0.2845,
      "rewards/accuracies": 0.909375011920929,
      "rewards/chosen": -1.7241359949111938,
      "rewards/margins": 2.655815601348877,
      "rewards/rejected": -4.379951000213623,
      "step": 340
    },
    {
      "epoch": 0.42735042735042733,
      "grad_norm": 11.339490692180462,
      "learning_rate": 4.806114401756988e-07,
      "logits/chosen": -2.278423309326172,
      "logits/rejected": -2.281816244125366,
      "logps/chosen": -609.6286010742188,
      "logps/rejected": -881.9080810546875,
      "loss": 0.3005,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -2.0114948749542236,
      "rewards/margins": 2.8581748008728027,
      "rewards/rejected": -4.8696699142456055,
      "step": 350
    },
    {
      "epoch": 0.43956043956043955,
      "grad_norm": 7.458240778141653,
      "learning_rate": 4.785018026193862e-07,
      "logits/chosen": -2.3067989349365234,
      "logits/rejected": -2.302696943283081,
      "logps/chosen": -579.6629638671875,
      "logps/rejected": -773.7774658203125,
      "loss": 0.3119,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -1.6714990139007568,
      "rewards/margins": 2.2192506790161133,
      "rewards/rejected": -3.890749454498291,
      "step": 360
    },
    {
      "epoch": 0.4517704517704518,
      "grad_norm": 7.753160256439322,
      "learning_rate": 4.762883696961353e-07,
      "logits/chosen": -2.2718212604522705,
      "logits/rejected": -2.275289535522461,
      "logps/chosen": -606.1307373046875,
      "logps/rejected": -799.2538452148438,
      "loss": 0.2968,
      "rewards/accuracies": 0.8687499761581421,
      "rewards/chosen": -2.069481134414673,
      "rewards/margins": 2.2473983764648438,
      "rewards/rejected": -4.316879749298096,
      "step": 370
    },
    {
      "epoch": 0.463980463980464,
      "grad_norm": 7.668480163136406,
      "learning_rate": 4.739721468425763e-07,
      "logits/chosen": -2.196530342102051,
      "logits/rejected": -2.2193641662597656,
      "logps/chosen": -598.3923950195312,
      "logps/rejected": -832.3982543945312,
      "loss": 0.2843,
      "rewards/accuracies": 0.840624988079071,
      "rewards/chosen": -2.077108383178711,
      "rewards/margins": 2.470067024230957,
      "rewards/rejected": -4.547175407409668,
      "step": 380
    },
    {
      "epoch": 0.47619047619047616,
      "grad_norm": 7.295306779149404,
      "learning_rate": 4.715541861869562e-07,
      "logits/chosen": -2.260920763015747,
      "logits/rejected": -2.2953712940216064,
      "logps/chosen": -582.3689575195312,
      "logps/rejected": -889.9528198242188,
      "loss": 0.2779,
      "rewards/accuracies": 0.8968750238418579,
      "rewards/chosen": -1.7534271478652954,
      "rewards/margins": 3.1180617809295654,
      "rewards/rejected": -4.871488571166992,
      "step": 390
    },
    {
      "epoch": 0.4884004884004884,
      "grad_norm": 8.066116525077385,
      "learning_rate": 4.690355860712163e-07,
      "logits/chosen": -2.222865581512451,
      "logits/rejected": -2.278761386871338,
      "logps/chosen": -597.5587158203125,
      "logps/rejected": -863.9420776367188,
      "loss": 0.2852,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -1.9404022693634033,
      "rewards/margins": 2.7501840591430664,
      "rewards/rejected": -4.690586090087891,
      "step": 400
    },
    {
      "epoch": 0.5006105006105006,
      "grad_norm": 7.116028285618711,
      "learning_rate": 4.664174905520782e-07,
      "logits/chosen": -2.1432394981384277,
      "logits/rejected": -2.1721110343933105,
      "logps/chosen": -567.879638671875,
      "logps/rejected": -829.4773559570312,
      "loss": 0.2967,
      "rewards/accuracies": 0.846875011920929,
      "rewards/chosen": -1.8987783193588257,
      "rewards/margins": 2.688133716583252,
      "rewards/rejected": -4.586911678314209,
      "step": 410
    },
    {
      "epoch": 0.5128205128205128,
      "grad_norm": 7.04360607650549,
      "learning_rate": 4.637010888813638e-07,
      "logits/chosen": -2.182570695877075,
      "logits/rejected": -2.1820969581604004,
      "logps/chosen": -577.9962768554688,
      "logps/rejected": -821.7763671875,
      "loss": 0.2965,
      "rewards/accuracies": 0.84375,
      "rewards/chosen": -1.8270905017852783,
      "rewards/margins": 2.6087398529052734,
      "rewards/rejected": -4.435830116271973,
      "step": 420
    },
    {
      "epoch": 0.525030525030525,
      "grad_norm": 8.266012865923875,
      "learning_rate": 4.608876149657862e-07,
      "logits/chosen": -2.1726815700531006,
      "logits/rejected": -2.179741621017456,
      "logps/chosen": -615.1981201171875,
      "logps/rejected": -858.2872314453125,
      "loss": 0.2806,
      "rewards/accuracies": 0.9125000238418579,
      "rewards/chosen": -2.058166980743408,
      "rewards/margins": 2.7252743244171143,
      "rewards/rejected": -4.783441066741943,
      "step": 430
    },
    {
      "epoch": 0.5372405372405372,
      "grad_norm": 7.654156099725738,
      "learning_rate": 4.5797834680645553e-07,
      "logits/chosen": -2.149817943572998,
      "logits/rejected": -2.1750922203063965,
      "logps/chosen": -626.6456909179688,
      "logps/rejected": -924.1580810546875,
      "loss": 0.2855,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -2.245823621749878,
      "rewards/margins": 3.026350736618042,
      "rewards/rejected": -5.27217435836792,
      "step": 440
    },
    {
      "epoch": 0.5494505494505495,
      "grad_norm": 7.2185121675912445,
      "learning_rate": 4.549746059183561e-07,
      "logits/chosen": -2.134995460510254,
      "logits/rejected": -2.1586225032806396,
      "logps/chosen": -588.483642578125,
      "logps/rejected": -902.3905029296875,
      "loss": 0.2651,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -2.05491304397583,
      "rewards/margins": 3.2267088890075684,
      "rewards/rejected": -5.281621932983398,
      "step": 450
    },
    {
      "epoch": 0.5616605616605617,
      "grad_norm": 8.705469465511992,
      "learning_rate": 4.5187775673005744e-07,
      "logits/chosen": -2.0956122875213623,
      "logits/rejected": -2.122889757156372,
      "logps/chosen": -614.2383422851562,
      "logps/rejected": -854.7008666992188,
      "loss": 0.2706,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -2.2875373363494873,
      "rewards/margins": 2.5731403827667236,
      "rewards/rejected": -4.860678195953369,
      "step": 460
    },
    {
      "epoch": 0.5738705738705738,
      "grad_norm": 6.674714090077302,
      "learning_rate": 4.4868920596393197e-07,
      "logits/chosen": -2.122455596923828,
      "logits/rejected": -2.1570394039154053,
      "logps/chosen": -628.7379150390625,
      "logps/rejected": -913.1719970703125,
      "loss": 0.2635,
      "rewards/accuracies": 0.8531249761581421,
      "rewards/chosen": -2.345153331756592,
      "rewards/margins": 2.962648630142212,
      "rewards/rejected": -5.307801723480225,
      "step": 470
    },
    {
      "epoch": 0.5860805860805861,
      "grad_norm": 10.484528534722092,
      "learning_rate": 4.4541040199716063e-07,
      "logits/chosen": -2.1315033435821533,
      "logits/rejected": -2.133057117462158,
      "logps/chosen": -652.1898803710938,
      "logps/rejected": -918.3726806640625,
      "loss": 0.2841,
      "rewards/accuracies": 0.887499988079071,
      "rewards/chosen": -2.442749261856079,
      "rewards/margins": 2.9514050483703613,
      "rewards/rejected": -5.3941545486450195,
      "step": 480
    },
    {
      "epoch": 0.5982905982905983,
      "grad_norm": 6.764572032034108,
      "learning_rate": 4.4204283420381827e-07,
      "logits/chosen": -2.0328996181488037,
      "logits/rejected": -2.0834460258483887,
      "logps/chosen": -605.2128295898438,
      "logps/rejected": -897.3580322265625,
      "loss": 0.2652,
      "rewards/accuracies": 0.871874988079071,
      "rewards/chosen": -2.2144479751586914,
      "rewards/margins": 2.9639675617218018,
      "rewards/rejected": -5.178415775299072,
      "step": 490
    },
    {
      "epoch": 0.6105006105006106,
      "grad_norm": 7.778145469907922,
      "learning_rate": 4.3858803227833526e-07,
      "logits/chosen": -1.9765386581420898,
      "logits/rejected": -2.015956401824951,
      "logps/chosen": -608.0709838867188,
      "logps/rejected": -899.2232666015625,
      "loss": 0.2715,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": -2.045722484588623,
      "rewards/margins": 3.0392613410949707,
      "rewards/rejected": -5.084983825683594,
      "step": 500
    },
    {
      "epoch": 0.6227106227106227,
      "grad_norm": 7.943002170259063,
      "learning_rate": 4.350475655406445e-07,
      "logits/chosen": -2.0267040729522705,
      "logits/rejected": -2.0642428398132324,
      "logps/chosen": -634.7288208007812,
      "logps/rejected": -928.9196166992188,
      "loss": 0.2677,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -2.3875765800476074,
      "rewards/margins": 3.001603126525879,
      "rewards/rejected": -5.3891801834106445,
      "step": 510
    },
    {
      "epoch": 0.6349206349206349,
      "grad_norm": 8.177071581241291,
      "learning_rate": 4.314230422233286e-07,
      "logits/chosen": -1.9996439218521118,
      "logits/rejected": -2.001002788543701,
      "logps/chosen": -606.2533569335938,
      "logps/rejected": -888.8444213867188,
      "loss": 0.2508,
      "rewards/accuracies": 0.875,
      "rewards/chosen": -2.237772226333618,
      "rewards/margins": 2.8773951530456543,
      "rewards/rejected": -5.115168571472168,
      "step": 520
    },
    {
      "epoch": 0.6471306471306472,
      "grad_norm": 7.600982395285539,
      "learning_rate": 4.2771610874109166e-07,
      "logits/chosen": -1.9806162118911743,
      "logits/rejected": -2.036025285720825,
      "logps/chosen": -653.9401245117188,
      "logps/rejected": -974.8654174804688,
      "loss": 0.2529,
      "rewards/accuracies": 0.90625,
      "rewards/chosen": -2.513798236846924,
      "rewards/margins": 3.2604928016662598,
      "rewards/rejected": -5.774291038513184,
      "step": 530
    },
    {
      "epoch": 0.6593406593406593,
      "grad_norm": 9.082170828519892,
      "learning_rate": 4.2392844894288605e-07,
      "logits/chosen": -1.9804267883300781,
      "logits/rejected": -1.9906890392303467,
      "logps/chosen": -633.1802368164062,
      "logps/rejected": -931.8502197265625,
      "loss": 0.2571,
      "rewards/accuracies": 0.921875,
      "rewards/chosen": -2.3836662769317627,
      "rewards/margins": 3.0242981910705566,
      "rewards/rejected": -5.40796422958374,
      "step": 540
    },
    {
      "epoch": 0.6715506715506715,
      "grad_norm": 7.837053802282412,
      "learning_rate": 4.2006178334703636e-07,
      "logits/chosen": -2.006538152694702,
      "logits/rejected": -2.0499379634857178,
      "logps/chosen": -650.6314697265625,
      "logps/rejected": -932.8741455078125,
      "loss": 0.2636,
      "rewards/accuracies": 0.8656250238418579,
      "rewards/chosen": -2.4409451484680176,
      "rewards/margins": 3.0085086822509766,
      "rewards/rejected": -5.449453830718994,
      "step": 550
    },
    {
      "epoch": 0.6837606837606838,
      "grad_norm": 6.941390377752381,
      "learning_rate": 4.161178683597054e-07,
      "logits/chosen": -2.0168557167053223,
      "logits/rejected": -1.9983936548233032,
      "logps/chosen": -596.7725830078125,
      "logps/rejected": -862.6604614257812,
      "loss": 0.2596,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": -2.015202283859253,
      "rewards/margins": 2.8551807403564453,
      "rewards/rejected": -4.870382785797119,
      "step": 560
    },
    {
      "epoch": 0.6959706959706959,
      "grad_norm": 9.116023944385924,
      "learning_rate": 4.1209849547705916e-07,
      "logits/chosen": -1.9668136835098267,
      "logits/rejected": -1.9564120769500732,
      "logps/chosen": -597.05419921875,
      "logps/rejected": -889.5735473632812,
      "loss": 0.2673,
      "rewards/accuracies": 0.9312499761581421,
      "rewards/chosen": -1.908453345298767,
      "rewards/margins": 3.2164673805236816,
      "rewards/rejected": -5.124920845031738,
      "step": 570
    },
    {
      "epoch": 0.7081807081807082,
      "grad_norm": 9.73618947224433,
      "learning_rate": 4.080054904714917e-07,
      "logits/chosen": -2.0258185863494873,
      "logits/rejected": -2.048612594604492,
      "logps/chosen": -642.0574951171875,
      "logps/rejected": -941.4654541015625,
      "loss": 0.2523,
      "rewards/accuracies": 0.893750011920929,
      "rewards/chosen": -2.526458263397217,
      "rewards/margins": 3.056541919708252,
      "rewards/rejected": -5.583000183105469,
      "step": 580
    },
    {
      "epoch": 0.7203907203907204,
      "grad_norm": 8.420821275679248,
      "learning_rate": 4.038407125622806e-07,
      "logits/chosen": -1.9966434240341187,
      "logits/rejected": -1.9993374347686768,
      "logps/chosen": -637.5452880859375,
      "logps/rejected": -916.9855346679688,
      "loss": 0.2664,
      "rewards/accuracies": 0.8812500238418579,
      "rewards/chosen": -2.3783979415893555,
      "rewards/margins": 3.0379176139831543,
      "rewards/rejected": -5.41631555557251,
      "step": 590
    },
    {
      "epoch": 0.7326007326007326,
      "grad_norm": 11.758932811938493,
      "learning_rate": 3.9960605357105e-07,
      "logits/chosen": -2.070080041885376,
      "logits/rejected": -2.069638729095459,
      "logps/chosen": -621.4031372070312,
      "logps/rejected": -907.4441528320312,
      "loss": 0.2693,
      "rewards/accuracies": 0.8843749761581421,
      "rewards/chosen": -2.1775975227355957,
      "rewards/margins": 3.1478095054626465,
      "rewards/rejected": -5.3254075050354,
      "step": 600
    },
    {
      "epoch": 0.7448107448107448,
      "grad_norm": 7.854542997142867,
      "learning_rate": 3.95303437062423e-07,
      "logits/chosen": -1.9713811874389648,
      "logits/rejected": -1.977330207824707,
      "logps/chosen": -642.8685913085938,
      "logps/rejected": -944.4246826171875,
      "loss": 0.2427,
      "rewards/accuracies": 0.909375011920929,
      "rewards/chosen": -2.425691604614258,
      "rewards/margins": 3.1849162578582764,
      "rewards/rejected": -5.610608100891113,
      "step": 610
    },
    {
      "epoch": 0.757020757020757,
      "grad_norm": 7.703815141377651,
      "learning_rate": 3.9093481747025615e-07,
      "logits/chosen": -1.9850308895111084,
      "logits/rejected": -1.9987919330596924,
      "logps/chosen": -668.91943359375,
      "logps/rejected": -992.9368896484375,
      "loss": 0.2573,
      "rewards/accuracies": 0.8968750238418579,
      "rewards/chosen": -2.7951459884643555,
      "rewards/margins": 3.3580493927001953,
      "rewards/rejected": -6.153195381164551,
      "step": 620
    },
    {
      "epoch": 0.7692307692307693,
      "grad_norm": 8.680106719787165,
      "learning_rate": 3.86502179209851e-07,
      "logits/chosen": -1.9663703441619873,
      "logits/rejected": -1.9948896169662476,
| "logps/chosen": -610.5440673828125, | |
| "logps/rejected": -898.1494140625, | |
| "loss": 0.2614, | |
| "rewards/accuracies": 0.8812500238418579, | |
| "rewards/chosen": -2.289752960205078, | |
| "rewards/margins": 2.931398868560791, | |
| "rewards/rejected": -5.221152305603027, | |
| "step": 630 | |
| }, | |
| { | |
| "epoch": 0.7814407814407814, | |
| "grad_norm": 10.429529355264094, | |
| "learning_rate": 3.8200753577654765e-07, | |
| "logits/chosen": -1.944758653640747, | |
| "logits/rejected": -1.981778860092163, | |
| "logps/chosen": -658.198486328125, | |
| "logps/rejected": -976.310546875, | |
| "loss": 0.252, | |
| "rewards/accuracies": 0.8999999761581421, | |
| "rewards/chosen": -2.5240378379821777, | |
| "rewards/margins": 3.3471717834472656, | |
| "rewards/rejected": -5.871209621429443, | |
| "step": 640 | |
| }, | |
| { | |
| "epoch": 0.7936507936507936, | |
| "grad_norm": 9.854306625880072, | |
| "learning_rate": 3.7745292883110784e-07, | |
| "logits/chosen": -1.9690608978271484, | |
| "logits/rejected": -1.9564428329467773, | |
| "logps/chosen": -661.0819091796875, | |
| "logps/rejected": -980.1275634765625, | |
| "loss": 0.2457, | |
| "rewards/accuracies": 0.921875, | |
| "rewards/chosen": -2.6281650066375732, | |
| "rewards/margins": 3.3420677185058594, | |
| "rewards/rejected": -5.970232963562012, | |
| "step": 650 | |
| }, | |
| { | |
| "epoch": 0.8058608058608059, | |
| "grad_norm": 10.023665069599845, | |
| "learning_rate": 3.7284042727230506e-07, | |
| "logits/chosen": -1.9493147134780884, | |
| "logits/rejected": -1.9569787979125977, | |
| "logps/chosen": -632.8013916015625, | |
| "logps/rejected": -883.5313720703125, | |
| "loss": 0.2535, | |
| "rewards/accuracies": 0.8968750238418579, | |
| "rewards/chosen": -2.2697696685791016, | |
| "rewards/margins": 2.83363676071167, | |
| "rewards/rejected": -5.1034064292907715, | |
| "step": 660 | |
| }, | |
| { | |
| "epoch": 0.818070818070818, | |
| "grad_norm": 8.667472255038128, | |
| "learning_rate": 3.681721262971413e-07, | |
| "logits/chosen": -2.0272722244262695, | |
| "logits/rejected": -2.039168119430542, | |
| "logps/chosen": -667.6822509765625, | |
| "logps/rejected": -953.4684448242188, | |
| "loss": 0.2344, | |
| "rewards/accuracies": 0.871874988079071, | |
| "rewards/chosen": -2.6374218463897705, | |
| "rewards/margins": 3.003962516784668, | |
| "rewards/rejected": -5.641384601593018, | |
| "step": 670 | |
| }, | |
| { | |
| "epoch": 0.8302808302808303, | |
| "grad_norm": 11.27138748410362, | |
| "learning_rate": 3.634501464491183e-07, | |
| "logits/chosen": -2.0476953983306885, | |
| "logits/rejected": -2.064707040786743, | |
| "logps/chosen": -683.5264282226562, | |
| "logps/rejected": -1021.5324096679688, | |
| "loss": 0.233, | |
| "rewards/accuracies": 0.9156249761581421, | |
| "rewards/chosen": -2.805328845977783, | |
| "rewards/margins": 3.454031467437744, | |
| "rewards/rejected": -6.259359836578369, | |
| "step": 680 | |
| }, | |
| { | |
| "epoch": 0.8424908424908425, | |
| "grad_norm": 10.056725367187733, | |
| "learning_rate": 3.5867663265499553e-07, | |
| "logits/chosen": -1.9634895324707031, | |
| "logits/rejected": -1.978262186050415, | |
| "logps/chosen": -688.0383911132812, | |
| "logps/rejected": -1032.1566162109375, | |
| "loss": 0.2429, | |
| "rewards/accuracies": 0.90625, | |
| "rewards/chosen": -2.8448052406311035, | |
| "rewards/margins": 3.5894615650177, | |
| "rewards/rejected": -6.434267520904541, | |
| "step": 690 | |
| }, | |
| { | |
| "epoch": 0.8547008547008547, | |
| "grad_norm": 10.779176958531542, | |
| "learning_rate": 3.5385375325047163e-07, | |
| "logits/chosen": -1.9880058765411377, | |
| "logits/rejected": -1.948150396347046, | |
| "logps/chosen": -678.7589111328125, | |
| "logps/rejected": -950.1603393554688, | |
| "loss": 0.2498, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -2.7791597843170166, | |
| "rewards/margins": 2.923888921737671, | |
| "rewards/rejected": -5.7030487060546875, | |
| "step": 700 | |
| }, | |
| { | |
| "epoch": 0.8669108669108669, | |
| "grad_norm": 7.2521850880280665, | |
| "learning_rate": 3.4898369899523323e-07, | |
| "logits/chosen": -1.9596385955810547, | |
| "logits/rejected": -1.9468225240707397, | |
| "logps/chosen": -670.3238525390625, | |
| "logps/rejected": -978.1044921875, | |
| "loss": 0.245, | |
| "rewards/accuracies": 0.934374988079071, | |
| "rewards/chosen": -2.5701966285705566, | |
| "rewards/margins": 3.3597824573516846, | |
| "rewards/rejected": -5.92997932434082, | |
| "step": 710 | |
| }, | |
| { | |
| "epoch": 0.8791208791208791, | |
| "grad_norm": 8.740369578606884, | |
| "learning_rate": 3.4406868207781725e-07, | |
| "logits/chosen": -1.9431158304214478, | |
| "logits/rejected": -1.9668858051300049, | |
| "logps/chosen": -652.6699829101562, | |
| "logps/rejected": -978.8201293945312, | |
| "loss": 0.252, | |
| "rewards/accuracies": 0.8968750238418579, | |
| "rewards/chosen": -2.550971746444702, | |
| "rewards/margins": 3.3800606727600098, | |
| "rewards/rejected": -5.931032180786133, | |
| "step": 720 | |
| }, | |
| { | |
| "epoch": 0.8913308913308914, | |
| "grad_norm": 8.917488139539987, | |
| "learning_rate": 3.3911093511073984e-07, | |
| "logits/chosen": -1.9911832809448242, | |
| "logits/rejected": -2.0098257064819336, | |
| "logps/chosen": -659.505859375, | |
| "logps/rejected": -971.2593994140625, | |
| "loss": 0.2467, | |
| "rewards/accuracies": 0.909375011920929, | |
| "rewards/chosen": -2.533627986907959, | |
| "rewards/margins": 3.233253002166748, | |
| "rewards/rejected": -5.766880989074707, | |
| "step": 730 | |
| }, | |
| { | |
| "epoch": 0.9035409035409036, | |
| "grad_norm": 10.10992640831635, | |
| "learning_rate": 3.3411271011634697e-07, | |
| "logits/chosen": -1.8714303970336914, | |
| "logits/rejected": -1.8533025979995728, | |
| "logps/chosen": -610.3406372070312, | |
| "logps/rejected": -920.5524291992188, | |
| "loss": 0.2487, | |
| "rewards/accuracies": 0.9281250238418579, | |
| "rewards/chosen": -2.253237247467041, | |
| "rewards/margins": 3.4128048419952393, | |
| "rewards/rejected": -5.666041374206543, | |
| "step": 740 | |
| }, | |
| { | |
| "epoch": 0.9157509157509157, | |
| "grad_norm": 10.16349038435686, | |
| "learning_rate": 3.290762775038494e-07, | |
| "logits/chosen": -1.8609752655029297, | |
| "logits/rejected": -1.8657268285751343, | |
| "logps/chosen": -650.5752563476562, | |
| "logps/rejected": -990.2158203125, | |
| "loss": 0.2209, | |
| "rewards/accuracies": 0.918749988079071, | |
| "rewards/chosen": -2.499966859817505, | |
| "rewards/margins": 3.5357258319854736, | |
| "rewards/rejected": -6.0356926918029785, | |
| "step": 750 | |
| }, | |
| { | |
| "epoch": 0.927960927960928, | |
| "grad_norm": 7.957411909596094, | |
| "learning_rate": 3.2400392503800477e-07, | |
| "logits/chosen": -1.904550313949585, | |
| "logits/rejected": -1.8870503902435303, | |
| "logps/chosen": -671.4937744140625, | |
| "logps/rejected": -1035.06005859375, | |
| "loss": 0.2278, | |
| "rewards/accuracies": 0.925000011920929, | |
| "rewards/chosen": -2.7466225624084473, | |
| "rewards/margins": 3.689298629760742, | |
| "rewards/rejected": -6.435920715332031, | |
| "step": 760 | |
| }, | |
| { | |
| "epoch": 0.9401709401709402, | |
| "grad_norm": 10.052468847497332, | |
| "learning_rate": 3.188979567999161e-07, | |
| "logits/chosen": -1.846875786781311, | |
| "logits/rejected": -1.8232959508895874, | |
| "logps/chosen": -650.9820556640625, | |
| "logps/rejected": -956.2470703125, | |
| "loss": 0.2315, | |
| "rewards/accuracies": 0.90625, | |
| "rewards/chosen": -2.5622196197509766, | |
| "rewards/margins": 3.281222105026245, | |
| "rewards/rejected": -5.843441963195801, | |
| "step": 770 | |
| }, | |
| { | |
| "epoch": 0.9523809523809523, | |
| "grad_norm": 8.845964368891687, | |
| "learning_rate": 3.137606921404191e-07, | |
| "logits/chosen": -1.8606138229370117, | |
| "logits/rejected": -1.8387963771820068, | |
| "logps/chosen": -645.6654663085938, | |
| "logps/rejected": -939.6808471679688, | |
| "loss": 0.2448, | |
| "rewards/accuracies": 0.909375011920929, | |
| "rewards/chosen": -2.5100626945495605, | |
| "rewards/margins": 3.1421546936035156, | |
| "rewards/rejected": -5.652216911315918, | |
| "step": 780 | |
| }, | |
| { | |
| "epoch": 0.9645909645909646, | |
| "grad_norm": 8.337030706793822, | |
| "learning_rate": 3.0859446462653273e-07, | |
| "logits/chosen": -1.8315823078155518, | |
| "logits/rejected": -1.8353532552719116, | |
| "logps/chosen": -647.4951782226562, | |
| "logps/rejected": -976.1868286132812, | |
| "loss": 0.2333, | |
| "rewards/accuracies": 0.921875, | |
| "rewards/chosen": -2.6538143157958984, | |
| "rewards/margins": 3.3457908630371094, | |
| "rewards/rejected": -5.999605178833008, | |
| "step": 790 | |
| }, | |
| { | |
| "epoch": 0.9768009768009768, | |
| "grad_norm": 12.589579709309238, | |
| "learning_rate": 3.034016209814529e-07, | |
| "logits/chosen": -1.7978140115737915, | |
| "logits/rejected": -1.8211300373077393, | |
| "logps/chosen": -673.1063232421875, | |
| "logps/rejected": -1035.5140380859375, | |
| "loss": 0.2326, | |
| "rewards/accuracies": 0.90625, | |
| "rewards/chosen": -2.7855618000030518, | |
| "rewards/margins": 3.614232301712036, | |
| "rewards/rejected": -6.399794101715088, | |
| "step": 800 | |
| }, | |
| { | |
| "epoch": 0.989010989010989, | |
| "grad_norm": 8.693339105078397, | |
| "learning_rate": 2.9818452001856926e-07, | |
| "logits/chosen": -1.937139868736267, | |
| "logits/rejected": -1.9104955196380615, | |
| "logps/chosen": -709.281982421875, | |
| "logps/rejected": -1029.5799560546875, | |
| "loss": 0.2363, | |
| "rewards/accuracies": 0.8968750238418579, | |
| "rewards/chosen": -2.88238263130188, | |
| "rewards/margins": 3.4413890838623047, | |
| "rewards/rejected": -6.3237714767456055, | |
| "step": 810 | |
| }, | |
| { | |
| "epoch": 1.0012210012210012, | |
| "grad_norm": 7.149313221308795, | |
| "learning_rate": 2.929455315699908e-07, | |
| "logits/chosen": -1.8502495288848877, | |
| "logits/rejected": -1.8472564220428467, | |
| "logps/chosen": -633.6524658203125, | |
| "logps/rejected": -995.9943237304688, | |
| "loss": 0.2329, | |
| "rewards/accuracies": 0.8968750238418579, | |
| "rewards/chosen": -2.5007681846618652, | |
| "rewards/margins": 3.637205123901367, | |
| "rewards/rejected": -6.137973308563232, | |
| "step": 820 | |
| }, | |
| { | |
| "epoch": 1.0134310134310134, | |
| "grad_norm": 10.404080063241945, | |
| "learning_rate": 2.8768703541006574e-07, | |
| "logits/chosen": -1.9154850244522095, | |
| "logits/rejected": -1.9522449970245361, | |
| "logps/chosen": -641.5855712890625, | |
| "logps/rejected": -996.3082275390625, | |
| "loss": 0.2282, | |
| "rewards/accuracies": 0.903124988079071, | |
| "rewards/chosen": -2.4940600395202637, | |
| "rewards/margins": 3.6092581748962402, | |
| "rewards/rejected": -6.103318214416504, | |
| "step": 830 | |
| }, | |
| { | |
| "epoch": 1.0256410256410255, | |
| "grad_norm": 9.587186254816348, | |
| "learning_rate": 2.8241142017438557e-07, | |
| "logits/chosen": -1.873044729232788, | |
| "logits/rejected": -1.8523098230361938, | |
| "logps/chosen": -667.5242309570312, | |
| "logps/rejected": -989.6135864257812, | |
| "loss": 0.2229, | |
| "rewards/accuracies": 0.90625, | |
| "rewards/chosen": -2.665759563446045, | |
| "rewards/margins": 3.5281364917755127, | |
| "rewards/rejected": -6.193896293640137, | |
| "step": 840 | |
| }, | |
| { | |
| "epoch": 1.037851037851038, | |
| "grad_norm": 9.366883940315772, | |
| "learning_rate": 2.771210822747639e-07, | |
| "logits/chosen": -1.9449422359466553, | |
| "logits/rejected": -1.8745979070663452, | |
| "logps/chosen": -703.3695068359375, | |
| "logps/rejected": -1020.8590087890625, | |
| "loss": 0.2436, | |
| "rewards/accuracies": 0.8999999761581421, | |
| "rewards/chosen": -2.8462772369384766, | |
| "rewards/margins": 3.382256031036377, | |
| "rewards/rejected": -6.2285332679748535, | |
| "step": 850 | |
| }, | |
| { | |
| "epoch": 1.05006105006105, | |
| "grad_norm": 7.56593773353816, | |
| "learning_rate": 2.718184248106828e-07, | |
| "logits/chosen": -1.834773063659668, | |
| "logits/rejected": -1.8442004919052124, | |
| "logps/chosen": -658.0390014648438, | |
| "logps/rejected": -1019.7131958007812, | |
| "loss": 0.2224, | |
| "rewards/accuracies": 0.9312499761581421, | |
| "rewards/chosen": -2.64483380317688, | |
| "rewards/margins": 3.6114120483398438, | |
| "rewards/rejected": -6.256246089935303, | |
| "step": 860 | |
| }, | |
| { | |
| "epoch": 1.0622710622710623, | |
| "grad_norm": 8.841079507264354, | |
| "learning_rate": 2.665058564777014e-07, | |
| "logits/chosen": -1.8463929891586304, | |
| "logits/rejected": -1.839784026145935, | |
| "logps/chosen": -652.1268920898438, | |
| "logps/rejected": -1015.1273193359375, | |
| "loss": 0.2131, | |
| "rewards/accuracies": 0.9156249761581421, | |
| "rewards/chosen": -2.7053442001342773, | |
| "rewards/margins": 3.6853573322296143, | |
| "rewards/rejected": -6.3907012939453125, | |
| "step": 870 | |
| }, | |
| { | |
| "epoch": 1.0744810744810744, | |
| "grad_norm": 9.314872473095937, | |
| "learning_rate": 2.611857904733227e-07, | |
| "logits/chosen": -1.7795578241348267, | |
| "logits/rejected": -1.7798683643341064, | |
| "logps/chosen": -688.0113525390625, | |
| "logps/rejected": -1035.625244140625, | |
| "loss": 0.2242, | |
| "rewards/accuracies": 0.909375011920929, | |
| "rewards/chosen": -2.99015474319458, | |
| "rewards/margins": 3.6745171546936035, | |
| "rewards/rejected": -6.664670467376709, | |
| "step": 880 | |
| }, | |
| { | |
| "epoch": 1.0866910866910866, | |
| "grad_norm": 8.334325457508486, | |
| "learning_rate": 2.5586064340081516e-07, | |
| "logits/chosen": -1.8280951976776123, | |
| "logits/rejected": -1.8405084609985352, | |
| "logps/chosen": -666.4359741210938, | |
| "logps/rejected": -1007.44091796875, | |
| "loss": 0.23, | |
| "rewards/accuracies": 0.9312499761581421, | |
| "rewards/chosen": -2.6223227977752686, | |
| "rewards/margins": 3.5808663368225098, | |
| "rewards/rejected": -6.203189373016357, | |
| "step": 890 | |
| }, | |
| { | |
| "epoch": 1.098901098901099, | |
| "grad_norm": 7.206748373825152, | |
| "learning_rate": 2.505328341714873e-07, | |
| "logits/chosen": -1.7787061929702759, | |
| "logits/rejected": -1.8012263774871826, | |
| "logps/chosen": -663.2637329101562, | |
| "logps/rejected": -1036.8768310546875, | |
| "loss": 0.2129, | |
| "rewards/accuracies": 0.9375, | |
| "rewards/chosen": -2.8313677310943604, | |
| "rewards/margins": 3.7177510261535645, | |
| "rewards/rejected": -6.5491180419921875, | |
| "step": 900 | |
| }, | |
| { | |
| "epoch": 1.1111111111111112, | |
| "grad_norm": 10.177149465840422, | |
| "learning_rate": 2.4520478290591416e-07, | |
| "logits/chosen": -1.7504581212997437, | |
| "logits/rejected": -1.7739044427871704, | |
| "logps/chosen": -702.8826904296875, | |
| "logps/rejected": -1059.865234375, | |
| "loss": 0.2268, | |
| "rewards/accuracies": 0.9156249761581421, | |
| "rewards/chosen": -2.962860584259033, | |
| "rewards/margins": 3.6696648597717285, | |
| "rewards/rejected": -6.6325249671936035, | |
| "step": 910 | |
| }, | |
| { | |
| "epoch": 1.1233211233211233, | |
| "grad_norm": 8.632975577344476, | |
| "learning_rate": 2.3987890983461403e-07, | |
| "logits/chosen": -1.7676483392715454, | |
| "logits/rejected": -1.7390410900115967, | |
| "logps/chosen": -696.1847534179688, | |
| "logps/rejected": -1042.147216796875, | |
| "loss": 0.2198, | |
| "rewards/accuracies": 0.90625, | |
| "rewards/chosen": -2.94394850730896, | |
| "rewards/margins": 3.708522319793701, | |
| "rewards/rejected": -6.652470588684082, | |
| "step": 920 | |
| }, | |
| { | |
| "epoch": 1.1355311355311355, | |
| "grad_norm": 9.916019501072466, | |
| "learning_rate": 2.3455763419867544e-07, | |
| "logits/chosen": -1.7698522806167603, | |
| "logits/rejected": -1.8097903728485107, | |
| "logps/chosen": -661.6279907226562, | |
| "logps/rejected": -993.2236328125, | |
| "loss": 0.2322, | |
| "rewards/accuracies": 0.8843749761581421, | |
| "rewards/chosen": -2.7180135250091553, | |
| "rewards/margins": 3.294512987136841, | |
| "rewards/rejected": -6.012526512145996, | |
| "step": 930 | |
| }, | |
| { | |
| "epoch": 1.1477411477411477, | |
| "grad_norm": 8.842361688694279, | |
| "learning_rate": 2.2924337315083353e-07, | |
| "logits/chosen": -1.782261610031128, | |
| "logits/rejected": -1.8005173206329346, | |
| "logps/chosen": -626.9920043945312, | |
| "logps/rejected": -989.1697387695312, | |
| "loss": 0.2168, | |
| "rewards/accuracies": 0.925000011920929, | |
| "rewards/chosen": -2.3607215881347656, | |
| "rewards/margins": 3.6655585765838623, | |
| "rewards/rejected": -6.026279926300049, | |
| "step": 940 | |
| }, | |
| { | |
| "epoch": 1.1599511599511598, | |
| "grad_norm": 9.32555579290425, | |
| "learning_rate": 2.239385406574955e-07, | |
| "logits/chosen": -1.7827867269515991, | |
| "logits/rejected": -1.779762625694275, | |
| "logps/chosen": -655.2774047851562, | |
| "logps/rejected": -1022.1461791992188, | |
| "loss": 0.2233, | |
| "rewards/accuracies": 0.940625011920929, | |
| "rewards/chosen": -2.592012405395508, | |
| "rewards/margins": 3.816871166229248, | |
| "rewards/rejected": -6.408883571624756, | |
| "step": 950 | |
| }, | |
| { | |
| "epoch": 1.1721611721611722, | |
| "grad_norm": 10.939107380292038, | |
| "learning_rate": 2.1864554640221244e-07, | |
| "logits/chosen": -1.7000805139541626, | |
| "logits/rejected": -1.688855767250061, | |
| "logps/chosen": -676.7574462890625, | |
| "logps/rejected": -1019.0291748046875, | |
| "loss": 0.2202, | |
| "rewards/accuracies": 0.9156249761581421, | |
| "rewards/chosen": -2.7764394283294678, | |
| "rewards/margins": 3.648998260498047, | |
| "rewards/rejected": -6.425436973571777, | |
| "step": 960 | |
| }, | |
| { | |
| "epoch": 1.1843711843711844, | |
| "grad_norm": 10.161202425220864, | |
| "learning_rate": 2.133667946910977e-07, | |
| "logits/chosen": -1.812464714050293, | |
| "logits/rejected": -1.8180309534072876, | |
| "logps/chosen": -680.7279052734375, | |
| "logps/rejected": -1037.299560546875, | |
| "loss": 0.2178, | |
| "rewards/accuracies": 0.90625, | |
| "rewards/chosen": -2.6717233657836914, | |
| "rewards/margins": 3.6986587047576904, | |
| "rewards/rejected": -6.370382308959961, | |
| "step": 970 | |
| }, | |
| { | |
| "epoch": 1.1965811965811965, | |
| "grad_norm": 9.089009127660146, | |
| "learning_rate": 2.0810468336068697e-07, | |
| "logits/chosen": -1.752598524093628, | |
| "logits/rejected": -1.7276668548583984, | |
| "logps/chosen": -658.7349243164062, | |
| "logps/rejected": -977.19580078125, | |
| "loss": 0.2208, | |
| "rewards/accuracies": 0.90625, | |
| "rewards/chosen": -2.660342216491699, | |
| "rewards/margins": 3.40667462348938, | |
| "rewards/rejected": -6.0670166015625, | |
| "step": 980 | |
| }, | |
| { | |
| "epoch": 1.2087912087912087, | |
| "grad_norm": 8.82346746989032, | |
| "learning_rate": 2.0286160268873826e-07, | |
| "logits/chosen": -1.7947756052017212, | |
| "logits/rejected": -1.7663764953613281, | |
| "logps/chosen": -677.0531005859375, | |
| "logps/rejected": -991.953125, | |
| "loss": 0.2212, | |
| "rewards/accuracies": 0.903124988079071, | |
| "rewards/chosen": -2.6913745403289795, | |
| "rewards/margins": 3.3568809032440186, | |
| "rewards/rejected": -6.048255920410156, | |
| "step": 990 | |
| }, | |
| { | |
| "epoch": 1.221001221001221, | |
| "grad_norm": 8.94368139635424, | |
| "learning_rate": 1.9763993430846392e-07, | |
| "logits/chosen": -1.691082239151001, | |
| "logits/rejected": -1.7195104360580444, | |
| "logps/chosen": -689.7127075195312, | |
| "logps/rejected": -993.8372802734375, | |
| "loss": 0.2293, | |
| "rewards/accuracies": 0.893750011920929, | |
| "rewards/chosen": -2.915121555328369, | |
| "rewards/margins": 3.158756971359253, | |
| "rewards/rejected": -6.073878288269043, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 1.221001221001221, | |
| "eval_logits/chosen": -1.457505226135254, | |
| "eval_logits/rejected": -1.4398448467254639, | |
| "eval_logps/chosen": -689.9403076171875, | |
| "eval_logps/rejected": -1048.287109375, | |
| "eval_loss": 0.32239073514938354, | |
| "eval_rewards/accuracies": 0.90625, | |
| "eval_rewards/chosen": -3.3080387115478516, | |
| "eval_rewards/margins": 3.7410764694213867, | |
| "eval_rewards/rejected": -7.049115180969238, | |
| "eval_runtime": 3.2056, | |
| "eval_samples_per_second": 62.391, | |
| "eval_steps_per_second": 1.248, | |
| "step": 1000 | |
| }, | |
| { | |
| "epoch": 1.2332112332112333, | |
| "grad_norm": 10.431477967240758, | |
| "learning_rate": 1.9244205012669066e-07, | |
| "logits/chosen": -1.6947963237762451, | |
| "logits/rejected": -1.6955578327178955, | |
| "logps/chosen": -682.6785278320312, | |
| "logps/rejected": -1043.7890625, | |
| "loss": 0.2195, | |
| "rewards/accuracies": 0.909375011920929, | |
| "rewards/chosen": -2.892595052719116, | |
| "rewards/margins": 3.645073652267456, | |
| "rewards/rejected": -6.537668704986572, | |
| "step": 1010 | |
| }, | |
| { | |
| "epoch": 1.2454212454212454, | |
| "grad_norm": 10.45964252043519, | |
| "learning_rate": 1.8727031124643738e-07, | |
| "logits/chosen": -1.7148557901382446, | |
| "logits/rejected": -1.730376958847046, | |
| "logps/chosen": -693.7335205078125, | |
| "logps/rejected": -1035.718505859375, | |
| "loss": 0.2235, | |
| "rewards/accuracies": 0.909375011920929, | |
| "rewards/chosen": -2.9769744873046875, | |
| "rewards/margins": 3.624099016189575, | |
| "rewards/rejected": -6.601072788238525, | |
| "step": 1020 | |
| }, | |
| { | |
| "epoch": 1.2576312576312576, | |
| "grad_norm": 10.46971576068257, | |
| "learning_rate": 1.8212706689439993e-07, | |
| "logits/chosen": -1.6916805505752563, | |
| "logits/rejected": -1.693860411643982, | |
| "logps/chosen": -655.6392211914062, | |
| "logps/rejected": -1019.2525634765625, | |
| "loss": 0.2083, | |
| "rewards/accuracies": 0.925000011920929, | |
| "rewards/chosen": -2.5844101905822754, | |
| "rewards/margins": 3.817833662033081, | |
| "rewards/rejected": -6.402244567871094, | |
| "step": 1030 | |
| }, | |
| { | |
| "epoch": 1.2698412698412698, | |
| "grad_norm": 11.412713931602926, | |
| "learning_rate": 1.7701465335383148e-07, | |
| "logits/chosen": -1.7352428436279297, | |
| "logits/rejected": -1.712531328201294, | |
| "logps/chosen": -703.946533203125, | |
| "logps/rejected": -1039.840087890625, | |
| "loss": 0.2246, | |
| "rewards/accuracies": 0.9281250238418579, | |
| "rewards/chosen": -2.9772469997406006, | |
| "rewards/margins": 3.4660041332244873, | |
| "rewards/rejected": -6.443251132965088, | |
| "step": 1040 | |
| }, | |
| { | |
| "epoch": 1.282051282051282, | |
| "grad_norm": 9.59659946541285, | |
| "learning_rate": 1.7193539290330172e-07, | |
| "logits/chosen": -1.7665666341781616, | |
| "logits/rejected": -1.7859532833099365, | |
| "logps/chosen": -693.987548828125, | |
| "logps/rejected": -1081.337158203125, | |
| "loss": 0.2048, | |
| "rewards/accuracies": 0.925000011920929, | |
| "rewards/chosen": -2.7806649208068848, | |
| "rewards/margins": 3.96142578125, | |
| "rewards/rejected": -6.742089748382568, | |
| "step": 1050 | |
| }, | |
| { | |
| "epoch": 1.2942612942612943, | |
| "grad_norm": 7.833467318755542, | |
| "learning_rate": 1.668915927618183e-07, | |
| "logits/chosen": -1.6072509288787842, | |
| "logits/rejected": -1.5868537425994873, | |
| "logps/chosen": -644.87353515625, | |
| "logps/rejected": -992.3282470703125, | |
| "loss": 0.2139, | |
| "rewards/accuracies": 0.893750011920929, | |
| "rewards/chosen": -2.6021721363067627, | |
| "rewards/margins": 3.6462604999542236, | |
| "rewards/rejected": -6.248432636260986, | |
| "step": 1060 | |
| }, | |
| { | |
| "epoch": 1.3064713064713065, | |
| "grad_norm": 8.944995143399364, | |
| "learning_rate": 1.618855440407878e-07, | |
| "logits/chosen": -1.6417900323867798, | |
| "logits/rejected": -1.645308494567871, | |
| "logps/chosen": -680.7823486328125, | |
| "logps/rejected": -984.8699951171875, | |
| "loss": 0.2206, | |
| "rewards/accuracies": 0.878125011920929, | |
| "rewards/chosen": -2.845156669616699, | |
| "rewards/margins": 3.2548797130584717, | |
| "rewards/rejected": -6.100037097930908, | |
| "step": 1070 | |
| }, | |
| { | |
| "epoch": 1.3186813186813187, | |
| "grad_norm": 8.538205860728207, | |
| "learning_rate": 1.5691952070329493e-07, | |
| "logits/chosen": -1.5984406471252441, | |
| "logits/rejected": -1.5525627136230469, | |
| "logps/chosen": -696.5716552734375, | |
| "logps/rejected": -1034.56640625, | |
| "loss": 0.2183, | |
| "rewards/accuracies": 0.9156249761581421, | |
| "rewards/chosen": -2.9366581439971924, | |
| "rewards/margins": 3.6637485027313232, | |
| "rewards/rejected": -6.600405693054199, | |
| "step": 1080 | |
| }, | |
| { | |
| "epoch": 1.3308913308913308, | |
| "grad_norm": 12.006671385110291, | |
| "learning_rate": 1.519957785311698e-07, | |
| "logits/chosen": -1.6169030666351318, | |
| "logits/rejected": -1.630112648010254, | |
| "logps/chosen": -687.6176147460938, | |
| "logps/rejected": -1042.2376708984375, | |
| "loss": 0.2274, | |
| "rewards/accuracies": 0.940625011920929, | |
| "rewards/chosen": -2.9744813442230225, | |
| "rewards/margins": 3.7546725273132324, | |
| "rewards/rejected": -6.729154109954834, | |
| "step": 1090 | |
| }, | |
| { | |
| "epoch": 1.3431013431013432, | |
| "grad_norm": 9.403413375132386, | |
| "learning_rate": 1.4711655410031536e-07, | |
| "logits/chosen": -1.752509355545044, | |
| "logits/rejected": -1.7398014068603516, | |
| "logps/chosen": -701.8548583984375, | |
| "logps/rejected": -1074.74609375, | |
| "loss": 0.2107, | |
| "rewards/accuracies": 0.9156249761581421, | |
| "rewards/chosen": -2.93528413772583, | |
| "rewards/margins": 3.7751307487487793, | |
| "rewards/rejected": -6.710414886474609, | |
| "step": 1100 | |
| }, | |
| { | |
| "epoch": 1.3553113553113554, | |
| "grad_norm": 9.067370494527864, | |
| "learning_rate": 1.422840637647574e-07, | |
| "logits/chosen": -1.6540132761001587, | |
| "logits/rejected": -1.6520767211914062, | |
| "logps/chosen": -648.3968505859375, | |
| "logps/rejected": -1009.6447143554688, | |
| "loss": 0.2154, | |
| "rewards/accuracies": 0.9281250238418579, | |
| "rewards/chosen": -2.6349334716796875, | |
| "rewards/margins": 3.7205891609191895, | |
| "rewards/rejected": -6.355522632598877, | |
| "step": 1110 | |
| }, | |
| { | |
| "epoch": 1.3675213675213675, | |
| "grad_norm": 8.8218853819329, | |
| "learning_rate": 1.3750050264988172e-07, | |
| "logits/chosen": -1.599161148071289, | |
| "logits/rejected": -1.6590015888214111, | |
| "logps/chosen": -683.4578857421875, | |
| "logps/rejected": -1025.697998046875, | |
| "loss": 0.2113, | |
| "rewards/accuracies": 0.918749988079071, | |
| "rewards/chosen": -2.938286542892456, | |
| "rewards/margins": 3.5185725688934326, | |
| "rewards/rejected": -6.456859588623047, | |
| "step": 1120 | |
| }, | |
| { | |
| "epoch": 1.3797313797313797, | |
| "grad_norm": 7.729524060255651, | |
| "learning_rate": 1.3276804365531303e-07, | |
| "logits/chosen": -1.6903009414672852, | |
| "logits/rejected": -1.6954196691513062, | |
| "logps/chosen": -684.0424194335938, | |
| "logps/rejected": -1034.60205078125, | |
| "loss": 0.1965, | |
| "rewards/accuracies": 0.9156249761581421, | |
| "rewards/chosen": -2.8384480476379395, | |
| "rewards/margins": 3.558556079864502, | |
| "rewards/rejected": -6.3970046043396, | |
| "step": 1130 | |
| }, | |
| { | |
| "epoch": 1.3919413919413919, | |
| "grad_norm": 10.111068058521244, | |
| "learning_rate": 1.2808883646789088e-07, | |
| "logits/chosen": -1.6597654819488525, | |
| "logits/rejected": -1.6594810485839844, | |
| "logps/chosen": -678.0111083984375, | |
| "logps/rejected": -1045.977783203125, | |
| "loss": 0.2093, | |
| "rewards/accuracies": 0.9437500238418579, | |
| "rewards/chosen": -2.8265321254730225, | |
| "rewards/margins": 3.8290627002716064, | |
| "rewards/rejected": -6.655595302581787, | |
| "step": 1140 | |
| }, | |
| { | |
| "epoch": 1.404151404151404, | |
| "grad_norm": 9.778269285200919, | |
| "learning_rate": 1.2346500658518864e-07, | |
| "logits/chosen": -1.6785917282104492, | |
| "logits/rejected": -1.6684529781341553, | |
| "logps/chosen": -682.1680908203125, | |
| "logps/rejected": -1051.3212890625, | |
| "loss": 0.2278, | |
| "rewards/accuracies": 0.925000011920929, | |
| "rewards/chosen": -2.902517080307007, | |
| "rewards/margins": 3.8359665870666504, | |
| "rewards/rejected": -6.738483428955078, | |
| "step": 1150 | |
| }, | |
| { | |
| "epoch": 1.4163614163614164, | |
| "grad_norm": 8.55576204479013, | |
| "learning_rate": 1.1889865435002117e-07, | |
| "logits/chosen": -1.7918224334716797, | |
| "logits/rejected": -1.7642894983291626, | |
| "logps/chosen": -692.8043823242188, | |
| "logps/rejected": -1036.125244140625, | |
| "loss": 0.2091, | |
| "rewards/accuracies": 0.8968750238418579, | |
| "rewards/chosen": -2.8053765296936035, | |
| "rewards/margins": 3.668884754180908, | |
| "rewards/rejected": -6.474261283874512, | |
| "step": 1160 | |
| }, | |
| { | |
| "epoch": 1.4285714285714286, | |
| "grad_norm": 9.250758880138703, | |
| "learning_rate": 1.1439185399637888e-07, | |
| "logits/chosen": -1.809579849243164, | |
| "logits/rejected": -1.820180892944336, | |
| "logps/chosen": -689.2313232421875, | |
| "logps/rejected": -1069.072021484375, | |
| "loss": 0.2072, | |
| "rewards/accuracies": 0.9281250238418579, | |
| "rewards/chosen": -2.8422112464904785, | |
| "rewards/margins": 3.8554039001464844, | |
| "rewards/rejected": -6.697615623474121, | |
| "step": 1170 | |
| }, | |
| { | |
| "epoch": 1.4407814407814408, | |
| "grad_norm": 11.66443113180578, | |
| "learning_rate": 1.099466527072207e-07, | |
| "logits/chosen": -1.7422889471054077, | |
| "logits/rejected": -1.6775966882705688, | |
| "logps/chosen": -724.9702758789062, | |
| "logps/rejected": -1071.587890625, | |
| "loss": 0.2124, | |
| "rewards/accuracies": 0.8968750238418579, | |
| "rewards/chosen": -3.1015143394470215, | |
| "rewards/margins": 3.6922173500061035, | |
| "rewards/rejected": -6.793731689453125, | |
| "step": 1180 | |
| }, | |
| { | |
| "epoch": 1.452991452991453, | |
| "grad_norm": 9.357661665183807, | |
| "learning_rate": 1.0556506968455556e-07, | |
| "logits/chosen": -1.6604530811309814, | |
| "logits/rejected": -1.598813533782959, | |
| "logps/chosen": -708.178955078125, | |
| "logps/rejected": -1078.4857177734375, | |
| "loss": 0.2191, | |
| "rewards/accuracies": 0.953125, | |
| "rewards/chosen": -2.9419569969177246, | |
| "rewards/margins": 4.017121315002441, | |
| "rewards/rejected": -6.959078311920166, | |
| "step": 1190 | |
| }, | |
| { | |
| "epoch": 1.4652014652014653, | |
| "grad_norm": 8.971265853956833, | |
| "learning_rate": 1.0124909523223418e-07, | |
| "logits/chosen": -1.8122241497039795, | |
| "logits/rejected": -1.7499420642852783, | |
| "logps/chosen": -676.4575805664062, | |
| "logps/rejected": -1026.417724609375, | |
| "loss": 0.226, | |
| "rewards/accuracies": 0.903124988079071, | |
| "rewards/chosen": -2.6309123039245605, | |
| "rewards/margins": 3.7246639728546143, | |
| "rewards/rejected": -6.355576038360596, | |
| "step": 1200 | |
| }, | |
| { | |
| "epoch": 1.4774114774114775, | |
| "grad_norm": 11.17454543526871, | |
| "learning_rate": 9.700068985186677e-08, | |
| "logits/chosen": -1.7836052179336548, | |
| "logits/rejected": -1.7842352390289307, | |
| "logps/chosen": -679.8863525390625, | |
| "logps/rejected": -1052.3326416015625, | |
| "loss": 0.2076, | |
| "rewards/accuracies": 0.890625, | |
| "rewards/chosen": -2.7537055015563965, | |
| "rewards/margins": 3.8916175365448, | |
| "rewards/rejected": -6.645322322845459, | |
| "step": 1210 | |
| }, | |
| { | |
| "epoch": 1.4896214896214897, | |
| "grad_norm": 8.470663050298606, | |
| "learning_rate": 9.282178335227883e-08, | |
| "logits/chosen": -1.6586374044418335, | |
| "logits/rejected": -1.6548519134521484, | |
| "logps/chosen": -673.2451171875, | |
| "logps/rejected": -1058.727294921875, | |
| "loss": 0.1957, | |
| "rewards/accuracies": 0.925000011920929, | |
| "rewards/chosen": -2.7371268272399902, | |
| "rewards/margins": 3.8924622535705566, | |
| "rewards/rejected": -6.629590034484863, | |
| "step": 1220 | |
| }, | |
| { | |
| "epoch": 1.5018315018315018, | |
| "grad_norm": 8.98808268846931, | |
| "learning_rate": 8.871427397290893e-08, | |
| "logits/chosen": -1.6249803304672241, | |
| "logits/rejected": -1.6458851099014282, | |
| "logps/chosen": -654.238037109375, | |
| "logps/rejected": -1073.6612548828125, | |
| "loss": 0.1921, | |
| "rewards/accuracies": 0.940625011920929, | |
| "rewards/chosen": -2.555995464324951, | |
| "rewards/margins": 4.273428440093994, | |
| "rewards/rejected": -6.8294243812561035, | |
| "step": 1230 | |
| }, | |
| { | |
| "epoch": 1.514041514041514, | |
| "grad_norm": 10.028503381886956, | |
| "learning_rate": 8.468002752154671e-08, | |
| "logits/chosen": -1.7111746072769165, | |
| "logits/rejected": -1.7021448612213135, | |
| "logps/chosen": -696.2750854492188, | |
| "logps/rejected": -1062.916748046875, | |
| "loss": 0.2054, | |
| "rewards/accuracies": 0.925000011920929, | |
| "rewards/chosen": -2.8526463508605957, | |
| "rewards/margins": 3.87031626701355, | |
| "rewards/rejected": -6.722962856292725, | |
| "step": 1240 | |
| }, | |
| { | |
| "epoch": 1.5262515262515262, | |
| "grad_norm": 9.587289277274007, | |
| "learning_rate": 8.07208765268021e-08, | |
| "logits/chosen": -1.6548601388931274, | |
| "logits/rejected": -1.6278877258300781, | |
| "logps/chosen": -686.55126953125, | |
| "logps/rejected": -1056.9136962890625, | |
| "loss": 0.2031, | |
| "rewards/accuracies": 0.909375011920929, | |
| "rewards/chosen": -2.857362985610962, | |
| "rewards/margins": 3.842681407928467, | |
| "rewards/rejected": -6.700045108795166, | |
| "step": 1250 | |
| }, | |
| { | |
| "epoch": 1.5384615384615383, | |
| "grad_norm": 9.113469988640157, | |
| "learning_rate": 7.683861940569217e-08, | |
| "logits/chosen": -1.6518821716308594, | |
| "logits/rejected": -1.6446526050567627, | |
| "logps/chosen": -680.0530395507812, | |
| "logps/rejected": -1071.1126708984375, | |
| "loss": 0.1992, | |
| "rewards/accuracies": 0.9281250238418579, | |
| "rewards/chosen": -2.8840444087982178, | |
| "rewards/margins": 4.099475383758545, | |
| "rewards/rejected": -6.983519077301025, | |
| "step": 1260 | |
| }, | |
| { | |
| "epoch": 1.5506715506715507, | |
| "grad_norm": 9.40788034177994, | |
| "learning_rate": 7.303501964672246e-08, | |
| "logits/chosen": -1.6062475442886353, | |
| "logits/rejected": -1.5758030414581299, | |
| "logps/chosen": -676.6715087890625, | |
| "logps/rejected": -1019.4368896484375, | |
| "loss": 0.2274, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -2.716747760772705, | |
| "rewards/margins": 3.6902427673339844, | |
| "rewards/rejected": -6.406990051269531, | |
| "step": 1270 | |
| }, | |
| { | |
| "epoch": 1.5628815628815629, | |
| "grad_norm": 10.234752942213406, | |
| "learning_rate": 6.931180500883484e-08, | |
| "logits/chosen": -1.6409828662872314, | |
| "logits/rejected": -1.6017128229141235, | |
| "logps/chosen": -686.1589965820312, | |
| "logps/rejected": -1049.4881591796875, | |
| "loss": 0.2011, | |
| "rewards/accuracies": 0.918749988079071, | |
| "rewards/chosen": -2.8092753887176514, | |
| "rewards/margins": 3.778611421585083, | |
| "rewards/rejected": -6.587886810302734, | |
| "step": 1280 | |
| }, | |
| { | |
| "epoch": 1.575091575091575, | |
| "grad_norm": 10.648822630887103, | |
| "learning_rate": 6.567066673658442e-08, | |
| "logits/chosen": -1.5854926109313965, | |
| "logits/rejected": -1.5248379707336426, | |
| "logps/chosen": -675.6094360351562, | |
| "logps/rejected": -1013.9142456054688, | |
| "loss": 0.2134, | |
| "rewards/accuracies": 0.871874988079071, | |
| "rewards/chosen": -2.8324193954467773, | |
| "rewards/margins": 3.5213024616241455, | |
| "rewards/rejected": -6.353722095489502, | |
| "step": 1290 | |
| }, | |
| { | |
| "epoch": 1.5873015873015874, | |
| "grad_norm": 10.089650308836681, | |
| "learning_rate": 6.21132587919036e-08, | |
| "logits/chosen": -1.593980312347412, | |
| "logits/rejected": -1.577946662902832, | |
| "logps/chosen": -666.9020385742188, | |
| "logps/rejected": -1011.3902587890625, | |
| "loss": 0.2021, | |
| "rewards/accuracies": 0.893750011920929, | |
| "rewards/chosen": -2.7358155250549316, | |
| "rewards/margins": 3.5997562408447266, | |
| "rewards/rejected": -6.335571765899658, | |
| "step": 1300 | |
| }, | |
| { | |
| "epoch": 1.5995115995115996, | |
| "grad_norm": 10.857356047559415, | |
| "learning_rate": 5.864119710280158e-08, | |
| "logits/chosen": -1.6040117740631104, | |
| "logits/rejected": -1.6094341278076172, | |
| "logps/chosen": -694.5184326171875, | |
| "logps/rejected": -1058.58740234375, | |
| "loss": 0.2002, | |
| "rewards/accuracies": 0.9281250238418579, | |
| "rewards/chosen": -2.9541563987731934, | |
| "rewards/margins": 3.760449171066284, | |
| "rewards/rejected": -6.714605808258057, | |
| "step": 1310 | |
| }, | |
| { | |
| "epoch": 1.6117216117216118, | |
| "grad_norm": 14.265677703397381, | |
| "learning_rate": 5.525605882933965e-08, | |
| "logits/chosen": -1.623427391052246, | |
| "logits/rejected": -1.6019861698150635, | |
| "logps/chosen": -692.2745971679688, | |
| "logps/rejected": -1085.745361328125, | |
| "loss": 0.1973, | |
| "rewards/accuracies": 0.8999999761581421, | |
| "rewards/chosen": -2.8990519046783447, | |
| "rewards/margins": 4.028355598449707, | |
| "rewards/rejected": -6.927407741546631, | |
| "step": 1320 | |
| }, | |
| { | |
| "epoch": 1.623931623931624, | |
| "grad_norm": 9.076988920336841, | |
| "learning_rate": 5.1959381647217665e-08, | |
| "logits/chosen": -1.5620160102844238, | |
| "logits/rejected": -1.5626099109649658, | |
| "logps/chosen": -688.0601806640625, | |
| "logps/rejected": -1034.831298828125, | |
| "loss": 0.205, | |
| "rewards/accuracies": 0.90625, | |
| "rewards/chosen": -2.8957722187042236, | |
| "rewards/margins": 3.635453701019287, | |
| "rewards/rejected": -6.53122615814209, | |
| "step": 1330 | |
| }, | |
| { | |
| "epoch": 1.636141636141636, | |
| "grad_norm": 11.741004645166157, | |
| "learning_rate": 4.875266304929496e-08, | |
| "logits/chosen": -1.5946216583251953, | |
| "logits/rejected": -1.552394151687622, | |
| "logps/chosen": -699.3309326171875, | |
| "logps/rejected": -1056.736083984375, | |
| "loss": 0.203, | |
| "rewards/accuracies": 0.9281250238418579, | |
| "rewards/chosen": -3.076303005218506, | |
| "rewards/margins": 3.665733814239502, | |
| "rewards/rejected": -6.74203634262085, | |
| "step": 1340 | |
| }, | |
| { | |
| "epoch": 1.6483516483516483, | |
| "grad_norm": 11.38093239647967, | |
| "learning_rate": 4.5637359665365025e-08, | |
| "logits/chosen": -1.5630689859390259, | |
| "logits/rejected": -1.5610148906707764, | |
| "logps/chosen": -688.2283325195312, | |
| "logps/rejected": -1071.9249267578125, | |
| "loss": 0.1984, | |
| "rewards/accuracies": 0.921875, | |
| "rewards/chosen": -2.906219244003296, | |
| "rewards/margins": 3.8943076133728027, | |
| "rewards/rejected": -6.8005266189575195, | |
| "step": 1350 | |
| }, | |
| { | |
| "epoch": 1.6605616605616604, | |
| "grad_norm": 9.41029423304621, | |
| "learning_rate": 4.2614886600491115e-08, | |
| "logits/chosen": -1.521751046180725, | |
| "logits/rejected": -1.528058409690857, | |
| "logps/chosen": -666.5167236328125, | |
| "logps/rejected": -1023.7020263671875, | |
| "loss": 0.2189, | |
| "rewards/accuracies": 0.8999999761581421, | |
| "rewards/chosen": -2.8758678436279297, | |
| "rewards/margins": 3.6217830181121826, | |
| "rewards/rejected": -6.497651100158691, | |
| "step": 1360 | |
| }, | |
| { | |
| "epoch": 1.6727716727716728, | |
| "grad_norm": 10.63202381564192, | |
| "learning_rate": 3.968661679220467e-08, | |
| "logits/chosen": -1.7034133672714233, | |
| "logits/rejected": -1.7073980569839478, | |
| "logps/chosen": -703.4854736328125, | |
| "logps/rejected": -1088.0830078125, | |
| "loss": 0.2113, | |
| "rewards/accuracies": 0.878125011920929, | |
| "rewards/chosen": -3.028465747833252, | |
| "rewards/margins": 3.8890113830566406, | |
| "rewards/rejected": -6.917477607727051, | |
| "step": 1370 | |
| }, | |
| { | |
| "epoch": 1.684981684981685, | |
| "grad_norm": 8.59601070770463, | |
| "learning_rate": 3.685388038685811e-08, | |
| "logits/chosen": -1.572533369064331, | |
| "logits/rejected": -1.5645427703857422, | |
| "logps/chosen": -689.0482177734375, | |
| "logps/rejected": -1043.1580810546875, | |
| "loss": 0.1907, | |
| "rewards/accuracies": 0.918749988079071, | |
| "rewards/chosen": -2.8478474617004395, | |
| "rewards/margins": 3.773597240447998, | |
| "rewards/rejected": -6.621443748474121, | |
| "step": 1380 | |
| }, | |
| { | |
| "epoch": 1.6971916971916972, | |
| "grad_norm": 8.096290604656218, | |
| "learning_rate": 3.41179641354146e-08, | |
| "logits/chosen": -1.5869381427764893, | |
| "logits/rejected": -1.5721813440322876, | |
| "logps/chosen": -709.0938110351562, | |
| "logps/rejected": -1064.73681640625, | |
| "loss": 0.2101, | |
| "rewards/accuracies": 0.925000011920929, | |
| "rewards/chosen": -3.0553669929504395, | |
| "rewards/margins": 3.6293067932128906, | |
| "rewards/rejected": -6.684674263000488, | |
| "step": 1390 | |
| }, | |
| { | |
| "epoch": 1.7094017094017095, | |
| "grad_norm": 10.34775868024534, | |
| "learning_rate": 3.1480110808950746e-08, | |
| "logits/chosen": -1.6079189777374268, | |
| "logits/rejected": -1.5532805919647217, | |
| "logps/chosen": -701.6239624023438, | |
| "logps/rejected": -1058.663330078125, | |
| "loss": 0.202, | |
| "rewards/accuracies": 0.921875, | |
| "rewards/chosen": -2.9393839836120605, | |
| "rewards/margins": 3.915294647216797, | |
| "rewards/rejected": -6.854678153991699, | |
| "step": 1400 | |
| }, | |
| { | |
| "epoch": 1.7216117216117217, | |
| "grad_norm": 8.551636954434649, | |
| "learning_rate": 2.8941518634136047e-08, | |
| "logits/chosen": -1.5729877948760986, | |
| "logits/rejected": -1.5950627326965332, | |
| "logps/chosen": -667.7506713867188, | |
| "logps/rejected": -1075.386962890625, | |
| "loss": 0.2036, | |
| "rewards/accuracies": 0.921875, | |
| "rewards/chosen": -2.7873880863189697, | |
| "rewards/margins": 4.09014892578125, | |
| "rewards/rejected": -6.877536773681641, | |
| "step": 1410 | |
| }, | |
| { | |
| "epoch": 1.7338217338217339, | |
| "grad_norm": 11.293363203538648, | |
| "learning_rate": 2.6503340748947083e-08, | |
| "logits/chosen": -1.6414867639541626, | |
| "logits/rejected": -1.6144354343414307, | |
| "logps/chosen": -710.8462524414062, | |
| "logps/rejected": -1076.911865234375, | |
| "loss": 0.2096, | |
| "rewards/accuracies": 0.934374988079071, | |
| "rewards/chosen": -3.167412519454956, | |
| "rewards/margins": 3.803969621658325, | |
| "rewards/rejected": -6.971382141113281, | |
| "step": 1420 | |
| }, | |
| { | |
| "epoch": 1.746031746031746, | |
| "grad_norm": 9.537502992918926, | |
| "learning_rate": 2.4166684678862208e-08, | |
| "logits/chosen": -1.5303064584732056, | |
| "logits/rejected": -1.523916482925415, | |
| "logps/chosen": -673.16748046875, | |
| "logps/rejected": -1046.1734619140625, | |
| "loss": 0.2136, | |
| "rewards/accuracies": 0.9156249761581421, | |
| "rewards/chosen": -2.9092774391174316, | |
| "rewards/margins": 3.6622211933135986, | |
| "rewards/rejected": -6.571497917175293, | |
| "step": 1430 | |
| }, | |
| { | |
| "epoch": 1.7582417582417582, | |
| "grad_norm": 11.313708406928527, | |
| "learning_rate": 2.1932611833775843e-08, | |
| "logits/chosen": -1.6144897937774658, | |
| "logits/rejected": -1.5737859010696411, | |
| "logps/chosen": -713.6920166015625, | |
| "logps/rejected": -1063.091064453125, | |
| "loss": 0.2143, | |
| "rewards/accuracies": 0.9125000238418579, | |
| "rewards/chosen": -3.0494279861450195, | |
| "rewards/margins": 3.7339680194854736, | |
| "rewards/rejected": -6.7833967208862305, | |
| "step": 1440 | |
| }, | |
| { | |
| "epoch": 1.7704517704517704, | |
| "grad_norm": 12.659032905074808, | |
| "learning_rate": 1.9802137025860394e-08, | |
| "logits/chosen": -1.541495680809021, | |
| "logits/rejected": -1.508460283279419, | |
| "logps/chosen": -672.1906127929688, | |
| "logps/rejected": -1020.8779296875, | |
| "loss": 0.2044, | |
| "rewards/accuracies": 0.9375, | |
| "rewards/chosen": -2.761587142944336, | |
| "rewards/margins": 3.6672203540802, | |
| "rewards/rejected": -6.428807258605957, | |
| "step": 1450 | |
| }, | |
| { | |
| "epoch": 1.7826617826617825, | |
| "grad_norm": 8.561486281617269, | |
| "learning_rate": 1.7776228008594962e-08, | |
| "logits/chosen": -1.5731874704360962, | |
| "logits/rejected": -1.6000001430511475, | |
| "logps/chosen": -699.12060546875, | |
| "logps/rejected": -1043.0130615234375, | |
| "loss": 0.1974, | |
| "rewards/accuracies": 0.909375011920929, | |
| "rewards/chosen": -2.973306894302368, | |
| "rewards/margins": 3.508462429046631, | |
| "rewards/rejected": -6.481769561767578, | |
| "step": 1460 | |
| }, | |
| { | |
| "epoch": 1.7948717948717947, | |
| "grad_norm": 9.72206499571952, | |
| "learning_rate": 1.5855805037169682e-08, | |
| "logits/chosen": -1.6329854726791382, | |
| "logits/rejected": -1.6412932872772217, | |
| "logps/chosen": -683.8406372070312, | |
| "logps/rejected": -1081.219482421875, | |
| "loss": 0.2127, | |
| "rewards/accuracies": 0.903124988079071, | |
| "rewards/chosen": -2.91072678565979, | |
| "rewards/margins": 4.06986665725708, | |
| "rewards/rejected": -6.980593681335449, | |
| "step": 1470 | |
| }, | |
| { | |
| "epoch": 1.807081807081807, | |
| "grad_norm": 9.226812102547623, | |
| "learning_rate": 1.4041740450466383e-08, | |
| "logits/chosen": -1.667553186416626, | |
| "logits/rejected": -1.6831690073013306, | |
| "logps/chosen": -684.6526489257812, | |
| "logps/rejected": -1098.877197265625, | |
| "loss": 0.2142, | |
| "rewards/accuracies": 0.9156249761581421, | |
| "rewards/chosen": -2.8064825534820557, | |
| "rewards/margins": 4.242220878601074, | |
| "rewards/rejected": -7.048703670501709, | |
| "step": 1480 | |
| }, | |
| { | |
| "epoch": 1.8192918192918193, | |
| "grad_norm": 10.957646684560045, | |
| "learning_rate": 1.2334858274804655e-08, | |
| "logits/chosen": -1.5813372135162354, | |
| "logits/rejected": -1.5335392951965332, | |
| "logps/chosen": -666.4457397460938, | |
| "logps/rejected": -1035.133544921875, | |
| "loss": 0.1955, | |
| "rewards/accuracies": 0.953125, | |
| "rewards/chosen": -2.7881767749786377, | |
| "rewards/margins": 3.741603136062622, | |
| "rewards/rejected": -6.529780387878418, | |
| "step": 1490 | |
| }, | |
| { | |
| "epoch": 1.8315018315018317, | |
| "grad_norm": 9.22473440765943, | |
| "learning_rate": 1.0735933849633561e-08, | |
| "logits/chosen": -1.5356563329696655, | |
| "logits/rejected": -1.5444982051849365, | |
| "logps/chosen": -656.3917236328125, | |
| "logps/rejected": -1014.2698974609375, | |
| "loss": 0.2173, | |
| "rewards/accuracies": 0.9125000238418579, | |
| "rewards/chosen": -2.679664134979248, | |
| "rewards/margins": 3.69049072265625, | |
| "rewards/rejected": -6.370155334472656, | |
| "step": 1500 | |
| }, | |
| { | |
| "epoch": 1.8437118437118438, | |
| "grad_norm": 10.099888495331802, | |
| "learning_rate": 9.245693475338906e-09, | |
| "logits/chosen": -1.6426986455917358, | |
| "logits/rejected": -1.636652946472168, | |
| "logps/chosen": -706.7011108398438, | |
| "logps/rejected": -1085.6732177734375, | |
| "loss": 0.2117, | |
| "rewards/accuracies": 0.918749988079071, | |
| "rewards/chosen": -2.9595284461975098, | |
| "rewards/margins": 3.9240341186523438, | |
| "rewards/rejected": -6.8835625648498535, | |
| "step": 1510 | |
| }, | |
| { | |
| "epoch": 1.855921855921856, | |
| "grad_norm": 11.718296845103126, | |
| "learning_rate": 7.86481408332651e-09, | |
| "logits/chosen": -1.63693368434906, | |
| "logits/rejected": -1.6097259521484375, | |
| "logps/chosen": -700.6412353515625, | |
| "logps/rejected": -1046.5230712890625, | |
| "loss": 0.2173, | |
| "rewards/accuracies": 0.90625, | |
| "rewards/chosen": -3.012932300567627, | |
| "rewards/margins": 3.689767837524414, | |
| "rewards/rejected": -6.702700614929199, | |
| "step": 1520 | |
| }, | |
| { | |
| "epoch": 1.8681318681318682, | |
| "grad_norm": 11.61323707998988, | |
| "learning_rate": 6.593922928530754e-09, | |
| "logits/chosen": -1.6446201801300049, | |
| "logits/rejected": -1.648411750793457, | |
| "logps/chosen": -701.7564697265625, | |
| "logps/rejected": -1085.4237060546875, | |
| "loss": 0.2141, | |
| "rewards/accuracies": 0.9125000238418579, | |
| "rewards/chosen": -2.927586317062378, | |
| "rewards/margins": 3.9655041694641113, | |
| "rewards/rejected": -6.893090724945068, | |
| "step": 1530 | |
| }, | |
| { | |
| "epoch": 1.8803418803418803, | |
| "grad_norm": 10.971983293596454, | |
| "learning_rate": 5.433597304488113e-09, | |
| "logits/chosen": -1.602278709411621, | |
| "logits/rejected": -1.5537140369415283, | |
| "logps/chosen": -681.44677734375, | |
| "logps/rejected": -1042.9771728515625, | |
| "loss": 0.2152, | |
| "rewards/accuracies": 0.934374988079071, | |
| "rewards/chosen": -2.83951997756958, | |
| "rewards/margins": 3.8127970695495605, | |
| "rewards/rejected": -6.652318000793457, | |
| "step": 1540 | |
| }, | |
| { | |
| "epoch": 1.8925518925518925, | |
| "grad_norm": 9.023723155797857, | |
| "learning_rate": 4.384364281105973e-09, | |
| "logits/chosen": -1.5327403545379639, | |
| "logits/rejected": -1.5472204685211182, | |
| "logps/chosen": -680.0806274414062, | |
| "logps/rejected": -1091.057861328125, | |
| "loss": 0.2025, | |
| "rewards/accuracies": 0.90625, | |
| "rewards/chosen": -2.8416030406951904, | |
| "rewards/margins": 4.13409948348999, | |
| "rewards/rejected": -6.97570276260376, | |
| "step": 1550 | |
| }, | |
| { | |
| "epoch": 1.9047619047619047, | |
| "grad_norm": 9.01098707409564, | |
| "learning_rate": 3.4467004652442842e-09, | |
| "logits/chosen": -1.4875050783157349, | |
| "logits/rejected": -1.4984132051467896, | |
| "logps/chosen": -679.8426513671875, | |
| "logps/rejected": -1051.27392578125, | |
| "loss": 0.1913, | |
| "rewards/accuracies": 0.925000011920929, | |
| "rewards/chosen": -2.8839166164398193, | |
| "rewards/margins": 3.8126988410949707, | |
| "rewards/rejected": -6.696614742279053, | |
| "step": 1560 | |
| }, | |
| { | |
| "epoch": 1.9169719169719168, | |
| "grad_norm": 9.97369900546846, | |
| "learning_rate": 2.6210317842206565e-09, | |
| "logits/chosen": -1.5761525630950928, | |
| "logits/rejected": -1.5815128087997437, | |
| "logps/chosen": -681.6040649414062, | |
| "logps/rejected": -1054.3304443359375, | |
| "loss": 0.2155, | |
| "rewards/accuracies": 0.875, | |
| "rewards/chosen": -2.8990960121154785, | |
| "rewards/margins": 3.823047637939453, | |
| "rewards/rejected": -6.722143650054932, | |
| "step": 1570 | |
| }, | |
| { | |
| "epoch": 1.9291819291819292, | |
| "grad_norm": 9.735400391893615, | |
| "learning_rate": 1.9077332923353728e-09, | |
| "logits/chosen": -1.554095983505249, | |
| "logits/rejected": -1.505615234375, | |
| "logps/chosen": -667.6380004882812, | |
| "logps/rejected": -1052.5147705078125, | |
| "loss": 0.1958, | |
| "rewards/accuracies": 0.9312499761581421, | |
| "rewards/chosen": -2.7255098819732666, | |
| "rewards/margins": 4.0490617752075195, | |
| "rewards/rejected": -6.774571895599365, | |
| "step": 1580 | |
| }, | |
| { | |
| "epoch": 1.9413919413919414, | |
| "grad_norm": 11.38486846510476, | |
| "learning_rate": 1.307129000505891e-09, | |
| "logits/chosen": -1.550065279006958, | |
| "logits/rejected": -1.5272270441055298, | |
| "logps/chosen": -644.3756103515625, | |
| "logps/rejected": -1003.22314453125, | |
| "loss": 0.2359, | |
| "rewards/accuracies": 0.921875, | |
| "rewards/chosen": -2.641263484954834, | |
| "rewards/margins": 3.7743351459503174, | |
| "rewards/rejected": -6.415598392486572, | |
| "step": 1590 | |
| }, | |
| { | |
| "epoch": 1.9536019536019538, | |
| "grad_norm": 12.537603692123119, | |
| "learning_rate": 8.194917290869907e-10, | |
| "logits/chosen": -1.638336420059204, | |
| "logits/rejected": -1.6147558689117432, | |
| "logps/chosen": -701.1859130859375, | |
| "logps/rejected": -1065.903076171875, | |
| "loss": 0.2184, | |
| "rewards/accuracies": 0.921875, | |
| "rewards/chosen": -2.792646884918213, | |
| "rewards/margins": 3.8054585456848145, | |
| "rewards/rejected": -6.598104953765869, | |
| "step": 1600 | |
| }, | |
| { | |
| "epoch": 1.965811965811966, | |
| "grad_norm": 9.457547390988431, | |
| "learning_rate": 4.450429839439884e-10, | |
| "logits/chosen": -1.52217435836792, | |
| "logits/rejected": -1.5371259450912476, | |
| "logps/chosen": -676.3851318359375, | |
| "logps/rejected": -1045.146728515625, | |
| "loss": 0.2196, | |
| "rewards/accuracies": 0.918749988079071, | |
| "rewards/chosen": -2.9359381198883057, | |
| "rewards/margins": 3.676692247390747, | |
| "rewards/rejected": -6.612630367279053, | |
| "step": 1610 | |
| }, | |
| { | |
| "epoch": 1.978021978021978, | |
| "grad_norm": 9.281764750094489, | |
| "learning_rate": 1.8395285583530652e-10, | |
| "logits/chosen": -1.5917165279388428, | |
| "logits/rejected": -1.591761827468872, | |
| "logps/chosen": -704.8922729492188, | |
| "logps/rejected": -1085.7891845703125, | |
| "loss": 0.2179, | |
| "rewards/accuracies": 0.9281250238418579, | |
| "rewards/chosen": -2.95951509475708, | |
| "rewards/margins": 3.9156622886657715, | |
| "rewards/rejected": -6.87517786026001, | |
| "step": 1620 | |
| }, | |
| { | |
| "epoch": 1.9902319902319903, | |
| "grad_norm": 9.654531338082007, | |
| "learning_rate": 3.63399431498046e-11, | |
| "logits/chosen": -1.483194351196289, | |
| "logits/rejected": -1.4703377485275269, | |
| "logps/chosen": -654.5201416015625, | |
| "logps/rejected": -1018.1467895507812, | |
| "loss": 0.2121, | |
| "rewards/accuracies": 0.925000011920929, | |
| "rewards/chosen": -2.7714340686798096, | |
| "rewards/margins": 3.737165927886963, | |
| "rewards/rejected": -6.50860071182251, | |
| "step": 1630 | |
| }, | |
| { | |
| "epoch": 2.0, | |
| "step": 1638, | |
| "total_flos": 0.0, | |
| "train_loss": 0.2817063624168927, | |
| "train_runtime": 11455.6006, | |
| "train_samples_per_second": 36.602, | |
| "train_steps_per_second": 0.143 | |
| } | |
| ], | |
| "logging_steps": 10, | |
| "max_steps": 1638, | |
| "num_input_tokens_seen": 0, | |
| "num_train_epochs": 2, | |
| "save_steps": 100, | |
| "stateful_callbacks": { | |
| "TrainerControl": { | |
| "args": { | |
| "should_epoch_stop": false, | |
| "should_evaluate": false, | |
| "should_log": false, | |
| "should_save": true, | |
| "should_training_stop": true | |
| }, | |
| "attributes": {} | |
| } | |
| }, | |
| "total_flos": 0.0, | |
| "train_batch_size": 8, | |
| "trial_name": null, | |
| "trial_params": null | |
| } | |