{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 2.962962962962963,
  "eval_steps": 500,
  "global_step": 2000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 2.1715362322330476,
      "epoch": 0.07407407407407407,
      "grad_norm": 1.0598933696746826,
      "learning_rate": 8.032786885245902e-05,
      "loss": 2.0077,
      "mean_token_accuracy": 0.5798640113323927,
      "num_tokens": 338396.0,
      "step": 50
    },
    {
      "entropy": 0.6553284931182861,
      "epoch": 0.14814814814814814,
      "grad_norm": 0.3852575719356537,
      "learning_rate": 9.990765991730485e-05,
      "loss": 0.5993,
      "mean_token_accuracy": 0.8612996307015419,
      "num_tokens": 677239.0,
      "step": 100
    },
    {
      "entropy": 0.4204328820481896,
      "epoch": 0.2222222222222222,
      "grad_norm": 0.2863098680973053,
      "learning_rate": 9.950545603782162e-05,
      "loss": 0.4172,
      "mean_token_accuracy": 0.9052096186578273,
      "num_tokens": 1015420.0,
      "step": 150
    },
    {
      "entropy": 0.3820256270095706,
      "epoch": 0.2962962962962963,
      "grad_norm": 0.2698630690574646,
      "learning_rate": 9.878674879048427e-05,
      "loss": 0.383,
      "mean_token_accuracy": 0.9129502369463444,
      "num_tokens": 1355048.0,
      "step": 200
    },
    {
      "entropy": 0.37178467601537707,
      "epoch": 0.37037037037037035,
      "grad_norm": 0.21257849037647247,
      "learning_rate": 9.775613308830824e-05,
      "loss": 0.37,
      "mean_token_accuracy": 0.9151540066301823,
      "num_tokens": 1691649.0,
      "step": 250
    },
    {
      "entropy": 0.36766253888607026,
      "epoch": 0.4444444444444444,
      "grad_norm": 0.31453582644462585,
      "learning_rate": 9.642019796948866e-05,
      "loss": 0.3634,
      "mean_token_accuracy": 0.9162409096956253,
      "num_tokens": 2029267.0,
      "step": 300
    },
    {
      "entropy": 0.35622088231146337,
      "epoch": 0.5185185185185185,
      "grad_norm": 0.20113833248615265,
      "learning_rate": 9.478748447168449e-05,
      "loss": 0.3533,
      "mean_token_accuracy": 0.9183417546749115,
      "num_tokens": 2369465.0,
      "step": 350
    },
    {
      "entropy": 0.3494216964393854,
      "epoch": 0.5925925925925926,
      "grad_norm": 0.20226770639419556,
      "learning_rate": 9.28684310265789e-05,
      "loss": 0.3492,
      "mean_token_accuracy": 0.919527986496687,
      "num_tokens": 2707235.0,
      "step": 400
    },
    {
      "entropy": 0.3433630639687181,
      "epoch": 0.6666666666666666,
      "grad_norm": 0.1721702516078949,
      "learning_rate": 9.067530672382544e-05,
      "loss": 0.3453,
      "mean_token_accuracy": 0.9204271128773689,
      "num_tokens": 3041282.0,
      "step": 450
    },
    {
      "entropy": 0.3465565311536193,
      "epoch": 0.7407407407407407,
      "grad_norm": 0.1957043558359146,
      "learning_rate": 8.822213287104348e-05,
      "loss": 0.3483,
      "mean_token_accuracy": 0.9200405342876912,
      "num_tokens": 3378157.0,
      "step": 500
    },
    {
      "entropy": 0.34500927191227676,
      "epoch": 0.8148148148148148,
      "grad_norm": 0.1682814359664917,
      "learning_rate": 8.552459335135381e-05,
      "loss": 0.3466,
      "mean_token_accuracy": 0.9210821086168289,
      "num_tokens": 3717487.0,
      "step": 550
    },
    {
      "entropy": 0.33874930143356324,
      "epoch": 0.8888888888888888,
      "grad_norm": 0.1756315678358078,
      "learning_rate": 8.259993435156559e-05,
      "loss": 0.3406,
      "mean_token_accuracy": 0.9231470887362957,
      "num_tokens": 4056814.0,
      "step": 600
    },
    {
      "entropy": 0.3372995613142848,
      "epoch": 0.9629629629629629,
      "grad_norm": 0.185526043176651,
      "learning_rate": 7.946685410208296e-05,
      "loss": 0.3378,
      "mean_token_accuracy": 0.924960307776928,
      "num_tokens": 4398736.0,
      "step": 650
    },
    {
      "entropy": 0.3282011279836297,
      "epoch": 1.037037037037037,
      "grad_norm": 0.23889201879501343,
      "learning_rate": 7.614538333345735e-05,
      "loss": 0.3293,
      "mean_token_accuracy": 0.9266741496324539,
      "num_tokens": 4737915.0,
      "step": 700
    },
    {
      "entropy": 0.32703890234231947,
      "epoch": 1.1111111111111112,
      "grad_norm": 0.21601562201976776,
      "learning_rate": 7.265675721386285e-05,
      "loss": 0.3275,
      "mean_token_accuracy": 0.9279554469883442,
      "num_tokens": 5074721.0,
      "step": 750
    },
    {
      "entropy": 0.3243879374489188,
      "epoch": 1.1851851851851851,
      "grad_norm": 0.22092069685459137,
      "learning_rate": 6.902327958623736e-05,
      "loss": 0.3244,
      "mean_token_accuracy": 0.9288445146381855,
      "num_tokens": 5413515.0,
      "step": 800
    },
    {
      "entropy": 0.3191816185042262,
      "epoch": 1.2592592592592593,
      "grad_norm": 0.2003597766160965,
      "learning_rate": 6.526818037306228e-05,
      "loss": 0.3197,
      "mean_token_accuracy": 0.9303379128873348,
      "num_tokens": 5751965.0,
      "step": 850
    },
    {
      "entropy": 0.311707403101027,
      "epoch": 1.3333333333333333,
      "grad_norm": 0.1664246916770935,
      "learning_rate": 6.14154670604355e-05,
      "loss": 0.3122,
      "mean_token_accuracy": 0.9324613013863563,
      "num_tokens": 6087993.0,
      "step": 900
    },
    {
      "entropy": 0.31203486569225786,
      "epoch": 1.4074074074074074,
      "grad_norm": 0.25155216455459595,
      "learning_rate": 5.7489771210944564e-05,
      "loss": 0.3129,
      "mean_token_accuracy": 0.932486515045166,
      "num_tokens": 6425349.0,
      "step": 950
    },
    {
      "entropy": 0.31312834514304994,
      "epoch": 1.4814814814814814,
      "grad_norm": 0.2299806922674179,
      "learning_rate": 5.351619098663021e-05,
      "loss": 0.3139,
      "mean_token_accuracy": 0.9324578355252743,
      "num_tokens": 6762060.0,
      "step": 1000
    },
    {
      "entropy": 0.31229398518800733,
      "epoch": 1.5555555555555556,
      "grad_norm": 0.21491679549217224,
      "learning_rate": 4.952013068883795e-05,
      "loss": 0.3123,
      "mean_token_accuracy": 0.9330742400884628,
      "num_tokens": 7099580.0,
      "step": 1050
    },
    {
      "entropy": 0.3089119081199169,
      "epoch": 1.6296296296296298,
      "grad_norm": 0.20928895473480225,
      "learning_rate": 4.5527138340828776e-05,
      "loss": 0.3083,
      "mean_token_accuracy": 0.9346707260608673,
      "num_tokens": 7441088.0,
      "step": 1100
    },
    {
      "entropy": 0.3122195054590702,
      "epoch": 1.7037037037037037,
      "grad_norm": 0.1754506677389145,
      "learning_rate": 4.156274235153189e-05,
      "loss": 0.3128,
      "mean_token_accuracy": 0.9337523929774761,
      "num_tokens": 7781339.0,
      "step": 1150
    },
    {
      "entropy": 0.3072645129263401,
      "epoch": 1.7777777777777777,
      "grad_norm": 0.19373080134391785,
      "learning_rate": 3.765228830469794e-05,
      "loss": 0.3083,
      "mean_token_accuracy": 0.9344205930829048,
      "num_tokens": 8118559.0,
      "step": 1200
    },
    {
      "entropy": 0.30596201956272123,
      "epoch": 1.8518518518518519,
      "grad_norm": 0.21876507997512817,
      "learning_rate": 3.3820776916908857e-05,
      "loss": 0.3065,
      "mean_token_accuracy": 0.935108490884304,
      "num_tokens": 8458822.0,
      "step": 1250
    },
    {
      "entropy": 0.3032321432232857,
      "epoch": 1.925925925925926,
      "grad_norm": 0.16569602489471436,
      "learning_rate": 3.0092704200428058e-05,
      "loss": 0.304,
      "mean_token_accuracy": 0.9358008193969727,
      "num_tokens": 8797573.0,
      "step": 1300
    },
    {
      "entropy": 0.3116097005084157,
      "epoch": 2.0,
      "grad_norm": 0.23642410337924957,
      "learning_rate": 2.649190485277792e-05,
      "loss": 0.312,
      "mean_token_accuracy": 0.9340617834031582,
      "num_tokens": 9138916.0,
      "step": 1350
    },
    {
      "entropy": 0.2998246630653739,
      "epoch": 2.074074074074074,
      "grad_norm": 0.4210748076438904,
      "learning_rate": 2.3041399874302905e-05,
      "loss": 0.3015,
      "mean_token_accuracy": 0.9369703502953053,
      "num_tokens": 9476346.0,
      "step": 1400
    },
    {
      "entropy": 0.29883858568966387,
      "epoch": 2.148148148148148,
      "grad_norm": 0.2179645299911499,
      "learning_rate": 1.976324938794482e-05,
      "loss": 0.2995,
      "mean_token_accuracy": 0.9364559413492679,
      "num_tokens": 9813810.0,
      "step": 1450
    },
    {
      "entropy": 0.30353500295430424,
      "epoch": 2.2222222222222223,
      "grad_norm": 0.15429526567459106,
      "learning_rate": 1.667841160219835e-05,
      "loss": 0.304,
      "mean_token_accuracy": 0.9365751321613789,
      "num_tokens": 10154937.0,
      "step": 1500
    },
    {
      "entropy": 0.3042653623223305,
      "epoch": 2.2962962962962963,
      "grad_norm": 0.42488107085227966,
      "learning_rate": 1.3806608818939203e-05,
      "loss": 0.3057,
      "mean_token_accuracy": 0.9359618930518627,
      "num_tokens": 10494104.0,
      "step": 1550
    },
    {
      "entropy": 0.3081327396631241,
      "epoch": 2.3703703703703702,
      "grad_norm": 0.2351272851228714,
      "learning_rate": 1.1166201342777438e-05,
      "loss": 0.3064,
      "mean_token_accuracy": 0.9357219311594963,
      "num_tokens": 10834738.0,
      "step": 1600
    },
    {
      "entropy": 0.29908867001533507,
      "epoch": 2.4444444444444446,
      "grad_norm": 0.18584305047988892,
      "learning_rate": 8.774070098071668e-06,
      "loss": 0.3003,
      "mean_token_accuracy": 0.9364377255737781,
      "num_tokens": 11170346.0,
      "step": 1650
    },
    {
      "entropy": 0.30048872336745264,
      "epoch": 2.5185185185185186,
      "grad_norm": 0.17059944570064545,
      "learning_rate": 6.645508704069003e-06,
      "loss": 0.2998,
      "mean_token_accuracy": 0.9365158031880856,
      "num_tokens": 11508330.0,
      "step": 1700
    },
    {
      "entropy": 0.3041739664040506,
      "epoch": 2.5925925925925926,
      "grad_norm": 0.16423071920871735,
      "learning_rate": 4.794125698167262e-06,
      "loss": 0.3058,
      "mean_token_accuracy": 0.9364174953103066,
      "num_tokens": 11850254.0,
      "step": 1750
    },
    {
      "entropy": 0.2920705447718501,
      "epoch": 2.6666666666666665,
      "grad_norm": 0.19584065675735474,
      "learning_rate": 3.231757532415458e-06,
      "loss": 0.2943,
      "mean_token_accuracy": 0.9384492780268192,
      "num_tokens": 12185740.0,
      "step": 1800
    },
    {
      "entropy": 0.30169622793793677,
      "epoch": 2.7407407407407405,
      "grad_norm": 0.37878257036209106,
      "learning_rate": 1.9683928994924385e-06,
      "loss": 0.3011,
      "mean_token_accuracy": 0.9368291793763638,
      "num_tokens": 12525018.0,
      "step": 1850
    },
    {
      "entropy": 0.29348658775910735,
      "epoch": 2.814814814814815,
      "grad_norm": 0.19502298533916473,
      "learning_rate": 1.0121088719706296e-06,
      "loss": 0.2948,
      "mean_token_accuracy": 0.9380346268415451,
      "num_tokens": 12860441.0,
      "step": 1900
    },
    {
      "entropy": 0.3033849265426397,
      "epoch": 2.888888888888889,
      "grad_norm": 0.16882896423339844,
      "learning_rate": 3.6901926314575894e-07,
      "loss": 0.3039,
      "mean_token_accuracy": 0.936366637647152,
      "num_tokens": 13198992.0,
      "step": 1950
    },
    {
      "entropy": 0.29646733120083807,
      "epoch": 2.962962962962963,
      "grad_norm": 0.1928730458021164,
      "learning_rate": 4.323553957759629e-08,
      "loss": 0.2962,
      "mean_token_accuracy": 0.9374634748697281,
      "num_tokens": 13538141.0,
      "step": 2000
    }
  ],
  "logging_steps": 50,
  "max_steps": 2025,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 5.6552428189358285e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}