{
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
  "epoch": 1.4814814814814814,
  "eval_steps": 500,
  "global_step": 1000,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
    {
      "entropy": 2.1715362322330476,
      "epoch": 0.07407407407407407,
      "grad_norm": 1.0598933696746826,
      "learning_rate": 8.032786885245902e-05,
      "loss": 2.0077,
      "mean_token_accuracy": 0.5798640113323927,
      "num_tokens": 338396.0,
      "step": 50
    },
    {
      "entropy": 0.6553284931182861,
      "epoch": 0.14814814814814814,
      "grad_norm": 0.3852575719356537,
      "learning_rate": 9.990765991730485e-05,
      "loss": 0.5993,
      "mean_token_accuracy": 0.8612996307015419,
      "num_tokens": 677239.0,
      "step": 100
    },
    {
      "entropy": 0.4204328820481896,
      "epoch": 0.2222222222222222,
      "grad_norm": 0.2863098680973053,
      "learning_rate": 9.950545603782162e-05,
      "loss": 0.4172,
      "mean_token_accuracy": 0.9052096186578273,
      "num_tokens": 1015420.0,
      "step": 150
    },
    {
      "entropy": 0.3820256270095706,
      "epoch": 0.2962962962962963,
      "grad_norm": 0.2698630690574646,
      "learning_rate": 9.878674879048427e-05,
      "loss": 0.383,
      "mean_token_accuracy": 0.9129502369463444,
      "num_tokens": 1355048.0,
      "step": 200
    },
    {
      "entropy": 0.37178467601537707,
      "epoch": 0.37037037037037035,
      "grad_norm": 0.21257849037647247,
      "learning_rate": 9.775613308830824e-05,
      "loss": 0.37,
      "mean_token_accuracy": 0.9151540066301823,
      "num_tokens": 1691649.0,
      "step": 250
    },
    {
      "entropy": 0.36766253888607026,
      "epoch": 0.4444444444444444,
      "grad_norm": 0.31453582644462585,
      "learning_rate": 9.642019796948866e-05,
      "loss": 0.3634,
      "mean_token_accuracy": 0.9162409096956253,
      "num_tokens": 2029267.0,
      "step": 300
    },
    {
      "entropy": 0.35622088231146337,
      "epoch": 0.5185185185185185,
      "grad_norm": 0.20113833248615265,
      "learning_rate": 9.478748447168449e-05,
      "loss": 0.3533,
      "mean_token_accuracy": 0.9183417546749115,
      "num_tokens": 2369465.0,
      "step": 350
    },
    {
      "entropy": 0.3494216964393854,
      "epoch": 0.5925925925925926,
      "grad_norm": 0.20226770639419556,
      "learning_rate": 9.28684310265789e-05,
      "loss": 0.3492,
      "mean_token_accuracy": 0.919527986496687,
      "num_tokens": 2707235.0,
      "step": 400
    },
    {
      "entropy": 0.3433630639687181,
      "epoch": 0.6666666666666666,
      "grad_norm": 0.1721702516078949,
      "learning_rate": 9.067530672382544e-05,
      "loss": 0.3453,
      "mean_token_accuracy": 0.9204271128773689,
      "num_tokens": 3041282.0,
      "step": 450
    },
    {
      "entropy": 0.3465565311536193,
      "epoch": 0.7407407407407407,
      "grad_norm": 0.1957043558359146,
      "learning_rate": 8.822213287104348e-05,
      "loss": 0.3483,
      "mean_token_accuracy": 0.9200405342876912,
      "num_tokens": 3378157.0,
      "step": 500
    },
    {
      "entropy": 0.34500927191227676,
      "epoch": 0.8148148148148148,
      "grad_norm": 0.1682814359664917,
      "learning_rate": 8.552459335135381e-05,
      "loss": 0.3466,
      "mean_token_accuracy": 0.9210821086168289,
      "num_tokens": 3717487.0,
      "step": 550
    },
    {
      "entropy": 0.33874930143356324,
      "epoch": 0.8888888888888888,
      "grad_norm": 0.1756315678358078,
      "learning_rate": 8.259993435156559e-05,
      "loss": 0.3406,
      "mean_token_accuracy": 0.9231470887362957,
      "num_tokens": 4056814.0,
      "step": 600
    },
    {
      "entropy": 0.3372995613142848,
      "epoch": 0.9629629629629629,
      "grad_norm": 0.185526043176651,
      "learning_rate": 7.946685410208296e-05,
      "loss": 0.3378,
      "mean_token_accuracy": 0.924960307776928,
      "num_tokens": 4398736.0,
      "step": 650
    },
    {
      "entropy": 0.3282011279836297,
      "epoch": 1.037037037037037,
      "grad_norm": 0.23889201879501343,
      "learning_rate": 7.614538333345735e-05,
      "loss": 0.3293,
      "mean_token_accuracy": 0.9266741496324539,
      "num_tokens": 4737915.0,
      "step": 700
    },
    {
      "entropy": 0.32703890234231947,
      "epoch": 1.1111111111111112,
      "grad_norm": 0.21601562201976776,
      "learning_rate": 7.265675721386285e-05,
      "loss": 0.3275,
      "mean_token_accuracy": 0.9279554469883442,
      "num_tokens": 5074721.0,
      "step": 750
    },
    {
      "entropy": 0.3243879374489188,
      "epoch": 1.1851851851851851,
      "grad_norm": 0.22092069685459137,
      "learning_rate": 6.902327958623736e-05,
      "loss": 0.3244,
      "mean_token_accuracy": 0.9288445146381855,
      "num_tokens": 5413515.0,
      "step": 800
    },
    {
      "entropy": 0.3191816185042262,
      "epoch": 1.2592592592592593,
      "grad_norm": 0.2003597766160965,
      "learning_rate": 6.526818037306228e-05,
      "loss": 0.3197,
      "mean_token_accuracy": 0.9303379128873348,
      "num_tokens": 5751965.0,
      "step": 850
    },
    {
      "entropy": 0.311707403101027,
      "epoch": 1.3333333333333333,
      "grad_norm": 0.1664246916770935,
      "learning_rate": 6.14154670604355e-05,
      "loss": 0.3122,
      "mean_token_accuracy": 0.9324613013863563,
      "num_tokens": 6087993.0,
      "step": 900
    },
    {
      "entropy": 0.31203486569225786,
      "epoch": 1.4074074074074074,
      "grad_norm": 0.25155216455459595,
      "learning_rate": 5.7489771210944564e-05,
      "loss": 0.3129,
      "mean_token_accuracy": 0.932486515045166,
      "num_tokens": 6425349.0,
      "step": 950
    },
    {
      "entropy": 0.31312834514304994,
      "epoch": 1.4814814814814814,
      "grad_norm": 0.2299806922674179,
      "learning_rate": 5.351619098663021e-05,
      "loss": 0.3139,
      "mean_token_accuracy": 0.9324578355252743,
      "num_tokens": 6762060.0,
      "step": 1000
    }
  ],
  "logging_steps": 50,
  "max_steps": 2025,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 3,
  "save_steps": 500,
  "stateful_callbacks": {
    "TrainerControl": {
      "args": {
        "should_epoch_stop": false,
        "should_evaluate": false,
        "should_log": false,
        "should_save": true,
        "should_training_stop": false
      },
      "attributes": {}
    }
  },
  "total_flos": 2.8246929365127168e+17,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
}