WinstonHu committed (verified)
Commit 654a9af · Parent(s): 78b38ed

Upload folder stage2d to multi_stage2_run_stage1_token_merging/stage2d

Files changed (27)
  1. multi_stage2_run_stage1_token_merging/stage2d/20250923_095348/20250923_095348.log +0 -0
  2. multi_stage2_run_stage1_token_merging/stage2d/20250923_095348/vis_data/20250923_095348.json +0 -0
  3. multi_stage2_run_stage1_token_merging/stage2d/20250923_095348/vis_data/config.py +258 -0
  4. multi_stage2_run_stage1_token_merging/stage2d/20250923_095348/vis_data/eval_outputs_iter_4095.txt +24 -0
  5. multi_stage2_run_stage1_token_merging/stage2d/20250923_095348/vis_data/eval_outputs_iter_5617.txt +24 -0
  6. multi_stage2_run_stage1_token_merging/stage2d/20250923_095348/vis_data/scalars.json +0 -0
  7. multi_stage2_run_stage1_token_merging/stage2d/iter_4096.pth/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  8. multi_stage2_run_stage1_token_merging/stage2d/iter_4096.pth/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
  9. multi_stage2_run_stage1_token_merging/stage2d/iter_4096.pth/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt +3 -0
  10. multi_stage2_run_stage1_token_merging/stage2d/iter_4096.pth/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt +3 -0
  11. multi_stage2_run_stage1_token_merging/stage2d/iter_4096.pth/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt +3 -0
  12. multi_stage2_run_stage1_token_merging/stage2d/iter_4096.pth/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt +3 -0
  13. multi_stage2_run_stage1_token_merging/stage2d/iter_4096.pth/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt +3 -0
  14. multi_stage2_run_stage1_token_merging/stage2d/iter_4096.pth/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt +3 -0
  15. multi_stage2_run_stage1_token_merging/stage2d/iter_4096.pth/mp_rank_00_model_states.pt +3 -0
  16. multi_stage2_run_stage1_token_merging/stage2d/iter_5618.pth/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt +3 -0
  17. multi_stage2_run_stage1_token_merging/stage2d/iter_5618.pth/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt +3 -0
  18. multi_stage2_run_stage1_token_merging/stage2d/iter_5618.pth/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt +3 -0
  19. multi_stage2_run_stage1_token_merging/stage2d/iter_5618.pth/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt +3 -0
  20. multi_stage2_run_stage1_token_merging/stage2d/iter_5618.pth/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt +3 -0
  21. multi_stage2_run_stage1_token_merging/stage2d/iter_5618.pth/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt +3 -0
  22. multi_stage2_run_stage1_token_merging/stage2d/iter_5618.pth/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt +3 -0
  23. multi_stage2_run_stage1_token_merging/stage2d/iter_5618.pth/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt +3 -0
  24. multi_stage2_run_stage1_token_merging/stage2d/iter_5618.pth/mp_rank_00_model_states.pt +3 -0
  25. multi_stage2_run_stage1_token_merging/stage2d/last_checkpoint +1 -0
  26. multi_stage2_run_stage1_token_merging/stage2d/temp_config_stage_2d.py +258 -0
  27. multi_stage2_run_stage1_token_merging/stage2d/zero_to_fp32.py +760 -0
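Note on the checkpoint layout: each `iter_*.pth` entry above is a DeepSpeed ZeRO-2 shard directory (eight `bf16_zero_pp_rank_*_optim_states.pt` files, one per data-parallel rank, plus `mp_rank_00_model_states.pt`), and the commit bundles `zero_to_fp32.py` (file 27) for offline consolidation. A minimal sketch of that conversion, assuming the folder has been downloaded locally with `deepspeed` installed; the local paths are illustrative:

# Sketch: consolidate the iter_5618.pth shards into a single fp32 state_dict
# using the zero_to_fp32.py helper uploaded with this commit.
from zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

state_dict = get_fp32_state_dict_from_zero_checkpoint(
    'multi_stage2_run_stage1_token_merging/stage2d',  # folder holding iter_*.pth
    tag='iter_5618.pth')                              # which shard directory to merge
# The run saved shards with exclude_frozen_parameters=True (see the config
# below), so only the trainable weights are present in the shards.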
multi_stage2_run_stage1_token_merging/stage2d/20250923_095348/20250923_095348.log ADDED
The diff for this file is too large to render. See raw diff
 
multi_stage2_run_stage1_token_merging/stage2d/20250923_095348/vis_data/20250923_095348.json ADDED
The diff for this file is too large to render. See raw diff
 
multi_stage2_run_stage1_token_merging/stage2d/20250923_095348/vis_data/config.py ADDED
@@ -0,0 +1,258 @@
+ SYSTEM = ''
+ accumulative_counts = 64
+ batch_size = 1
+ betas = (
+     0.9,
+     0.999,
+ )
+ bnb = dict(
+     bnb_4bit_compute_dtype='torch.bfloat16',
+     bnb_4bit_quant_type='nf4',
+     bnb_4bit_use_double_quant=True,
+     llm_int8_has_fp16_weight=False,
+     llm_int8_threshold=6.0,
+     load_in_4bit=True,
+     load_in_8bit=False,
+     type='transformers.BitsAndBytesConfig')
+ custom_hooks = [
+     dict(
+         tokenizer=dict(
+             padding_side='right',
+             pretrained_model_name_or_path='Qwen/Qwen2.5-7B-Instruct',
+             trust_remote_code=True,
+             type='transformers.AutoTokenizer.from_pretrained'),
+         type='xtuner.engine.hooks.DatasetInfoHook'),
+     dict(
+         evaluation_images=[
+             '/mnt/bn/xudong-va/meilong/datasets/Token_Compression/skcm_224x224_b20_t15/h5_files/TCGA-EB-A5UN-06Z-00-DX1.h5',
+             '/mnt/bn/xudong-va/meilong/datasets/Token_Compression/skcm_224x224_b20_t15/h5_files/TCGA-EE-A3AG-01Z-00-DX1.h5',
+             '/mnt/bn/xudong-va/meilong/datasets/Token_Compression/lusc_224x224_b20_t15/h5_files/TCGA-NC-A5HP-01Z-00-DX1.h5',
+         ],
+         evaluation_inputs=[
+             'Are the tumor cells organized in a lobulated pattern within the slide?',
+             'Craft a comprehensive outline capturing the key findings of the pathology report based on the whole slide image.',
+             'Based on the observed features, what do you think is the correct histological classification of the tumor? A) Poorly differentiated keratinizing squamous cell carcinoma B) Moderately differentiated squamous cell carcinoma C) Well-differentiated squamous cell carcinoma D) Adenocarcinoma',
+         ],
+         every_n_iters=512,
+         prompt_template='xtuner.utils.PROMPT_TEMPLATE.qwen_chat',
+         system='',
+         tokenizer=dict(
+             padding_side='right',
+             pretrained_model_name_or_path='Qwen/Qwen2.5-7B-Instruct',
+             trust_remote_code=True,
+             type='transformers.AutoTokenizer.from_pretrained'),
+         type='xtuner.engine.hooks.EvaluateChatHook'),
+     dict(type='xtuner.engine.hooks.ThroughputHook'),
+ ]
+ data_path = '/mnt/bn/yuxuanwang/meilong/code/projects/efficient_foundation_wsi_llava/merged_dataset/stage2_tasks_plus_report.json'
+ dataloader_num_workers = 10
+ default_hooks = dict(
+     checkpoint=dict(
+         by_epoch=False,
+         interval=4096,
+         max_keep_ckpts=8,
+         type='mmengine.hooks.CheckpointHook'),
+     logger=dict(
+         interval=10,
+         log_metric_by_epoch=False,
+         type='mmengine.hooks.LoggerHook'),
+     param_scheduler=dict(type='mmengine.hooks.ParamSchedulerHook'),
+     sampler_seed=dict(type='mmengine.hooks.DistSamplerSeedHook'),
+     timer=dict(type='mmengine.hooks.IterTimerHook'))
+ env_cfg = dict(
+     cudnn_benchmark=False,
+     dist_cfg=dict(backend='nccl'),
+     mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))
+ evaluation_freq = 512
+ evaluation_images = [
+     '/mnt/bn/xudong-va/meilong/datasets/Token_Compression/skcm_224x224_b20_t15/h5_files/TCGA-EB-A5UN-06Z-00-DX1.h5',
+     '/mnt/bn/xudong-va/meilong/datasets/Token_Compression/skcm_224x224_b20_t15/h5_files/TCGA-EE-A3AG-01Z-00-DX1.h5',
+     '/mnt/bn/xudong-va/meilong/datasets/Token_Compression/lusc_224x224_b20_t15/h5_files/TCGA-NC-A5HP-01Z-00-DX1.h5',
+ ]
+ evaluation_inputs = [
+     'Are the tumor cells organized in a lobulated pattern within the slide?',
+     'Craft a comprehensive outline capturing the key findings of the pathology report based on the whole slide image.',
+     'Based on the observed features, what do you think is the correct histological classification of the tumor? A) Poorly differentiated keratinizing squamous cell carcinoma B) Moderately differentiated squamous cell carcinoma C) Well-differentiated squamous cell carcinoma D) Adenocarcinoma',
+ ]
+ image_path_list = None
+ launcher = 'pytorch'
+ llava_dataset = dict(
+     data_path=
+     '/mnt/bn/yuxuanwang/meilong/code/projects/efficient_foundation_wsi_llava/merged_dataset/stage2_tasks_plus_report.json',
+     dataset_map_fn='xtuner.dataset.map_fns.llava_map_fn',
+     identifier='_224x224_b20_t15',
+     image_feature_prefix='/mnt/bn/xudong-va/meilong/datasets/Token_Compression',
+     image_feature_suffix='.h5',
+     image_folder='',
+     image_path_list=None,
+     max_length=15836,
+     pad_image_to_square=False,
+     per_image_length=10240,
+     sample_num=10240,
+     sample_strategy='linspace',
+     template_map_fn=dict(
+         template='xtuner.utils.PROMPT_TEMPLATE.qwen_chat',
+         type='xtuner.dataset.map_fns.template_map_fn_factory'),
+     tokenizer=dict(
+         padding_side='right',
+         pretrained_model_name_or_path='Qwen/Qwen2.5-7B-Instruct',
+         trust_remote_code=True,
+         type='transformers.AutoTokenizer.from_pretrained'),
+     type='xtuner.dataset.LLaVADataset',
+     unwanted_prefix_csv=
+     '/mnt/bn/yuxuanwang/meilong/code/projects/efficient_foundation_wsi_llava/merged_dataset/missing_slides3.csv'
+ )
+ llm_lora = dict(
+     bias='none',
+     lora_alpha=256,
+     lora_dropout=0.05,
+     r=128,
+     task_type='CAUSAL_LM',
+     type='peft.LoraConfig')
+ llm_name_or_path = 'Qwen/Qwen2.5-7B-Instruct'
+ load_from = '/mnt/bn/yuxuanwang/meilong/code/projects/efficient_foundation_wsi_llava/curriculum_training/models/outputs/multi_stage2_run_stage1_token_merging/stage2c/iter_2476.pth'
+ log_level = 'INFO'
+ log_processor = dict(
+     by_epoch=False,
+     mean_pattern='.*(loss|time|data_time|grad_norm|tflops).*',
+     window_size=1)
+ lr = 5e-06
+ max_epochs = 2
+ max_length = 15836
+ max_norm = 1
+ model = dict(
+     enable_token_merge=True,
+     freeze_llm=True,
+     freeze_mm_in_stage2=False,
+     llm=dict(
+         attn_implementation='flash_attention_2',
+         pretrained_model_name_or_path='Qwen/Qwen2.5-7B-Instruct',
+         quantization_config=dict(
+             bnb_4bit_compute_dtype='torch.bfloat16',
+             bnb_4bit_quant_type='nf4',
+             bnb_4bit_use_double_quant=True,
+             llm_int8_has_fp16_weight=False,
+             llm_int8_threshold=6.0,
+             load_in_4bit=True,
+             load_in_8bit=False,
+             type='transformers.BitsAndBytesConfig'),
+         torch_dtype='torch.bfloat16',
+         trust_remote_code=True,
+         type='transformers.AutoModelForCausalLM.from_pretrained'),
+     llm_lora=dict(
+         bias='none',
+         lora_alpha=256,
+         lora_dropout=0.05,
+         r=128,
+         task_type='CAUSAL_LM',
+         type='peft.LoraConfig'),
+     max_position_embeddings=None,
+     projector_pth=
+     '/mnt/bn/yuxuanwang/meilong/code/projects/efficient_foundation_wsi_llava/checkpoints/stage_1/token_merging/stage1_qwen25_token_merging/projector/projector.safetensors',
+     token_merge_pth=
+     '/mnt/bn/yuxuanwang/meilong/code/projects/efficient_foundation_wsi_llava/checkpoints/stage_1/token_merging/stage1_qwen25_token_merging/token_merger/merger.safetensors',
+     train_stage='2',
+     type='xtuner.model.llava_no_longnet.LLaVAModel',
+     use_perceiver_resampler=False)
+ optim_type = 'torch.optim.AdamW'
+ optim_wrapper = dict(
+     optimizer=dict(
+         betas=(
+             0.9,
+             0.999,
+         ),
+         lr=2e-05,
+         type='torch.optim.AdamW',
+         weight_decay=0.01),
+     paramwise_cfg=dict(
+         bias_decay_mult=0.0,
+         norm_decay_mult=0.0,
+         paramwise_cfg=dict(
+             custom_keys=dict({'^projector\.': dict(lr_mult=1.0)}))),
+     type='DeepSpeedOptimWrapper')
+ param_scheduler = [
+     dict(
+         begin=0,
+         by_epoch=True,
+         convert_to_iter_based=True,
+         end=0.1,
+         start_factor=0.01,
+         type='mmengine.optim.LinearLR'),
+     dict(
+         begin=0.1,
+         by_epoch=True,
+         convert_to_iter_based=True,
+         end=2,
+         eta_min=0.0,
+         type='mmengine.optim.CosineAnnealingLR'),
+ ]
+ per_image_length = 10240
+ prompt_template = 'xtuner.utils.PROMPT_TEMPLATE.qwen_chat'
+ randomness = dict(deterministic=False, seed=None)
+ resume = False
+ runner_type = 'FlexibleRunner'
+ sample_type = 'wsi'
+ save_steps = 4096
+ save_total_limit = 8
+ seed = 42
+ strategy = dict(
+     config=dict(
+         bf16=dict(enabled=True),
+         fp16=dict(enabled=False, initial_scale_power=16),
+         gradient_accumulation_steps='auto',
+         gradient_clipping='auto',
+         train_micro_batch_size_per_gpu='auto',
+         zero_allow_untested_optimizer=True,
+         zero_force_ds_cpu_optimizer=False,
+         zero_optimization=dict(overlap_comm=False, stage=2)),
+     exclude_frozen_parameters=True,
+     gradient_accumulation_steps=64,
+     gradient_clipping=1,
+     sequence_parallel_size=1,
+     train_micro_batch_size_per_gpu=1,
+     type='xtuner.engine.DeepSpeedStrategy')
+ tokenizer = dict(
+     padding_side='right',
+     pretrained_model_name_or_path='Qwen/Qwen2.5-7B-Instruct',
+     trust_remote_code=True,
+     type='transformers.AutoTokenizer.from_pretrained')
+ train_cfg = dict(max_epochs=1, type='xtuner.engine.runner.TrainLoop')
+ train_dataloader = dict(
+     batch_size=1,
+     collate_fn=dict(type='xtuner.dataset.collate_fns.default_collate_fn'),
+     dataset=dict(
+         data_path=
+         '/mnt/bn/yuxuanwang/meilong/code/projects/efficient_foundation_wsi_llava/curriculum_training/merged_dataset_curriculum/stage2d_final_mix.json',
+         dataset_map_fn='xtuner.dataset.map_fns.llava_map_fn',
+         identifier='_224x224_b20_t15',
+         image_feature_prefix=
+         '/mnt/bn/xudong-va/meilong/datasets/Token_Compression',
+         image_feature_suffix='.h5',
+         image_folder='',
+         image_path_list=None,
+         max_length=15836,
+         pad_image_to_square=False,
+         per_image_length=10240,
+         sample_num=10240,
+         sample_strategy='linspace',
+         template_map_fn=dict(
+             template='xtuner.utils.PROMPT_TEMPLATE.qwen_chat',
+             type='xtuner.dataset.map_fns.template_map_fn_factory'),
+         tokenizer=dict(
+             padding_side='right',
+             pretrained_model_name_or_path='Qwen/Qwen2.5-7B-Instruct',
+             trust_remote_code=True,
+             type='transformers.AutoTokenizer.from_pretrained'),
+         type='xtuner.dataset.LLaVADataset',
+         unwanted_prefix_csv=
+         '/mnt/bn/yuxuanwang/meilong/code/projects/efficient_foundation_wsi_llava/merged_dataset/missing_slides3.csv'
+     ),
+     num_workers=10,
+     persistent_workers=True,
+     pin_memory=True,
+     prefetch_factor=4,
+     sampler=dict(shuffle=True, type='mmengine.dataset.DefaultSampler'))
+ visualizer = None
+ warmup_ratio = 0.05
+ weight_decay = 0.01
+ work_dir = '/mnt/bn/yuxuanwang/meilong/code/projects/efficient_foundation_wsi_llava/curriculum_training/models/outputs/multi_stage2_run_stage1_token_merging/stage2d'
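The dumped file above is an mmengine-format Python config, so it can be re-parsed to inspect the run's hyperparameters. A minimal sketch, assuming mmengine is installed and the file has been downloaded; the local path mirrors this repo's layout:

# Sketch: load the dumped xtuner/mmengine config and read a few fields.
from mmengine.config import Config

cfg = Config.fromfile(
    'multi_stage2_run_stage1_token_merging/stage2d/20250923_095348/vis_data/config.py')
print(cfg.llm_name_or_path)                        # 'Qwen/Qwen2.5-7B-Instruct'
print(cfg.strategy['gradient_accumulation_steps'])  # 64
print(cfg.train_dataloader['batch_size'])           # 1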
multi_stage2_run_stage1_token_merging/stage2d/20250923_095348/vis_data/eval_outputs_iter_4095.txt ADDED
@@ -0,0 +1,24 @@
+ Eval output 1:
+ <|im_start|>user
+ <image>
+ Are the tumor cells organized in a lobulated pattern within the slide?<|im_end|>
+ <|im_start|>assistant
+ Yes, the tumor cells are organized in a lobulated pattern. This arrangement is characterized by distinct, rounded masses of cells that are separated by fibrous septa, creating a lobular appearance.<|im_end|>
+
+
+ Eval output 2:
+ <|im_start|>user
+ <image>
+ Craft a comprehensive outline capturing the key findings of the pathology report based on the whole slide image.<|im_end|>
+ <|im_start|>assistant
+ Microscopic observation of the pathology slide reveals a proliferation of epithelial cells forming glandular structures, indicative of adenocarcinoma. The tumor cells exhibit moderate differentiation, with some areas showing more aggressive features. There is evidence of vascular invasion, with tumor cells present within blood vessels. The tumor extends into the surrounding stroma, but there is no lymphovascular space invasion noted. The adjacent lung tissue shows chronic inflammation, and there is no evidence of pleural invasion.<|im_end|>
+
+
+ Eval output 3:
+ <|im_start|>user
+ <image>
+ Based on the observed features, what do you think is the correct histological classification of the tumor? A) Poorly differentiated keratinizing squamous cell carcinoma B) Moderately differentiated squamous cell carcinoma C) Well-differentiated squamous cell carcinoma D) Adenocarcinoma<|im_end|>
+ <|im_start|>assistant
+ B) Moderately differentiated squamous cell carcinoma<|im_end|>
+
+
multi_stage2_run_stage1_token_merging/stage2d/20250923_095348/vis_data/eval_outputs_iter_5617.txt ADDED
@@ -0,0 +1,24 @@
+ Eval output 1:
+ <|im_start|>user
+ <image>
+ Are the tumor cells organized in a lobulated pattern within the slide?<|im_end|>
+ <|im_start|>assistant
+ Yes, the tumor cells are organized in a lobulated pattern. This arrangement is characterized by distinct, rounded masses of cells that are separated by fibrous septa, creating a lobulated appearance.<|im_end|>
+
+
+ Eval output 2:
+ <|im_start|>user
+ <image>
+ Craft a comprehensive outline capturing the key findings of the pathology report based on the whole slide image.<|im_end|>
+ <|im_start|>assistant
+ Microscopic observation of the pathology slide reveals a moderately differentiated squamous cell carcinoma. The tumor cells exhibit a keratinizing pattern, with some areas showing intercellular bridges and keratin pearls. The nuclear features include moderate variability in size and shape, with occasional prominent nucleoli. The mitotic rate is moderate, with approximately 10 mitoses per 10 high-power fields. There is no evidence of vascular invasion, and the tumor is confined to the bladder wall without involvement of the perivesical fat. The adjacent urothelium is hyperplastic, and there is no evidence of carcinoma in situ.<|im_end|>
+
+
+ Eval output 3:
+ <|im_start|>user
+ <image>
+ Based on the observed features, what do you think is the correct histological classification of the tumor? A) Poorly differentiated keratinizing squamous cell carcinoma B) Moderately differentiated squamous cell carcinoma C) Well-differentiated squamous cell carcinoma D) Adenocarcinoma<|im_end|>
+ <|im_start|>assistant
+ B) Moderately differentiated squamous cell carcinoma<|im_end|>
+
+
multi_stage2_run_stage1_token_merging/stage2d/20250923_095348/vis_data/scalars.json ADDED
The diff for this file is too large to render. See raw diff
 
multi_stage2_run_stage1_token_merging/stage2d/iter_4096.pth/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fbd80cd4a6c0084c5c41464cf0fd44d981163e6dd97519b98729244359253d3c
+ size 515355354
multi_stage2_run_stage1_token_merging/stage2d/iter_4096.pth/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ade331502f5990df64c4dd7dbfa877ceb47161b4a1bdeea1404cc561bcba8a9
+ size 515355354
multi_stage2_run_stage1_token_merging/stage2d/iter_4096.pth/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:846e497e3eeda015a821490479eb8aa1a55f6e21bb471da1e3145fa06748bdd1
+ size 515355354
multi_stage2_run_stage1_token_merging/stage2d/iter_4096.pth/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fecc889504a61f606fdf4674634445869bd10fe22c15310f28a1d9950801265b
+ size 515355354
multi_stage2_run_stage1_token_merging/stage2d/iter_4096.pth/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ff9fcb26c26bc88d895491eb8ba1ec7f515ed9f926729a30c6a2d1a87e31a769
+ size 515355354
multi_stage2_run_stage1_token_merging/stage2d/iter_4096.pth/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:446303108b0096735a86273c697fa22fce3c0e4ea1c0570ac4e2e2f1e5570eeb
+ size 515355354
multi_stage2_run_stage1_token_merging/stage2d/iter_4096.pth/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18b887ef146debb5b3143dac2785bb1fef31ea29ac7138cf7893baff748a9981
+ size 515355354
multi_stage2_run_stage1_token_merging/stage2d/iter_4096.pth/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:0048cfacf5150588c7958b41955d182557964ab0920d74df59d5239f86e83fe7
+ size 515355354
multi_stage2_run_stage1_token_merging/stage2d/iter_4096.pth/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:aac2239bba7df7395e45f6547b5bf6a50608a02caaa5d44eb315a3f3f43f673d
+ size 687171552
multi_stage2_run_stage1_token_merging/stage2d/iter_5618.pth/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a3dbb98a154cbea38883bdc57840a35362a87249a9de83ef89238d79f19b7ed
+ size 515355354
multi_stage2_run_stage1_token_merging/stage2d/iter_5618.pth/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2d6fee967de31e75eb8a76ad38fbe0ce9c9cacc4d4d64f184238c6629084c643
+ size 515355354
multi_stage2_run_stage1_token_merging/stage2d/iter_5618.pth/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:18de5299f7610f7f4c2c2e9e6f9105b5651a187b1ac52f948e1579adc87c98bb
+ size 515355354
multi_stage2_run_stage1_token_merging/stage2d/iter_5618.pth/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f6bedd8a3596ebf712e43c6b2b90e27a62526c909237a8fdac7dabcc5f8f5a5b
+ size 515355354
multi_stage2_run_stage1_token_merging/stage2d/iter_5618.pth/bf16_zero_pp_rank_4_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c5340cf8bcdebda38121836fc45f8ccc7c5c6ea986114fddbbed94eb79267bd3
+ size 515355354
multi_stage2_run_stage1_token_merging/stage2d/iter_5618.pth/bf16_zero_pp_rank_5_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ab8e1a5cd4934f49c24fa18fd3c2dbcfe4b359914e18f15954913bceaf46c78a
+ size 515355354
multi_stage2_run_stage1_token_merging/stage2d/iter_5618.pth/bf16_zero_pp_rank_6_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b5e3c498705e15e126701e8fd0e3acfbdf037a4d4c4d58c6997f6707eaa9a3f3
+ size 515355354
multi_stage2_run_stage1_token_merging/stage2d/iter_5618.pth/bf16_zero_pp_rank_7_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f586379cbf731f563ee2c06fef7ea44fce2007cfe4146da1aea99f9627b6d7db
+ size 515355354
multi_stage2_run_stage1_token_merging/stage2d/iter_5618.pth/mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:faf2a5c22d82d106f7870839b5f3d12be0c7f18817442d9b6b11698c27a45a2a
+ size 687398304
multi_stage2_run_stage1_token_merging/stage2d/last_checkpoint ADDED
@@ -0,0 +1 @@
+ /mnt/bn/yuxuanwang/meilong/code/projects/efficient_foundation_wsi_llava/curriculum_training/models/outputs/multi_stage2_run_stage1_token_merging/stage2d/iter_5618.pth
multi_stage2_run_stage1_token_merging/stage2d/temp_config_stage_2d.py ADDED
@@ -0,0 +1,258 @@
+ SYSTEM = ''
+ accumulative_counts = 64
+ batch_size = 1
+ betas = (
+     0.9,
+     0.999,
+ )
+ bnb = dict(
+     bnb_4bit_compute_dtype='torch.bfloat16',
+     bnb_4bit_quant_type='nf4',
+     bnb_4bit_use_double_quant=True,
+     llm_int8_has_fp16_weight=False,
+     llm_int8_threshold=6.0,
+     load_in_4bit=True,
+     load_in_8bit=False,
+     type='transformers.BitsAndBytesConfig')
+ custom_hooks = [
+     dict(
+         tokenizer=dict(
+             padding_side='right',
+             pretrained_model_name_or_path='Qwen/Qwen2.5-7B-Instruct',
+             trust_remote_code=True,
+             type='transformers.AutoTokenizer.from_pretrained'),
+         type='xtuner.engine.hooks.DatasetInfoHook'),
+     dict(
+         evaluation_images=[
+             '/mnt/bn/xudong-va/meilong/datasets/Token_Compression/skcm_224x224_b20_t15/h5_files/TCGA-EB-A5UN-06Z-00-DX1.h5',
+             '/mnt/bn/xudong-va/meilong/datasets/Token_Compression/skcm_224x224_b20_t15/h5_files/TCGA-EE-A3AG-01Z-00-DX1.h5',
+             '/mnt/bn/xudong-va/meilong/datasets/Token_Compression/lusc_224x224_b20_t15/h5_files/TCGA-NC-A5HP-01Z-00-DX1.h5',
+         ],
+         evaluation_inputs=[
+             'Are the tumor cells organized in a lobulated pattern within the slide?',
+             'Craft a comprehensive outline capturing the key findings of the pathology report based on the whole slide image.',
+             'Based on the observed features, what do you think is the correct histological classification of the tumor? A) Poorly differentiated keratinizing squamous cell carcinoma B) Moderately differentiated squamous cell carcinoma C) Well-differentiated squamous cell carcinoma D) Adenocarcinoma',
+         ],
+         every_n_iters=512,
+         prompt_template='xtuner.utils.PROMPT_TEMPLATE.qwen_chat',
+         system='',
+         tokenizer=dict(
+             padding_side='right',
+             pretrained_model_name_or_path='Qwen/Qwen2.5-7B-Instruct',
+             trust_remote_code=True,
+             type='transformers.AutoTokenizer.from_pretrained'),
+         type='xtuner.engine.hooks.EvaluateChatHook'),
+     dict(type='xtuner.engine.hooks.ThroughputHook'),
+ ]
+ data_path = '/mnt/bn/yuxuanwang/meilong/code/projects/efficient_foundation_wsi_llava/merged_dataset/stage2_tasks_plus_report.json'
+ dataloader_num_workers = 10
+ default_hooks = dict(
+     checkpoint=dict(
+         by_epoch=False,
+         interval=4096,
+         max_keep_ckpts=8,
+         type='mmengine.hooks.CheckpointHook'),
+     logger=dict(
+         interval=10,
+         log_metric_by_epoch=False,
+         type='mmengine.hooks.LoggerHook'),
+     param_scheduler=dict(type='mmengine.hooks.ParamSchedulerHook'),
+     sampler_seed=dict(type='mmengine.hooks.DistSamplerSeedHook'),
+     timer=dict(type='mmengine.hooks.IterTimerHook'))
+ env_cfg = dict(
+     cudnn_benchmark=False,
+     dist_cfg=dict(backend='nccl'),
+     mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0))
+ evaluation_freq = 512
+ evaluation_images = [
+     '/mnt/bn/xudong-va/meilong/datasets/Token_Compression/skcm_224x224_b20_t15/h5_files/TCGA-EB-A5UN-06Z-00-DX1.h5',
+     '/mnt/bn/xudong-va/meilong/datasets/Token_Compression/skcm_224x224_b20_t15/h5_files/TCGA-EE-A3AG-01Z-00-DX1.h5',
+     '/mnt/bn/xudong-va/meilong/datasets/Token_Compression/lusc_224x224_b20_t15/h5_files/TCGA-NC-A5HP-01Z-00-DX1.h5',
+ ]
+ evaluation_inputs = [
+     'Are the tumor cells organized in a lobulated pattern within the slide?',
+     'Craft a comprehensive outline capturing the key findings of the pathology report based on the whole slide image.',
+     'Based on the observed features, what do you think is the correct histological classification of the tumor? A) Poorly differentiated keratinizing squamous cell carcinoma B) Moderately differentiated squamous cell carcinoma C) Well-differentiated squamous cell carcinoma D) Adenocarcinoma',
+ ]
+ image_path_list = None
+ launcher = 'pytorch'
+ llava_dataset = dict(
+     data_path=
+     '/mnt/bn/yuxuanwang/meilong/code/projects/efficient_foundation_wsi_llava/merged_dataset/stage2_tasks_plus_report.json',
+     dataset_map_fn='xtuner.dataset.map_fns.llava_map_fn',
+     identifier='_224x224_b20_t15',
+     image_feature_prefix='/mnt/bn/xudong-va/meilong/datasets/Token_Compression',
+     image_feature_suffix='.h5',
+     image_folder='',
+     image_path_list=None,
+     max_length=15836,
+     pad_image_to_square=False,
+     per_image_length=10240,
+     sample_num=10240,
+     sample_strategy='linspace',
+     template_map_fn=dict(
+         template='xtuner.utils.PROMPT_TEMPLATE.qwen_chat',
+         type='xtuner.dataset.map_fns.template_map_fn_factory'),
+     tokenizer=dict(
+         padding_side='right',
+         pretrained_model_name_or_path='Qwen/Qwen2.5-7B-Instruct',
+         trust_remote_code=True,
+         type='transformers.AutoTokenizer.from_pretrained'),
+     type='xtuner.dataset.LLaVADataset',
+     unwanted_prefix_csv=
+     '/mnt/bn/yuxuanwang/meilong/code/projects/efficient_foundation_wsi_llava/merged_dataset/missing_slides3.csv'
+ )
+ llm_lora = dict(
+     bias='none',
+     lora_alpha=256,
+     lora_dropout=0.05,
+     r=128,
+     task_type='CAUSAL_LM',
+     type='peft.LoraConfig')
+ llm_name_or_path = 'Qwen/Qwen2.5-7B-Instruct'
+ load_from = '/mnt/bn/yuxuanwang/meilong/code/projects/efficient_foundation_wsi_llava/curriculum_training/models/outputs/multi_stage2_run_stage1_token_merging/stage2c/iter_2476.pth'
+ log_level = 'INFO'
+ log_processor = dict(
+     by_epoch=False,
+     mean_pattern='.*(loss|time|data_time|grad_norm|tflops).*',
+     window_size=1)
+ lr = 5e-06
+ max_epochs = 2
+ max_length = 15836
+ max_norm = 1
+ model = dict(
+     enable_token_merge=True,
+     freeze_llm=True,
+     freeze_mm_in_stage2=False,
+     llm=dict(
+         attn_implementation='flash_attention_2',
+         pretrained_model_name_or_path='Qwen/Qwen2.5-7B-Instruct',
+         quantization_config=dict(
+             bnb_4bit_compute_dtype='torch.bfloat16',
+             bnb_4bit_quant_type='nf4',
+             bnb_4bit_use_double_quant=True,
+             llm_int8_has_fp16_weight=False,
+             llm_int8_threshold=6.0,
+             load_in_4bit=True,
+             load_in_8bit=False,
+             type='transformers.BitsAndBytesConfig'),
+         torch_dtype='torch.bfloat16',
+         trust_remote_code=True,
+         type='transformers.AutoModelForCausalLM.from_pretrained'),
+     llm_lora=dict(
+         bias='none',
+         lora_alpha=256,
+         lora_dropout=0.05,
+         r=128,
+         task_type='CAUSAL_LM',
+         type='peft.LoraConfig'),
+     max_position_embeddings=None,
+     projector_pth=
+     '/mnt/bn/yuxuanwang/meilong/code/projects/efficient_foundation_wsi_llava/checkpoints/stage_1/token_merging/stage1_qwen25_token_merging/projector/projector.safetensors',
+     token_merge_pth=
+     '/mnt/bn/yuxuanwang/meilong/code/projects/efficient_foundation_wsi_llava/checkpoints/stage_1/token_merging/stage1_qwen25_token_merging/token_merger/merger.safetensors',
+     train_stage='2',
+     type='xtuner.model.llava_no_longnet.LLaVAModel',
+     use_perceiver_resampler=False)
+ optim_type = 'torch.optim.AdamW'
+ optim_wrapper = dict(
+     optimizer=dict(
+         betas=(
+             0.9,
+             0.999,
+         ),
+         lr=2e-05,
+         type='torch.optim.AdamW',
+         weight_decay=0.01),
+     paramwise_cfg=dict(
+         bias_decay_mult=0.0,
+         norm_decay_mult=0.0,
+         paramwise_cfg=dict(
+             custom_keys=dict({'^projector\.': dict(lr_mult=1.0)}))),
+     type='DeepSpeedOptimWrapper')
+ param_scheduler = [
+     dict(
+         begin=0,
+         by_epoch=True,
+         convert_to_iter_based=True,
+         end=0.1,
+         start_factor=0.01,
+         type='mmengine.optim.LinearLR'),
+     dict(
+         begin=0.1,
+         by_epoch=True,
+         convert_to_iter_based=True,
+         end=2,
+         eta_min=0.0,
+         type='mmengine.optim.CosineAnnealingLR'),
+ ]
+ per_image_length = 10240
+ prompt_template = 'xtuner.utils.PROMPT_TEMPLATE.qwen_chat'
+ randomness = dict(deterministic=False, seed=None)
+ resume = False
+ runner_type = 'FlexibleRunner'
+ sample_type = 'wsi'
+ save_steps = 4096
+ save_total_limit = 8
+ seed = 42
+ strategy = dict(
+     config=dict(
+         bf16=dict(enabled=True),
+         fp16=dict(enabled=False, initial_scale_power=16),
+         gradient_accumulation_steps='auto',
+         gradient_clipping='auto',
+         train_micro_batch_size_per_gpu='auto',
+         zero_allow_untested_optimizer=True,
+         zero_force_ds_cpu_optimizer=False,
+         zero_optimization=dict(overlap_comm=False, stage=2)),
+     exclude_frozen_parameters=True,
+     gradient_accumulation_steps=64,
+     gradient_clipping=1,
+     sequence_parallel_size=1,
+     train_micro_batch_size_per_gpu=1,
+     type='xtuner.engine.DeepSpeedStrategy')
+ tokenizer = dict(
+     padding_side='right',
+     pretrained_model_name_or_path='Qwen/Qwen2.5-7B-Instruct',
+     trust_remote_code=True,
+     type='transformers.AutoTokenizer.from_pretrained')
+ train_cfg = dict(max_epochs=1, type='xtuner.engine.runner.TrainLoop')
+ train_dataloader = dict(
+     batch_size=1,
+     collate_fn=dict(type='xtuner.dataset.collate_fns.default_collate_fn'),
+     dataset=dict(
+         data_path=
+         '/mnt/bn/yuxuanwang/meilong/code/projects/efficient_foundation_wsi_llava/curriculum_training/merged_dataset_curriculum/stage2d_final_mix.json',
+         dataset_map_fn='xtuner.dataset.map_fns.llava_map_fn',
+         identifier='_224x224_b20_t15',
+         image_feature_prefix=
+         '/mnt/bn/xudong-va/meilong/datasets/Token_Compression',
+         image_feature_suffix='.h5',
+         image_folder='',
+         image_path_list=None,
+         max_length=15836,
+         pad_image_to_square=False,
+         per_image_length=10240,
+         sample_num=10240,
+         sample_strategy='linspace',
+         template_map_fn=dict(
+             template='xtuner.utils.PROMPT_TEMPLATE.qwen_chat',
+             type='xtuner.dataset.map_fns.template_map_fn_factory'),
+         tokenizer=dict(
+             padding_side='right',
+             pretrained_model_name_or_path='Qwen/Qwen2.5-7B-Instruct',
+             trust_remote_code=True,
+             type='transformers.AutoTokenizer.from_pretrained'),
+         type='xtuner.dataset.LLaVADataset',
+         unwanted_prefix_csv=
+         '/mnt/bn/yuxuanwang/meilong/code/projects/efficient_foundation_wsi_llava/merged_dataset/missing_slides3.csv'
+     ),
+     num_workers=10,
+     persistent_workers=True,
+     pin_memory=True,
+     prefetch_factor=4,
+     sampler=dict(shuffle=True, type='mmengine.dataset.DefaultSampler'))
+ visualizer = None
+ warmup_ratio = 0.05
+ weight_decay = 0.01
+ work_dir = '/mnt/bn/yuxuanwang/meilong/code/projects/efficient_foundation_wsi_llava/curriculum_training/models/outputs/multi_stage2_run_stage1_token_merging/stage2d'
multi_stage2_run_stage1_token_merging/stage2d/zero_to_fp32.py ADDED
@@ -0,0 +1,760 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python
2
+
3
+ # Copyright (c) Microsoft Corporation.
4
+ # SPDX-License-Identifier: Apache-2.0
5
+
6
+ # DeepSpeed Team
7
+
8
+ # This script extracts fp32 consolidated weights from a zero 1, 2 and 3 DeepSpeed checkpoints. It gets
9
+ # copied into the top level checkpoint dir, so the user can easily do the conversion at any point in
10
+ # the future. Once extracted, the weights don't require DeepSpeed and can be used in any
11
+ # application.
12
+ #
13
+ # example:
14
+ # python zero_to_fp32.py . output_dir/
15
+ # or
16
+ # python zero_to_fp32.py . output_dir/ --safe_serialization
17
+
18
+ import argparse
19
+ import torch
20
+ import glob
21
+ import math
22
+ import os
23
+ import re
24
+ import gc
25
+ import json
26
+ import numpy as np
27
+ from tqdm import tqdm
28
+ from collections import OrderedDict
29
+ from dataclasses import dataclass
30
+
31
+ # while this script doesn't use deepspeed to recover data, since the checkpoints are pickled with
32
+ # DeepSpeed data structures it has to be available in the current python environment.
33
+ from deepspeed.utils import logger
34
+ from deepspeed.checkpoint.constants import (DS_VERSION, OPTIMIZER_STATE_DICT, SINGLE_PARTITION_OF_FP32_GROUPS,
35
+ FP32_FLAT_GROUPS, ZERO_STAGE, PARTITION_COUNT, PARAM_SHAPES, BUFFER_NAMES,
36
+ FROZEN_PARAM_SHAPES, FROZEN_PARAM_FRAGMENTS)
37
+
38
+
39
+ @dataclass
40
+ class zero_model_state:
41
+ buffers: dict()
42
+ param_shapes: dict()
43
+ shared_params: list
44
+ ds_version: int
45
+ frozen_param_shapes: dict()
46
+ frozen_param_fragments: dict()
47
+
48
+
49
+ debug = 0
50
+
51
+ # load to cpu
52
+ device = torch.device('cpu')
53
+
54
+
55
+ def atoi(text):
56
+ return int(text) if text.isdigit() else text
57
+
58
+
59
+ def natural_keys(text):
60
+ '''
61
+ alist.sort(key=natural_keys) sorts in human order
62
+ http://nedbatchelder.com/blog/200712/human_sorting.html
63
+ (See Toothy's implementation in the comments)
64
+ '''
65
+ return [atoi(c) for c in re.split(r'(\d+)', text)]
66
+
67
+
68
+ def get_model_state_file(checkpoint_dir, zero_stage):
69
+ if not os.path.isdir(checkpoint_dir):
70
+ raise FileNotFoundError(f"Directory '{checkpoint_dir}' doesn't exist")
71
+
72
+ # there should be only one file
73
+ if zero_stage <= 2:
74
+ file = os.path.join(checkpoint_dir, "mp_rank_00_model_states.pt")
75
+ elif zero_stage == 3:
76
+ file = os.path.join(checkpoint_dir, "zero_pp_rank_0_mp_rank_00_model_states.pt")
77
+
78
+ if not os.path.exists(file):
79
+ raise FileNotFoundError(f"can't find model states file at '{file}'")
80
+
81
+ return file
82
+
83
+
84
+ def get_checkpoint_files(checkpoint_dir, glob_pattern):
85
+ # XXX: need to test that this simple glob rule works for multi-node setup too
86
+ ckpt_files = sorted(glob.glob(os.path.join(checkpoint_dir, glob_pattern)), key=natural_keys)
87
+
88
+ if len(ckpt_files) == 0:
89
+ raise FileNotFoundError(f"can't find {glob_pattern} files in directory '{checkpoint_dir}'")
90
+
91
+ return ckpt_files
92
+
93
+
94
+ def get_optim_files(checkpoint_dir):
95
+ return get_checkpoint_files(checkpoint_dir, "*_optim_states.pt")
96
+
97
+
98
+ def get_model_state_files(checkpoint_dir):
99
+ return get_checkpoint_files(checkpoint_dir, "*_model_states.pt")
100
+
101
+
102
+ def parse_model_states(files):
103
+ zero_model_states = []
104
+ for file in files:
105
+ state_dict = torch.load(file, map_location=device, weights_only=False)
106
+
107
+ if BUFFER_NAMES not in state_dict:
108
+ raise ValueError(f"{file} is not a model state checkpoint")
109
+ buffer_names = state_dict[BUFFER_NAMES]
110
+ if debug:
111
+ print("Found buffers:", buffer_names)
112
+
113
+ # recover just the buffers while restoring them to fp32 if they were saved in fp16
114
+ buffers = {k: v.float() for k, v in state_dict["module"].items() if k in buffer_names}
115
+ param_shapes = state_dict[PARAM_SHAPES]
116
+
117
+ # collect parameters that are included in param_shapes
118
+ param_names = []
119
+ for s in param_shapes:
120
+ for name in s.keys():
121
+ param_names.append(name)
122
+
123
+ # update with frozen parameters
124
+ frozen_param_shapes = state_dict.get(FROZEN_PARAM_SHAPES, None)
125
+ if frozen_param_shapes is not None:
126
+ if debug:
127
+ print(f"Found frozen_param_shapes: {frozen_param_shapes}")
128
+ param_names += list(frozen_param_shapes.keys())
129
+
130
+ # handle shared params
131
+ shared_params = [[k, v] for k, v in state_dict["shared_params"].items()]
132
+
133
+ ds_version = state_dict.get(DS_VERSION, None)
134
+
135
+ frozen_param_fragments = state_dict.get(FROZEN_PARAM_FRAGMENTS, None)
136
+
137
+ z_model_state = zero_model_state(buffers=buffers,
138
+ param_shapes=param_shapes,
139
+ shared_params=shared_params,
140
+ ds_version=ds_version,
141
+ frozen_param_shapes=frozen_param_shapes,
142
+ frozen_param_fragments=frozen_param_fragments)
143
+ zero_model_states.append(z_model_state)
144
+
145
+ return zero_model_states
146
+
147
+
148
+ def parse_optim_states(files, ds_checkpoint_dir):
149
+ total_files = len(files)
150
+ state_dicts = []
151
+ for f in tqdm(files, desc='Loading checkpoint shards'):
152
+ state_dict = torch.load(f, map_location=device, mmap=True, weights_only=False)
153
+ # immediately discard the potentially huge 2 optimizer states as we only care for fp32 master weights
154
+ # and also handle the case where it was already removed by another helper script
155
+ state_dict["optimizer_state_dict"].pop("optimizer_state_dict", None)
156
+ state_dicts.append(state_dict)
157
+
158
+ if ZERO_STAGE not in state_dicts[0][OPTIMIZER_STATE_DICT]:
159
+ raise ValueError(f"{files[0]} is not a zero checkpoint")
160
+ zero_stage = state_dicts[0][OPTIMIZER_STATE_DICT][ZERO_STAGE]
161
+ world_size = state_dicts[0][OPTIMIZER_STATE_DICT][PARTITION_COUNT]
162
+
163
+ # For ZeRO-2 each param group can have different partition_count as data parallelism for expert
164
+ # parameters can be different from data parallelism for non-expert parameters. So we can just
165
+ # use the max of the partition_count to get the dp world_size.
166
+
167
+ if type(world_size) is list:
168
+ world_size = max(world_size)
169
+
170
+ if world_size != total_files:
171
+ raise ValueError(
172
+ f"Expected {world_size} of '*_optim_states.pt' under '{ds_checkpoint_dir}' but found {total_files} files. "
173
+ "Possibly due to an overwrite of an old checkpoint, or a checkpoint didn't get saved by one or more processes."
174
+ )
175
+
176
+ # the groups are named differently in each stage
177
+ if zero_stage <= 2:
178
+ fp32_groups_key = SINGLE_PARTITION_OF_FP32_GROUPS
179
+ elif zero_stage == 3:
180
+ fp32_groups_key = FP32_FLAT_GROUPS
181
+ else:
182
+ raise ValueError(f"unknown zero stage {zero_stage}")
183
+
184
+ fp32_flat_groups = [state_dicts[i][OPTIMIZER_STATE_DICT][fp32_groups_key] for i in range(len(state_dicts))]
185
+ return zero_stage, world_size, fp32_flat_groups
186
+
187
+
188
+ def _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters):
189
+ """
190
+ Returns fp32 state_dict reconstructed from ds checkpoint
191
+
192
+ Args:
193
+ - ``ds_checkpoint_dir``: path to the deepspeed checkpoint folder (where the optimizer files are)
194
+
195
+ """
196
+ print(f"Processing zero checkpoint '{ds_checkpoint_dir}'")
197
+
198
+ optim_files = get_optim_files(ds_checkpoint_dir)
199
+ zero_stage, world_size, fp32_flat_groups = parse_optim_states(optim_files, ds_checkpoint_dir)
200
+ print(f"Detected checkpoint of type zero stage {zero_stage}, world_size: {world_size}")
201
+
202
+ model_files = get_model_state_files(ds_checkpoint_dir)
203
+
204
+ zero_model_states = parse_model_states(model_files)
205
+ print(f'Parsing checkpoint created by deepspeed=={zero_model_states[0].ds_version}')
206
+
207
+ if zero_stage <= 2:
208
+ return _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
209
+ exclude_frozen_parameters)
210
+ elif zero_stage == 3:
211
+ return _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
212
+ exclude_frozen_parameters)
213
+
214
+
215
+ def _zero2_merge_frozen_params(state_dict, zero_model_states):
216
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
217
+ return
218
+
219
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
220
+ frozen_param_fragments = zero_model_states[0].frozen_param_fragments
221
+
222
+ if debug:
223
+ num_elem = sum(s.numel() for s in frozen_param_shapes.values())
224
+ print(f'rank 0: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
225
+
226
+ wanted_params = len(frozen_param_shapes)
227
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
228
+ avail_numel = sum([p.numel() for p in frozen_param_fragments.values()])
229
+ print(f'Frozen params: Have {avail_numel} numels to process.')
230
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
231
+
232
+ total_params = 0
233
+ total_numel = 0
234
+ for name, shape in frozen_param_shapes.items():
235
+ total_params += 1
236
+ unpartitioned_numel = shape.numel()
237
+ total_numel += unpartitioned_numel
238
+
239
+ state_dict[name] = frozen_param_fragments[name]
240
+
241
+ if debug:
242
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
243
+
244
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
245
+
246
+
247
+ def _has_callable(obj, fn):
248
+ attr = getattr(obj, fn, None)
249
+ return callable(attr)
250
+
251
+
252
+ def _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
253
+ param_shapes = zero_model_states[0].param_shapes
254
+
255
+ # Reconstruction protocol:
256
+ #
257
+ # XXX: document this
258
+
259
+ if debug:
260
+ for i in range(world_size):
261
+ for j in range(len(fp32_flat_groups[0])):
262
+ print(f"{FP32_FLAT_GROUPS}[{i}][{j}].shape={fp32_flat_groups[i][j].shape}")
263
+
264
+ # XXX: memory usage doubles here (zero2)
265
+ num_param_groups = len(fp32_flat_groups[0])
266
+ merged_single_partition_of_fp32_groups = []
267
+ for i in range(num_param_groups):
268
+ merged_partitions = [sd[i] for sd in fp32_flat_groups]
269
+ full_single_fp32_vector = torch.cat(merged_partitions, 0)
270
+ merged_single_partition_of_fp32_groups.append(full_single_fp32_vector)
271
+ avail_numel = sum(
272
+ [full_single_fp32_vector.numel() for full_single_fp32_vector in merged_single_partition_of_fp32_groups])
273
+
274
+ if debug:
275
+ wanted_params = sum([len(shapes) for shapes in param_shapes])
276
+ wanted_numel = sum([sum(shape.numel() for shape in shapes.values()) for shapes in param_shapes])
277
+ # not asserting if there is a mismatch due to possible padding
278
+ print(f"Have {avail_numel} numels to process.")
279
+ print(f"Need {wanted_numel} numels in {wanted_params} params.")
280
+
281
+ # params
282
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
283
+ # out-of-core computing solution
284
+ total_numel = 0
285
+ total_params = 0
286
+ for shapes, full_single_fp32_vector in zip(param_shapes, merged_single_partition_of_fp32_groups):
287
+ offset = 0
288
+ avail_numel = full_single_fp32_vector.numel()
289
+ for name, shape in shapes.items():
290
+
291
+ unpartitioned_numel = shape.numel() if _has_callable(shape, 'numel') else math.prod(shape)
292
+ total_numel += unpartitioned_numel
293
+ total_params += 1
294
+
295
+ if debug:
296
+ print(f"{name} full shape: {shape} unpartitioned numel {unpartitioned_numel} ")
297
+ state_dict[name] = full_single_fp32_vector.narrow(0, offset, unpartitioned_numel).view(shape)
298
+ offset += unpartitioned_numel
299
+
300
+ # Z2 started to align to 2*world_size to improve nccl performance. Therefore both offset and
301
+ # avail_numel can differ by anywhere between 0..2*world_size. Due to two unrelated complex
302
+ # paddings performed in the code it's almost impossible to predict the exact numbers w/o the
303
+ # live optimizer object, so we are checking that the numbers are within the right range
304
+ align_to = 2 * world_size
305
+
306
+ def zero2_align(x):
307
+ return align_to * math.ceil(x / align_to)
308
+
309
+ if debug:
310
+ print(f"original offset={offset}, avail_numel={avail_numel}")
311
+
312
+ offset = zero2_align(offset)
313
+ avail_numel = zero2_align(avail_numel)
314
+
315
+ if debug:
316
+ print(f"aligned offset={offset}, avail_numel={avail_numel}")
317
+
318
+ # Sanity check
319
+ if offset != avail_numel:
320
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
321
+
322
+ print(f"Reconstructed fp32 state dict with {total_params} params {total_numel} elements")
323
+
324
+
325
+ def _get_fp32_state_dict_from_zero2_checkpoint(world_size, fp32_flat_groups, zero_model_states,
326
+ exclude_frozen_parameters):
327
+ state_dict = OrderedDict()
328
+
329
+ # buffers
330
+ buffers = zero_model_states[0].buffers
331
+ state_dict.update(buffers)
332
+ if debug:
333
+ print(f"added {len(buffers)} buffers")
334
+
335
+ if not exclude_frozen_parameters:
336
+ _zero2_merge_frozen_params(state_dict, zero_model_states)
337
+
338
+ _zero2_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
339
+
340
+ # recover shared parameters
341
+ for pair in zero_model_states[0].shared_params:
342
+ if pair[1] in state_dict:
343
+ state_dict[pair[0]] = state_dict[pair[1]]
344
+
345
+ return state_dict
346
+
347
+
348
+ def zero3_partitioned_param_info(unpartitioned_numel, world_size):
349
+ remainder = unpartitioned_numel % world_size
350
+ padding_numel = (world_size - remainder) if remainder else 0
351
+ partitioned_numel = math.ceil(unpartitioned_numel / world_size)
352
+ return partitioned_numel, padding_numel
353
+
354
+
355
+ def _zero3_merge_frozen_params(state_dict, world_size, zero_model_states):
356
+ if zero_model_states[0].frozen_param_shapes is None or len(zero_model_states[0].frozen_param_shapes) == 0:
357
+ return
358
+
359
+ if debug:
360
+ for i in range(world_size):
361
+ num_elem = sum(s.numel() for s in zero_model_states[i].frozen_param_fragments.values())
362
+ print(f'rank {i}: {FROZEN_PARAM_SHAPES}.numel = {num_elem}')
363
+
364
+ frozen_param_shapes = zero_model_states[0].frozen_param_shapes
365
+ wanted_params = len(frozen_param_shapes)
366
+ wanted_numel = sum(s.numel() for s in frozen_param_shapes.values())
367
+ avail_numel = sum([p.numel() for p in zero_model_states[0].frozen_param_fragments.values()]) * world_size
368
+ print(f'Frozen params: Have {avail_numel} numels to process.')
369
+ print(f'Frozen params: Need {wanted_numel} numels in {wanted_params} params')
370
+
371
+ total_params = 0
372
+ total_numel = 0
373
+ for name, shape in zero_model_states[0].frozen_param_shapes.items():
374
+ total_params += 1
375
+ unpartitioned_numel = shape.numel()
376
+ total_numel += unpartitioned_numel
377
+
378
+ param_frags = tuple(model_state.frozen_param_fragments[name] for model_state in zero_model_states)
379
+ state_dict[name] = torch.cat(param_frags, 0).narrow(0, 0, unpartitioned_numel).view(shape)
380
+
381
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
382
+
383
+ if debug:
384
+ print(
385
+ f"Frozen params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
386
+ )
387
+
388
+ print(f"Reconstructed Frozen fp32 state dict with {total_params} params {total_numel} elements")
389
+
390
+
391
+ class GatheredTensor:
392
+ """
393
+ A pseudo tensor that collects partitioned weights.
394
+ It is more memory efficient when there are multiple groups.
395
+ """
396
+
397
+ def __init__(self, flat_groups, flat_groups_offset, offset, partitioned_numel, shape):
398
+ self.flat_groups = flat_groups
399
+ self.flat_groups_offset = flat_groups_offset
400
+ self.offset = offset
401
+ self.partitioned_numel = partitioned_numel
402
+ self.shape = shape
403
+ self.dtype = self.flat_groups[0][0].dtype
404
+
405
+ def contiguous(self):
406
+ """
407
+ Merge partitioned weights from flat_groups into a single tensor.
408
+ """
409
+ end_idx = self.offset + self.partitioned_numel
410
+ world_size = len(self.flat_groups)
411
+ pad_flat_param_chunks = []
412
+
413
+ for rank_i in range(world_size):
414
+ # for each rank, we need to collect weights from related group/groups
415
+ flat_groups_at_rank_i = self.flat_groups[rank_i]
416
+ start_group_id = None
417
+ end_group_id = None
418
+ for group_id in range(len(self.flat_groups_offset)):
419
+ if self.flat_groups_offset[group_id] <= self.offset < self.flat_groups_offset[group_id + 1]:
420
+ start_group_id = group_id
421
+ if self.flat_groups_offset[group_id] < end_idx <= self.flat_groups_offset[group_id + 1]:
422
+ end_group_id = group_id
423
+ break
424
+ # collect weights from related group/groups
425
+ for group_id in range(start_group_id, end_group_id + 1):
426
+ flat_tensor = flat_groups_at_rank_i[group_id]
427
+ start_offset = self.offset - self.flat_groups_offset[group_id]
428
+ end_offset = min(end_idx, self.flat_groups_offset[group_id + 1]) - self.flat_groups_offset[group_id]
429
+ pad_flat_param_chunks.append(flat_tensor[start_offset:end_offset])
430
+
431
+ # collect weights from all ranks
432
+ pad_flat_param = torch.cat(pad_flat_param_chunks, dim=0)
433
+ param = pad_flat_param[:self.shape.numel()].view(self.shape).contiguous()
434
+ return param
435
+
436
+
437
+ def _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states):
438
+ param_shapes = zero_model_states[0].param_shapes
439
+ avail_numel = sum([flat_group.numel() for flat_group in fp32_flat_groups[0]]) * world_size
440
+
441
+ # Reconstruction protocol: For zero3 we need to zip the partitions together at boundary of each
442
+ # param, re-consolidating each param, while dealing with padding if any
443
+
444
+ # merge list of dicts, preserving order
445
+ param_shapes = {k: v for d in param_shapes for k, v in d.items()}
446
+
447
+ if debug:
448
+ for i in range(world_size):
449
+ print(f"{FP32_FLAT_GROUPS}[{i}].shape={fp32_flat_groups[i].shape}")
450
+
451
+ wanted_params = len(param_shapes)
452
+ wanted_numel = sum(shape.numel() for shape in param_shapes.values())
453
+ # not asserting if there is a mismatch due to possible padding
454
+ avail_numel = fp32_flat_groups[0].numel() * world_size
455
+ print(f"Trainable params: Have {avail_numel} numels to process.")
456
+ print(f"Trainable params: Need {wanted_numel} numels in {wanted_params} params.")
457
+
458
+ # params
459
+ # XXX: for huge models that can't fit into the host's RAM we will have to recode this to support
460
+ # out-of-core computing solution
461
+ offset = 0
462
+ total_numel = 0
463
+ total_params = 0
464
+ flat_groups_offset = [0] + list(np.cumsum([flat_tensor.numel() for flat_tensor in fp32_flat_groups[0]]))
465
+ for name, shape in tqdm(param_shapes.items(), desc='Gathering sharded weights'):
466
+ unpartitioned_numel = shape.numel()
467
+ total_numel += unpartitioned_numel
468
+ total_params += 1
469
+ partitioned_numel, partitioned_padding_numel = zero3_partitioned_param_info(unpartitioned_numel, world_size)
470
+
471
+ if debug:
472
+ print(
473
+ f"Trainable params: {total_params} {name} full shape: {shape} partition0 numel={partitioned_numel} partitioned_padding_numel={partitioned_padding_numel}"
474
+ )
475
+
476
+ # memory efficient tensor
477
+ tensor = GatheredTensor(fp32_flat_groups, flat_groups_offset, offset, partitioned_numel, shape)
478
+ state_dict[name] = tensor
479
+ offset += partitioned_numel
480
+
481
+ offset *= world_size
482
+
483
+ # Sanity check
484
+ if offset != avail_numel:
485
+ raise ValueError(f"consumed {offset} numels out of {avail_numel} - something is wrong")
486
+
487
+ print(f"Reconstructed Trainable fp32 state dict with {total_params} params {total_numel} elements")
488
+
489
+
490
+ def _get_fp32_state_dict_from_zero3_checkpoint(world_size, fp32_flat_groups, zero_model_states,
+                                                exclude_frozen_parameters):
+     state_dict = OrderedDict()
+
+     # buffers
+     buffers = zero_model_states[0].buffers
+     state_dict.update(buffers)
+     if debug:
+         print(f"added {len(buffers)} buffers")
+
+     if not exclude_frozen_parameters:
+         _zero3_merge_frozen_params(state_dict, world_size, zero_model_states)
+
+     _zero3_merge_trainable_params(state_dict, world_size, fp32_flat_groups, zero_model_states)
+
+     # recover shared parameters
+     for pair in zero_model_states[0].shared_params:
+         if pair[1] in state_dict:
+             state_dict[pair[0]] = state_dict[pair[1]]
+
+     return state_dict
+
+
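+ # Note: shared_params stores [alias_name, source_name] pairs recorded at save time, so tied
+ # weights survive consolidation. As a hypothetical example, a model that ties its output head
+ # to its embeddings would record ['lm_head.weight', 'model.embed_tokens.weight'], and the loop
+ # above re-points the alias at the gathered source tensor.
+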
+ def to_torch_tensor(state_dict, return_empty_tensor=False):
+     """
+     Convert a state_dict of GatheredTensor objects to torch tensors.
+     """
+     torch_state_dict = {}
+     converted_tensors = {}
+     for name, tensor in state_dict.items():
+         tensor_id = id(tensor)
+         if tensor_id in converted_tensors:  # shared tensors
+             shared_tensor = torch_state_dict[converted_tensors[tensor_id]]
+             torch_state_dict[name] = shared_tensor
+         else:
+             converted_tensors[tensor_id] = name
+             if return_empty_tensor:
+                 torch_state_dict[name] = torch.empty(tensor.shape, dtype=tensor.dtype)
+             else:
+                 torch_state_dict[name] = tensor.contiguous()
+     return torch_state_dict
+
+
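+ # Minimal usage sketch (`ckpt_dir` is a placeholder path):
+ #
+ #   lazy_sd = get_fp32_state_dict_from_zero_checkpoint(ckpt_dir, lazy_mode=True)
+ #   full_sd = to_torch_tensor(lazy_sd)                            # gathers all weights on cpu
+ #   meta_sd = to_torch_tensor(lazy_sd, return_empty_tensor=True)  # shapes/dtypes only, no gathering
+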
+ def get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+                                              tag=None,
+                                              exclude_frozen_parameters=False,
+                                              lazy_mode=False):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated state_dict that can be loaded with
+     ``load_state_dict()`` and used for training without DeepSpeed or shared with others, for example
+     via a model hub.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the 'latest' file. e.g., ``global_step14``
+         - ``exclude_frozen_parameters``: exclude frozen parameters
+         - ``lazy_mode``: get state_dict in lazy mode. It returns a dict of pseudo tensors instead of torch tensors, which is more memory efficient.
+           Convert a pseudo tensor to a torch tensor by calling ``.contiguous()`` on it.
+
+     Returns:
+         - pytorch ``state_dict``
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+         # do the training and checkpoint saving
+         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir)  # already on cpu
+         model = model.cpu()  # move to cpu
+         model.load_state_dict(state_dict)
+         # submit to model hub or save the model to share with others
+
+     In this example the ``model`` will no longer be usable in the deepspeed context of the same
+     application. i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+
+     If you want it all done for you, use ``load_state_dict_from_zero_checkpoint`` instead.
+
+     Note: the above usage may not work if your application doesn't have sufficient free CPU memory.
+     You may need to use the offline approach using the ``zero_to_fp32.py`` script that is saved with
+     the checkpoint. Or you can load the state_dict in lazy mode ::
+
+         from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint
+         state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, lazy_mode=True)  # not on cpu
+         for name, lazy_tensor in state_dict.items():
+             tensor = lazy_tensor.contiguous()  # to cpu
+             print(name, tensor)
+             # del tensor to release memory if it is no longer in use
+     """
+     if tag is None:
+         latest_path = os.path.join(checkpoint_dir, 'latest')
+         if os.path.isfile(latest_path):
+             with open(latest_path, 'r') as fd:
+                 tag = fd.read().strip()
+         else:
+             raise ValueError(f"Unable to find 'latest' file at {latest_path}")
+
+     ds_checkpoint_dir = os.path.join(checkpoint_dir, tag)
+
+     if not os.path.isdir(ds_checkpoint_dir):
+         raise FileNotFoundError(f"Directory '{ds_checkpoint_dir}' doesn't exist")
+
+     state_dict = _get_fp32_state_dict_from_zero_checkpoint(ds_checkpoint_dir, exclude_frozen_parameters)
+     if lazy_mode:
+         return state_dict
+     else:
+         return to_torch_tensor(state_dict)
+
+
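+ # Note: the directory layout this function expects, inferred from the tag handling above,
+ # looks like:
+ #
+ #   checkpoint_dir/
+ #       latest            <- text file whose content is the tag, e.g. "global_step14"
+ #       global_step14/    <- ds_checkpoint_dir holding the zero model/optimizer state files
+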
+ def convert_zero_checkpoint_to_fp32_state_dict(checkpoint_dir,
+                                                output_dir,
+                                                max_shard_size="5GB",
+                                                safe_serialization=False,
+                                                tag=None,
+                                                exclude_frozen_parameters=False):
+     """
+     Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict`` file that can be
+     loaded with ``torch.load(file)`` + ``load_state_dict()`` and used for training without DeepSpeed.
+
+     Args:
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``output_dir``: directory for the pytorch fp32 state_dict output files
+         - ``max_shard_size``: the maximum size for a checkpoint before being sharded, default value is 5GB
+         - ``safe_serialization``: whether to save the model using ``safetensors`` or the traditional PyTorch way (that uses ``pickle``).
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+         - ``exclude_frozen_parameters``: exclude frozen parameters
+     """
+
+     # Dependency pre-check
+     if safe_serialization:
+         try:
+             from safetensors.torch import save_file
+         except ImportError:
+             print('If you want to use `safe_serialization`, please `pip install safetensors`')
+             raise
+     if max_shard_size is not None:
+         try:
+             from huggingface_hub import split_torch_state_dict_into_shards
+         except ImportError:
+             print('If you want to use `max_shard_size`, please `pip install huggingface_hub`')
+             raise
+
+     # Convert zero checkpoint to state_dict
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir,
+                                                           tag,
+                                                           exclude_frozen_parameters,
+                                                           lazy_mode=True)
+
+     # Shard the model if it is too big.
+     weights_name = "model.safetensors" if safe_serialization else "pytorch_model.bin"
+     if max_shard_size is not None:
+         filename_pattern = weights_name.replace(".bin", "{suffix}.bin").replace(".safetensors", "{suffix}.safetensors")
+         # a memory-efficient approach for sharding
+         empty_state_dict = to_torch_tensor(state_dict, return_empty_tensor=True)
+         state_dict_split = split_torch_state_dict_into_shards(empty_state_dict,
+                                                               filename_pattern=filename_pattern,
+                                                               max_shard_size=max_shard_size)
+     else:
+         from collections import namedtuple
+         StateDictSplit = namedtuple("StateDictSplit", ["is_sharded", "filename_to_tensors"])
+         state_dict_split = StateDictSplit(is_sharded=False,
+                                           filename_to_tensors={weights_name: list(state_dict.keys())})
+
+     # Save the model by shard
+     os.makedirs(output_dir, exist_ok=True)
+     filename_to_tensors = state_dict_split.filename_to_tensors.items()
+     for shard_file, tensors in tqdm(filename_to_tensors, desc="Saving checkpoint shards"):
+         shard_state_dict = {tensor_name: state_dict[tensor_name] for tensor_name in tensors}
+         shard_state_dict = to_torch_tensor(shard_state_dict)
+         output_path = os.path.join(output_dir, shard_file)
+         if safe_serialization:
+             save_file(shard_state_dict, output_path, metadata={"format": "pt"})
+         else:
+             torch.save(shard_state_dict, output_path)
+         # release the memory of the current shard
+         for tensor_name in list(shard_state_dict.keys()):
+             del state_dict[tensor_name]
+             del shard_state_dict[tensor_name]
+         del shard_state_dict
+         gc.collect()
+
+     # Save index if sharded
+     if state_dict_split.is_sharded:
+         index = {
+             "metadata": state_dict_split.metadata,
+             "weight_map": state_dict_split.tensor_to_filename,
+         }
+         save_index_file = "model.safetensors.index.json" if safe_serialization else "pytorch_model.bin.index.json"
+         save_index_file = os.path.join(output_dir, save_index_file)
+         with open(save_index_file, "w", encoding="utf-8") as f:
+             content = json.dumps(index, indent=2, sort_keys=True) + "\n"
+             f.write(content)
+
+
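+ # Minimal usage sketch (paths are placeholders):
+ #
+ #   convert_zero_checkpoint_to_fp32_state_dict("path/checkpoint-12", "path/checkpoint-12-output",
+ #                                              max_shard_size="5GB", safe_serialization=True)
+ #
+ # When sharding kicks in, this writes shards named like model-00001-of-0000N.safetensors plus a
+ # model.safetensors.index.json mapping each weight to its shard.
+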
+ def load_state_dict_from_zero_checkpoint(model, checkpoint_dir, tag=None):
+     """
+     1. Put the provided model to cpu
+     2. Convert ZeRO 2 or 3 checkpoint into a single fp32 consolidated ``state_dict``
+     3. Load it into the provided model
+
+     Args:
+         - ``model``: the model object to update
+         - ``checkpoint_dir``: path to the desired checkpoint folder. (one that contains the tag-folder, like ``global_step14``)
+         - ``tag``: checkpoint tag used as a unique identifier for checkpoint. If not provided, will attempt to load the tag from the file named ``latest`` in the checkpoint folder, e.g., ``global_step14``
+
+     Returns:
+         - ``model``: modified model
+
+     Make sure you have plenty of CPU memory available before you call this function. If you don't
+     have enough, use the ``zero_to_fp32.py`` utility to do the conversion. You will find it
+     conveniently placed for you in the checkpoint folder.
+
+     A typical usage might be ::
+
+         from deepspeed.utils.zero_to_fp32 import load_state_dict_from_zero_checkpoint
+         model = load_state_dict_from_zero_checkpoint(trainer.model, checkpoint_dir)
+         # submit to model hub or save the model to share with others
+
+     Note that once this is run, the ``model`` will no longer be usable in the deepspeed context
+     of the same application. i.e. you will need to re-initialize the deepspeed engine, since
+     ``model.load_state_dict(state_dict)`` will remove all the deepspeed magic from it.
+     """
+     logger.info("Extracting fp32 weights")
+     state_dict = get_fp32_state_dict_from_zero_checkpoint(checkpoint_dir, tag)
+
+     logger.info("Overwriting model with fp32 weights")
+     model = model.cpu()
+     model.load_state_dict(state_dict, strict=False)
+
+     return model
+
+
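+ # Note: the strict=False above tolerates keys that are absent from the consolidated
+ # state_dict (for example, when frozen parameters were excluded at conversion time);
+ # callers who want a hard guarantee can compare the returned model's state_dict keys
+ # against the consolidated dict themselves.
+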
+ if __name__ == "__main__":
+     parser = argparse.ArgumentParser()
+     parser.add_argument("checkpoint_dir",
+                         type=str,
+                         help="path to the desired checkpoint folder, e.g., path/checkpoint-12")
+     parser.add_argument("output_dir",
+                         type=str,
+                         help="directory for the pytorch fp32 state_dict output files "
+                         "(e.g. path/checkpoint-12-output/)")
+     parser.add_argument(
+         "--max_shard_size",
+         type=str,
+         default="5GB",
+         help="The maximum size for a checkpoint before being sharded. Each checkpoint shard will then be of a "
+         "size lower than this. If expressed as a string, it needs to be digits followed by a unit (like `5MB`). "
+         "We default it to 5GB so that models can run easily on free-tier Google Colab instances "
+         "without CPU OOM issues.")
+     parser.add_argument(
+         "--safe_serialization",
+         default=False,
+         action='store_true',
+         help="Whether to save the model using `safetensors` or the traditional PyTorch way (that uses `pickle`).")
+     parser.add_argument("-t",
+                         "--tag",
+                         type=str,
+                         default=None,
+                         help="checkpoint tag used as a unique identifier for checkpoint. e.g., global_step1")
+     parser.add_argument("--exclude_frozen_parameters", action='store_true', help="exclude frozen parameters")
+     parser.add_argument("-d", "--debug", action='store_true', help="enable debug")
+     args = parser.parse_args()
+
+     debug = args.debug
+
+     convert_zero_checkpoint_to_fp32_state_dict(args.checkpoint_dir,
+                                                args.output_dir,
+                                                max_shard_size=args.max_shard_size,
+                                                safe_serialization=args.safe_serialization,
+                                                tag=args.tag,
+                                                exclude_frozen_parameters=args.exclude_frozen_parameters)
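+
+ # Example invocations (paths are placeholders):
+ #
+ #   python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12-output
+ #   python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12-output --safe_serialization
+ #   python zero_to_fp32.py path/checkpoint-12 path/checkpoint-12-output -t global_step100 --max_shard_size 2GB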