asdjghh committed on
Commit
3cb97e7
·
verified ·
1 Parent(s): dd6daa0

Upload share_gpt4o.py with huggingface_hub

Files changed (1)
  1. share_gpt4o.py +166 -0
share_gpt4o.py ADDED
@@ -0,0 +1,166 @@
+ import os
+ import PIL.Image
+ import torch
+ import numpy as np
+ from transformers import AutoModelForCausalLM
+ from janus.models import MultiModalityCausalLM, VLChatProcessor
+ from dataclasses import dataclass
+
+
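+ # Lightweight container mirroring the per-sample fields that vl_chat_processor.batchify expects.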
+ @dataclass
+ class VLChatProcessorOutput:
+     sft_format: str
+     input_ids: torch.Tensor
+     pixel_values: torch.Tensor
+     num_image_tokens: torch.IntTensor
+
+     def __len__(self):
+         return len(self.input_ids)
+
+
+ def process_image(image_paths, vl_chat_processor):
+     images = [PIL.Image.open(image_path).convert("RGB") for image_path in image_paths]
+     images_outputs = vl_chat_processor.image_processor(images, return_tensors="pt")
+     return images_outputs['pixel_values']
+
+
+ # Load model and processor
+ model_path = "/data5/czh/bxh/test_1/slice_end"
+ vl_chat_processor: VLChatProcessor = VLChatProcessor.from_pretrained(model_path)
+ tokenizer = vl_chat_processor.tokenizer
+ vl_gpt: MultiModalityCausalLM = AutoModelForCausalLM.from_pretrained(
+     model_path, trust_remote_code=True, torch_dtype=torch.bfloat16
+ )
+ vl_gpt = vl_gpt.cuda().eval()
+
+
+ # Define text+image-to-image generation function
+ def text_and_image_to_image_generate(input_prompt, input_image_path, output_path, vl_chat_processor, vl_gpt,
+                                      temperature=1.0, parallel_size=2, cfg_weight=5, cfg_weight2=5):
+     torch.cuda.empty_cache()
+
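+     # Each input image occupies two placeholder slots in the prompt: a block of image tags
+     # (filled with understanding-encoder features by prepare_inputs_embeds) and a block of pad
+     # tags (overwritten below with the VQ generation embeddings of the same image).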
+     input_img_tokens = (
+         vl_chat_processor.image_start_tag
+         + vl_chat_processor.image_tag * vl_chat_processor.num_image_tokens
+         + vl_chat_processor.image_end_tag
+         + vl_chat_processor.image_start_tag
+         + vl_chat_processor.pad_tag * vl_chat_processor.num_image_tokens
+         + vl_chat_processor.image_end_tag
+     )
+     output_img_tokens = vl_chat_processor.image_start_tag
+
+     pre_data = []
+     input_images = [input_image_path]
+     img_len = len(input_images)
+     prompts = input_img_tokens * img_len + input_prompt
+     conversation = [
+         {"role": "<|User|>", "content": prompts},
+         {"role": "<|Assistant|>", "content": ""}
+     ]
+     sft_format = vl_chat_processor.apply_sft_template_for_multi_turn_prompts(
+         conversations=conversation,
+         sft_format=vl_chat_processor.sft_format,
+         system_prompt="",
+     )
+
+     sft_format = sft_format + output_img_tokens
+     print('sft_format: ', len(sft_format))
+
+     mmgpt = vl_gpt
+
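+     # A 384x384 image on a 16-pixel grid gives (384 // 16) ** 2 = 576 tokens per generated image.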
+     image_token_num_per_image = 576
+     img_size = 384
+     patch_size = 16
+
+     with torch.inference_mode():
+         input_image_pixel_values = process_image(input_images, vl_chat_processor).to(torch.bfloat16).cuda()
+         quant_input, emb_loss_input, info_input = mmgpt.gen_vision_model.encode(input_image_pixel_values)
+         image_tokens_input = info_input[2].detach().reshape(input_image_pixel_values.shape[0], -1)
+         image_embeds_input = mmgpt.prepare_gen_img_embeds(image_tokens_input)
+
+         input_ids = torch.LongTensor(vl_chat_processor.tokenizer.encode(sft_format))
+         print('input_ids.shape: ', input_ids.shape)
+         encoder_pixel_values = process_image(input_images, vl_chat_processor).cuda()
+         print('encoder: ', encoder_pixel_values[0][0][0][:2])
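+         # Build three token streams per sample for classifier-free guidance:
+         # full conditional, partial conditional, and unconditional (prompt replaced by pad_id).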
+         tokens = torch.zeros((parallel_size * 3, len(input_ids)), dtype=torch.long)
+         for i in range(parallel_size * 3):
+             tokens[i, :] = input_ids
+             if i % 3 == 2:
+                 tokens[i, 1:-1] = vl_chat_processor.pad_id
+                 print(vl_chat_processor.pad_id)
+                 pre_data.append(VLChatProcessorOutput(sft_format=sft_format, pixel_values=encoder_pixel_values,
+                                                       input_ids=tokens[i - 2],
+                                                       num_image_tokens=[vl_chat_processor.num_image_tokens] * img_len))
+                 pre_data.append(VLChatProcessorOutput(sft_format=sft_format, pixel_values=encoder_pixel_values,
+                                                       input_ids=tokens[i - 1],
+                                                       num_image_tokens=[vl_chat_processor.num_image_tokens] * img_len))
+                 pre_data.append(VLChatProcessorOutput(sft_format=sft_format, pixel_values=None, input_ids=tokens[i],
+                                                       num_image_tokens=[]))
+
+         prepare_inputs = vl_chat_processor.batchify(pre_data)
+
+         inputs_embeds = mmgpt.prepare_inputs_embeds(
+             input_ids=tokens.cuda(),
+             pixel_values=prepare_inputs['pixel_values'].to(torch.bfloat16).cuda(),
+             images_emb_mask=prepare_inputs['images_emb_mask'].cuda(),
+             images_seq_mask=prepare_inputs['images_seq_mask'].cuda()
+         )
+
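+         # For the fully conditional stream only (every 4th image_end match), overwrite the
+         # pad-filled slot after the first image_end tag with the input image's VQ embeddings.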
+         image_gen_indices = (tokens == vl_chat_processor.image_end_id).nonzero()
+         print(inputs_embeds.shape)
+         print(inputs_embeds[0][0][:2])
+         print(image_embeds_input[0][0][:2])
+         for ii, ind in enumerate(image_gen_indices):
+             print('image_end index: ', ii, ind)
+             if ii % 4 == 0:
+                 offset = ind[1] + 2
+                 inputs_embeds[ind[0], offset: offset + image_embeds_input.shape[1], :] = image_embeds_input[
+                     (ii // 2) % img_len]
+
+         generated_tokens = torch.zeros((parallel_size, image_token_num_per_image), dtype=torch.int).cuda()
+
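+         # Autoregressive sampling: one image token per step, reusing the KV cache after step 0.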
+         for i in range(image_token_num_per_image):
+             outputs = mmgpt.language_model.model(inputs_embeds=inputs_embeds, use_cache=True,
+                                                  past_key_values=outputs.past_key_values if i != 0 else None)
+             hidden_states = outputs.last_hidden_state
+             if i == 0:
+                 print('DAS', hidden_states.shape)
+                 # torch.save(inputs_embeds, '/data/bxh_data/unify_model/share.pt')
+
+             logits = mmgpt.gen_head(hidden_states[:, -1, :])
+             print('logits: ', logits.shape)
+             logit_cond_full = logits[0::3, :]
+             logit_cond_part = logits[1::3, :]
+             logit_uncond = logits[2::3, :]
+
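+             # Two-stage guidance: blend the two conditional streams with cfg_weight2, then apply
+             # standard CFG against the unconditional stream with cfg_weight.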
+             logit_cond = (logit_cond_full + cfg_weight2 * logit_cond_part) / (1 + cfg_weight2)
+             logits = logit_uncond + cfg_weight * (logit_cond - logit_uncond)
+             probs = torch.softmax(logits / temperature, dim=-1)
+
+             next_token = torch.multinomial(probs, num_samples=1)
+             generated_tokens[:, i] = next_token.squeeze(dim=-1)
+
+             # Feed the same sampled token back into all three streams for the next step.
+             next_token = torch.cat(
+                 [next_token.unsqueeze(dim=1), next_token.unsqueeze(dim=1), next_token.unsqueeze(dim=1)],
+                 dim=1).view(-1)
+             img_embeds = mmgpt.prepare_gen_img_embeds(next_token)
+             inputs_embeds = img_embeds.unsqueeze(dim=1)
+
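+         # Decode the sampled token grid back to pixels with the VQ decoder and save each image.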
+         dec = mmgpt.gen_vision_model.decode_code(generated_tokens.to(dtype=torch.int),
+                                                  shape=[parallel_size, 8, img_size // patch_size,
+                                                         img_size // patch_size])
+         dec = dec.to(torch.float32).cpu().numpy().transpose(0, 2, 3, 1)
+
+         dec = np.clip((dec + 1) / 2 * 255, 0, 255)
+
+         visual_img = np.zeros((parallel_size, img_size, img_size, 3), dtype=np.uint8)
+         visual_img[:, :, :] = dec
+
+         output_images = []
+         for i in range(parallel_size):
+             save_path = output_path.replace('.png', '') + f'_{i}.png'
+             PIL.Image.fromarray(visual_img[i]).save(save_path)
+             output_images.append(save_path)
+         return output_images
+
+
+ # Run
+ prompt = "Add an airplane in the sky."
+ input_image_path = "/data5/czh/bxh/SEED-Data-Edit-Part2-3/multi_turn_editing/images/data/20240318_278P_1069turns/Data/297/9934d3ef3944453da459e9012cdd6a03.jpg"
+ image_output_path = "test_1.png"
+ text_and_image_to_image_generate(prompt, input_image_path, image_output_path, vl_chat_processor, vl_gpt,
+                                  parallel_size=1)