czczup committed on
Commit eca52d9
1 Parent(s): 95a0dbd

Upload folder using huggingface_hub

Files changed (1)
  1. README.md +30 -4
README.md CHANGED
@@ -166,6 +166,32 @@ def load_image(image_file, input_size=448, max_num=6):
     return pixel_values
 
 
+def split_model(model_name):
+    device_map = {}
+    world_size = torch.cuda.device_count()
+    num_layers = {'InternVL2-8B': 32, 'InternVL2-26B': 48, 'InternVL-Chat-V1-5': 48,
+                  'InternVL2-40B': 60, 'InternVL2-Llama3-76B': 80,}[model_name]
+    # Since the first GPU will be used for ViT, treat it as half a GPU.
+    num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5))
+    num_layers_per_gpu = [num_layers_per_gpu] * world_size
+    num_layers_per_gpu[0] = math.ceil(num_layers_per_gpu[0] * 0.5)
+    layer_cnt = 0
+    for i, num_layer in enumerate(num_layers_per_gpu):
+        for j in range(num_layer):
+            device_map[f'language_model.model.layers.{layer_cnt}'] = i
+            layer_cnt += 1
+    device_map['vision_model'] = 0
+    device_map['mlp1'] = 0
+    device_map['language_model.model.tok_embeddings'] = 0
+    device_map['language_model.model.embed_tokens'] = 0
+    device_map['language_model.output'] = 0
+    device_map['language_model.model.norm'] = 0
+    device_map['language_model.lm_head'] = 0
+    device_map[f'language_model.model.layers.{num_layers - 1}'] = 0
+
+    return device_map
+
+
 path = 'OpenGVLab/InternVL-Chat-V1-5'
 # If you have an 80G A100 GPU, you can put the entire model on a single GPU.
 model = AutoModel.from_pretrained(
@@ -173,15 +199,15 @@ model = AutoModel.from_pretrained(
     torch_dtype=torch.bfloat16,
     low_cpu_mem_usage=True,
     trust_remote_code=True).eval().cuda()
-# Otherwise, you need to set device_map='auto' to use multiple GPUs for inference.
-# import os
-# os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
+# Otherwise, you need to set device_map to use multiple GPUs for inference.
+# device_map = split_model('InternVL-Chat-V1-5')
+# print(device_map)
 # model = AutoModel.from_pretrained(
 #     path,
 #     torch_dtype=torch.bfloat16,
 #     low_cpu_mem_usage=True,
 #     trust_remote_code=True,
-#     device_map='auto').eval()
+#     device_map=device_map).eval()
 
 tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)
 # set the max number of tiles in `max_num`
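
For intuition, here is a minimal sketch of the layer-splitting arithmetic the new `split_model` helper performs, with `world_size` fixed to a hypothetical 4-GPU host (the real helper reads `torch.cuda.device_count()` at runtime):

import math

world_size = 4   # assumed for illustration
num_layers = 48  # InternVL-Chat-V1-5, per the lookup table in split_model

# GPU 0 also hosts the ViT and embeddings, so it is counted as half a GPU.
num_layers_per_gpu = math.ceil(num_layers / (world_size - 0.5))  # ceil(48 / 3.5) = 14
layers_per_gpu = [num_layers_per_gpu] * world_size
layers_per_gpu[0] = math.ceil(layers_per_gpu[0] * 0.5)           # GPU 0 keeps 7

print(layers_per_gpu)  # [7, 14, 14, 14]: 49 slots for 48 layers, so the
                       # last GPU simply ends up one layer short

`split_model` then pins `vision_model`, `mlp1`, the embeddings, `norm`, `lm_head`, and the final transformer layer to GPU 0, presumably so the last hidden states land on the same device as the output head.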
 
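Assembled from the commented-out lines above, the multi-GPU path this commit documents would read roughly as follows. This is a sketch, not part of the diff; it assumes a multi-GPU host and that `split_model` (which needs `import math`) is defined as in the README:

import torch
from transformers import AutoModel, AutoTokenizer

path = 'OpenGVLab/InternVL-Chat-V1-5'
device_map = split_model('InternVL-Chat-V1-5')  # helper added in this commit
model = AutoModel.from_pretrained(
    path,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=True,
    trust_remote_code=True,
    device_map=device_map).eval()  # no .cuda(): the device_map places each shard
tokenizer = AutoTokenizer.from_pretrained(path, trust_remote_code=True)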