Wendy-Fly committed · Commit 8a2fe71 · verified · 1 Parent(s): 4369fe7

Upload infer_qwen2_vl.py with huggingface_hub

Files changed (1): infer_qwen2_vl.py (+20 -8)
infer_qwen2_vl.py CHANGED
@@ -91,18 +91,30 @@ for batch_idx in tqdm(range(begin, end, batch_size)):
     inputs = inputs.to("cuda")

     # Inference: Generation of the output
-    generated_ids = model.generate(**inputs, max_new_tokens=8192)
-    generated_ids_trimmed = [
-        out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
-    ]
-    output_text = processor.batch_decode(
-        generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
-    )
+    ans = []
+    for x in range(len(inputs)):
+        print(f"Generating {x}th image")
+        generated_ids = model.generate(**x, max_new_tokens=8192)
+        generated_ids_trimmed = [
+            out_ids[len(in_ids) :] for in_ids, out_ids in zip(x.input_ids, generated_ids)
+        ]
+        output_text = processor.batch_decode(
+            generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=True
+        )
+        ans.append(output_text)
+
+    # generated_ids = model.generate(**inputs, max_new_tokens=8192)
+    # generated_ids_trimmed = [
+    #     out_ids[len(in_ids) :] for in_ids, out_ids in zip(inputs.input_ids, generated_ids)
+    # ]
+    # output_text = processor.batch_decode(
+    #     generated_ids_trimmed, skip_special_tokens=True, clean_up_tokenization_spaces=False
+    # )
     #ans.append(output_text)
     save_path = "output.json"
     counter = counter + 1
     if counter % 1 == 0:
         print(f"Saving data at iteration {idx + 1}")
-    write_json(save_path, data)
+    write_json(save_path, ans)
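Note on the committed loop: `x` is an `int` drawn from `range(len(inputs))`, so `model.generate(**x, ...)` and `x.input_ids` would raise a `TypeError` at runtime (and `len(inputs)` on a `BatchFeature` counts its keys, not the batch size). Below is a minimal sketch of what per-sample generation presumably intends, re-running the processor for one conversation at a time. `messages_batch` is a hypothetical stand-in for the per-batch message lists built earlier in the script; `process_vision_info` comes from `qwen_vl_utils` as in the standard Qwen2-VL example code.

    # Hypothetical sketch: per-sample generation for Qwen2-VL. Assumes `model`
    # and `processor` are loaded as earlier in the script, and `messages_batch`
    # (hypothetical name) holds one chat-message list per sample.
    from qwen_vl_utils import process_vision_info

    ans = []
    for i, messages in enumerate(messages_batch):
        print(f"Generating sample {i}")
        # Rebuild the model inputs for this single sample instead of
        # unpacking a loop index with **x.
        text = processor.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        image_inputs, video_inputs = process_vision_info(messages)
        sample_inputs = processor(
            text=[text],
            images=image_inputs,
            videos=video_inputs,
            padding=True,
            return_tensors="pt",
        ).to("cuda")
        generated_ids = model.generate(**sample_inputs, max_new_tokens=8192)
        # Trim the prompt tokens so only the newly generated text is decoded.
        generated_ids_trimmed = [
            out_ids[len(in_ids):]
            for in_ids, out_ids in zip(sample_inputs.input_ids, generated_ids)
        ]
        output_text = processor.batch_decode(
            generated_ids_trimmed,
            skip_special_tokens=True,
            clean_up_tokenization_spaces=False,
        )
        ans.append(output_text)

The sketch re-runs the processor per sample rather than slicing the already-batched `inputs`, because Qwen2-VL's `pixel_values` is a flattened patch tensor shared across the batch and is not trivially separable per sample.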