yuzaa committed · verified · Commit 17353d1 · 1 Parent(s): c255f57

Update README.md

Files changed (1):
  1. README.md +36 −1
README.md CHANGED
@@ -358,7 +358,42 @@ question = 'Compare image 1 and image 2, tell me about the differences between i
 msgs = [{'role': 'user', 'content': [image1, image2, question]}]
 
 answer = model.chat(
-    image=None,
+    msgs=msgs,
+    tokenizer=tokenizer
+)
+print(answer)
+```
+</details>
+
+
+#### In-context few-shot learning
+<details>
+<summary> Click to view Python code running MiniCPM-V 4.5 with few-shot input. </summary>
+
+```python
+import torch
+from PIL import Image
+from transformers import AutoModel, AutoTokenizer
+
+model = AutoModel.from_pretrained('openbmb/MiniCPM-V-4_5', trust_remote_code=True,
+    attn_implementation='sdpa', torch_dtype=torch.bfloat16)
+model = model.eval().cuda()
+tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-V-4_5', trust_remote_code=True)
+
+question = "production date"
+image1 = Image.open('example1.jpg').convert('RGB')
+answer1 = "2023.08.04"
+image2 = Image.open('example2.jpg').convert('RGB')
+answer2 = "2007.04.24"
+image_test = Image.open('test.jpg').convert('RGB')
+
+msgs = [
+    {'role': 'user', 'content': [image1, question]}, {'role': 'assistant', 'content': [answer1]},
+    {'role': 'user', 'content': [image2, question]}, {'role': 'assistant', 'content': [answer2]},
+    {'role': 'user', 'content': [image_test, question]}
+]
+
+answer = model.chat(
     msgs=msgs,
     tokenizer=tokenizer
 )
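
Note on the change: the diff drops the leftover `image=None` argument, since MiniCPM-V 4.5 takes images inline in each message's `content` list rather than through a separate `image=` parameter. A minimal single-image sketch of the resulting call pattern, assuming a local file `photo.jpg` and a generic prompt (both hypothetical, not part of the README), could look like this:

```python
import torch
from PIL import Image
from transformers import AutoModel, AutoTokenizer

# Load MiniCPM-V 4.5 the same way as in the README diff above.
model = AutoModel.from_pretrained('openbmb/MiniCPM-V-4_5', trust_remote_code=True,
                                  attn_implementation='sdpa', torch_dtype=torch.bfloat16)
model = model.eval().cuda()
tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-V-4_5', trust_remote_code=True)

# Images go directly into the message content; no separate `image=` argument.
image = Image.open('photo.jpg').convert('RGB')  # hypothetical local file
msgs = [{'role': 'user', 'content': [image, 'Describe this image.']}]

answer = model.chat(msgs=msgs, tokenizer=tokenizer)
print(answer)
```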