wangchongyi
committed on
Commit
·
7584097
1
Parent(s):
11bfb79
add stream demo
Browse files
README.md
CHANGED
@@ -105,6 +105,7 @@ tokenizer = AutoTokenizer.from_pretrained('openbmb/MiniCPM-V-4_5', trust_remote_
|
|
105 |
image = Image.open('./assets/minicpmo2_6/show_demo.jpg').convert('RGB')
|
106 |
|
107 |
enable_thinking=False # If `enable_thinking=True`, the thinking mode is enabled.
|
|
|
108 |
|
109 |
# First round chat
|
110 |
question = "What is the landform in the picture?"
|
@@ -113,9 +114,14 @@ msgs = [{'role': 'user', 'content': [image, question]}]
|
|
113 |
answer = model.chat(
|
114 |
msgs=msgs,
|
115 |
tokenizer=tokenizer,
|
116 |
-
enable_thinking=enable_thinking
|
|
|
117 |
)
|
118 |
-
|
|
|
|
|
|
|
|
|
119 |
|
120 |
# Second round chat, pass history context of multi-turn conversation
|
121 |
msgs.append({"role": "assistant", "content": [answer]})
|
@@ -123,9 +129,14 @@ msgs.append({"role": "user", "content": ["What should I pay attention to when tr
|
|
123 |
|
124 |
answer = model.chat(
|
125 |
msgs=msgs,
|
126 |
-
tokenizer=tokenizer
|
|
|
127 |
)
|
128 |
-
|
|
|
|
|
|
|
|
|
129 |
```
|
130 |
|
131 |
You will get the following output:
|
|
|
105 |
image = Image.open('./assets/minicpmo2_6/show_demo.jpg').convert('RGB')
|
106 |
|
107 |
enable_thinking=False # If `enable_thinking=True`, the thinking mode is enabled.
|
108 |
+
stream=True # If `stream=True`, the answer is a generator of string chunks
|
109 |
|
110 |
# First round chat
|
111 |
question = "What is the landform in the picture?"
|
|
|
114 |
answer = model.chat(
|
115 |
msgs=msgs,
|
116 |
tokenizer=tokenizer,
|
117 |
+
enable_thinking=enable_thinking,
|
118 |
+
stream=True
|
119 |
)
|
120 |
+
|
121 |
+
generated_text = ""
|
122 |
+
for new_text in answer:
|
123 |
+
generated_text += new_text
|
124 |
+
print(new_text, flush=True, end='')
|
125 |
|
126 |
# Second round chat, pass history context of multi-turn conversation
|
127 |
msgs.append({"role": "assistant", "content": [generated_text]})
|
|
|
129 |
|
130 |
answer = model.chat(
|
131 |
msgs=msgs,
|
132 |
+
tokenizer=tokenizer,
|
133 |
+
stream=True
|
134 |
)
|
135 |
+
|
136 |
+
generated_text = ""
|
137 |
+
for new_text in answer:
|
138 |
+
generated_text += new_text
|
139 |
+
print(new_text, flush=True, end='')
|
140 |
```
|
141 |
|
142 |
You will get the following output:
|