Update qwen_model.py
qwen_model.py  +9 -27  CHANGED
@@ -18,35 +18,17 @@ qwen_pipeline = pipeline(
     tokenizer=tokenizer
 )
 
-def generate_response(retrieved_texts, query, max_new_tokens=
-    """
-    Generates a response based on the retrieved texts and query using Qwen.
-    Args:
-        retrieved_texts (list): List of retrieved text strings (e.g., from BLIP).
-        query (str): The user's question about the image.
-        max_new_tokens (int): Maximum tokens to generate for the answer.
-    Returns:
-        str: The generated answer.
-    """
-    # Construct a prompt that includes the image details as context
+def generate_response(retrieved_texts, query, max_new_tokens=200):
     context = "\n".join(retrieved_texts)
     prompt = f"This is the detail about the image:\n{context}\n\nQuestion: {query}\nAnswer:"
-
-
-
-
-
-
-        temperature=0.7,  # tweak as needed
-    )
-
-    # The pipeline returns a list of dicts with key "generated_text"
-    full_generation = result[0]["generated_text"]
-
-    # Optionally parse out the final answer if the model repeats the prompt
-    if "Answer:" in full_generation:
-        final_answer = full_generation.split("Answer:")[-1].strip()
+
+    result = qwen_pipeline(prompt, max_new_tokens=max_new_tokens, ...)
+    generated_text = result[0]["generated_text"]
+
+    if "Answer:" in generated_text:
+        final_answer = generated_text.split("Answer:")[-1].strip()
     else:
-        final_answer =
+        final_answer = generated_text
 
     return final_answer
+
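For reference, the hunk above starts mid-file and the "..." in the qwen_pipeline call stands for generation arguments that this commit does not show. Below is a minimal sketch of how the updated module could read end to end; the model id and the sampled generation kwargs (do_sample, temperature) are assumptions for illustration, not taken from the diff.

from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

# Model id is an assumption; the commit does not show which Qwen checkpoint is loaded.
model_name = "Qwen/Qwen2.5-1.5B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

qwen_pipeline = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer
)

def generate_response(retrieved_texts, query, max_new_tokens=200):
    # Join the retrieved image descriptions (e.g. BLIP captions) into one context block.
    context = "\n".join(retrieved_texts)
    prompt = f"This is the detail about the image:\n{context}\n\nQuestion: {query}\nAnswer:"

    # Stand-in for the arguments elided by "..." in the diff; sampling settings are assumed.
    result = qwen_pipeline(
        prompt,
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=0.7,
    )
    generated_text = result[0]["generated_text"]

    # The text-generation pipeline echoes the prompt, so keep only the part after "Answer:".
    if "Answer:" in generated_text:
        final_answer = generated_text.split("Answer:")[-1].strip()
    else:
        final_answer = generated_text

    return final_answer

Example call, assuming BLIP captions are passed as the retrieved texts: generate_response(["a dog running on a beach"], "What animal is in the picture?") returns only the text after the final "Answer:" marker.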