Update model loading with English language support and system prompt
- Replace Spanish print statements with English equivalents
- Add a system prompt to the model message generation
- Improve language handling for the chatbot
app.py CHANGED

@@ -21,7 +21,7 @@ try:
     tokenizer = AutoTokenizer.from_pretrained(model_name)
 
     if device == "cuda":
-        print("
+        print("Using GPU for the model...")
         model = AutoModelForCausalLM.from_pretrained(
             model_name,
             torch_dtype=torch.bfloat16,
@@ -29,16 +29,16 @@ try:
             low_cpu_mem_usage=True
         )
     else:
-        print("
+        print("Using CPU for the model...")
         model = AutoModelForCausalLM.from_pretrained(
             model_name,
             device_map={"": device},
             torch_dtype=torch.float32
         )
 
-    print(f"
+    print(f"Model loaded successfully on: {device}")
 except Exception as e:
-    print(f"Error
+    print(f"Error loading the model: {str(e)}")
     raise
 
 # Define the function that calls the model
@@ -53,7 +53,10 @@ def call_model(state: MessagesState):
         dict: A dictionary containing the generated text and the thread ID
     """
     # Convert LangChain messages to chat format
-    messages = [
+    messages = [
+        {"role": "system", "content": "You are a friendly Chatbot. Always reply in the language in which the user is writing to you."}
+    ]
+
     for msg in state["messages"]:
         if isinstance(msg, HumanMessage):
             messages.append({"role": "user", "content": msg.content})