Update app.py
app.py CHANGED
@@ -3,8 +3,17 @@ from transformers import pipeline
 
 model_id = "thrishala/mental_health_chatbot"
 
+model_id = "thrishala/mental_health_chatbot"
+
 try:
-
+    tokenizer = AutoTokenizer.from_pretrained(model_id)
+    model = AutoModelForCausalLM.from_pretrained(
+        model_id,
+        load_in_8bit=True,  # Load in 8-bit quantization
+        device_map="auto",  #Use GPU if available
+        torch_dtype=torch.float16  #Use float 16 for additional memory reduction
+    )
+    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
 except Exception as e:
     print(f"Error loading model: {e}")
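For reference, a minimal sketch of app.py as it might look after this commit. This is a hedged reconstruction, not the actual file: only `from transformers import pipeline` is visible in the hunk header, so the `import torch` and Auto-class imports below are assumptions the added lines would need in order to run; the duplicate `model_id` assignment introduced at new line 6 is dropped here since line 4 already defines it; and the content of the single removed line is not visible in the extracted diff, so it is simply omitted. Note that `load_in_8bit=True` requires the bitsandbytes package and a CUDA device, and newer transformers releases deprecate this argument in favor of passing `quantization_config=BitsAndBytesConfig(load_in_8bit=True)`.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline

model_id = "thrishala/mental_health_chatbot"

try:
    # Load the tokenizer and an 8-bit quantized model, placing layers on GPU when one is available
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForCausalLM.from_pretrained(
        model_id,
        load_in_8bit=True,          # 8-bit quantization via bitsandbytes
        device_map="auto",          # use GPU if available
        torch_dtype=torch.float16,  # float16 for additional memory reduction
    )
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
except Exception as e:
    print(f"Error loading model: {e}")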