Update app.py
app.py CHANGED
@@ -21,6 +21,9 @@ try:
     tokenizer = AutoTokenizer.from_pretrained(model_id)
     tokenizer.model_max_length = 512
 
+    dummy_input = tokenizer("This is a test.", return_tensors="pt").to(model.device)
+    model.generate(input_ids=dummy_input.input_ids, return_dict=True)  # Dummy call
+
 except Exception as e:
     print(f"Error loading model: {e}")
     exit()
@@ -87,4 +90,4 @@ demo = gr.ChatInterface(
 )
 
 if __name__ == "__main__":
-    demo.launch()
+    demo.launch()
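For context, the three added lines run one throwaway generation immediately after the tokenizer is configured, so any model, tokenizer, or device problem is raised inside the existing try/except at startup rather than on the first real chat request. Below is a minimal sketch of how the surrounding block in app.py might look after this change; the model_id placeholder and the AutoModelForCausalLM class are assumptions, since the diff only shows the lines inside the try block.

# Sketch of the model-loading block after this commit (hypothetical context:
# the real model_id and model class are not visible in the diff above).
from transformers import AutoModelForCausalLM, AutoTokenizer

model_id = "your-model-id"  # placeholder; the actual id is defined earlier in app.py

try:
    # Assumption: a causal LM is loaded into `model` before the lines shown in the hunk.
    model = AutoModelForCausalLM.from_pretrained(model_id)
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    tokenizer.model_max_length = 512

    # Added by this commit: a dummy generate call acting as a warm-up / sanity check,
    # so loading errors are caught by the except below instead of surfacing mid-chat.
    dummy_input = tokenizer("This is a test.", return_tensors="pt").to(model.device)
    model.generate(input_ids=dummy_input.input_ids, return_dict=True)  # Dummy call

except Exception as e:
    print(f"Error loading model: {e}")
    exit()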