Added Gradio translation app
Browse files
- app.py +3 -4
- requirements.txt +1 -0
app.py
CHANGED
@@ -12,10 +12,10 @@ def load_model():
|
|
12 |
global tokenizer, model
|
13 |
try:
|
14 |
tokenizer = AutoTokenizer.from_pretrained(checkpoint_path)
|
15 |
-
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint_path)
|
16 |
model.to(device)
|
17 |
model.eval()
|
18 |
-
print(f"Model loaded successfully!")
|
19 |
except Exception as e:
|
20 |
print(f"Error loading model: {e}")
|
21 |
|
@@ -165,5 +165,4 @@ with gr.Blocks(css=css) as demo:
|
|
165 |
)
|
166 |
|
167 |
if __name__ == "__main__":
|
168 |
-
|
169 |
-
demo.launch(share=True, server_name="127.0.0.1", server_port=5000)
|
|
|
12 |
global tokenizer, model
|
13 |
try:
|
14 |
tokenizer = AutoTokenizer.from_pretrained(checkpoint_path)
|
15 |
+
model = AutoModelForSeq2SeqLM.from_pretrained(checkpoint_path, torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32)
|
16 |
model.to(device)
|
17 |
model.eval()
|
18 |
+
print(f"Model loaded successfully on {device}!")
|
19 |
except Exception as e:
|
20 |
print(f"Error loading model: {e}")
|
21 |
|
|
|
165 |
)
|
166 |
|
167 |
if __name__ == "__main__":
|
168 |
+
demo.launch(server_name="0.0.0.0", server_port=7860)
|
|
requirements.txt
CHANGED
@@ -2,3 +2,4 @@ gradio
|
|
2 |
torch
|
3 |
transformers
|
4 |
huggingface_hub
|
|
|
|
2 |
torch
|
3 |
transformers
|
4 |
huggingface_hub
|
5 |
+
sentencepiece
|