Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -27,13 +27,11 @@ model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
 tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-many-mmt")
 tokenizer.src_lang = "en_XX"
 text_l=st.text_area('Input sentence:', key=2)
-
-
-**encoded_en,
-
-)
-out_l=tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
-st.text_area(label="Output sentence:", value=out_l)
+if text_l:
+    encoded_en = tokenizer(text_l, return_tensors="pt")
+    generated_tokens = model.generate(**encoded_en,forced_bos_token_id=tokenizer.lang_code_to_id["hi_IN"])
+    out_l=tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)
+    st.text_area(label="Output sentence:", value=out_l)
 
 pipe_p=pipeline(model="ramsrigouthamg/t5_sentence_paraphraser")
 st.title("Paraphraser")