Update app.py
app.py CHANGED
@@ -126,11 +126,11 @@ lang_id = [
 d_lang = lang_id[21]
 #d_lang_code = d_lang.code
 
-def trans_page(input,input2,trg):
+def trans_page(input,trg):
     src_lang = d_lang.code
     for lang in lang_id:
-
-
+        if lang.name == trg:
+            trg_lang = lang.code
     if trg_lang != src_lang:
         tokenizer.src_lang = src_lang
         with torch.no_grad():
@@ -147,7 +147,7 @@ def trans_page(input,input2,trg):
     tokenizer.src_lang = src_lang
     for langs in lang_id:
         with torch.no_grad():
-            encoded_input = tokenizer(
+            encoded_input = tokenizer(langs.name, return_tensors="pt").to(device)
             generated_tokens = model.generate(**encoded_input, forced_bos_token_id=tokenizer.get_lang_id(trg_lang))
             trans_langs = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
             new_lang.append(trans_langs)
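For context, the changed lines follow the usual M2M100 multilingual-translation pattern from the transformers library: set tokenizer.src_lang, encode the text, and pass forced_bos_token_id=tokenizer.get_lang_id(trg_lang) to model.generate(). The sketch below is a minimal, self-contained illustration of that pattern, not the Space's actual setup: the checkpoint name facebook/m2m100_418M, the translate() helper, and the example language codes are assumptions, since this hunk does not show how model, tokenizer, or lang_id are defined in app.py.

import torch
from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

# Assumed checkpoint for illustration; the Space's real model/tokenizer setup is not shown in this diff.
device = "cuda" if torch.cuda.is_available() else "cpu"
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(device)

def translate(text, src_lang, trg_lang):
    # Mirror the diff's guard: skip translation when source and target codes match.
    if trg_lang == src_lang:
        return text
    tokenizer.src_lang = src_lang                          # source language code, e.g. "en"
    encoded_input = tokenizer(text, return_tensors="pt").to(device)
    with torch.no_grad():
        generated_tokens = model.generate(
            **encoded_input,
            forced_bos_token_id=tokenizer.get_lang_id(trg_lang),  # target language code, e.g. "fr"
        )
    return tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]

print(translate("Hello, world!", "en", "fr"))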