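"""Gradio document-translation app.

A Llama-style model handles the translation; a StarChat model is used as
the fallback when the Llama call fails.
"""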
import os
import gradio as gr
from langchain.llms import HuggingFaceHub

# Repo IDs for the two models, read from the environment.
# (The original assigned both to the same variable, so the second
# os.getenv overwrote the first.)
llama_repo = os.getenv('HF_MODEL_LLAMA_REPO')
starchat_repo = os.getenv('HF_MODEL_STARCHAT_REPO')
llama_template = """[INST]<<SYS>>I want you to act as a document language translator. You translate the {source} text in the document into {target}, then you return the translated document to me AND DO NOTHING ELSE.<</SYS>>
Begin of the document:
{query}
End of the document[/INST]
{target} translated document:

"""
starchat_template = """<|system|>I want you to act as a document language translator. You translate the {source} text in the document into {target}, then you return the translated document to me AND DO NOTHING ELSE.<|end|>
Begin of the document:
{query}
End of the document<|end|>
<|assistant|>
{target} translated document:

"""

# Shared generation settings: near-deterministic sampling, long outputs,
# and stop tokens covering both chat formats.
model_kwargs = {
    "max_new_tokens": 2048,
    "temperature": 0.01,
    "truncate": 4096,
    "seed": 256,
    "stop": ["</s>", "<|endoftext|>", "<|end|>"],
}

# HuggingFaceHub reads HUGGINGFACEHUB_API_TOKEN from the environment.
llm1 = HuggingFaceHub(repo_id=llama_repo, task="text-generation", model_kwargs=model_kwargs)
llm2 = HuggingFaceHub(repo_id=starchat_repo, task="text-generation", model_kwargs=model_kwargs)

def translation(source, target, text):
    # Default to echoing the input so the UI still shows something on failure.
    response = text
    try:
        # Try the Llama model first.
        input_prompt = llama_template.replace("{source}", source)
        input_prompt = input_prompt.replace("{target}", target)
        input_prompt = input_prompt.replace("{query}", text)
        response = llm1(input_prompt)
    except Exception as e:
        # Fall back to the StarChat model if the Llama call fails.
        print(f"ERROR: Llama LLM raised {e}; falling back to StarChat")
        input_prompt = starchat_template.replace("{source}", source)
        input_prompt = input_prompt.replace("{target}", target)
        input_prompt = input_prompt.replace("{query}", text)
        response = llm2(input_prompt)
    return response

gr.Interface(
    translation,
    inputs=[
        gr.Textbox(label="Source language"),
        gr.Textbox(label="Target language"),
        gr.Textbox(label="Document text"),
    ],
    outputs="text",
).launch()