File size: 2,285 Bytes
48f8356
 
 
 
f5d37e2
 
be5c707
 
4a44055
 
6321fb3
4a44055
40058f2
48f8356
73aeabd
 
 
 
 
 
 
 
ef6eea4
48f8356
be5c707
 
 
 
 
 
 
 
 
48f8356
ef6eea4
 
 
a60ea1d
ef6eea4
48f8356
 
f5d37e2
 
be5c707
48f8356
 
73aeabd
 
 
 
 
be5c707
73aeabd
 
 
 
 
be5c707
ef6eea4
48f8356
a446815
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
import os
import gradio as gr
from langchain.llms import HuggingFaceHub

# Hugging Face Hub repository IDs for the candidate models, taken from the
# environment. Any of these may be None if the variable is unset, which will
# surface as an error when the HuggingFaceHub client is constructed below.
llama_repo = os.getenv('HF_MODEL_LLAMA_REPO')
starchat_repo = os.getenv('HF_MODEL_STARCHAT_REPO')
bloom_repo = os.getenv('HF_MODEL_BLOOM_REPO')

# Llama-2 chat-style prompt. The {source}, {target} and {query} placeholders
# are filled via str.replace() in translation() (not str.format, so literal
# braces in user text are safe).
# NOTE(review): "llamma" is a typo for "llama", but the name is referenced in
# translation() — renaming it is a coordinated change, so it is kept as-is.
llamma_template = """<s>[INST]<<SYS>>I want you to act as document language translator. You do translation {source} texts in document into then you return to me the translated document AND DO NOTHING ELSE.<</SYS>>[/INST]
[INST]Begin of the document:
{query}
End of the document.[/INST]
{target} translated document:

"""
# NOTE(review): this first starchat_template (with <|system|>/<|end|> chat
# markers) is dead code — it is immediately overwritten by the second
# assignment below. The stray "<</SYS>>" also looks like a copy-paste
# leftover from the llama template. Confirm which version is intended.
starchat_template = """<|system|>I want you to act as document language translator. You do translation {source} texts in document into then you return to me the translated document AND DO NOTHING ELSE.<</SYS>>
Begin of the document:
{query}
End of the document<|end|>
<|assistant|>
{target} translated document:

"""

# Effective starchat_template: a plain, marker-free prompt. This is the value
# actually used by translation()'s fallback path.
starchat_template = """Translation {source} texts into {target}. then you return to me the translated document AND DO NOTHING ELSE.<</SYS>>
Begin of the texts:
{query}
End of the texts

{target} translated texts:

"""

# Generation parameters shared by all three models: near-deterministic output
# (temperature 0.01, fixed seed), input truncated to 4096 tokens, up to 2048
# new tokens, and stop sequences covering the llama ("</s>"), bloom
# ("<|endoftext|>") and starchat ("<|end|>") end-of-text tokens.
model_kwargs={
            "max_new_tokens":2048,
            "temperature": 0.01,
            "truncate": 4096,
            "seed" : 256,
            "stop" : ["</s>","<|endoftext|>","<|end|>"],
            }

# Remote text-generation clients. Authentication comes from the standard
# HUGGINGFACEHUB_API_TOKEN environment variable read by langchain.
# llm3 (bloom) is the primary model used in translation(); llm1 (llama) is
# the fallback; llm2 (starchat) is constructed but not used anywhere in this
# file — presumably kept for experimentation (NOTE(review): confirm).
llm1 = HuggingFaceHub(repo_id=llama_repo, task="text-generation", model_kwargs=model_kwargs)
llm2 = HuggingFaceHub(repo_id=starchat_repo, task="text-generation", model_kwargs=model_kwargs)
llm3 = HuggingFaceHub(repo_id=bloom_repo, task="text-generation", model_kwargs=model_kwargs)

def translation(source, target, text):
    """Translate ``text`` from the ``source`` language into ``target``.

    Tries the primary model (``llm3``, bloom) with the llama-style prompt
    first; on any error, logs it and falls back to ``llm1`` with the plain
    ``starchat_template`` prompt. If both models fail, the original text is
    returned unchanged rather than raising.

    Parameters are plain strings (language names and the document body);
    the return value is the translated text as a string.
    """
    def _fill(template):
        # str.replace (not str.format) is deliberate: user text may contain
        # literal curly braces, which would break str.format.
        return (template
                .replace("{source}", source)
                .replace("{target}", target)
                .replace("{query}", text))

    try:
        # Primary path: bloom model with the llama-style chat prompt.
        return llm3(_fill(llamma_template))
    except Exception as e:
        print(f"ERROR: LLM show {e}")
        try:
            # Fallback: llama model with the plain prompt; strip any
            # starchat end-of-turn token the model might emit.
            return llm1(_fill(starchat_template)).replace("<|end|>", "")
        except Exception as e:
            # Best effort exhausted — return the input untranslated instead
            # of crashing the Gradio handler.
            print(f"ERROR: fallback LLM show {e}")
            return text

gr.Interface(translation, inputs=["text","text","text"], outputs="text").launch()