futranbg committed
Commit be5c707 · 1 Parent(s): 4a44055

Update app.py

Files changed (1): app.py +14 -2
app.py CHANGED
@@ -4,6 +4,8 @@ from langchain.llms import HuggingFaceHub
 
 llama_repo = os.getenv('HF_MODEL_LLAMA_REPO')
 starchat_repo = os.getenv('HF_MODEL_STARCHAT_REPO')
+bloom_repo = os.getenv('HF_MODEL_BLOOM_REPO')
+
 llamma_template = """<s>[INST]<<SYS>>I want you to act as document language translator. You do translation {source} texts in document into then you return to me the translated document AND DO NOTHING ELSE.<</SYS>>[/INST]
 [INST]Begin of the document:
 {query}
@@ -20,6 +22,15 @@ End of the document<|end|>
 
 """
 
+starchat_template = """Translation {source} texts into {target}. then you return to me the translated document AND DO NOTHING ELSE.<</SYS>>
+Begin of the texts:
+{query}
+End of the texts
+
+{target} translated texts:
+
+"""
+
 model_kwargs={
     "max_new_tokens":2048,
     "temperature": 0.01,
@@ -30,6 +41,7 @@ model_kwargs={
 
 llm1 = HuggingFaceHub(repo_id=llama_repo, task="text-generation", model_kwargs=model_kwargs)
 llm2 = HuggingFaceHub(repo_id=starchat_repo, task="text-generation", model_kwargs=model_kwargs)
+llm3 = HuggingFaceHub(repo_id=bloom_repo, task="text-generation", model_kwargs=model_kwargs)
 
 def translation(source, target, text):
     response = text
@@ -37,13 +49,13 @@ def translation(source, target, text):
         input_prompt = llamma_template.replace("{source}", source)
         input_prompt = input_prompt.replace("{target}", target)
         input_prompt = input_prompt.replace("{query}", text)
-        response=llm1(input_prompt)
+        response=llm3(input_prompt)
     except Exception as e:
         print(f"ERROR: LLM show {e}")
         input_prompt = starchat_template.replace("{source}", source)
         input_prompt = input_prompt.replace("{target}", target)
         input_prompt = input_prompt.replace("{query}", text)
-        response=llm2(input_prompt).replace("<|end|>","")
+        response=llm1(input_prompt).replace("<|end|>","")
    return response
 
 gr.Interface(translation, inputs=["text","text","text"], outputs="text").launch()
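
For reference, the change reorders the fallback chain inside translation(): the new BLOOM endpoint (llm3) is now tried first with the Llama-style prompt, and the Llama endpoint (llm1), fed the new starchat_template, becomes the fallback; llm2 is still constructed but is no longer called. Below is a minimal sketch, not part of the commit, that generalizes this try/except pattern to an ordered list of (llm, template) pairs. The shortened templates and the fill() / translate_with_fallback() helpers are illustrative stand-ins; the env-var names and HuggingFaceHub construction mirror app.py, and it assumes the HF_MODEL_*_REPO variables plus a valid HUGGINGFACEHUB_API_TOKEN are set for the legacy langchain.llms.HuggingFaceHub wrapper.

# Minimal sketch, not part of the commit: the same try-then-fall-back logic,
# generalized to an ordered list of (llm, template) pairs.
import os

from langchain.llms import HuggingFaceHub  # legacy wrapper, as used in app.py

# Stand-in prompts; the real llamma_template / starchat_template are the
# multi-line strings defined in app.py above.
llamma_template = "<s>[INST]Translate this {source} document into {target}:\n{query}[/INST]"
starchat_template = "Translate {source} texts into {target}:\n{query}\n{target} translated texts:\n"

model_kwargs = {"max_new_tokens": 2048, "temperature": 0.01}

# Same construction as the committed code (HUGGINGFACEHUB_API_TOKEN must be set).
llm_bloom = HuggingFaceHub(repo_id=os.getenv("HF_MODEL_BLOOM_REPO"),
                           task="text-generation", model_kwargs=model_kwargs)
llm_llama = HuggingFaceHub(repo_id=os.getenv("HF_MODEL_LLAMA_REPO"),
                           task="text-generation", model_kwargs=model_kwargs)

def fill(template, source, target, query):
    # Mirrors the committed code's chained str.replace calls.
    return (template.replace("{source}", source)
                    .replace("{target}", target)
                    .replace("{query}", query))

def translate_with_fallback(source, target, text, chain):
    # Try each (llm, template) pair in order. If every model fails, return
    # the input text unchanged, matching translation()'s response = text.
    for llm, template in chain:
        try:
            # Stripping "<|end|>" is a no-op when the token is absent.
            return llm(fill(template, source, target, text)).replace("<|end|>", "")
        except Exception as e:
            print(f"ERROR: LLM raised {e}; trying next model")
    return text

# The commit's ordering: BLOOM with the Llama-style prompt first, then the
# Llama endpoint with the StarChat-style prompt as fallback.
print(translate_with_fallback("English", "French", "Hello, world.",
                              [(llm_bloom, llamma_template),
                               (llm_llama, starchat_template)]))

One detail worth flagging in the committed templates: starchat_template ends its first line with a stray <</SYS>> closing tag, apparently carried over from the Llama prompt format, with no matching <<SYS>> opener.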