futranbg committed on
Commit
ef6eea4
·
1 Parent(s): 052ad9b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -12
app.py CHANGED
@@ -1,27 +1,31 @@
1
  import os
2
  import gradio as gr
3
  from langchain.llms import HuggingFaceHub
4
- from langchain.prompts import PromptTemplate
5
- from langchain.chains import LLMChain
6
 
7
  model_repo = os.getenv('HF_MODEL_REPO')
8
- eos_string = "</s>"
9
- template = """<s>[INST]<<SYS>>You work as nation language translator. You job is translate user request from {source} to {target}<<SYS>>[/INST]</s>
10
- <s>[INST]{query}[/INST]</s>\n"""
 
 
11
 
12
- prompt = PromptTemplate(template=template, input_variables=["source","target","query"])
13
 
14
  model_kwargs={
15
- "max_new_tokens":2048,
16
- "temperature":0.5,
17
- "stop" : ["</s>","<|endoftext|>","<|end|>"]
 
18
  }
19
 
20
  llm = HuggingFaceHub(repo_id=model_repo, task="text-generation", model_kwargs=model_kwargs)
21
- chain = LLMChain(prompt=prompt, llm=llm)
22
 
23
  def translation(source, target, text):
24
- response=chain.run(source=source,target=target,query=text)
25
- return response.partition(eos_string)[0]
 
 
 
 
26
 
27
  gr.Interface(translation, inputs=["text","text","text"], outputs="text").launch()
 
1
  import os
2
  import gradio as gr
3
  from langchain.llms import HuggingFaceHub
 
 
4
 
5
# Hub repository to query; configured through the HF_MODEL_REPO env variable.
model_repo = os.getenv('HF_MODEL_REPO')

# Llama-2-style chat prompt. The {source}/{target}/{query} markers are plain
# placeholder substrings substituted via str.replace() in translation() below.
template = """[INST]<<SYS>>I want you to be a translator. You do translate {source} texts in the context into {target} then you return to me the whole translated context AND DO NOTHING ELSE.<</SYS>>
Begin of the context:
{query}
End of the context.[/INST]
{target} translation of the context:

"""

# Generation parameters forwarded to the hosted text-generation endpoint.
model_kwargs = {
    "max_new_tokens": 2048,
    "temperature": 0.01,  # very low temperature: keep output close to greedy decoding
    "truncate": 4096,     # presumably the max accepted input length — TODO confirm endpoint semantics
    "stop": ["</s>", "<|endoftext|>", "<|end|>"],
}

# LangChain wrapper around the remote Hugging Face Hub model.
llm = HuggingFaceHub(repo_id=model_repo, task="text-generation", model_kwargs=model_kwargs)
 
22
 
23
def translation(source, target, text):
    """Translate ``text`` from the ``source`` language into the ``target`` language.

    Fills the module-level prompt ``template`` and sends the resulting prompt
    to the hosted model via ``llm``, returning the raw generated string.
    """
    # Substitute the placeholders one at a time with plain str.replace
    # (not str.format), so braces inside the user's text are never
    # interpreted.  {query} is filled last, matching the original order.
    prompt = template
    for placeholder, value in (
        ("{source}", source),
        ("{target}", target),
        ("{query}", text),
    ):
        prompt = prompt.replace(placeholder, value)
    print(prompt)  # debug: show the exact prompt sent to the model
    return llm(prompt)
30
 
31
# Three free-text inputs (source language, target language, text to translate)
# mapped to a single text output; launching starts the Gradio web app.
demo = gr.Interface(
    fn=translation,
    inputs=["text", "text", "text"],
    outputs="text",
)
demo.launch()