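"""Gradio translation demo.

Wraps a Hugging Face Hub text-generation model (selected via the HF_MODEL_REPO
environment variable) in a LangChain LLMChain with a Llama-style translation
prompt, and exposes it through a simple three-field Gradio interface.
"""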
import os
import gradio as gr
from langchain.llms import HuggingFaceHub
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain

model_repo = os.getenv('HF_MODEL_REPO')
eos_string = "</s>"
# Llama-2 style instruction prompt: the system block sets the translation task.
template = """<s>[INST]<<SYS>>You work as a translator. Your job is to translate user requests from {source} to {target}.<</SYS>>
{query}[/INST]</s>\n"""

prompt = PromptTemplate(template=template, input_variables=["source", "target", "query"])

# Generation settings passed through to the Hugging Face Inference API.
model_kwargs = {
    "max_new_tokens": 2048,
    "temperature": 0.5,
    "stop": ["</s>", "<|endoftext|>", "<|end|>"],
}

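# HuggingFaceHub reads the HUGGINGFACEHUB_API_TOKEN environment variable (or an
# explicit huggingfacehub_api_token argument) to authenticate with the Inference API.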
llm = HuggingFaceHub(repo_id=model_repo, task="text-generation", model_kwargs=model_kwargs)
chain = LLMChain(prompt=prompt, llm=llm)

def translation(source, target, text):
    # Fill all three prompt variables and run the chain.
    response = chain.run(source=source, target=target, query=text)
    # Keep only the text generated before the end-of-sequence token.
    return response.partition(eos_string)[0]

gr.Interface(fn=translation, inputs=["text", "text", "text"], outputs="text").launch()
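# Example launch (assumed values: HF_MODEL_REPO should point at an instruction-tuned
# chat model that understands the [INST]/<<SYS>> prompt format, e.g. a Llama-2 chat
# checkpoint; the file name app.py is assumed):
#
#   export HF_MODEL_REPO=meta-llama/Llama-2-7b-chat-hf
#   export HUGGINGFACEHUB_API_TOKEN=hf_...
#   python app.py
#
# Then enter a source language, a target language, and the text to translate into
# the three input boxes, e.g. "English", "French", "Good morning".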