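"""Multilingual chat Space.

Detects the language of each prompt, translates it to English, runs a
4-bit-quantized model through LangChain's ChatHuggingFace, and translates
the reply back to the user's language.
"""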
import re

import gradio as gr
from langchain_huggingface import ChatHuggingFace, HuggingFacePipeline
from transformers import BitsAndBytesConfig
from deep_translator import (
    GoogleTranslator,
    PonsTranslator,
    LingueeTranslator,
    MyMemoryTranslator,
    YandexTranslator,
    DeeplTranslator,
    QcriTranslator,
    single_detection,
    batch_detection,
)
from pyaspeller import YandexSpeller

def error_correct_pyspeller(sample_text):
    """Grammar and spelling correction of the input text via Yandex Speller."""
    speller = YandexSpeller()
    fixed = speller.spelled(sample_text)
    return fixed

def postprocessing(inp_text: str):
    """Post-process the LLM response before showing it to the user."""
    inp_text = re.sub('<[^>]+>', '', inp_text)  # strip any HTML tags
    inp_text = inp_text.split('##', 1)[0]       # keep only text before the first '##'
    inp_text = error_correct_pyspeller(inp_text)
    return inp_text
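
# Quantize the model to 4-bit NF4 with float16 compute and nested (double)
# quantization, trading a little accuracy for a large memory saving.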
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype="float16",
    bnb_4bit_use_double_quant=True,
)
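
# Load the model as a text-generation pipeline; the sampling settings in
# pipeline_kwargs are forwarded to the underlying transformers pipeline.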
llm = HuggingFacePipeline.from_model_id(
    model_id="Danielrahmai1991/nvlm_adapt_basic_model_16bit",
    task="text-generation",
    pipeline_kwargs=dict(
        max_new_tokens=512,
        do_sample=True,
        repetition_penalty=1.15,
        trust_remote_code=True,
        temperature=0.75,
    ),
    model_kwargs={"quantization_config": quantization_config},
)
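# Wrap the pipeline so it accepts chat-style {"role": ..., "content": ...}
# messages and applies the model's chat template.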
chat_model = ChatHuggingFace(llm=llm)

def clear_memory(messages):
    """Reset the conversation history stored in the Gradio state."""
    messages.clear()
    return "Memory cleaned."
def llm_run(prompt, messages):
    """One chat turn: translate in, generate, translate out."""
    print(f"question is {prompt}")
    lang = single_detection(prompt, api_key='4ab77f25578d450f0902fb42c66d5e11')
    if lang == 'en':
        prompt = error_correct_pyspeller(prompt)
    en_translated = GoogleTranslator(source='auto', target='en').translate(prompt)
    messages.append({"role": "user", "content": en_translated})
    ai_msg = chat_model.invoke(messages, skip_prompt=True)
    response_of_llm = postprocessing(ai_msg.content)
    messages.append({"role": "assistant", "content": response_of_llm})
    response_of_llm = GoogleTranslator(source='auto', target=lang).translate(response_of_llm)
    print(f"out is: {response_of_llm}")
    return response_of_llm
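
# Quick manual smoke test (hypothetical usage, once the model has loaded):
#   history = []
#   print(llm_run("Hello, how are you?", history))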
print("donnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnnn")
with gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.orange,
                                       secondary_hue=gr.themes.colors.pink)) as demo:
    stored_message = gr.State([])  # conversation history shared across turns
    with gr.Row():
        with gr.Column(scale=2):
            text1 = gr.Textbox(lines=7, label="Prompt", scale=2)
            with gr.Row():
                btn1 = gr.Button("Submit", scale=1)
                btn2 = gr.Button("Clear", scale=1)
                btn3 = gr.Button("Clean Memory", scale=2)
        with gr.Column(scale=2):
            out_text = gr.Text(lines=15, label="Output", scale=2)
    btn1.click(fn=llm_run, inputs=[text1, stored_message], outputs=out_text)
    btn2.click(lambda: [None, None], outputs=[text1, out_text])
    btn3.click(fn=clear_memory, inputs=[stored_message], outputs=[out_text])
demo.launch(debug=True, share=True)