Danielrahmai1991 committed on
Commit
8d2abc9
·
verified ·
1 Parent(s): 6dc75fc

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +103 -102
app.py CHANGED
@@ -1,109 +1,81 @@
1
- # import gradio as gr
2
-
3
- # from langchain_huggingface import ChatHuggingFace, HuggingFacePipeline
4
- # from transformers import BitsAndBytesConfig
5
- # import re
6
- # from deep_translator import (GoogleTranslator,
7
- # PonsTranslator,
8
- # LingueeTranslator,
9
- # MyMemoryTranslator,
10
- # YandexTranslator,
11
- # DeeplTranslator,
12
- # QcriTranslator,
13
- # single_detection,
14
- # batch_detection)
15
- # from pyaspeller import YandexSpeller
16
-
17
- # def error_correct_pyspeller(sample_text):
18
- # """ grammer correction of input text"""
19
- # speller = YandexSpeller()
20
- # fixed = speller.spelled(sample_text)
21
- # return fixed
22
-
23
- # def postprocerssing(inp_text: str):
24
- # """Post preocessing of the llm response"""
25
- # inp_text = re.sub('<[^>]+>', '', inp_text)
26
- # inp_text = inp_text.split('##', 1)[0]
27
- # inp_text = error_correct_pyspeller(inp_text)
28
- # return inp_text
29
-
30
-
31
- # quantization_config = BitsAndBytesConfig(
32
- # load_in_4bit=True,
33
- # bnb_4bit_quant_type="nf4",
34
- # bnb_4bit_compute_dtype="float16",
35
- # bnb_4bit_use_double_quant=True,
36
- # )
37
-
38
- # llm = HuggingFacePipeline.from_model_id(
39
- # model_id="Danielrahmai1991/nvlm_adapt_basic_model_16bit",
40
- # task="text-generation",
41
- # pipeline_kwargs=dict(
42
- # max_new_tokens=512,
43
- # do_sample=True,
44
- # repetition_penalty=1.15,
45
- # trust_remote_code= True,
46
- # temperature= 0.70
47
-
48
- # ),
49
- # model_kwargs={"quantization_config": quantization_config,
50
- # },
51
- # )
52
-
53
- # chat_model = ChatHuggingFace(llm=llm)
54
-
55
- # # history of the messages
56
-
57
- # def clear_memory(messages):
58
- # messages.clear()
59
- # return "Memory cleaned."
60
-
61
- # def llm_run(prompt, messages):
62
- # print(f"question is {prompt}")
63
- # lang = single_detection(prompt, api_key='4ab77f25578d450f0902fb42c66d5e11')
64
- # if lang == 'en':
65
- # prompt = error_correct_pyspeller(prompt)
66
- # en_translated = GoogleTranslator(source='auto', target='en').translate(prompt)
67
- # messages.append({"role": "user", "content": en_translated})
68
- # ai_msg = chat_model.invoke(messages, skip_prompt = True)
69
- # response_of_llm = postprocerssing(ai_msg.content)
70
- # messages.append({"role": "assistant", "content": response_of_llm})
71
- # response_of_llm = GoogleTranslator(source='auto', target=lang).translate(response_of_llm)
72
- # print(f"out is: {response_of_llm}")
73
- # return response_of_llm
74
-
75
- # # def greet(prompt, m_type):
76
- # # return "hi"
77
-
78
- # with gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.orange, secondary_hue=gr.themes.colors.pink)) as demo:
79
- # stored_message = gr.State([])
80
- # with gr.Row():
81
- # with gr.Column(scale=2):
82
- # text1 = gr.Textbox(lines=7, label="Prompt", scale=2)
83
- # with gr.Row():
84
- # btn1 = gr.Button("Submit", scale=1)
85
- # btn2 = gr.Button("Clear", scale=1)
86
- # btn3 = gr.Button("Clean Memory", scale=2)
87
- # with gr.Column(scale=2):
88
- # out_text = gr.Text(lines=15, label="Output", scale=2)
89
- # btn1.click(fn=llm_run, inputs=[text1, stored_message], outputs=out_text)
90
- # btn2.click(lambda: [None, None], outputs=[text1, out_text])
91
- # btn3.click(fn=clear_memory, inputs=[stored_message], outputs=[out_text])
92
-
93
- # # demo = gr.Interface(fn=llm_run, inputs=["text"], outputs="text")
94
- # demo.launch(debug=True, share=True)
95
-
96
  import gradio as gr
97
 
98
- def greet(inp, messages):
99
- messages.append(inp)
100
- print(messages)
101
- return "Hello " + inp + "!"
102
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
103
  def clear_memory(messages):
104
  messages.clear()
105
  return "Memory cleaned."
106
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
107
  with gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.orange, secondary_hue=gr.themes.colors.pink)) as demo:
108
  stored_message = gr.State([])
109
  with gr.Row():
@@ -115,9 +87,38 @@ with gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.orange, seco
115
  btn3 = gr.Button("Clean Memory", scale=2)
116
  with gr.Column(scale=2):
117
  out_text = gr.Text(lines=15, label="Output", scale=2)
118
- btn1.click(fn=greet, inputs=[text1, stored_message], outputs=out_text)
119
  btn2.click(lambda: [None, None], outputs=[text1, out_text])
120
  btn3.click(fn=clear_memory, inputs=[stored_message], outputs=[out_text])
121
- demo.launch()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
 
123
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  import gradio as gr
2
 
3
+ from langchain_huggingface import ChatHuggingFace, HuggingFacePipeline
4
+ from transformers import BitsAndBytesConfig
5
+ import re
6
+ from deep_translator import (GoogleTranslator,
7
+ PonsTranslator,
8
+ LingueeTranslator,
9
+ MyMemoryTranslator,
10
+ YandexTranslator,
11
+ DeeplTranslator,
12
+ QcriTranslator,
13
+ single_detection,
14
+ batch_detection)
15
+ from pyaspeller import YandexSpeller
16
+
17
def error_correct_pyspeller(sample_text):
    """Spell-correct *sample_text* using the Yandex Speller service."""
    return YandexSpeller().spelled(sample_text)
22
+
23
def postprocerssing(inp_text: str):
    """Clean an LLM response: strip HTML-like tags, keep only the text
    before the first '##' marker, then spell-correct the remainder."""
    without_tags = re.sub('<[^>]+>', '', inp_text)
    head = without_tags.split('##', 1)[0]
    return error_correct_pyspeller(head)
29
+
30
+
31
# 4-bit NF4 quantization config so the model fits in limited GPU memory;
# double quantization trades a little speed for extra memory savings.
quantization_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_compute_dtype="float16",
    bnb_4bit_use_double_quant=True,
)

# Text-generation pipeline around the fine-tuned model, with sampling on
# (temperature 0.70) and a repetition penalty to reduce loops.
llm = HuggingFacePipeline.from_model_id(
    model_id="Danielrahmai1991/nvlm_adapt_basic_model_16bit",
    task="text-generation",
    pipeline_kwargs=dict(
        max_new_tokens=512,
        do_sample=True,
        repetition_penalty=1.15,
        trust_remote_code= True,
        temperature= 0.70

    ),
    model_kwargs={"quantization_config": quantization_config,
                  },
)

# Chat-style wrapper so the pipeline can be invoked with role/content
# message dicts instead of raw prompt strings.
chat_model = ChatHuggingFace(llm=llm)

# history of the messages
56
+
57
def clear_memory(messages):
    """Empty the shared chat-history list in place and confirm it."""
    del messages[:]
    return "Memory cleaned."
60
 
61
def llm_run(prompt, messages):
    """Answer *prompt* in its own language via the English-only chat model.

    Pipeline: detect the prompt's language, translate it to English, query
    the chat model, post-process the reply, then translate it back.

    Side effects: appends both the (translated) user turn and the assistant
    turn to the shared *messages* history list.
    """
    import os
    print(f"question is {prompt}")
    # SECURITY: the detect-language API key was hard-coded; prefer the
    # environment, falling back to the original literal for compatibility.
    api_key = os.environ.get("DETECTLANGUAGE_API_KEY",
                             "4ab77f25578d450f0902fb42c66d5e11")
    lang = single_detection(prompt, api_key=api_key)
    if lang == 'en':
        # The spell-corrector is only applied to English prompts.
        prompt = error_correct_pyspeller(prompt)
    en_translated = GoogleTranslator(source='auto', target='en').translate(prompt)
    messages.append({"role": "user", "content": en_translated})
    ai_msg = chat_model.invoke(messages, skip_prompt=True)
    response_of_llm = postprocerssing(ai_msg.content)
    messages.append({"role": "assistant", "content": response_of_llm})
    # Translate the cleaned answer back into the user's detected language.
    response_of_llm = GoogleTranslator(source='auto', target=lang).translate(response_of_llm)
    print(f"out is: {response_of_llm}")
    return response_of_llm
74
+
75
+ # def greet(prompt, m_type):
76
+ # return "hi"
77
# Build the Gradio UI: prompt box plus Submit / Clear / Clean-Memory buttons
# on the left, model output on the right.  `stored_message` holds the chat
# history for the lifetime of the session.  (Removed a leftover junk debug
# print that ran at import time.)
with gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.orange, secondary_hue=gr.themes.colors.pink)) as demo:
    stored_message = gr.State([])
    with gr.Row():
        with gr.Column(scale=2):
            text1 = gr.Textbox(lines=7, label="Prompt", scale=2)
            with gr.Row():
                btn1 = gr.Button("Submit", scale=1)
                btn2 = gr.Button("Clear", scale=1)
                btn3 = gr.Button("Clean Memory", scale=2)
        with gr.Column(scale=2):
            out_text = gr.Text(lines=15, label="Output", scale=2)
    btn1.click(fn=llm_run, inputs=[text1, stored_message], outputs=out_text)
    btn2.click(lambda: [None, None], outputs=[text1, out_text])
    btn3.click(fn=clear_memory, inputs=[stored_message], outputs=[out_text])

# share=True exposes a public tunnel URL; debug=True keeps the process
# attached so errors surface in the console.
demo.launch(debug=True, share=True)
96
+
97
+ # import gradio as gr
98
+
99
+ # def greet(inp, messages):
100
+ # messages.append(inp)
101
+ # print(messages)
102
+ # return "Hello " + inp + "!"
103
+
104
+ # def clear_memory(messages):
105
+ # messages.clear()
106
+ # return "Memory cleaned."
107
+
108
+ # with gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.orange, secondary_hue=gr.themes.colors.pink)) as demo:
109
+ # stored_message = gr.State([])
110
+ # with gr.Row():
111
+ # with gr.Column(scale=2):
112
+ # text1 = gr.Textbox(lines=7, label="Prompt", scale=2)
113
+ # with gr.Row():
114
+ # btn1 = gr.Button("Submit", scale=1)
115
+ # btn2 = gr.Button("Clear", scale=1)
116
+ # btn3 = gr.Button("Clean Memory", scale=2)
117
+ # with gr.Column(scale=2):
118
+ # out_text = gr.Text(lines=15, label="Output", scale=2)
119
+ # btn1.click(fn=greet, inputs=[text1, stored_message], outputs=out_text)
120
+ # btn2.click(lambda: [None, None], outputs=[text1, out_text])
121
+ # btn3.click(fn=clear_memory, inputs=[stored_message], outputs=[out_text])
122
+ # demo.launch()
123
 
124