contenteaseAI committed on
Commit e715f4c · verified · 1 Parent(s): b21d975

Update app.py

Files changed (1)
  1. app.py +194 -159
app.py CHANGED
@@ -1,160 +1,195 @@
- import spaces
- import json
- import subprocess
- from llama_cpp import Llama
- from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
- from llama_cpp_agent.providers import LlamaCppPythonProvider
- import gradio as gr
- from huggingface_hub import hf_hub_download
- import logging
- import time
-
- logging.basicConfig(level=logging.INFO)
- logger = logging.getLogger(__name__)
-
- repo_id = "QuantFactory/Meta-Llama-3-8B-Instruct-GGUF"
- filename = "Meta-Llama-3-8B-Instruct.Q8_0.gguf"
-
- try:
-     start_time = time.time()
-     logger.info("Downloading Model....")
-
-     hf_hub_download(
-         repo_id = repo_id ,
-         filename = filename,
-         local_dir="./model"
-     )
-
-     end_time = time.time()
-     logger.info(f"Download complete. Time taken : {end_time - start_time} seconds.")
-
- except Exception as e:
-     logger.error(f"Unable to download Model : {e}")
-     raise
-
- llm = None
-
- @spaces.GPU(duration=120)
- def respond(message, history, temperature, max_tokens):
-     """
-     Generate a streaming response using the llama3-8b model with chunking.
-
-     Args:
-         message (str): The input message.
-         history (list): The conversation history used by ChatInterface. - Not used.
-         temperature (float): The temperature for generating the response.
-         max_new_tokens (int): The maximum number of new tokens to generate.
-
-     Returns:
-         str: The generated response.
-     """
-
-     chat_template = MessagesFormatterType.LLAMA_3
-
-     global llm
-
-     start_time = time.time()
-     logging.info("Loading Model...")
-
-     if llm is None:
-         model = Llama(
-             model_path=f"model/{filename}",
-             flash_attn=True,
-             n_gpu_layers=-1,
-             n_batch=1,
-             n_ctx=8192,
-             last_n_tokens = 0
-         )
-         llm = model
-
-     end_time = time.time()
-     logger.info(f"Model Loaded. Time taken : {end_time - start_time} seconds.")
-
-     start_time = time.time()
-     logger.info("Loading Provider and Agent for the Llama Model....")
-
-     provider = LlamaCppPythonProvider(llm)
-
-     SYS_PROMPT ="""
-     Extract all relevant keywords and add quantity from the following text and format the result in nested JSON, ignoring personal details and focusing only on the scope of work as shown in the example:
-     Good JSON example: {'lobby': {'frcm': {'replace': {'carpet': 1, 'carpet_pad': 1, 'base': 1, 'window_treatments': 1, 'artwork_and_decorative_accessories': 1, 'portable_lighting': 1, 'upholstered_furniture_and_decorative_pillows': 1, 'millwork': 1} } } }
-     Bad JSON example: {'lobby': { 'frcm': { 'replace': [ 'carpet', 'carpet_pad', 'base', 'window_treatments', 'artwork_and_decorative_accessories', 'portable_lighting', 'upholstered_furniture_and_decorative_pillows', 'millwork'] } } }
-     Make sure to fetch details from the provided text and ignore unnecessary information. The response should be in JSON format only, without any additional comments.
-     """
-
-     agent = LlamaCppAgent(
-         provider,
-         system_prompt=SYS_PROMPT,
-         predefined_messages_formatter_type=chat_template,
-         debug_output=False
-     )
-
-     settings = provider.get_provider_default_settings()
-     settings.temperature = temperature
-     settings.max_tokens = max_tokens
-     settings.stream = True
-
-     end_time = time.time()
-     logger.info(f"Provider settings updated. Prompt Loaded.Time taken : {end_time - start_time} seconds.")
-
-     start_time = time.time()
-     logger.info("Generating responses...")
-     response = agent.get_chat_response(
-         message,
-         llm_sampling_settings=settings,
-         returns_streaming_generator = False, #generate streamer
-         print_output = False
-     )
-     logger.info(f"Responses generated. Time taken : {time.time() - start_time} seconds.")
-
-     return response
-
- DESCRIPTION = '''
- <div>
- <h1 style="text-align: center;">ContenteaseAI custom trained model</h1>
- </div>
- '''
-
- LICENSE = """
- <p/>
- ---
- For more information, visit our [website](https://contentease.ai).
- """
-
- PLACEHOLDER = """
- <div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
- <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">ContenteaseAI Custom AI trained model</h1>
- <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Enter the text extracted from the PDF:</p>
- </div>
- """
-
- css = """
- h1 {
-     text-align: center;
-     display: block;
- }
- """
- # Gradio block
- chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')
-
- with gr.Blocks(fill_height=True, css=css) as demo:
-     gr.Markdown(DESCRIPTION)
-
-     gr.ChatInterface(
-         fn=respond,
-         chatbot=chatbot,
-         fill_height=True,
-         additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
-         additional_inputs=[
-             gr.Slider(minimum=0, maximum=1, step=0.1, value=0.90, label="Temperature", render=False),
-             gr.Slider(minimum=128, maximum=2000, step=1, value=1500, label="Max new tokens", render=False),
-         ]
-     )
-
-     gr.Markdown(LICENSE)
-
- if __name__ == "__main__":
-     try:
-         demo.launch(show_error=True, debug = True)
-     except Exception as e:
+ import spaces
+ import json
+ import subprocess
+ from llama_cpp import Llama
+ from llama_cpp_agent import LlamaCppAgent, MessagesFormatterType
+ from llama_cpp_agent.providers import LlamaCppPythonProvider
+ import gradio as gr
+ from huggingface_hub import hf_hub_download
+ import logging
+ import time
+
+ logging.basicConfig(level=logging.INFO)
+ logger = logging.getLogger(__name__)
+
+ repo_id = "QuantFactory/Meta-Llama-3-8B-Instruct-GGUF"
+ filename = "Meta-Llama-3-8B-Instruct.Q8_0.gguf"
+
+ try:
+     start_time = time.time()
+     logger.info("Downloading Model....")
+
+     hf_hub_download(
+         repo_id=repo_id,
+         filename=filename,
+         local_dir="./model"
+     )
+
+     end_time = time.time()
+     logger.info(f"Download complete. Time taken: {end_time - start_time} seconds.")
+
+ except Exception as e:
+     logger.error(f"Unable to download Model: {e}")
+     raise
+
+ llm = None
+
+ @spaces.GPU(duration=120)
+ def respond(message, history, temperature, max_tokens):
+     """
+     Generate a response using the Llama-3-8B model.
+
+     Args:
+         message (str): The input message.
+         history (list): The conversation history used by ChatInterface (not used).
+         temperature (float): The sampling temperature for generating the response.
+         max_tokens (int): The maximum number of new tokens to generate.
+
+     Returns:
+         str: The generated response.
+     """
+
+     chat_template = MessagesFormatterType.LLAMA_3
+
+     global llm
+
+     start_time = time.time()
+     logger.info("Loading Model...")
+
+     # Load the model once and cache it in the module-level `llm`.
+     if llm is None:
+         model = Llama(
+             model_path=f"model/{filename}",
+             flash_attn=True,
+             n_gpu_layers=-1,
+             n_batch=1,
+             n_ctx=8192,
+             last_n_tokens=0
+         )
+         llm = model
+
+     end_time = time.time()
+     logger.info(f"Model Loaded. Time taken: {end_time - start_time} seconds.")
+
+     start_time = time.time()
+     logger.info("Loading Provider and Agent for the Llama Model....")
+
+     provider = LlamaCppPythonProvider(llm)
+
+     SYS_PROMPT = """
+     Extract the following information from the given text:
+     Identify the specific areas where the work needs to be done and add the furniture that has to be changed.
+     Do not specify the work that has to be done.
+     Format the extracted information in the following JSON structure:
+
+     {
+         "Area Type": {
+             "Furniture1": units (integer),
+             "Furniture2": units (integer),
+             ...
+         }
+     }
+
+     Requirements:
+     1. Each area type (e.g., lobby, bar, etc.) should have its own node.
+     2. List the furniture on which the work needs to be performed without specifying the work.
+     3. Specify the units as integers.
+     4. Ignore any personal information or irrelevant details.
+     5. Follow the JSON pattern strictly and ensure clarity and accuracy in the extracted information.
+
+     Example:
+
+     Given the paragraph: "In the lobby, replace 5 light fixtures and remove 2 old carpets. In the bar,
+     install 3 new tables and remove 4 broken chairs."
+
+     The JSON output should be:
+     {
+         "Lobby": {
+             "Light fixtures": 5,
+             "Old carpets": 2
+         },
+         "Bar": {
+             "New tables": 3,
+             "Broken chairs": 4
+         }
+     }
+
+     Please ensure that the output JSON is well-structured and includes only relevant details about the work to be done.
+     """
+
+     agent = LlamaCppAgent(
+         provider,
+         system_prompt=SYS_PROMPT,
+         predefined_messages_formatter_type=chat_template,
+         debug_output=False
+     )
+
+     settings = provider.get_provider_default_settings()
+     settings.temperature = temperature
+     settings.max_tokens = max_tokens
+     settings.stream = True
+
+     end_time = time.time()
+     logger.info(f"Provider settings updated. Prompt loaded. Time taken: {end_time - start_time} seconds.")
+
+     start_time = time.time()
+     logger.info("Generating responses...")
+     response = agent.get_chat_response(
+         message,
+         llm_sampling_settings=settings,
+         returns_streaming_generator=False,  # return the full response rather than a streaming generator
+         print_output=False
+     )
+     logger.info(f"Responses generated. Time taken: {time.time() - start_time} seconds.")
+
+     return response
+
+ DESCRIPTION = '''
+ <div>
+ <h1 style="text-align: center;">ContenteaseAI custom trained model</h1>
+ </div>
+ '''
+
+ LICENSE = """
+ <p/>
+ ---
+ For more information, visit our [website](https://contentease.ai).
+ """
+
+ PLACEHOLDER = """
+ <div style="padding: 30px; text-align: center; display: flex; flex-direction: column; align-items: center;">
+ <h1 style="font-size: 28px; margin-bottom: 2px; opacity: 0.55;">ContenteaseAI Custom AI trained model</h1>
+ <p style="font-size: 18px; margin-bottom: 2px; opacity: 0.65;">Enter the text extracted from the PDF:</p>
+ </div>
+ """
+
+ css = """
+ h1 {
+     text-align: center;
+     display: block;
+ }
+ """
+ # Gradio block
+ chatbot = gr.Chatbot(height=450, placeholder=PLACEHOLDER, label='Gradio ChatInterface')
+
+ with gr.Blocks(fill_height=True, css=css) as demo:
+     gr.Markdown(DESCRIPTION)
+
+     gr.ChatInterface(
+         fn=respond,
+         chatbot=chatbot,
+         fill_height=True,
+         additional_inputs_accordion=gr.Accordion(label="⚙️ Parameters", open=False, render=False),
+         additional_inputs=[
+             gr.Slider(minimum=0, maximum=1, step=0.1, value=0.90, label="Temperature", render=False),
+             gr.Slider(minimum=128, maximum=2000, step=1, value=1500, label="Max new tokens", render=False),
+         ]
+     )
+
+     gr.Markdown(LICENSE)
+
+ if __name__ == "__main__":
+     try:
+         demo.launch(show_error=True, debug=True)
+     except Exception as e:
          logger.error(f"Error launching Gradio demo: {e}")