hmrizal committed
Commit afb99d9 · verified · 1 Parent(s): 4555e56

first-deepseek-coder-1.3b-instruct

Files changed (1): app.py +392 -0
app.py ADDED
@@ -0,0 +1,392 @@
+ import gradio as gr
+ import os
+ import uuid
+ import threading
+ import pandas as pd
+ import torch
+ from langchain.document_loaders import CSVLoader
+ from langchain.embeddings import HuggingFaceEmbeddings
+ from langchain.vectorstores import FAISS
+ from langchain.llms import HuggingFacePipeline
+ from langchain.chains import LLMChain
+ from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
+ from langchain.prompts import PromptTemplate
+
+ # Global model cache
+ MODEL_CACHE = {
+     "model": None,
+     "tokenizer": None,
+     "init_lock": threading.Lock()
+ }
+
+ # Create directories for user data
+ os.makedirs("user_data", exist_ok=True)
+
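+ # The dict + Lock above acts as a process-wide singleton: the first caller
+ # populates "model"/"tokenizer" under "init_lock", and later callers reuse
+ # the same objects instead of loading duplicate copies of the weights.
+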
+ def initialize_model_once():
+     """Initialize the model once and cache it"""
+     with MODEL_CACHE["init_lock"]:
+         if MODEL_CACHE["model"] is None:
+             # Use a smaller model for CPU environment
+             model_name = "deepseek-ai/deepseek-coder-1.3b-instruct"
+
+             # Load tokenizer and model with CPU-friendly configuration
+             MODEL_CACHE["tokenizer"] = AutoTokenizer.from_pretrained(model_name)
+             MODEL_CACHE["model"] = AutoModelForCausalLM.from_pretrained(
+                 model_name,
+                 torch_dtype=torch.float32,  # Use float32 for CPU
+                 device_map="auto",
+                 low_cpu_mem_usage=True,  # Optimize for low memory
+                 trust_remote_code=True
+             )
+
+     return MODEL_CACHE["tokenizer"], MODEL_CACHE["model"]
+
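+ # Hypothetical smoke test for the cached loader (assumes enough RAM for the
+ # 1.3B checkpoint); calling it twice should hand back the same objects:
+ #   tok1, mdl1 = initialize_model_once()
+ #   tok2, mdl2 = initialize_model_once()
+ #   assert mdl1 is mdl2
+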
+ def create_llm_pipeline():
+     """Create a new pipeline using the cached model"""
+     tokenizer, model = initialize_model_once()
+
+     # Create a CPU-friendly pipeline
+     pipe = pipeline(
+         "text-generation",
+         model=model,
+         tokenizer=tokenizer,
+         max_new_tokens=256,  # Reduced for faster responses
+         temperature=0.3,
+         top_p=0.9,
+         top_k=30,
+         repetition_penalty=1.2,
+         return_full_text=False,
+     )
+
+     # Wrap pipeline in HuggingFacePipeline for LangChain compatibility
+     return HuggingFacePipeline(pipeline=pipe)
+
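+ # The wrapper behaves like any LangChain LLM, so a quick check such as
+ # create_llm_pipeline()("Halo") should return generated text (hypothetical
+ # usage; the app itself only calls it through LLMChain below).
+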
+ def create_conversational_chain(db, file_path):
+     llm = create_llm_pipeline()
+
+     # Load the file into pandas so common statistics can be computed directly
+     df = pd.read_csv(file_path)
+
+     # Improved prompt template that pushes the model toward direct answers, not code
+     template = """
+ Berikut ini adalah informasi tentang file CSV:
+
+ Kolom-kolom dalam file: {columns}
+
+ Beberapa baris pertama:
+ {sample_data}
+
+ Konteks tambahan dari vector database:
+ {context}
+
+ Pertanyaan: {question}
+
+ INSTRUKSI PENTING:
+ 1. Jangan tampilkan kode Python, berikan jawaban langsung dalam Bahasa Indonesia.
+ 2. Jika pertanyaan terkait statistik data (rata-rata, maksimum dll), lakukan perhitungan dan berikan hasilnya.
+ 3. Jawaban harus singkat, jelas dan akurat berdasarkan data yang ada.
+ 4. Gunakan format yang sesuai untuk angka (desimal 2 digit untuk nilai non-integer).
+ 5. Jangan menyebutkan proses perhitungan, fokus pada hasil akhir.
+
+ Jawaban:
+ """
+
+     PROMPT = PromptTemplate(
+         template=template,
+         input_variables=["columns", "sample_data", "context", "question"]
+     )
+
+     # Create retriever
+     retriever = db.as_retriever(search_kwargs={"k": 3})  # Reduced k for better performance
+
+     # Process query with better error handling
+     def process_query(query, chat_history):
+         try:
+             # Get information from dataframe for context
+             columns_str = ", ".join(df.columns.tolist())
+             sample_data = df.head(2).to_string()  # Reduced to 2 rows for performance
+
+             # Get context from vector database
+             docs = retriever.get_relevant_documents(query)
+             context = "\n\n".join([doc.page_content for doc in docs])
+
+             # Dynamically calculate answers for common statistical queries
+             def preprocess_query():
+                 query_lower = query.lower()
+                 result = None
+
+                 # Handle statistical queries directly
+                 if "rata-rata" in query_lower or "mean" in query_lower or "average" in query_lower:
+                     for col in df.columns:
+                         if col.lower() in query_lower and pd.api.types.is_numeric_dtype(df[col]):
+                             try:
+                                 result = f"Rata-rata {col} adalah {df[col].mean():.2f}"
+                             except Exception:
+                                 pass
+
+                 elif "maksimum" in query_lower or "max" in query_lower or "tertinggi" in query_lower:
+                     for col in df.columns:
+                         if col.lower() in query_lower and pd.api.types.is_numeric_dtype(df[col]):
+                             try:
+                                 result = f"Nilai maksimum {col} adalah {df[col].max():.2f}"
+                             except Exception:
+                                 pass
+
+                 elif "minimum" in query_lower or "min" in query_lower or "terendah" in query_lower:
+                     for col in df.columns:
+                         if col.lower() in query_lower and pd.api.types.is_numeric_dtype(df[col]):
+                             try:
+                                 result = f"Nilai minimum {col} adalah {df[col].min():.2f}"
+                             except Exception:
+                                 pass
+
+                 elif "total" in query_lower or "jumlah" in query_lower or "sum" in query_lower:
+                     for col in df.columns:
+                         if col.lower() in query_lower and pd.api.types.is_numeric_dtype(df[col]):
+                             try:
+                                 result = f"Total {col} adalah {df[col].sum():.2f}"
+                             except Exception:
+                                 pass
+
+                 elif "baris" in query_lower or "jumlah data" in query_lower or "row" in query_lower:
+                     result = f"Jumlah baris data adalah {len(df)}"
+
+                 elif "kolom" in query_lower or "field" in query_lower:
+                     if "nama" in query_lower or "list" in query_lower or "sebutkan" in query_lower:
+                         result = f"Kolom dalam data: {', '.join(df.columns.tolist())}"
+
+                 return result
+
+             # Try direct calculation first
+             direct_answer = preprocess_query()
+             if direct_answer:
+                 return {"answer": direct_answer}
+
+             # If no direct calculation, use the LLM
+             chain = LLMChain(llm=llm, prompt=PROMPT)
+             raw_result = chain.run(
+                 columns=columns_str,
+                 sample_data=sample_data,
+                 context=context,
+                 question=query
+             )
+
+             # Clean the result
+             cleaned_result = raw_result.strip()
+
+             # If result is empty after cleaning, use a fallback
+             if not cleaned_result:
+                 return {"answer": "Tidak dapat memproses jawaban. Silakan coba pertanyaan lain."}
+
+             return {"answer": cleaned_result}
+         except Exception as e:
+             import traceback
+             print(f"Error in process_query: {str(e)}")
+             print(traceback.format_exc())
+             return {"answer": f"Terjadi kesalahan saat memproses pertanyaan: {str(e)}"}
+
+     return process_query
+
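+ # The returned closure is what ChatBot.chat() invokes below:
+ #   result = self.chain(message, self.chat_history)
+ # and result["answer"] carries the reply (with a fallback if it is missing).
+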
+ class ChatBot:
+     def __init__(self, session_id):
+         self.session_id = session_id
+         self.chat_history = []
+         self.chain = None
+         self.user_dir = f"user_data/{session_id}"
+         self.csv_file_path = None
+         os.makedirs(self.user_dir, exist_ok=True)
+
+     def process_file(self, file):
+         if file is None:
+             return "Mohon upload file CSV terlebih dahulu."
+
+         try:
+             # Handle file from Gradio
+             file_path = file.name if hasattr(file, 'name') else str(file)
+             self.csv_file_path = file_path
+
+             # Copy to user directory
+             user_file_path = f"{self.user_dir}/uploaded.csv"
+
+             # Verify the CSV can be loaded
+             try:
+                 df = pd.read_csv(file_path)
+                 print(f"CSV verified: {df.shape[0]} rows, {len(df.columns)} columns")
+
+                 # Save a copy in user directory
+                 df.to_csv(user_file_path, index=False)
+                 self.csv_file_path = user_file_path
+             except Exception as e:
+                 return f"Error membaca CSV: {str(e)}"
+
+             # Load documents for the vector store (use the saved copy, since
+             # Gradio's temp upload may not persist)
+             try:
+                 loader = CSVLoader(
+                     file_path=self.csv_file_path,
+                     encoding="utf-8",
+                     csv_args={'delimiter': ','}
+                 )
+                 data = loader.load()
+                 print(f"Documents loaded: {len(data)}")
+             except Exception as e:
+                 return f"Error loading documents: {str(e)}"
+
+             # Create vector database with optimized settings
+             try:
+                 db_path = f"{self.user_dir}/db_faiss"
+
+                 # Use CPU-friendly embeddings with smaller dimensions
+                 embeddings = HuggingFaceEmbeddings(
+                     model_name='sentence-transformers/all-MiniLM-L6-v2',
+                     model_kwargs={'device': 'cpu'}
+                 )
+
+                 db = FAISS.from_documents(data, embeddings)
+                 db.save_local(db_path)
+                 print(f"Vector database created at {db_path}")
+             except Exception as e:
+                 return f"Error creating vector database: {str(e)}"
+
+             # Create custom chain
+             try:
+                 self.chain = create_conversational_chain(db, self.csv_file_path)
+                 print("Chain created successfully")
+             except Exception as e:
+                 return f"Error creating chain: {str(e)}"
+
+             # Add basic file info to chat history for context
+             file_info = f"CSV berhasil dimuat dengan {df.shape[0]} baris dan {len(df.columns)} kolom. Kolom: {', '.join(df.columns.tolist())}"
+             self.chat_history.append(("System", file_info))
+
+             return "File CSV berhasil diproses! Anda dapat mulai chat dengan model untuk analisis data."
+         except Exception as e:
+             import traceback
+             print(traceback.format_exc())
+             return f"Error pemrosesan file: {str(e)}"
+
+     def chat(self, message, history):
+         if self.chain is None:
+             return "Mohon upload file CSV terlebih dahulu."
+
+         try:
+             # Process the question with the chain
+             result = self.chain(message, self.chat_history)
+
+             # Get the answer with fallback
+             answer = result.get("answer", "Maaf, tidak dapat menghasilkan jawaban. Silakan coba pertanyaan lain.")
+
+             # Ensure we never return empty
+             if not answer or answer.strip() == "":
+                 answer = "Maaf, tidak dapat menghasilkan jawaban yang sesuai. Silakan coba pertanyaan lain."
+
+             # Update internal chat history
+             self.chat_history.append((message, answer))
+
+             # Return just the answer for Gradio
+             return answer
+         except Exception as e:
+             import traceback
+             print(traceback.format_exc())
+             return f"Error: {str(e)}"
+
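+ # Hypothetical programmatic use outside the UI (process_file also accepts a
+ # plain path string, since it falls back to str(file)):
+ #   bot = ChatBot(str(uuid.uuid4()))
+ #   print(bot.process_file("data.csv"))
+ #   print(bot.chat("Berapa jumlah baris data?", []))
+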
+ # UI Code
+ def create_gradio_interface():
+     with gr.Blocks(title="Chat with CSV using DeepSeek") as interface:
+         session_id = gr.State(lambda: str(uuid.uuid4()))
+         chatbot_state = gr.State(lambda: None)
+
+         gr.HTML("<h1 style='text-align: center;'>Chat with CSV using DeepSeek</h1>")
+         gr.HTML("<h3 style='text-align: center;'>Asisten analisis CSV untuk berbagai kebutuhan</h3>")
+
+         with gr.Row():
+             with gr.Column(scale=1):
+                 file_input = gr.File(
+                     label="Upload CSV Anda",
+                     file_types=[".csv"]
+                 )
+                 process_button = gr.Button("Proses CSV")
+
+                 with gr.Accordion("Informasi Model", open=False):
+                     gr.Markdown("""
+                     **Fitur**:
+                     - Tanya jawab berbasis data
+                     - Analisis statistik otomatis
+                     - Support berbagai format CSV
+                     - Manajemen sesi per pengguna
+                     """)
+
+             with gr.Column(scale=2):
+                 chatbot_interface = gr.Chatbot(
+                     label="Riwayat Chat",
+                     height=400
+                 )
+                 message_input = gr.Textbox(
+                     label="Ketik pesan Anda",
+                     placeholder="Tanyakan tentang data CSV Anda...",
+                     lines=2
+                 )
+                 submit_button = gr.Button("Kirim")
+                 clear_button = gr.Button("Bersihkan Chat")
+
+         # Process file handler
+         def handle_process_file(file, sess_id):
+             chatbot = ChatBot(sess_id)
+             result = chatbot.process_file(file)
+             return chatbot, [(None, result)]
+
+         process_button.click(
+             fn=handle_process_file,
+             inputs=[file_input, session_id],
+             outputs=[chatbot_state, chatbot_interface]
+         )
+
+         # Chat handlers
+         def user_message_submitted(message, history, chatbot, sess_id):
+             history = history + [(message, None)]
+             return history, "", chatbot, sess_id
+
+         def bot_response(history, chatbot, sess_id):
+             if chatbot is None:
+                 chatbot = ChatBot(sess_id)
+                 history[-1] = (history[-1][0], "Mohon upload file CSV terlebih dahulu.")
+                 return chatbot, history
+
+             user_message = history[-1][0]
+             response = chatbot.chat(user_message, history[:-1])
+             history[-1] = (user_message, response)
+             return chatbot, history
+
+         submit_button.click(
+             fn=user_message_submitted,
+             inputs=[message_input, chatbot_interface, chatbot_state, session_id],
+             outputs=[chatbot_interface, message_input, chatbot_state, session_id]
+         ).then(
+             fn=bot_response,
+             inputs=[chatbot_interface, chatbot_state, session_id],
+             outputs=[chatbot_state, chatbot_interface]
+         )
+
+         message_input.submit(
+             fn=user_message_submitted,
+             inputs=[message_input, chatbot_interface, chatbot_state, session_id],
+             outputs=[chatbot_interface, message_input, chatbot_state, session_id]
+         ).then(
+             fn=bot_response,
+             inputs=[chatbot_interface, chatbot_state, session_id],
+             outputs=[chatbot_state, chatbot_interface]
+         )
+
+         # Clear chat handler
+         def handle_clear_chat(chatbot):
+             if chatbot is not None:
+                 chatbot.chat_history = []
+             return chatbot, []
+
+         clear_button.click(
+             fn=handle_clear_chat,
+             inputs=[chatbot_state],
+             outputs=[chatbot_state, chatbot_interface]
+         )
+
+     return interface
+
+ # Launch the interface
+ demo = create_gradio_interface()
+ demo.launch(share=True)
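+ # Note: share=True only matters for local runs; on Hugging Face Spaces the
+ # app is already served at the Space's own URL and the flag has no effect.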