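"""Chat-with-CSV Gradio app.

Upload a CSV, index it into a FAISS vector store with sentence-transformers
embeddings, and ask questions about the data. Common statistical questions
(mean, max, min, sum, row/column counts) are computed directly with pandas;
everything else is answered by the DeepSeek Coder 1.3B instruct model
through LangChain.

Assumed dependencies (a plausible, untested requirements list): gradio,
pandas, torch, transformers, accelerate, langchain, sentence-transformers,
faiss-cpu.
"""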
import gradio as gr
import os
import uuid
import threading
import pandas as pd
import torch
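# NOTE: these langchain.* import paths match pre-0.1 LangChain releases; on
# newer versions the same classes are expected under langchain_community.*.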
from langchain.document_loaders import CSVLoader
from langchain.embeddings import HuggingFaceEmbeddings
from langchain.vectorstores import FAISS
from langchain.llms import HuggingFacePipeline
from langchain.chains import LLMChain
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
from langchain.prompts import PromptTemplate

# Global model cache, shared across all sessions so the model loads only once
MODEL_CACHE = {
    "model": None,
    "tokenizer": None,
    "init_lock": threading.Lock()
}

# Create directories for user data
os.makedirs("user_data", exist_ok=True)

def initialize_model_once():
    """Initialize the model once and cache it"""
    with MODEL_CACHE["init_lock"]:
        if MODEL_CACHE["model"] is None:
            # Use a smaller model for CPU environment
            model_name = "deepseek-ai/deepseek-coder-1.3b-instruct"

            # Load tokenizer and model with CPU-friendly configuration
            MODEL_CACHE["tokenizer"] = AutoTokenizer.from_pretrained(model_name)
            MODEL_CACHE["model"] = AutoModelForCausalLM.from_pretrained(
                model_name,
                torch_dtype=torch.float32,  # Use float32 for CPU
                device_map="auto",
                low_cpu_mem_usage=True,     # Optimize for low memory
                trust_remote_code=True
            )

    return MODEL_CACHE["tokenizer"], MODEL_CACHE["model"]

def create_llm_pipeline():
    """Create a new pipeline using the cached model"""
    tokenizer, model = initialize_model_once()

    # Create a CPU-friendly pipeline
    pipe = pipeline(
        "text-generation",
        model=model,
        tokenizer=tokenizer,
        max_new_tokens=256,     # Reduced for faster responses
        temperature=0.3,
        top_p=0.9,
        top_k=30,
        repetition_penalty=1.2,
        return_full_text=False,
    )

    # Wrap pipeline in HuggingFacePipeline for LangChain compatibility
    return HuggingFacePipeline(pipeline=pipe)

def create_conversational_chain(db, file_path):
    llm = create_llm_pipeline()
    
    # Load the file into pandas so common statistics can be computed directly
    df = pd.read_csv(file_path)
    
    # Prompt template that asks for direct answers rather than generated code
    template = """
    Berikut ini adalah informasi tentang file CSV:
    
    Kolom-kolom dalam file: {columns}
    
    Beberapa baris pertama:
    {sample_data}
    
    Konteks tambahan dari vector database:
    {context}
    
    Pertanyaan: {question}
    
    INSTRUKSI PENTING:
    1. Jangan tampilkan kode Python, berikan jawaban langsung dalam Bahasa Indonesia.
    2. Jika pertanyaan terkait statistik data (rata-rata, maksimum dll), lakukan perhitungan dan berikan hasilnya.
    3. Jawaban harus singkat, jelas dan akurat berdasarkan data yang ada.
    4. Gunakan format yang sesuai untuk angka (desimal 2 digit untuk nilai non-integer).
    5. Jangan menyebutkan proses perhitungan, fokus pada hasil akhir.
    
    Jawaban:
    """
    
    PROMPT = PromptTemplate(
        template=template,
        input_variables=["columns", "sample_data", "context", "question"]
    )
    
    # Create retriever
    retriever = db.as_retriever(search_kwargs={"k": 3})  # Reduced k for better performance
    
    # Process query with better error handling
    def process_query(query, chat_history):
        try:
            # Get information from dataframe for context
            columns_str = ", ".join(df.columns.tolist())
            sample_data = df.head(2).to_string()  # Reduced to 2 rows for performance
            
            # Get context from vector database
            docs = retriever.get_relevant_documents(query)
            context = "\n\n".join([doc.page_content for doc in docs])
            
            # Dynamically calculate answers for common statistical queries
            def preprocess_query():
                query_lower = query.lower()
                result = None
                
                # Handle statistical queries directly
                if "rata-rata" in query_lower or "mean" in query_lower or "average" in query_lower:
                    for col in df.columns:
                        if col.lower() in query_lower and pd.api.types.is_numeric_dtype(df[col]):
                            try:
                                result = f"Rata-rata {col} adalah {df[col].mean():.2f}"
                            except Exception:
                                pass
                
                elif "maksimum" in query_lower or "max" in query_lower or "tertinggi" in query_lower:
                    for col in df.columns:
                        if col.lower() in query_lower and pd.api.types.is_numeric_dtype(df[col]):
                            try:
                                result = f"Nilai maksimum {col} adalah {df[col].max():.2f}"
                            except Exception:
                                pass
                
                elif "minimum" in query_lower or "min" in query_lower or "terendah" in query_lower:
                    for col in df.columns:
                        if col.lower() in query_lower and pd.api.types.is_numeric_dtype(df[col]):
                            try:
                                result = f"Nilai minimum {col} adalah {df[col].min():.2f}"
                            except Exception:
                                pass
                
                elif "total" in query_lower or "jumlah" in query_lower or "sum" in query_lower:
                    for col in df.columns:
                        if col.lower() in query_lower and pd.api.types.is_numeric_dtype(df[col]):
                            try:
                                result = f"Total {col} adalah {df[col].sum():.2f}"
                            except Exception:
                                pass
                
                elif "baris" in query_lower or "jumlah data" in query_lower or "row" in query_lower:
                    result = f"Jumlah baris data adalah {len(df)}"
                
                elif "kolom" in query_lower or "field" in query_lower:
                    if "nama" in query_lower or "list" in query_lower or "sebutkan" in query_lower:
                        result = f"Kolom dalam data: {', '.join(df.columns.tolist())}"
                
                return result
            
            # Try direct calculation first
            direct_answer = preprocess_query()
            if direct_answer:
                return {"answer": direct_answer}
            
            # If no direct calculation, use the LLM
            chain = LLMChain(llm=llm, prompt=PROMPT)
            raw_result = chain.run(
                columns=columns_str,
                sample_data=sample_data,
                context=context,
                question=query
            )
            
            # Clean the result
            cleaned_result = raw_result.strip()
            
            # If result is empty after cleaning, use a fallback
            if not cleaned_result:
                return {"answer": "Tidak dapat memproses jawaban. Silakan coba pertanyaan lain."}
                
            return {"answer": cleaned_result}
        except Exception as e:
            import traceback
            print(f"Error in process_query: {str(e)}")
            print(traceback.format_exc())
            return {"answer": f"Terjadi kesalahan saat memproses pertanyaan: {str(e)}"}
    
    return process_query

class ChatBot:
    def __init__(self, session_id):
        self.session_id = session_id
        self.chat_history = []
        self.chain = None
        self.user_dir = f"user_data/{session_id}"
        self.csv_file_path = None
        os.makedirs(self.user_dir, exist_ok=True)

    def process_file(self, file):
        if file is None:
            return "Mohon upload file CSV terlebih dahulu."

        try:
            # Handle file from Gradio
            file_path = file.name if hasattr(file, 'name') else str(file)
            self.csv_file_path = file_path

            # Copy to user directory
            user_file_path = f"{self.user_dir}/uploaded.csv"

            # Verify the CSV can be loaded
            try:
                df = pd.read_csv(file_path)
                print(f"CSV verified: {df.shape[0]} rows, {len(df.columns)} columns")

                # Save a copy in user directory
                df.to_csv(user_file_path, index=False)
                self.csv_file_path = user_file_path
            except Exception as e:
                return f"Error membaca CSV: {str(e)}"

            # Load the CSV into LangChain documents (CSVLoader emits one document per row)
            try:
                loader = CSVLoader(
                    file_path=self.csv_file_path,  # use the verified copy
                    encoding="utf-8",
                    csv_args={'delimiter': ','}
                )
                data = loader.load()
                print(f"Documents loaded: {len(data)}")
            except Exception as e:
                return f"Error loading documents: {str(e)}"

            # Create vector database with optimized settings
            try:
                db_path = f"{self.user_dir}/db_faiss"
                
                # Small CPU-friendly embedding model (all-MiniLM-L6-v2, 384-dim vectors)
                embeddings = HuggingFaceEmbeddings(
                    model_name='sentence-transformers/all-MiniLM-L6-v2',
                    model_kwargs={'device': 'cpu'}
                )

                db = FAISS.from_documents(data, embeddings)
                db.save_local(db_path)
                print(f"Vector database created at {db_path}")
            except Exception as e:
                return f"Error creating vector database: {str(e)}"

            # Create custom chain
            try:
                self.chain = create_conversational_chain(db, self.csv_file_path)
                print("Chain created successfully")
            except Exception as e:
                return f"Error creating chain: {str(e)}"

            # Add basic file info to chat history for context
            file_info = f"CSV berhasil dimuat dengan {df.shape[0]} baris dan {len(df.columns)} kolom. Kolom: {', '.join(df.columns.tolist())}"
            self.chat_history.append(("System", file_info))

            return "File CSV berhasil diproses! Anda dapat mulai chat dengan model untuk analisis data."
        except Exception as e:
            import traceback
            print(traceback.format_exc())
            return f"Error pemrosesan file: {str(e)}"

    def chat(self, message, history):
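        # `history` here is Gradio's display history; the bot keeps its own
        # self.chat_history, so this argument is currently unused.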
        if self.chain is None:
            return "Mohon upload file CSV terlebih dahulu."

        try:
            # Process the question with the chain
            result = self.chain(message, self.chat_history)

            # Get the answer with fallback
            answer = result.get("answer", "Maaf, tidak dapat menghasilkan jawaban. Silakan coba pertanyaan lain.")

            # Ensure we never return empty
            if not answer or answer.strip() == "":
                answer = "Maaf, tidak dapat menghasilkan jawaban yang sesuai. Silakan coba pertanyaan lain."
            
            # Update internal chat history
            self.chat_history.append((message, answer))

            # Return just the answer for Gradio
            return answer
        except Exception as e:
            import traceback
            print(traceback.format_exc())
            return f"Error: {str(e)}"

# UI Code
def create_gradio_interface():
    with gr.Blocks(title="Chat with CSV using DeepSeek") as interface:
        session_id = gr.State(lambda: str(uuid.uuid4()))
        chatbot_state = gr.State(lambda: None)
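        # State defaults are callables so each browser session gets its own
        # session id and (initially empty) ChatBot instance.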

        gr.HTML("<h1 style='text-align: center;'>Chat with CSV using DeepSeek</h1>")
        gr.HTML("<h3 style='text-align: center;'>Asisten analisis CSV untuk berbagai kebutuhan</h3>")

        with gr.Row():
            with gr.Column(scale=1):
                file_input = gr.File(
                    label="Upload CSV Anda",
                    file_types=[".csv"]
                )
                process_button = gr.Button("Proses CSV")

                with gr.Accordion("Informasi Model", open=False):
                    gr.Markdown("""
                    **Fitur**:
                    - Tanya jawab berbasis data
                    - Analisis statistik otomatis
                    - Support berbagai format CSV
                    - Manajemen sesi per pengguna
                    """)

            with gr.Column(scale=2):
                chatbot_interface = gr.Chatbot(
                    label="Riwayat Chat",
                    height=400
                )
                message_input = gr.Textbox(
                    label="Ketik pesan Anda",
                    placeholder="Tanyakan tentang data CSV Anda...",
                    lines=2
                )
                submit_button = gr.Button("Kirim")
                clear_button = gr.Button("Bersihkan Chat")

        # Process file handler
        def handle_process_file(file, sess_id):
            chatbot = ChatBot(sess_id)
            result = chatbot.process_file(file)
            return chatbot, [(None, result)]

        process_button.click(
            fn=handle_process_file,
            inputs=[file_input, session_id],
            outputs=[chatbot_state, chatbot_interface]
        )

        # Chat handlers
        def user_message_submitted(message, history, chatbot, sess_id):
            history = history + [(message, None)]
            return history, "", chatbot, sess_id

        def bot_response(history, chatbot, sess_id):
            if chatbot is None:
                chatbot = ChatBot(sess_id)
                history[-1] = (history[-1][0], "Mohon upload file CSV terlebih dahulu.")
                return chatbot, history

            user_message = history[-1][0]
            response = chatbot.chat(user_message, history[:-1])
            history[-1] = (user_message, response)
            return chatbot, history

        submit_button.click(
            fn=user_message_submitted,
            inputs=[message_input, chatbot_interface, chatbot_state, session_id],
            outputs=[chatbot_interface, message_input, chatbot_state, session_id]
        ).then(
            fn=bot_response,
            inputs=[chatbot_interface, chatbot_state, session_id],
            outputs=[chatbot_state, chatbot_interface]
        )

        message_input.submit(
            fn=user_message_submitted,
            inputs=[message_input, chatbot_interface, chatbot_state, session_id],
            outputs=[chatbot_interface, message_input, chatbot_state, session_id]
        ).then(
            fn=bot_response,
            inputs=[chatbot_interface, chatbot_state, session_id],
            outputs=[chatbot_state, chatbot_interface]
        )

        # Clear chat handler
        def handle_clear_chat(chatbot):
            if chatbot is not None:
                chatbot.chat_history = []
            return chatbot, []

        clear_button.click(
            fn=handle_clear_chat,
            inputs=[chatbot_state],
            outputs=[chatbot_state, chatbot_interface]
        )

    return interface

# Launch the interface
demo = create_gradio_interface()
demo.launch(share=True)