# llm_logic.py
import multiprocessing

import streamlit as st
from langchain_community.chat_models import ChatLlamaCpp
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langchain_google_genai import ChatGoogleGenerativeAI

# from langchain_ollama import ChatOllama  # alternative local backend (see below)

# Path to the local GGUF model file loaded by llama.cpp.
local_model = "qwen2.5-coder-3b-instruct-q4_k_m.gguf"
# Qwen2.5 special/control tokens used as stop sequences so generation halts
# instead of emitting chat-template markup past the answer.
stop = [
    "<|image_pad|>",
    "<|endoftext|>",
    "<|quad_end|>",
    "<|object_ref_end|>",
    "<|object_ref_start|>",
    "<|file_sep|>",
    "<|repo_name|>",
    "<|PAD_TOKEN|>",
    "<|quad_start|>",
    "<|box_start|>",
    "<|box_end|>",
    "<|im_start|>",
    "</tool_call>",
    "<|video_pad|>",
    "<tool_call>",
    "<|im_end|>",
    "<|vision_",
    "<|fim_",
]
def get_local_llm():
    """Build the local llama.cpp-backed chat model (CPU-only)."""
    llm = ChatLlamaCpp(
        temperature=0.0,
        model_path=local_model,
        n_ctx=10000,
        n_gpu_layers=0,  # run entirely on CPU
        n_batch=1024,
        max_tokens=500,
        n_threads=multiprocessing.cpu_count() - 1,  # leave one core free
        top_p=0.95,
        verbose=False,
        stop=stop,
    )
    # Alternative local backend via Ollama:
    # llm = ChatOllama(
    #     model="qwen2.5-coder:3b",
    #     temperature=0.0,
    #     num_predict=150,
    #     top_p=0.95,
    #     stop=stop,
    # )
    return llm


local_llm = get_local_llm()
def get_gemini_llm():
    """Build the hosted Gemini chat model.

    Expects the GOOGLE_API_KEY environment variable to be set.
    """
    gemini = ChatGoogleGenerativeAI(
        model="gemini-2.0-flash",
        temperature=0,
        max_tokens=None,
        timeout=None,
        max_retries=2,
        top_p=0.95,
    )
    return gemini


gemini_llm = get_gemini_llm()
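# Optional sanity check (hypothetical; uncomment to verify both backends
# respond before wiring them into the app):
#   print(local_llm.invoke("Reply with OK").content)
#   print(gemini_llm.invoke("Reply with OK").content)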
db_schema = """### **customers** | |
| | customer_id | customer_zip_code_prefix | customer_city | customer_state | | |
|------:|:--------------|---------------------------:|:----------------|:-----------------| | |
| 21921 | 0tgYlOTGgpO6 | 79230 | russas | CE | | |
| 9748 | jGhRQF3CIew4 | 81460 | joao monlevade | MG | | |
| 22679 | 1UutQTIhBvcP | 94480 | pelotas | RS | | |
Rows: 38279, Columns: 4 | |
--- | |
### **order_items** | |
| | order_id | product_id | seller_id | price | shipping_charges | | |
|------:|:-------------|:-------------|:-------------|--------:|-------------------:| | |
| 19729 | PDEzZdebLSn3 | aBpYjaBcwz6e | bzfcwRPnZzVO | 55.83 | 27.8 | | |
| 6001 | R7bIPjjYqlHP | ZM2JJXV5m9hl | Ivbw25fb5t2Z | 100 | 42.05 | | |
| 282 | Biqo21nETaMO | XqmdGKRbTetH | P2nCHWuo0HC0 | 113.49 | 91.32 | | |
Rows: 38279, Columns: 5 | |
--- | |
### **orders** | |
| | order_id | customer_id | order_purchase_timestamp | order_approved_at | | |
|------:|:-------------|:--------------|:---------------------------|:--------------------| | |
| 7294 | PMqwQc01iDTJ | c9ueC6k6V5WS | 2018-06-19 21:23:48 | 2018-06-20 08:38:30 | | |
| 13800 | P4l8R2Qat5n7 | ovKkGaXi5TmN | 2018-01-05 08:26:03 | 2018-01-05 08:47:20 | | |
| 17679 | NxIseZjAQCdC | o9qzmUQVJOxA | 2018-01-28 23:46:53 | 2018-01-28 23:58:31 | | |
Rows: 38279, Columns: 4 | |
--- | |
### **payments** | |
| | order_id | payment_sequential | payment_type | payment_installments | payment_value | | |
|------:|:-------------|---------------------:|:---------------|-----------------------:|----------------:| | |
| 35526 | cQXl0pQtiMad | 1 | wallet | 1 | 172.58 | | |
| 35799 | olImD2k316Gz | 1 | credit_card | 3 | 16.78 | | |
| 13278 | G9MJYXXtPZSz | 1 | credit_card | 10 | 221.86 | | |
Rows: 38279, Columns: 5 | |
--- | |
### **products** | |
| | product_id | product_category_name | product_weight_g | product_length_cm | product_height_cm | product_width_cm | | |
|------:|:-------------|:------------------------|-------------------:|--------------------:|--------------------:|-------------------:| | |
| 18191 | hpiXwRzTkhkL | bed_bath_table | 1150 | 40 | 9 | 50 | | |
| 2202 | iPoRkE7dkmlc | toys | 15800 | 38 | 62 | 57 | | |
| 27442 | hrjNaMt3Wyo5 | toys | 1850 | 37 | 22 | 40 | | |
Rows: 38279, Columns: 6 | |
""" | |
# Improved SQL generation prompt
sql_system_prompt = """You are a highly skilled natural-language-to-SQL translator. Your goal is to generate accurate SQL queries based on the provided database schema. You must return only the SQL query, with no other text or explanations.
DATABASE SCHEMA:
{db_schema}
The timestamp columns are of type 'VARCHAR'. The queries are executed with DuckDB.
"""
sql_chat_template = """
Translate the following natural language question into an accurate SQL query. Return only the SQL query.
QUESTION: {question}
### assistant:
"""
# Improved prompt for classifying the question
classification_system_prompt = """You are an expert at classifying user questions as requiring a SQL query or being generic, based on the provided database schema. Your response must be ONLY 'SQL' or 'GENERIC'.
A question requires a SQL query if it asks for specific data that can be retrieved from the tables in the schema. A question is generic if it asks for explanations, definitions, or information not directly retrievable through a SQL query on the given schema.
Consider the following database schema:
{db_schema}
Here are some examples:
Question: What are the names of all customers?
Response: SQL
Question: Tell me about the sales table.
Response: GENERIC
Question: How much did product 'Product A' sell for?
Response: SQL
Question: What is a primary key?
Response: GENERIC
"""
classification_chat_template = """
Determine if the following question requires a SQL query based on the database schema. Respond with 'SQL' or 'GENERIC'.
QUESTION: {question}
### assistant:
"""
def classify_question(question: str, llm, use_default_schema: bool = True):
    """Ask the model to label the question as 'SQL' or 'GENERIC'."""
    if use_default_schema:
        schema = db_schema
    else:
        schema = st.session_state.uploaded_df_schema
    classification_messages = [
        SystemMessage(content=classification_system_prompt.format(db_schema=schema)),
        HumanMessage(content=classification_chat_template.format(question=question)),
    ]
    response = llm.invoke(classification_messages)
    return response.content.strip().upper()
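# Example (hypothetical) usage:
#   classify_question("How many orders used credit_card payments?", local_llm)
#   -> "SQL"
#   classify_question("What is a primary key?", local_llm)
#   -> "GENERIC"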
def generate_llm_response(prompt: str, llm: str, use_default_schema: bool = True):
    """Stream a response for `prompt`, routing to SQL generation or generic QA."""
    # Resolve the backend name into an actual chat model instance.
    if llm == "gemini":
        llm = gemini_llm
    else:
        llm = local_llm
    question_type = classify_question(prompt, llm, use_default_schema)
    if use_default_schema:
        chosen_schema = db_schema
    else:
        chosen_schema = st.session_state.uploaded_df_schema
    sql_system_prompt_local = sql_system_prompt.format(db_schema=chosen_schema)
    # Retrieve the chat history from the session state.
    chat_history = st.session_state.get("chat_history", [])
if "SQL" in question_type: | |
print("SQL question detected") | |
st.toast("Detected Task: SQL Query Generation", icon="π¨") | |
formatted_prompt = sql_chat_template.format(question=prompt) | |
# Create the messages list, including the system prompt and the chat history | |
messages_for_llm = [SystemMessage(content=sql_system_prompt_local)] | |
for message in chat_history: | |
if isinstance(message, HumanMessage): | |
messages_for_llm.append(HumanMessage(content=message.content)) | |
elif isinstance(message, AIMessage): | |
# Only include the assistant's text response, not the additional kwargs | |
messages_for_llm.append(AIMessage(content=message.content)) | |
messages_for_llm.append(HumanMessage(content=formatted_prompt)) | |
full_response = "" | |
for chunk in llm.stream(messages_for_llm): | |
full_response += chunk.content | |
yield f"<sql>\n```sql\n{full_response.strip()}\n```\n</sql>" | |
elif "GENERIC" in question_type: | |
print("Generic question detected") | |
st.toast("Detected Task: Generic QA", icon="π¨") | |
generic_prompt = f"Answer the following question related to SQL or coding:\n\nQUESTION: {prompt}\n\n### assistant:" | |
# Create the messages list, including the system prompt and the chat history | |
messages_for_generic = [ | |
SystemMessage( | |
content=f"You are a helpful assistant finetuned from Qwen2.5-coder:3B-Instruct for answering questions about SQL.\nYou have a database with the Database Schema:\n{chosen_schema}.\n" | |
) | |
] | |
for message in chat_history: | |
if isinstance(message, HumanMessage): | |
messages_for_generic.append(HumanMessage(content=message.content)) | |
elif isinstance(message, AIMessage): | |
# Only include the assistant's text response, not the additional kwargs | |
messages_for_generic.append(AIMessage(content=message.content)) | |
messages_for_generic.append(HumanMessage(content=generic_prompt)) | |
generic_response = "" | |
for chunk in llm.stream(messages_for_generic): | |
generic_response += chunk.content | |
yield generic_response | |
else: | |
yield "I am sorry, I am small language model fine-tuned specifically to answer questions that can be solved using SQL. I won't be able to answer this question." | |