DebopamC commited on
Commit
2729888
·
verified ·
1 Parent(s): 74b8ad8

Upload 🤖SQL_Agent.py

Browse files
Files changed (1) hide show
  1. 🤖SQL_Agent.py +321 -320
🤖SQL_Agent.py CHANGED
@@ -1,320 +1,321 @@
1
- import re
2
- import streamlit as st
3
- from langchain_core.messages import HumanMessage, AIMessage
4
- from utils.llm_logic import generate_llm_response
5
- from utils.sql_utils import (
6
- extract_sql_command,
7
- load_defaultdb_schema_text,
8
- load_defaultdb_queries,
9
- load_data,
10
- )
11
- from utils.handle_sql_commands import execute_sql_duckdb
12
-
13
-
14
- st.set_page_config(
15
- page_title="Text-to-SQL Agent",
16
- page_icon="🤖",
17
- layout="wide",
18
- initial_sidebar_state="expanded",
19
- )
20
-
21
- default_db_questions = {}
22
- default_dfs = load_data()
23
- selected_df = default_dfs
24
- use_default_schema = True
25
- llm_option = "gemini"
26
-
27
-
28
- st.markdown(
29
- """
30
- <style>
31
- /* Base styles for both themes */
32
- .stPageLink {
33
- background-image: linear-gradient(to right, #007BFF, #6610F2); /* Gradient background */
34
- color: white !important; /* Ensure text is readable on the gradient */
35
- padding: 12px 20px !important; /* Slightly larger padding */
36
- border-radius: 8px !important; /* More rounded corners */
37
- border: none !important; /* Remove default border */
38
- text-decoration: none !important;
39
- font-weight: 500 !important; /* Slightly lighter font weight */
40
- transition: transform 0.2s ease-in-out, box-shadow 0.2s ease-in-out; /* Smooth transitions */
41
- box-shadow: 0 2px 5px rgba(0, 0, 0, 0.15); /* Subtle shadow for depth */
42
- display: inline-flex;
43
- align-items: center;
44
- justify-content: center;
45
- }
46
-
47
- .stPageLink:hover {
48
- transform: scale(1.03); /* Slight scale up on hover */
49
- box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2); /* Increased shadow on hover */
50
- }
51
-
52
- .stPageLink span { /* Style the label text */
53
- margin-left: 5px; /* Space between icon and text */
54
- }
55
-
56
- /* Dark theme adjustments (optional, if needed for better contrast) */
57
- /* Consider using Streamlit's theme variables if possible for a more robust solution */
58
- /* For simplicity, this example uses fixed colors that should work reasonably well */
59
- /* [data-theme="dark"] .stPageLink {
60
- }
61
-
62
- [data-theme="dark"] .stPageLink:hover {
63
- } */
64
- </style>
65
- """,
66
- unsafe_allow_html=True,
67
- )
68
-
69
-
70
- with st.popover("Click here to see Database Schema", use_container_width=True):
71
- uploaded_df_schema = st.session_state.get("uploaded_df_schema", False)
72
-
73
- choice = st.segmented_control(
74
- "Choose",
75
- ["Default DB", "Uploaded Files"],
76
- label_visibility="collapsed",
77
- disabled=uploaded_df_schema == False,
78
- default="Default DB" if uploaded_df_schema == False else "Uploaded Files",
79
- )
80
-
81
- if uploaded_df_schema is False:
82
- st.markdown(
83
- """> You can also upload your own files, to get your schemas. You can then use those schemas to cross-check our answers with ChatGpt/Gemini/Claude (Preferred if the Question is very Complex). You can run the queries directly with our Manual SQL Executer😊.
84
- - Ask Questions
85
- - Run Queries: automatic + manual
86
- - Download Results """
87
- )
88
- st.page_link(
89
- page="pages/3 📂File Upload for SQL.py",
90
- label="Upload your own CSV or Excel files",
91
- icon="📜",
92
- )
93
- schema = load_defaultdb_schema_text()
94
- st.markdown(schema, unsafe_allow_html=True)
95
- elif choice == "Default DB":
96
- schema = load_defaultdb_schema_text()
97
- st.markdown(schema, unsafe_allow_html=True)
98
- else:
99
- pretty_schema, markdown = st.tabs(["Schema", "Copy Schema in Markdown"])
100
- with pretty_schema:
101
- st.info(
102
- "You can copy this schema, and give it to any state of the art LLM models like (Gemini /ChatGPT /Claude etc) to cross check your answers.\n You can run the queries directly here, by using ***Manual Query Executer*** in the sidebar and download your results 😊",
103
- icon="ℹ️",
104
- )
105
- st.markdown(uploaded_df_schema, unsafe_allow_html=True)
106
- with markdown:
107
- st.info(
108
- "You can copy this schema, and give it to any state of the art LLM models like (Gemini /ChatGPT /Claude etc) to cross check your answers.\n You can run the queries directly here, by using ***Manual Query Executer*** in the sidebar and download your results 😊",
109
- icon="ℹ️",
110
- )
111
- st.markdown(f"```\n{uploaded_df_schema}\n```")
112
-
113
-
114
- col1, col2 = st.columns([2, 1], vertical_alignment="bottom")
115
- with col1:
116
- st.header("Natural Language to SQL Query Agent🤖")
117
-
118
- with col2:
119
- st.caption("> ***Execute on the Go!*** 🚀 In-Built DuckDB Execution Engine")
120
-
121
- st.caption(
122
- "This is a Qwen2.5-Coder-3B model fine-tuned for SQL queries integrated with langchain for Agentic Workflow. To see the Fine-Tuning code - [click here](https://www.kaggle.com/code/debopamchowdhury/qwen-2-5coder-3b-instruct-finetuning)."
123
- )
124
-
125
-
126
- col1, col2, col3 = st.columns([1.5, 2, 1], vertical_alignment="top")
127
- with col1:
128
- disabled_selection = True
129
- if (
130
- "uploaded_dataframes" in st.session_state
131
- ) and st.session_state.uploaded_dataframes:
132
- disabled_selection = False
133
- options = ["default_db", "uploaded_files"]
134
- selected = st.segmented_control(
135
- "Choose",
136
- options,
137
- selection_mode="single",
138
- disabled=disabled_selection,
139
- label_visibility="collapsed",
140
- default="default_db" if disabled_selection else "uploaded_files",
141
- )
142
- if not disabled_selection:
143
- if selected == "uploaded_files":
144
- selected_df = st.session_state.uploaded_dataframes
145
- # print(selected_df)
146
- use_default_schema = False
147
- else:
148
- selected_df = default_dfs
149
- # print(selected_df)
150
- use_default_schema = True
151
- if selected_df == default_dfs:
152
- with st.popover("Default Database Queries 📚 - Trial"):
153
- default_db_questions = load_defaultdb_queries()
154
- st.markdown(default_db_questions)
155
-
156
- with col2:
157
- llm_option_radio = st.radio(
158
- "Choose LLM Model",
159
- ["Gemini-2.0-Flash-Exp", "FineTuned Qwen2.5-Coder-3B for SQL"],
160
- captions=[
161
- "Used via API",
162
- "Run Locally on this Server. Extremely Slow because of Free vCPUs",
163
- ],
164
- label_visibility="collapsed",
165
- )
166
- if llm_option_radio == "Gemini-2.0-Flash":
167
- llm_option = "gemini"
168
- else:
169
- llm_option = "qwen"
170
-
171
- with col3:
172
- # Button to refresh the conversation
173
- if st.button("Start New Conversation", type="primary"):
174
- st.session_state.chat_history = []
175
- st.session_state.conversation_turns = 0
176
- st.rerun()
177
-
178
- # Initialize chat history in session state
179
- if "chat_history" not in st.session_state:
180
- st.session_state.chat_history = []
181
-
182
- # Initialize conversation turn counter
183
- if "conversation_turns" not in st.session_state:
184
- st.session_state.conversation_turns = 0
185
-
186
- # Set the maximum number of conversation turns
187
- MAX_TURNS = 5
188
-
189
- # Display existing chat messages
190
- for message in st.session_state.chat_history:
191
- with st.chat_message(message.type):
192
- st.markdown(message.content)
193
- if (
194
- isinstance(message, AIMessage)
195
- and "response_df" in message.additional_kwargs
196
- and message.additional_kwargs["response_df"] is not None
197
- and not message.additional_kwargs["response_df"].empty
198
- ):
199
- with st.expander("View SQL-Query Execution Result"):
200
- df = message.additional_kwargs["response_df"]
201
- # download_csv = convert_df(df)
202
- # st.download_button(
203
- # label="Download data as CSV",
204
- # data=download_csv,
205
- # file_name="query_results.csv",
206
- # mime="text/csv",
207
- # )
208
- # renderer = StreamlitRenderer(
209
- # df,
210
- # spec_io_mode="rw",
211
- # default_tab="data",
212
- # appearance="dark",
213
- # kernel_computation=True,
214
- # )
215
- # renderer.explorer(default_tab="data")
216
- st.dataframe(df)
217
- st.info(f"Rows x Columns: {df.shape[0]} x {df.shape[1]}")
218
- st.subheader("Data Description:")
219
- st.markdown(df.describe().T.to_markdown())
220
- st.subheader("Data Types:")
221
- st.write(df.dtypes)
222
-
223
- # Get user input only if the conversation turn limit is not reached
224
- if st.session_state.conversation_turns < MAX_TURNS:
225
- if prompt := st.chat_input("Ask me a SQL query question"):
226
- # Add user message to chat history in session state
227
- st.session_state.chat_history.append(HumanMessage(content=prompt))
228
- # Display user message in chat
229
- with st.chat_message("user"):
230
- st.markdown(prompt)
231
-
232
- duckdb_result = None
233
- # Get assistant response with streaming
234
- with st.chat_message("assistant"):
235
- message_placeholder = st.empty()
236
- full_response = ""
237
- spinner_text = ""
238
- if llm_option == "gemini":
239
- spinner_text = (
240
- "Using Gemini-2.0-Flash-Exp to run your query. Please wait...😊"
241
- )
242
- else:
243
- spinner_text = "I know it is taking a lot of time. To run the model I'm using `Free` small vCPUs provided by `HuggingFace Spaces` for deployment. Thank you so much for your patience😊"
244
- with st.spinner(
245
- spinner_text,
246
- ):
247
- for response_so_far in generate_llm_response(
248
- prompt, llm_option, use_default_schema
249
- ):
250
- # Remove <sql> and </sql> tags for streaming display
251
- streaming_response = response_so_far.replace("<sql>", "").replace(
252
- "</sql>", ""
253
- )
254
- # Remove duplicate ```sql tags with or without space for streaming display
255
- streaming_response = re.sub(
256
- r"```sql\s*```sql", "```sql", streaming_response
257
- )
258
- message_placeholder.markdown(streaming_response + "▌")
259
- full_response = response_so_far
260
-
261
- # Remove <sql> and </sql> tags from the full response
262
- full_response = full_response.replace("<sql>", "").replace("</sql>", "")
263
- # Remove duplicate ```sql tags with or without space from the full response
264
- full_response = re.sub(r"```sql\s*```sql", "```sql", full_response)
265
- # Remove trailing duplicate ``` tags from the full response
266
- full_response = re.sub(r"[\s\n]*`+$", "```", full_response)
267
- message_placeholder.markdown(full_response)
268
- # st.text(extract_sql_command(full_response))
269
-
270
- sql_command = extract_sql_command(full_response)
271
- # dataframe_html = None
272
- if sql_command:
273
- # st.text("Extracted SQL Command:")
274
- # st.code(sql_command, language="sql")
275
- duckdb_result = execute_sql_duckdb(sql_command, selected_df)
276
- if duckdb_result is not None:
277
- st.text("Query Execution Result:")
278
- with st.expander("View Result"):
279
- # st.dataframe(duckdb_result)
280
- st.dataframe(duckdb_result)
281
- st.info(
282
- f"Rows x Columns: {duckdb_result.shape[0]} x {duckdb_result.shape[1]}"
283
- )
284
- st.subheader("Data Description:")
285
- st.markdown(duckdb_result.describe().T.to_markdown())
286
- st.subheader("Data Types:")
287
- st.write(duckdb_result.dtypes)
288
- # renderer = StreamlitRenderer(
289
- # duckdb_result,
290
- # spec_io_mode="rw",
291
- # default_tab="data",
292
- # appearance="dark",
293
- # kernel_computation=True,
294
- # )
295
- # renderer.explorer(default_tab="data")
296
-
297
- else:
298
- # st.warning("No SQL command found in the response.")
299
- pass
300
-
301
- # Add assistant response to chat history in session state
302
- st.session_state.chat_history.append(
303
- AIMessage(
304
- content=full_response,
305
- additional_kwargs={"response_df": duckdb_result},
306
- )
307
- )
308
-
309
- # Increment the conversation turn counter
310
- st.session_state.conversation_turns += 1
311
- else:
312
- st.warning(
313
- "Maximum number of questions reached. Please click 'Start New Conversation' to continue."
314
- )
315
- st.chat_input(
316
- "Ask me a SQL query question", disabled=True
317
- ) # Disable the input field
318
-
319
- with st.sidebar:
320
- st.caption("Made with ❤️ by @Debopam_Chowdhury")
 
 
1
# main.py
# Streamlit entry point for the Text-to-SQL agent page.
import re
import streamlit as st
from langchain_core.messages import HumanMessage, AIMessage
# Project-local helpers: LLM streaming, schema/query loading, DuckDB execution.
from utils.llm_logic import generate_llm_response
from utils.sql_utils import (
    extract_sql_command,
    load_defaultdb_schema_text,
    load_defaultdb_queries,
    load_data,
)
from utils.handle_sql_commands import execute_sql_duckdb


# st.set_page_config must be the first Streamlit command executed on a run.
st.set_page_config(
    page_title="Text-to-SQL Agent",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="expanded",
)

# Per-run defaults; the widgets further down may overwrite these.
default_db_questions = {}
default_dfs = load_data()  # bundled demo database tables
selected_df = default_dfs  # dataframes handed to the DuckDB executor
use_default_schema = True  # whether the LLM prompt uses the default schema
llm_option = "gemini"  # "gemini" (API) or "qwen" (locally served model)
# Inject page-level CSS so st.page_link renders as a gradient "button" with
# hover feedback. unsafe_allow_html is required for raw <style> markup.
# NOTE(review): the indentation inside this literal was lost in transit; CSS
# is whitespace-insensitive, so rendering is unaffected — confirm against the
# original file if byte-exactness matters.
st.markdown(
    """
    <style>
    /* Base styles for both themes */
    .stPageLink {
        background-image: linear-gradient(to right, #007BFF, #6610F2); /* Gradient background */
        color: white !important; /* Ensure text is readable on the gradient */
        padding: 12px 20px !important; /* Slightly larger padding */
        border-radius: 8px !important; /* More rounded corners */
        border: none !important; /* Remove default border */
        text-decoration: none !important;
        font-weight: 500 !important; /* Slightly lighter font weight */
        transition: transform 0.2s ease-in-out, box-shadow 0.2s ease-in-out; /* Smooth transitions */
        box-shadow: 0 2px 5px rgba(0, 0, 0, 0.15); /* Subtle shadow for depth */
        display: inline-flex;
        align-items: center;
        justify-content: center;
    }

    .stPageLink:hover {
        transform: scale(1.03); /* Slight scale up on hover */
        box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2); /* Increased shadow on hover */
    }

    .stPageLink span { /* Style the label text */
        margin-left: 5px; /* Space between icon and text */
    }

    /* Dark theme adjustments (optional, if needed for better contrast) */
    /* Consider using Streamlit's theme variables if possible for a more robust solution */
    /* For simplicity, this example uses fixed colors that should work reasonably well */
    /* [data-theme="dark"] .stPageLink {
    }

    [data-theme="dark"] .stPageLink:hover {
    } */
    </style>
    """,
    unsafe_allow_html=True,
)
with st.popover("Click here to see Database Schema", use_container_width=True):
    # Schema text produced by the upload page, or the explicit sentinel False
    # when the user has not uploaded any files yet.
    uploaded_df_schema = st.session_state.get("uploaded_df_schema", False)

    choice = st.segmented_control(
        "Choose",
        ["Default DB", "Uploaded Files"],
        label_visibility="collapsed",
        # Identity test against the False sentinel, consistent with the
        # `is False` branch below (`== False` would also match 0 / 0.0).
        disabled=uploaded_df_schema is False,
        default="Default DB" if uploaded_df_schema is False else "Uploaded Files",
    )

    if uploaded_df_schema is False:
        # Nothing uploaded yet: advertise the upload page, then fall back to
        # showing the bundled default schema.
        st.markdown(
            """> You can also upload your own files, to get your schemas. You can then use those schemas to cross-check our answers with ChatGpt/Gemini/Claude (Preferred if the Question is very Complex). You can run the queries directly with our Manual SQL Executer😊.
- Ask Questions
- Run Queries: automatic + manual
- Download Results """
        )
        st.page_link(
            page="pages/3 📂File Upload for SQL.py",
            label="Upload your own CSV or Excel files",
            icon="📜",
        )
        schema = load_defaultdb_schema_text()
        st.markdown(schema, unsafe_allow_html=True)
    elif choice == "Default DB":
        schema = load_defaultdb_schema_text()
        st.markdown(schema, unsafe_allow_html=True)
    else:
        # Uploaded-files schema: a rendered view plus a copy-friendly
        # fenced-markdown view in a second tab.
        pretty_schema, markdown = st.tabs(["Schema", "Copy Schema in Markdown"])
        with pretty_schema:
            st.info(
                "You can copy this schema, and give it to any state of the art LLM models like (Gemini /ChatGPT /Claude etc) to cross check your answers.\n You can run the queries directly here, by using ***Manual Query Executer*** in the sidebar and download your results 😊",
                icon="ℹ️",
            )
            st.markdown(uploaded_df_schema, unsafe_allow_html=True)
        with markdown:
            st.info(
                "You can copy this schema, and give it to any state of the art LLM models like (Gemini /ChatGPT /Claude etc) to cross check your answers.\n You can run the queries directly here, by using ***Manual Query Executer*** in the sidebar and download your results 😊",
                icon="ℹ️",
            )
            st.markdown(f"```\n{uploaded_df_schema}\n```")
# Page heading, with a tagline column bottom-aligned to the header baseline.
col1, col2 = st.columns([2, 1], vertical_alignment="bottom")
with col1:
    st.header("Natural Language to SQL Query Agent🤖")
with col2:
    st.caption("> ***Execute on the Go!*** 🚀 In-Built DuckDB Execution Engine")

# Byline pointing at the fine-tuning notebook for the local model.
st.caption(
    "This is a Qwen2.5-Coder-3B model fine-tuned for SQL queries integrated with langchain for Agentic Workflow. To see the Fine-Tuning code - [click here](https://www.kaggle.com/code/debopamchowdhury/qwen-2-5coder-3b-instruct-finetuning)."
)
col1, col2, col3 = st.columns([1.5, 2, 1], vertical_alignment="top")
with col1:
    # Data-source picker: only enabled once the user has uploaded files on
    # the upload page (which populates st.session_state.uploaded_dataframes).
    disabled_selection = True
    if (
        "uploaded_dataframes" in st.session_state
    ) and st.session_state.uploaded_dataframes:
        disabled_selection = False
    options = ["default_db", "uploaded_files"]
    selected = st.segmented_control(
        "Choose",
        options,
        selection_mode="single",
        disabled=disabled_selection,
        label_visibility="collapsed",
        default="default_db" if disabled_selection else "uploaded_files",
    )
    if not disabled_selection:
        if selected == "uploaded_files":
            selected_df = st.session_state.uploaded_dataframes
            use_default_schema = False
        else:
            selected_df = default_dfs
            use_default_schema = True
    # Identity check instead of ==: these objects hold pandas DataFrames,
    # whose == is elementwise, so comparing the containers can raise
    # "truth value of a DataFrame is ambiguous". We only need to know
    # whether the default bundle is still the selected one.
    if selected_df is default_dfs:
        with st.popover("Default Database Queries 📚 - Trial"):
            default_db_questions = load_defaultdb_queries()
            st.markdown(default_db_questions)
with col2:
    # Model picker: Gemini via API (fast) vs the fine-tuned Qwen model served
    # locally on free CPU hardware (slow).
    llm_option_radio = st.radio(
        "Choose LLM Model",
        ["Gemini-2.0-Flash-Exp", "FineTuned Qwen2.5-Coder-3B for SQL"],
        captions=[
            "Used via API",
            "Run Locally on this Server. Extremely Slow because of Free vCPUs, [Download & Run Locally](https://huggingface.co/DebopamC/Text-to-SQL__Qwen2.5-Coder-3B-FineTuned/tree/main)",
        ],
        label_visibility="collapsed",
    )
    # Map the human-readable radio label onto the internal backend key.
    llm_option = "gemini" if llm_option_radio == "Gemini-2.0-Flash-Exp" else "qwen"
with col3:
    # Reset control: clear the transcript and turn counter, then rerun the
    # script so the page immediately reflects the empty conversation.
    if st.button("Start New Conversation", type="primary"):
        st.session_state.chat_history = []
        st.session_state.conversation_turns = 0
        st.rerun()
# --- Conversation state --------------------------------------------------
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []
if "conversation_turns" not in st.session_state:
    st.session_state.conversation_turns = 0

# Hard cap on questions per conversation (reset via "Start New Conversation").
MAX_TURNS = 5

# Replay the transcript. AI turns may carry the executed query's result
# dataframe in additional_kwargs["response_df"]; re-render it in an expander.
for message in st.session_state.chat_history:
    with st.chat_message(message.type):
        st.markdown(message.content)
        result_df = None
        if isinstance(message, AIMessage):
            result_df = message.additional_kwargs.get("response_df")
        if result_df is not None and not result_df.empty:
            with st.expander("View SQL-Query Execution Result"):
                # An interactive explorer (StreamlitRenderer) was considered
                # here; a plain dataframe plus summary stats is shown instead.
                st.dataframe(result_df)
                st.info(f"Rows x Columns: {result_df.shape[0]} x {result_df.shape[1]}")
                st.subheader("Data Description:")
                st.markdown(result_df.describe().T.to_markdown())
                st.subheader("Data Types:")
                st.write(result_df.dtypes)
# Get user input only if the conversation turn limit is not reached.
if st.session_state.conversation_turns < MAX_TURNS:
    if prompt := st.chat_input("Ask me a SQL query question"):
        # Record and echo the user's question.
        st.session_state.chat_history.append(HumanMessage(content=prompt))
        with st.chat_message("user"):
            st.markdown(prompt)

        duckdb_result = None
        # Stream the assistant response into a single placeholder so the text
        # grows in place instead of appending new elements each chunk.
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""
            spinner_text = ""
            if llm_option == "gemini":
                spinner_text = (
                    "Using Gemini-2.0-Flash-Exp to run your query. Please wait...😊"
                )
            else:
                spinner_text = "I know it is taking a lot of time. To run the model I'm using `Free` small vCPUs provided by `HuggingFace Spaces` for deployment. Thank you so much for your patience😊"
            with st.spinner(
                spinner_text,
            ):
                # generate_llm_response yields the cumulative response so far,
                # not deltas: each iteration replaces the placeholder content.
                for response_so_far in generate_llm_response(
                    prompt, llm_option, use_default_schema
                ):
                    # Strip <sql>/</sql> tags for the streaming display only;
                    # full_response keeps the raw text for final cleanup below.
                    streaming_response = response_so_far.replace("<sql>", "").replace(
                        "</sql>", ""
                    )
                    # Collapse duplicated ```sql fence openers (model artifact).
                    streaming_response = re.sub(
                        r"```sql\s*```sql", "```sql", streaming_response
                    )
                    # Trailing "▌" is a cursor glyph while streaming.
                    message_placeholder.markdown(streaming_response + "▌")
                    full_response = response_so_far

            # Final cleanup pass on the complete response (same fixes as the
            # streaming loop, plus normalizing the trailing fence).
            full_response = full_response.replace("<sql>", "").replace("</sql>", "")
            full_response = re.sub(r"```sql\s*```sql", "```sql", full_response)
            # Replace any trailing run of whitespace + backticks with one
            # well-formed closing ``` fence.
            full_response = re.sub(r"[\s\n]*`+$", "```", full_response)
            message_placeholder.markdown(full_response)
            # st.text(extract_sql_command(full_response))

            # Pull the SQL out of the cleaned response and run it against the
            # currently selected dataframes via DuckDB.
            sql_command = extract_sql_command(full_response)
            # dataframe_html = None
            if sql_command:
                # st.text("Extracted SQL Command:")
                # st.code(sql_command, language="sql")
                duckdb_result = execute_sql_duckdb(sql_command, selected_df)
                if duckdb_result is not None:
                    st.text("Query Execution Result:")
                    with st.expander("View Result"):
                        st.dataframe(duckdb_result)
                        st.info(
                            f"Rows x Columns: {duckdb_result.shape[0]} x {duckdb_result.shape[1]}"
                        )
                        st.subheader("Data Description:")
                        st.markdown(duckdb_result.describe().T.to_markdown())
                        st.subheader("Data Types:")
                        st.write(duckdb_result.dtypes)
                    # An interactive StreamlitRenderer explorer was considered
                    # here and left disabled.
            else:
                # No SQL in the response (e.g. a conversational answer) —
                # deliberately silent.
                pass

        # Persist the assistant turn; the result dataframe rides along in
        # additional_kwargs so the history replay can re-render it.
        # NOTE(review): nesting level of this append relative to the
        # chat_message block was lost in transit; behavior is identical either
        # way since it renders nothing — confirm against the original file.
        st.session_state.chat_history.append(
            AIMessage(
                content=full_response,
                additional_kwargs={"response_df": duckdb_result},
            )
        )

        # One question consumed against MAX_TURNS.
        st.session_state.conversation_turns += 1
else:
    # Turn limit reached: explain, and show a disabled input so the chat UI
    # keeps its shape.
    st.warning(
        "Maximum number of questions reached. Please click 'Start New Conversation' to continue."
    )
    st.chat_input(
        "Ask me a SQL query question", disabled=True
    )  # Disable the input field
# Sidebar footer credit (st.sidebar proxies the element API, equivalent to
# `with st.sidebar: st.caption(...)`).
st.sidebar.caption("Made with ❤️ by @Debopam_Chowdhury")