DebopamC committed on
Commit
9820eac
·
verified ·
1 Parent(s): 2729888

Update 🤖SQL_Agent.py

Browse files
Files changed (1) hide show
  1. 🤖SQL_Agent.py +321 -321
🤖SQL_Agent.py CHANGED
@@ -1,321 +1,321 @@
1
- # main.py
2
- import re
3
- import streamlit as st
4
- from langchain_core.messages import HumanMessage, AIMessage
5
- from utils.llm_logic import generate_llm_response
6
- from utils.sql_utils import (
7
- extract_sql_command,
8
- load_defaultdb_schema_text,
9
- load_defaultdb_queries,
10
- load_data,
11
- )
12
- from utils.handle_sql_commands import execute_sql_duckdb
13
-
14
-
15
- st.set_page_config(
16
- page_title="Text-to-SQL Agent",
17
- page_icon="🤖",
18
- layout="wide",
19
- initial_sidebar_state="expanded",
20
- )
21
-
22
- default_db_questions = {}
23
- default_dfs = load_data()
24
- selected_df = default_dfs
25
- use_default_schema = True
26
- llm_option = "gemini"
27
-
28
-
29
- st.markdown(
30
- """
31
- <style>
32
- /* Base styles for both themes */
33
- .stPageLink {
34
- background-image: linear-gradient(to right, #007BFF, #6610F2); /* Gradient background */
35
- color: white !important; /* Ensure text is readable on the gradient */
36
- padding: 12px 20px !important; /* Slightly larger padding */
37
- border-radius: 8px !important; /* More rounded corners */
38
- border: none !important; /* Remove default border */
39
- text-decoration: none !important;
40
- font-weight: 500 !important; /* Slightly lighter font weight */
41
- transition: transform 0.2s ease-in-out, box-shadow 0.2s ease-in-out; /* Smooth transitions */
42
- box-shadow: 0 2px 5px rgba(0, 0, 0, 0.15); /* Subtle shadow for depth */
43
- display: inline-flex;
44
- align-items: center;
45
- justify-content: center;
46
- }
47
-
48
- .stPageLink:hover {
49
- transform: scale(1.03); /* Slight scale up on hover */
50
- box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2); /* Increased shadow on hover */
51
- }
52
-
53
- .stPageLink span { /* Style the label text */
54
- margin-left: 5px; /* Space between icon and text */
55
- }
56
-
57
- /* Dark theme adjustments (optional, if needed for better contrast) */
58
- /* Consider using Streamlit's theme variables if possible for a more robust solution */
59
- /* For simplicity, this example uses fixed colors that should work reasonably well */
60
- /* [data-theme="dark"] .stPageLink {
61
- }
62
-
63
- [data-theme="dark"] .stPageLink:hover {
64
- } */
65
- </style>
66
- """,
67
- unsafe_allow_html=True,
68
- )
69
-
70
-
71
- with st.popover("Click here to see Database Schema", use_container_width=True):
72
- uploaded_df_schema = st.session_state.get("uploaded_df_schema", False)
73
-
74
- choice = st.segmented_control(
75
- "Choose",
76
- ["Default DB", "Uploaded Files"],
77
- label_visibility="collapsed",
78
- disabled=uploaded_df_schema == False,
79
- default="Default DB" if uploaded_df_schema == False else "Uploaded Files",
80
- )
81
-
82
- if uploaded_df_schema is False:
83
- st.markdown(
84
- """> You can also upload your own files, to get your schemas. You can then use those schemas to cross-check our answers with ChatGpt/Gemini/Claude (Preferred if the Question is very Complex). You can run the queries directly with our Manual SQL Executer😊.
85
- - Ask Questions
86
- - Run Queries: automatic + manual
87
- - Download Results """
88
- )
89
- st.page_link(
90
- page="pages/3 📂File Upload for SQL.py",
91
- label="Upload your own CSV or Excel files",
92
- icon="📜",
93
- )
94
- schema = load_defaultdb_schema_text()
95
- st.markdown(schema, unsafe_allow_html=True)
96
- elif choice == "Default DB":
97
- schema = load_defaultdb_schema_text()
98
- st.markdown(schema, unsafe_allow_html=True)
99
- else:
100
- pretty_schema, markdown = st.tabs(["Schema", "Copy Schema in Markdown"])
101
- with pretty_schema:
102
- st.info(
103
- "You can copy this schema, and give it to any state of the art LLM models like (Gemini /ChatGPT /Claude etc) to cross check your answers.\n You can run the queries directly here, by using ***Manual Query Executer*** in the sidebar and download your results 😊",
104
- icon="ℹ️",
105
- )
106
- st.markdown(uploaded_df_schema, unsafe_allow_html=True)
107
- with markdown:
108
- st.info(
109
- "You can copy this schema, and give it to any state of the art LLM models like (Gemini /ChatGPT /Claude etc) to cross check your answers.\n You can run the queries directly here, by using ***Manual Query Executer*** in the sidebar and download your results 😊",
110
- icon="ℹ️",
111
- )
112
- st.markdown(f"```\n{uploaded_df_schema}\n```")
113
-
114
-
115
- col1, col2 = st.columns([2, 1], vertical_alignment="bottom")
116
- with col1:
117
- st.header("Natural Language to SQL Query Agent🤖")
118
-
119
- with col2:
120
- st.caption("> ***Execute on the Go!*** 🚀 In-Built DuckDB Execution Engine")
121
-
122
- st.caption(
123
- "This is a Qwen2.5-Coder-3B model fine-tuned for SQL queries integrated with langchain for Agentic Workflow. To see the Fine-Tuning code - [click here](https://www.kaggle.com/code/debopamchowdhury/qwen-2-5coder-3b-instruct-finetuning)."
124
- )
125
-
126
-
127
- col1, col2, col3 = st.columns([1.5, 2, 1], vertical_alignment="top")
128
- with col1:
129
- disabled_selection = True
130
- if (
131
- "uploaded_dataframes" in st.session_state
132
- ) and st.session_state.uploaded_dataframes:
133
- disabled_selection = False
134
- options = ["default_db", "uploaded_files"]
135
- selected = st.segmented_control(
136
- "Choose",
137
- options,
138
- selection_mode="single",
139
- disabled=disabled_selection,
140
- label_visibility="collapsed",
141
- default="default_db" if disabled_selection else "uploaded_files",
142
- )
143
- if not disabled_selection:
144
- if selected == "uploaded_files":
145
- selected_df = st.session_state.uploaded_dataframes
146
- # print(selected_df)
147
- use_default_schema = False
148
- else:
149
- selected_df = default_dfs
150
- # print(selected_df)
151
- use_default_schema = True
152
- if selected_df == default_dfs:
153
- with st.popover("Default Database Queries 📚 - Trial"):
154
- default_db_questions = load_defaultdb_queries()
155
- st.markdown(default_db_questions)
156
-
157
- with col2:
158
- llm_option_radio = st.radio(
159
- "Choose LLM Model",
160
- ["Gemini-2.0-Flash-Exp", "FineTuned Qwen2.5-Coder-3B for SQL"],
161
- captions=[
162
- "Used via API",
163
- "Run Locally on this Server. Extremely Slow because of Free vCPUs, [Download & Run Locally](https://huggingface.co/DebopamC/Text-to-SQL__Qwen2.5-Coder-3B-FineTuned/tree/main)",
164
- ],
165
- label_visibility="collapsed",
166
- )
167
- if llm_option_radio == "Gemini-2.0-Flash-Exp":
168
- llm_option = "gemini"
169
- else:
170
- llm_option = "qwen"
171
-
172
- with col3:
173
- # Button to refresh the conversation
174
- if st.button("Start New Conversation", type="primary"):
175
- st.session_state.chat_history = []
176
- st.session_state.conversation_turns = 0
177
- st.rerun()
178
-
179
- # Initialize chat history in session state
180
- if "chat_history" not in st.session_state:
181
- st.session_state.chat_history = []
182
-
183
- # Initialize conversation turn counter
184
- if "conversation_turns" not in st.session_state:
185
- st.session_state.conversation_turns = 0
186
-
187
- # Set the maximum number of conversation turns
188
- MAX_TURNS = 5
189
-
190
- # Display existing chat messages
191
- for message in st.session_state.chat_history:
192
- with st.chat_message(message.type):
193
- st.markdown(message.content)
194
- if (
195
- isinstance(message, AIMessage)
196
- and "response_df" in message.additional_kwargs
197
- and message.additional_kwargs["response_df"] is not None
198
- and not message.additional_kwargs["response_df"].empty
199
- ):
200
- with st.expander("View SQL-Query Execution Result"):
201
- df = message.additional_kwargs["response_df"]
202
- # download_csv = convert_df(df)
203
- # st.download_button(
204
- # label="Download data as CSV",
205
- # data=download_csv,
206
- # file_name="query_results.csv",
207
- # mime="text/csv",
208
- # )
209
- # renderer = StreamlitRenderer(
210
- # df,
211
- # spec_io_mode="rw",
212
- # default_tab="data",
213
- # appearance="dark",
214
- # kernel_computation=True,
215
- # )
216
- # renderer.explorer(default_tab="data")
217
- st.dataframe(df)
218
- st.info(f"Rows x Columns: {df.shape[0]} x {df.shape[1]}")
219
- st.subheader("Data Description:")
220
- st.markdown(df.describe().T.to_markdown())
221
- st.subheader("Data Types:")
222
- st.write(df.dtypes)
223
-
224
- # Get user input only if the conversation turn limit is not reached
225
- if st.session_state.conversation_turns < MAX_TURNS:
226
- if prompt := st.chat_input("Ask me a SQL query question"):
227
- # Add user message to chat history in session state
228
- st.session_state.chat_history.append(HumanMessage(content=prompt))
229
- # Display user message in chat
230
- with st.chat_message("user"):
231
- st.markdown(prompt)
232
-
233
- duckdb_result = None
234
- # Get assistant response with streaming
235
- with st.chat_message("assistant"):
236
- message_placeholder = st.empty()
237
- full_response = ""
238
- spinner_text = ""
239
- if llm_option == "gemini":
240
- spinner_text = (
241
- "Using Gemini-2.0-Flash-Exp to run your query. Please wait...😊"
242
- )
243
- else:
244
- spinner_text = "I know it is taking a lot of time. To run the model I'm using `Free` small vCPUs provided by `HuggingFace Spaces` for deployment. Thank you so much for your patience😊"
245
- with st.spinner(
246
- spinner_text,
247
- ):
248
- for response_so_far in generate_llm_response(
249
- prompt, llm_option, use_default_schema
250
- ):
251
- # Remove <sql> and </sql> tags for streaming display
252
- streaming_response = response_so_far.replace("<sql>", "").replace(
253
- "</sql>", ""
254
- )
255
- # Remove duplicate ```sql tags with or without space for streaming display
256
- streaming_response = re.sub(
257
- r"```sql\s*```sql", "```sql", streaming_response
258
- )
259
- message_placeholder.markdown(streaming_response + "▌")
260
- full_response = response_so_far
261
-
262
- # Remove <sql> and </sql> tags from the full response
263
- full_response = full_response.replace("<sql>", "").replace("</sql>", "")
264
- # Remove duplicate ```sql tags with or without space from the full response
265
- full_response = re.sub(r"```sql\s*```sql", "```sql", full_response)
266
- # Remove trailing duplicate ``` tags from the full response
267
- full_response = re.sub(r"[\s\n]*`+$", "```", full_response)
268
- message_placeholder.markdown(full_response)
269
- # st.text(extract_sql_command(full_response))
270
-
271
- sql_command = extract_sql_command(full_response)
272
- # dataframe_html = None
273
- if sql_command:
274
- # st.text("Extracted SQL Command:")
275
- # st.code(sql_command, language="sql")
276
- duckdb_result = execute_sql_duckdb(sql_command, selected_df)
277
- if duckdb_result is not None:
278
- st.text("Query Execution Result:")
279
- with st.expander("View Result"):
280
- # st.dataframe(duckdb_result)
281
- st.dataframe(duckdb_result)
282
- st.info(
283
- f"Rows x Columns: {duckdb_result.shape[0]} x {duckdb_result.shape[1]}"
284
- )
285
- st.subheader("Data Description:")
286
- st.markdown(duckdb_result.describe().T.to_markdown())
287
- st.subheader("Data Types:")
288
- st.write(duckdb_result.dtypes)
289
- # renderer = StreamlitRenderer(
290
- # duckdb_result,
291
- # spec_io_mode="rw",
292
- # default_tab="data",
293
- # appearance="dark",
294
- # kernel_computation=True,
295
- # )
296
- # renderer.explorer(default_tab="data")
297
-
298
- else:
299
- # st.warning("No SQL command found in the response.")
300
- pass
301
-
302
- # Add assistant response to chat history in session state
303
- st.session_state.chat_history.append(
304
- AIMessage(
305
- content=full_response,
306
- additional_kwargs={"response_df": duckdb_result},
307
- )
308
- )
309
-
310
- # Increment the conversation turn counter
311
- st.session_state.conversation_turns += 1
312
- else:
313
- st.warning(
314
- "Maximum number of questions reached. Please click 'Start New Conversation' to continue."
315
- )
316
- st.chat_input(
317
- "Ask me a SQL query question", disabled=True
318
- ) # Disable the input field
319
-
320
- with st.sidebar:
321
- st.caption("Made with ❤️ by @Debopam_Chowdhury")
 
# main.py
"""Streamlit entry point for the Text-to-SQL agent.

Takes natural-language questions, converts them to SQL with an LLM
(Gemini via API, or a locally-run fine-tuned Qwen2.5-Coder-3B), executes
the generated SQL with DuckDB, and renders the results in a chat UI.
"""
import re

import streamlit as st
from langchain_core.messages import HumanMessage, AIMessage

from utils.llm_logic import generate_llm_response
from utils.sql_utils import (
    extract_sql_command,
    load_defaultdb_schema_text,
    load_defaultdb_queries,
    load_data,
)
from utils.handle_sql_commands import execute_sql_duckdb


# Must be the first Streamlit call in the script.
st.set_page_config(
    page_title="Text-to-SQL Agent",
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="expanded",
)

# Module-level state; the UI controls below may override these defaults.
default_db_questions = {}   # sample questions for the default database (filled lazily)
default_dfs = load_data()   # default database tables (see utils.sql_utils.load_data)
selected_df = default_dfs   # tables the generated SQL is executed against
use_default_schema = True   # whether the default schema is sent to the LLM
llm_option = "gemini"       # "gemini" (API) or "qwen" (local fine-tuned model)
# CSS for st.page_link widgets: gradient background, rounded corners, and a
# slight lift on hover. Injected once at the top of the page.
_PAGE_LINK_CSS = """
    <style>
    /* Base styles for both themes */
    .stPageLink {
        background-image: linear-gradient(to right, #007BFF, #6610F2); /* Gradient background */
        color: white !important; /* Ensure text is readable on the gradient */
        padding: 12px 20px !important; /* Slightly larger padding */
        border-radius: 8px !important; /* More rounded corners */
        border: none !important; /* Remove default border */
        text-decoration: none !important;
        font-weight: 500 !important; /* Slightly lighter font weight */
        transition: transform 0.2s ease-in-out, box-shadow 0.2s ease-in-out; /* Smooth transitions */
        box-shadow: 0 2px 5px rgba(0, 0, 0, 0.15); /* Subtle shadow for depth */
        display: inline-flex;
        align-items: center;
        justify-content: center;
    }

    .stPageLink:hover {
        transform: scale(1.03); /* Slight scale up on hover */
        box-shadow: 0 4px 8px rgba(0, 0, 0, 0.2); /* Increased shadow on hover */
    }

    .stPageLink span { /* Style the label text */
        margin-left: 5px; /* Space between icon and text */
    }

    /* Dark theme adjustments (optional, if needed for better contrast) */
    /* Consider using Streamlit's theme variables if possible for a more robust solution */
    /* For simplicity, this example uses fixed colors that should work reasonably well */
    /* [data-theme="dark"] .stPageLink {
    }

    [data-theme="dark"] .stPageLink:hover {
    } */
    </style>
    """

st.markdown(_PAGE_LINK_CSS, unsafe_allow_html=True)
# Schema browser popover: shows either the default database schema or the
# schema of files the user uploaded on the "File Upload for SQL" page.
with st.popover("Click here to see Database Schema", use_container_width=True):
    # The upload page stores the schema text here; False means "nothing uploaded".
    uploaded_df_schema = st.session_state.get("uploaded_df_schema", False)
    # Identity test against the False sentinel (consistent everywhere; the
    # original mixed `== False` and `is False`).
    no_upload = uploaded_df_schema is False

    choice = st.segmented_control(
        "Choose",
        ["Default DB", "Uploaded Files"],
        label_visibility="collapsed",
        disabled=no_upload,
        default="Default DB" if no_upload else "Uploaded Files",
    )

    # Shown on both tabs of the uploaded-schema view (was duplicated verbatim).
    cross_check_info = "You can copy this schema, and give it to any state of the art LLM models like (Gemini /ChatGPT /Claude etc) to cross check your answers.\n You can run the queries directly here, by using ***Manual Query Executer*** in the sidebar and download your results 😊"

    if no_upload:
        # Nothing uploaded: advertise the upload page, then show the default schema.
        st.markdown(
            """> You can also upload your own files, to get your schemas. You can then use those schemas to cross-check our answers with ChatGpt/Gemini/Claude (Preferred if the Question is very Complex). You can run the queries directly with our Manual SQL Executer😊.
- Ask Questions
- Run Queries: automatic + manual
- Download Results """
        )
        st.page_link(
            page="pages/3 📂File Upload for SQL.py",
            label="Upload your own CSV or Excel files",
            icon="📜",
        )
        schema = load_defaultdb_schema_text()
        st.markdown(schema, unsafe_allow_html=True)
    elif choice == "Default DB":
        schema = load_defaultdb_schema_text()
        st.markdown(schema, unsafe_allow_html=True)
    else:
        # Uploaded-files schema: rendered view plus a copyable markdown view.
        pretty_schema, markdown = st.tabs(["Schema", "Copy Schema in Markdown"])
        with pretty_schema:
            st.info(cross_check_info, icon="ℹ️")
            st.markdown(uploaded_df_schema, unsafe_allow_html=True)
        with markdown:
            st.info(cross_check_info, icon="ℹ️")
            st.markdown(f"```\n{uploaded_df_schema}\n```")
# Page header: title on the left, execution-engine tagline on the right.
title_col, tagline_col = st.columns([2, 1], vertical_alignment="bottom")
with title_col:
    st.header("Natural Language to SQL Query Agent🤖")
with tagline_col:
    st.caption("> ***Execute on the Go!*** 🚀 In-Built DuckDB Execution Engine")

# One-line description with a link to the fine-tuning notebook.
st.caption(
    "This is a Qwen2.5-Coder-3B model fine-tuned for SQL queries integrated with langchain for Agentic Workflow. To see the Fine-Tuning code - [click here](https://www.kaggle.com/code/debopamchowdhury/qwen-2-5coder-3b-instruct-finetuning)."
)
# Control row: data-source picker, LLM picker, and conversation reset.
col1, col2, col3 = st.columns([1.5, 2, 1], vertical_alignment="top")

with col1:
    # The source picker is enabled only when the user actually uploaded files.
    has_uploads = bool(
        ("uploaded_dataframes" in st.session_state)
        and st.session_state.uploaded_dataframes
    )
    disabled_selection = not has_uploads
    options = ["default_db", "uploaded_files"]
    selected = st.segmented_control(
        "Choose",
        options,
        selection_mode="single",
        disabled=disabled_selection,
        label_visibility="collapsed",
        default="default_db" if disabled_selection else "uploaded_files",
    )
    if not disabled_selection:
        if selected == "uploaded_files":
            selected_df = st.session_state.uploaded_dataframes
            use_default_schema = False
        else:
            selected_df = default_dfs
            use_default_schema = True
    # BUG FIX: identity, not equality. `==` on containers of DataFrames invokes
    # elementwise DataFrame comparison and can raise "The truth value of a
    # DataFrame is ambiguous"; the intent is "still using the default tables".
    if selected_df is default_dfs:
        with st.popover("Default Database Queries 📚 - Trial"):
            default_db_questions = load_defaultdb_queries()
            st.markdown(default_db_questions)

with col2:
    llm_option_radio = st.radio(
        "Choose LLM Model",
        ["Gemini-2.0-Flash-Exp", "FineTuned Qwen2.5-Coder-3B for SQL"],
        captions=[
            "Used via API",
            "Run Locally on this Server. Extremely Slow because of Free vCPUs, [Download & Run on your Computer](https://huggingface.co/DebopamC/Text-to-SQL__Qwen2.5-Coder-3B-FineTuned/tree/main)",
        ],
        label_visibility="collapsed",
    )
    llm_option = "gemini" if llm_option_radio == "Gemini-2.0-Flash-Exp" else "qwen"

with col3:
    # Wipe the conversation state and rerun the script from the top.
    if st.button("Start New Conversation", type="primary"):
        st.session_state.chat_history = []
        st.session_state.conversation_turns = 0
        st.rerun()
# --- Conversation state ---------------------------------------------------
if "chat_history" not in st.session_state:
    st.session_state.chat_history = []
if "conversation_turns" not in st.session_state:
    st.session_state.conversation_turns = 0

# Hard cap on the number of questions per conversation.
MAX_TURNS = 5

# Replay the conversation so far, including any stored query results.
for msg in st.session_state.chat_history:
    with st.chat_message(msg.type):
        st.markdown(msg.content)
        # Assistant turns may carry the DataFrame produced by their SQL.
        result_df = (
            msg.additional_kwargs.get("response_df")
            if isinstance(msg, AIMessage)
            else None
        )
        if result_df is not None and not result_df.empty:
            with st.expander("View SQL-Query Execution Result"):
                st.dataframe(result_df)
                st.info(f"Rows x Columns: {result_df.shape[0]} x {result_df.shape[1]}")
                st.subheader("Data Description:")
                st.markdown(result_df.describe().T.to_markdown())
                st.subheader("Data Types:")
                st.write(result_df.dtypes)
# --- Chat input & response generation -------------------------------------
# Compiled once; the originals were re-run through re.sub on every streamed
# chunk.
_DUP_SQL_FENCE = re.compile(r"```sql\s*```sql")  # doubled opening ```sql fences
_TRAILING_BACKTICKS = re.compile(r"[\s\n]*`+$")  # ragged trailing backtick run


def _strip_sql_markup(text):
    """Remove <sql>/</sql> tags and collapse doubled ```sql fences.

    Shared by the streaming preview and the final answer (was duplicated).
    """
    text = text.replace("<sql>", "").replace("</sql>", "")
    return _DUP_SQL_FENCE.sub("```sql", text)


# Accept new questions only while under the per-conversation turn limit.
if st.session_state.conversation_turns < MAX_TURNS:
    if prompt := st.chat_input("Ask me a SQL query question"):
        # Record and echo the user's message.
        st.session_state.chat_history.append(HumanMessage(content=prompt))
        with st.chat_message("user"):
            st.markdown(prompt)

        duckdb_result = None
        with st.chat_message("assistant"):
            message_placeholder = st.empty()
            full_response = ""
            if llm_option == "gemini":
                spinner_text = (
                    "Using Gemini-2.0-Flash-Exp to run your query. Please wait...😊"
                )
            else:
                spinner_text = "I know it is taking a lot of time. To run the model I'm using `Free` small vCPUs provided by `HuggingFace Spaces` for deployment. Thank you so much for your patience😊"
            with st.spinner(spinner_text):
                # Stream the answer, cleaning SQL markup for display as we go.
                for response_so_far in generate_llm_response(
                    prompt, llm_option, use_default_schema
                ):
                    message_placeholder.markdown(
                        _strip_sql_markup(response_so_far) + "▌"
                    )
                    full_response = response_so_far

            # Final cleanup: same markup strip, plus normalising any ragged
            # trailing backtick run into a single closing ``` fence.
            full_response = _strip_sql_markup(full_response)
            full_response = _TRAILING_BACKTICKS.sub("```", full_response)
            message_placeholder.markdown(full_response)

            # If the answer contains SQL, run it against the selected tables
            # and show the result inline.
            sql_command = extract_sql_command(full_response)
            if sql_command:
                duckdb_result = execute_sql_duckdb(sql_command, selected_df)
                if duckdb_result is not None:
                    st.text("Query Execution Result:")
                    with st.expander("View Result"):
                        st.dataframe(duckdb_result)
                        st.info(
                            f"Rows x Columns: {duckdb_result.shape[0]} x {duckdb_result.shape[1]}"
                        )
                        st.subheader("Data Description:")
                        st.markdown(duckdb_result.describe().T.to_markdown())
                        st.subheader("Data Types:")
                        st.write(duckdb_result.dtypes)
            # else: no SQL found — the plain-text answer stands on its own.

        # Persist the assistant turn (with its result, if any) and count it.
        st.session_state.chat_history.append(
            AIMessage(
                content=full_response,
                additional_kwargs={"response_df": duckdb_result},
            )
        )
        st.session_state.conversation_turns += 1
else:
    st.warning(
        "Maximum number of questions reached. Please click 'Start New Conversation' to continue."
    )
    st.chat_input("Ask me a SQL query question", disabled=True)  # keep input visible but disabled
# Sidebar footer credit.
st.sidebar.caption("Made with ❤️ by @Debopam_Chowdhury")