Kashif17 commited on
Commit
014a744
·
verified ·
1 Parent(s): a5588e5

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +22 -36
app.py CHANGED
@@ -1,41 +1,27 @@
1
- import os
2
- import streamlit as st
3
- import requests
4
 
5
- # Retrieve Hugging Face API token from environment variable
6
- API_TOKEN = os.environ.get("HUGGING_FACE_API_TOKEN")
 
7
 
8
- # Define the Hugging Face API URL
9
- API_URL = "https://api-inference.huggingface.co/models/meta-llama/Meta-Llama-3-8B"
10
- headers = {"Authorization": f"Bearer {API_TOKEN}"}
 
 
 
 
11
 
12
- # Function to query the Hugging Face API
13
- def query(payload):
14
- response = requests.post(API_URL, headers=headers, json=payload)
15
- return response.json()
 
 
 
16
 
17
- # Streamlit app
18
- def main():
19
- st.title("SQL Query Generator")
20
 
21
- # User prompt input
22
- prompt = st.text_input("Enter your prompt:")
23
-
24
- # Button to generate SQL query
25
- if st.button("Generate SQL Query"):
26
- # Generate payload for Hugging Face API
27
- payload = {"inputs": prompt}
28
-
29
- # Query the Hugging Face API
30
- with st.spinner('Generating SQL query...'):
31
- output = query(payload)
32
-
33
- # Display the SQL query response
34
- if "generated_text" in output:
35
- st.write("Generated SQL Query:")
36
- st.code(output["generated_text"])
37
- else:
38
- st.error("Failed to generate SQL query. Please try again.")
39
-
40
- if __name__ == "__main__":
41
- main()
 
1
import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM

# Hub repository of the language model used for SQL generation.
MODEL_ID = "meta-llama/Meta-Llama-3-8B"

# Load tokenizer and causal-LM weights once, at import time.
# NOTE(review): this repo is gated on the Hugging Face Hub and the model is
# ~8B parameters — loading assumes an authenticated environment with enough
# memory; confirm before deploying.
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(MODEL_ID)
 
8
# Function to generate SQL queries with the module-level model/tokenizer.
def generate_sql_query(prompt):
    """Return model-generated text for *prompt*, framed as a SQL task.

    The prompt is prefixed with "generate SQL query: " before encoding.
    Note: ``max_length=100`` counts prompt tokens as well, so very long
    prompts leave little room for the generated answer.
    """
    input_text = "generate SQL query: " + prompt
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    # Fix: temperature has no effect unless sampling is enabled —
    # transformers ignores `temperature` (and warns) when do_sample=False,
    # so the original call silently did greedy decoding.
    output = model.generate(
        input_ids,
        max_length=100,
        num_return_sequences=1,
        do_sample=True,
        temperature=0.9,
    )
    generated_query = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_query
15
 
16
# Gradio callback: validate the prompt, then delegate to the generator.
def generate_sql_query_interface(prompt):
    """Return a generated SQL query, or a usage hint for an empty prompt."""
    if not prompt:
        return "Please enter a prompt."
    return generate_sql_query(prompt)
23
 
24
# Wire the callback into a simple Gradio UI and start the server.
# Fix: the `gr.inputs` / `gr.outputs` namespaces were deprecated in Gradio 3
# and removed in Gradio 4 — instantiate the component classes directly.
inputs = gr.Textbox(lines=5, label="Enter your prompt:")
output = gr.Textbox(label="Generated SQL Query:")

gr.Interface(
    fn=generate_sql_query_interface,
    inputs=inputs,
    outputs=output,
    title="SQL Query Generator Chatbot",
).launch()