import gradio as gr
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
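# NOTE: the checkpoint below is an assumed example of a publicly available
# text-to-SQL model; any seq2seq checkpoint fine-tuned for SQL generation can be
# substituted. The originally referenced unum-cloud/uform-gen2-dpo is an
# image+text (vision-language) chat model, so it is not a natural fit for a
# text-only SQL generator.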
model_name = "mrm8488/t5-base-finetuned-wikiSQL"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
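
# generate_sql_query wraps the three generation steps: tokenize the prompt,
# run model.generate, and decode the output tokens back to a string.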
def generate_sql_query(prompt):
    # The prompt prefix should match the instruction format the chosen
    # checkpoint was fine-tuned with.
    input_text = "translate English to SQL: " + prompt
    input_ids = tokenizer.encode(input_text, return_tensors="pt")
    # Greedy decoding keeps the query deterministic; temperature only takes
    # effect when do_sample=True, so it is omitted here.
    output = model.generate(input_ids, max_length=100, num_return_sequences=1)
    generated_query = tokenizer.decode(output[0], skip_special_tokens=True)
    return generated_query

def generate_sql_query_interface(prompt):
    if prompt:
        generated_query = generate_sql_query(prompt)
        return generated_query
    else:
        return "Please enter a prompt."

# Gradio now exposes components at the top level; the old gr.inputs / gr.outputs
# namespaces were removed, so gr.Textbox is used for both input and output.
inputs = gr.Textbox(lines=5, label="Enter your prompt:")
output = gr.Textbox(label="Generated SQL Query:")

gr.Interface(fn=generate_sql_query_interface, inputs=inputs, outputs=output,
             title="SQL Query Generator Chatbot").launch()
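# Launching serves a local Gradio app (by default at http://127.0.0.1:7860);
# enter a natural-language request and the generated SQL query appears in the
# output box.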