import os
import openai
import gradio as gr
from langchain.chat_models import ChatOpenAI
from langchain.prompts import ChatPromptTemplate
from langchain.output_parsers import ResponseSchema
from langchain.output_parsers import StructuredOutputParser
# Supply your OpenAI API key here (e.g. via a Space secret or environment variable)
os.environ['OPENAI_API_KEY'] = ''
openai.api_key = os.environ['OPENAI_API_KEY']
# Use temperature=0.0 to minimise the randomness and creativity
# of the text generated by the LLM
chat = ChatOpenAI(temperature=0.0)
def get_format_instructions():
    gift_schema = ResponseSchema(
        name="gift",
        description="Was the item purchased as a gift for someone else? "
                    "Answer True if yes, False if not or unknown.")
    delivery_days_schema = ResponseSchema(
        name="delivery_days",
        description="How many days did it take for the product to arrive? "
                    "If this information is not found, output -1.")
    price_value_schema = ResponseSchema(
        name="price_value",
        description="Extract any sentences about the value or price, "
                    "and output them as a comma separated Python list.")
    response_schemas = [gift_schema,
                        delivery_days_schema,
                        price_value_schema]
    output_parser = StructuredOutputParser.from_response_schemas(response_schemas)
    format_instructions = output_parser.get_format_instructions()
    # Return the parser as well, so the caller can parse the model's reply
    return output_parser, format_instructions
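# Note: the format instructions returned above are (roughly) a request for a
# ```json``` code snippet containing the "gift", "delivery_days" and
# "price_value" keys described by the schemas; that is what lets the parser
# turn the model's reply into a Python dictionary further down.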
# To retrieve the ChatGPT response in the required style
def response(template_string, user_input, translate_style_parsing_inst):
    prompt_template = ChatPromptTemplate.from_template(template_string)
    customer_messages = prompt_template.format_messages(
        style=translate_style_parsing_inst,
        text=user_input)
    # Call the LLM to translate the style of the customer message
    customer_response = chat(customer_messages)
    return customer_response.content, prompt_template.messages[0].prompt.input_variables
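# Example inputs for the "Translate" tab (illustrative only -- any template
# whose placeholders match the {style} and {text} arguments above will work):
#   template_string:
#       Translate the text that is delimited by triple backticks
#       into a style that is {style}. text: ```{text}```
#   translate_style_parsing_inst:
#       American English in a calm and respectful tone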
# To parse the ChatGPT response into a Python dictionary
def parser_response(template_string_parse, user_input_parse):
    output_parser, format_instructions = get_format_instructions()
    prompt = ChatPromptTemplate.from_template(template=template_string_parse)
    messages = prompt.format_messages(text=user_input_parse,
                                      format_instructions=format_instructions)
    response = chat(messages)
    output_dict = output_parser.parse(response.content)
    return output_dict, type(output_dict)
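# Example prompt for the "Parse" tab (illustrative only -- the template must
# expose the {text} and {format_instructions} placeholders used above):
#   For the following text, extract the gift, delivery_days and
#   price_value information. text: {text}
#
#   {format_instructions}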
demo = gr.Blocks()
title = """<h1 align="center">Gradio x Langchain - Models, Prompts, and Parsers</h1>"""
with demo:
    gr.HTML(title)
    with gr.Tab("Translate"):
        with gr.Row():
            user_input = gr.Textbox(label="Enter user input for translation or parsing", lines=5, max_lines=5)
            template_string = gr.Textbox(label="Enter your prompt here", lines=5, max_lines=5)
            translate_style_parsing_inst = gr.Textbox(label="Enter the translation style of choice", lines=5, max_lines=5)
        btn_response = gr.Button("ChatGPT Response").style(full_width=True)
        with gr.Row():
            chat_response = gr.Textbox(label="Response from ChatGPT", lines=5, max_lines=5)
            with gr.Column():
                template_variables = gr.Textbox(label="Input variables for your prompt")
    with gr.Tab("Parse"):
        with gr.Row():
            user_input_parse = gr.Textbox(label="Enter user input for translation or parsing", lines=5, max_lines=5)
            template_string_parse = gr.Textbox(label="Enter your prompt here", lines=5, max_lines=5)
        btn_response_parse = gr.Button("Parsed ChatGPT Response").style(full_width=True)
        with gr.Row():
            with gr.Column(scale=5):
                chat_response_parse = gr.Textbox(label="Get your ChatGPT response parsed as a dictionary (json)", lines=5, max_lines=5)
            with gr.Column(scale=5):
                type_parse_output = gr.Textbox(label="Datatype of this parsed output")

    btn_response.click(response, [template_string, user_input, translate_style_parsing_inst], [chat_response, template_variables])
    btn_response_parse.click(parser_response, [template_string_parse, user_input_parse], [chat_response_parse, type_parse_output])
demo.launch() #(debug=True) | |