Upload 2 files
- app.py +56 -54
- requirements.txt +2 -1
app.py
CHANGED
@@ -1,21 +1,24 @@
import gradio as gr
-from
+from langchain_openai import ChatOpenAI
from langchain.prompts import PromptTemplate
-from langchain.agents import
-from langchain_community.
-from langchain_community.callbacks import get_openai_callback # Import the callback
+from langchain.agents import AgentExecutor, create_structured_chat_agent
+from langchain_community.callbacks import get_openai_callback
import os
import time
-import
-
+from langchain_community.tools.tavily_search import TavilySearchResults
+from langchain import hub

# Load environment variables
API_KEY = os.getenv("OPENAI_API_KEY")
+TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
+PASSWORD = os.getenv("APP_PASSWORD")
+
if not API_KEY:
    raise ValueError("Please set the OPENAI_API_KEY environment variable.")

-
-
+if not TAVILY_API_KEY:
+    raise ValueError("Please set the TAVILY_API_KEY environment variable.")
+
if not PASSWORD:
    raise ValueError("Please set the APP_PASSWORD environment variable.")

@@ -23,7 +26,7 @@ if not PASSWORD:
prompt_template = PromptTemplate(
    input_variables=["marka_model", "automobil", "question"],
    template="""
-Respond in
+Respond in systematically cleaned-up Markdown format and only in Russian: You are a virtual technical expert with years of experience in vehicle repair and maintenance. Your responses are intended for professional mechanics and contain only reliable and safe instructions that adhere to best practices and manufacturer recommendations.

We are currently discussing repair work for the {marka_model} model vehicle {automobil}. Please provide your expert opinion on the following question: {question}

@@ -49,66 +52,65 @@ Balance confidence with caution to ensure the safety and reliability of the comp
"""
)

-
# Initialize components with updated classes
llm = ChatOpenAI(
    openai_api_key=API_KEY,
-    model_name="gpt-
-    temperature=0
+    model_name="gpt-4",
+    temperature=0,
)

-
-
-    name="DuckDuckGo Search",
-    func=duckduckgo_search.run,
-    description="Useful for fact-checking information using DuckDuckGo search. Input should be a search query."
-)
+tools = [TavilySearchResults(max_results=5)]
+prompt = prompt_template

-# Initialize the
-
-
-
-    agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
-    verbose=True,
-    max_iterations=10,
-)
+# Initialize the structured chat agent
+prompt = hub.pull("hwchase17/structured-chat-agent")
+agent = create_structured_chat_agent(llm, tools, prompt)
+agent_executor = AgentExecutor(agent=agent, tools=tools)

def generate_response(marka_model, automobil, question, password):
+    """
+    Generates a response based on the user's automotive repair question.
+
+    Args:
+        marka_model (str): Car make and model.
+        automobil (str): Vehicle details.
+        question (str): Automotive repair question.
+        password (str): Password for accessing the app.
+
+    Returns:
+        tuple: Response in Markdown format and performance metrics in JSON.
+    """
    if password != PASSWORD:
        return {"error": "Incorrect password. Access denied."}

-
-
-
-
+    try:
+        with get_openai_callback() as cb:
+            start_time = time.perf_counter()
+            user_input = {
                "marka_model": marka_model,
                "automobil": automobil,
                "question": question
            }
+            formatted_prompt = prompt_template.format(**user_input)
+            response = agent_executor.invoke({"input": formatted_prompt})
+            response = llm.invoke(f'Clean up the response and translate it to Russian if needed: {response}').content
+            end_time = time.perf_counter()
+
+            total_time = end_time - start_time
+
+            # Updated cost rates
+            costs = {'input': 0.150 / 1e6, 'output': 0.600 / 1e6}
+
+            total_cost = cb.prompt_tokens * costs['input'] + cb.completion_tokens * costs['output']
+
+            json_data = {
+                "total_cost": round(total_cost, 6),
+                "total_time_seconds": round(total_time, 4)
            }
-            response = agent.run(user_input)
-            # response = llm.invoke(f'Clean up the response and translate it to Russian if needed: {response}').content
-            end_time = time.perf_counter()
-
-            total_time = end_time - start_time
-
-            # Updated cost rates
-            costs = {'input': 0.150 / 1e6, 'output': 0.600 / 1e6}
-
-            total_cost = cb.prompt_tokens * costs['input'] + cb.completion_tokens * costs['output']
-
-            json_data = {
-                "response": response,
-                # "total_tokens": cb.total_tokens,
-                # "prompt_tokens": cb.prompt_tokens,
-                # "completion_tokens": cb.completion_tokens,
-                "total_cost": round(total_cost, 6),
-                "total_time_seconds": round(total_time, 4)
-            }
-
-            # Format response as Markdown

-
+        return response, json_data
+    except Exception as e:
+        return {"error": str(e)}, {}

# Define example inputs
examples = [
@@ -145,10 +147,10 @@ iface = gr.Interface(
        gr.Textbox(label="Car Make and Model", placeholder="Enter the car make and model"),
        gr.Textbox(label="Vehicle", placeholder="Enter the vehicle"),
        gr.Textbox(label="Your Question", placeholder="Enter your automotive repair question"),
-        gr.
+        gr.Password(label="Password", placeholder="Enter the password to access the app"),
    ],
    outputs=[
-
+        gr.Markdown(label="Answer"),
        gr.JSON(label="Performance Metrics")
    ],
    title="Automotive Technical Assistant",
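For context on the core app.py change (a DuckDuckGo zero-shot agent replaced by a Tavily-backed structured chat agent, with token/cost accounting via get_openai_callback), the sketch below shows that wiring in isolation. It is a minimal sketch, not the app's code: it assumes the same packages the new imports require (plus whatever client hub.pull needs to reach the prompt hub), the example query and the handle_parsing_errors flag are illustrative, and the cost rates are simply the ones the commit hard-codes.

from langchain import hub
from langchain.agents import AgentExecutor, create_structured_chat_agent
from langchain_community.callbacks import get_openai_callback
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_openai import ChatOpenAI

# Assumes OPENAI_API_KEY and TAVILY_API_KEY are set in the environment.
llm = ChatOpenAI(model_name="gpt-4", temperature=0)
tools = [TavilySearchResults(max_results=5)]

# Reusable structured-chat prompt published on the LangChain hub (as in the commit).
prompt = hub.pull("hwchase17/structured-chat-agent")
agent = create_structured_chat_agent(llm, tools, prompt)
agent_executor = AgentExecutor(agent=agent, tools=tools, handle_parsing_errors=True)

with get_openai_callback() as cb:
    # invoke() returns a dict; the agent's final answer is under the "output" key.
    result = agent_executor.invoke(
        {"input": "Torque spec for the lug nuts on a 2012 Toyota Camry?"}  # illustrative query
    )

# Same per-token rates as in the commit (USD per token).
costs = {"input": 0.150 / 1e6, "output": 0.600 / 1e6}
total_cost = cb.prompt_tokens * costs["input"] + cb.completion_tokens * costs["output"]

print(result["output"])
print(round(total_cost, 6))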
requirements.txt
CHANGED
@@ -3,4 +3,5 @@ requests
langchain
langchain-community
openai
-
+tavily-python
+
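On the Gradio side, the commit adds a password input and switches the outputs to a Markdown answer plus a JSON metrics panel. The sketch below shows that interface shape with a stub in place of the agent call; everything except the labels and the (Markdown, JSON) output pair taken from the diff is an assumption, and gr.Textbox(type="password") stands in for the gr.Password call used in the commit.

import gradio as gr

def generate_response(marka_model, automobil, question, password):
    # Stub standing in for the agent call; returns (markdown_answer, metrics) like the commit's function.
    if password != "demo":  # placeholder check, not the real APP_PASSWORD logic
        return "**Access denied.**", {"error": "Incorrect password."}
    answer_md = f"### {marka_model} / {automobil}\n\n{question}"
    metrics = {"total_cost": 0.0, "total_time_seconds": 0.0}
    return answer_md, metrics

iface = gr.Interface(
    fn=generate_response,
    inputs=[
        gr.Textbox(label="Car Make and Model"),
        gr.Textbox(label="Vehicle"),
        gr.Textbox(label="Your Question"),
        gr.Textbox(label="Password", type="password"),  # stand-in for gr.Password in the commit
    ],
    outputs=[
        gr.Markdown(label="Answer"),
        gr.JSON(label="Performance Metrics"),
    ],
    title="Automotive Technical Assistant",
)

if __name__ == "__main__":
    iface.launch()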