aaronmat1905 committed
Commit 6581e69 · verified · 1 Parent(s): 60291bf
Files changed (1)
  1. app.py +41 -94
app.py CHANGED
@@ -1,106 +1,53 @@
- import gradio as gr
- import pandas as pd
- import google.generativeai as genai
- import kagglehub
  import os

- # Download the Kaggle dataset
- path = kagglehub.dataset_download("fahmidachowdhury/food-adulteration-dataset")
-
- # List the files in the dataset folder and assign the first one (assuming it's the desired file)
- dataset_file = os.listdir(path)[0]
- path = os.path.join(path, dataset_file)
-
- # Configure Google Gemini API
- # gemapi = os.getenv("GeminiApi")
- gemapi = "AIzaSyAmDOBWfGuEju0oZyUIcn_H0k8XW0cTP7k"
- genai.configure(api_key=gemapi)
-
- # Load the dataset
- data = pd.read_csv(path)
-
- # Define the system instructions for the model
- system_instruction = f"""
- You are a public assistant who specializes in food safety. You look at data and explain to the user any question they ask; here is your data: {str(data.to_json())}
- You are also a food expert in the Indian context. You act as a representative of the government or public agencies, always keeping the needs of the people at the forefront.
- You will try to help the customer launch a feedback review whenever they complain. You are to prepare a "markdown" report, which is detailed and can be sent to the company or restaurant.
- In case of a complaint or a grievance, you will act like a detective gathering necessary information from the user until you are satisfied. Once you gather all the info, you are supposed to generate a markdown report.
- Once the customer asks you to show them the markdown report, you will use the information given to you to generate it.
- You will ask the customer a single question at a time, which is relevant, and you will not repeat another question until you've generated the report.
- """
-
- # Initialize the model
- model_path = "gemini-1.5-flash"
- FoodSafetyAssistant = genai.GenerativeModel(model_path, system_instruction=system_instruction)
-
- # Track chat history globally
- chat_history = []
-
- # Define the function to handle the chat
- def respond(usertxt, chat_history):
-     # Initialize chat with the previous history
-     chat = FoodSafetyAssistant.start_chat(history=chat_history)
-
-     # Get response from the assistant
-     response = chat.send_message(usertxt)
-
-     # Append both user input and response to the chat history for context in the next interaction
-     chat_history.append({"role": "user", "content": usertxt})
-     chat_history.append({"role": "assistant", "content": response.text})
-
-     return response.text, chat_history
-
- # Gradio interface
  def gradio_chat(usertxt, chat_history):
-     response, updated_history = respond(usertxt, chat_history)
-     return response, updated_history
-
  html_content = """
- <div style="background-color:#f9f9f9; padding:20px; border-radius:10px;">
-   <!-- Project Title and Problem Statement Section -->
-   <h1 style="color:#34495e;">Food Safety Assistant</h1>
-   <h3 style="color:#2c3e50;">Your AI-Powered Assistant for Food Safety</h3>
-
-   <!-- Short Intro About AI-Chat -->
-   <p style="color:#7f8c8d;">
-     Our platform allows consumers to report potential food safety violations, validate reports through AI, and notify local authorities. This proactive approach fosters community involvement in ensuring food integrity.
-   </p>
-
-   <!-- Core Functionalities Title -->
-   <h4 style="color:#e74c3c; text-align:center;">Core Functionalities</h4>
-
-   <!-- Functionality Boxes in a Flex Layout -->
-   <div style="display:flex; justify-content: space-around; align-items:center; margin-top:20px;">
-     <!-- Functionality 1 -->
-     <div style="border: 2px solid #3498db; border-radius: 15px; padding: 20px; width: 150px; text-align: center;">
-       <h4 style="color:#2980b9;">Report Issues</h4>
-       <p style="color:#7f8c8d; font-size: 12px;">Submit details like the restaurant name and the issue, anonymously.</p>
-     </div>
-
-     <!-- Functionality 2 -->
-     <div style="border: 2px solid #3498db; border-radius: 15px; padding: 20px; width: 150px; text-align: center;">
-       <h4 style="color:#2980b9;">AI Validation</h4>
-       <p style="color:#7f8c8d; font-size: 12px;">Validate reports using AI, ensuring accuracy and preventing duplicates.</p>
-     </div>
-
-     <!-- Functionality 3 -->
-     <div style="border: 2px solid #3498db; border-radius: 15px; padding: 20px; width: 150px; text-align: center;">
-       <h4 style="color:#2980b9;">Alerts</h4>
-       <p style="color:#7f8c8d; font-size: 12px;">Notify authorities of repeated issues via email or SMS.</p>
-     </div>
-
-     <!-- Functionality 4 -->
-     <div style="border: 2px solid #3498db; border-radius: 15px; padding: 20px; width: 150px; text-align: center;">
-       <h4 style="color:#2980b9;">Data Chat</h4>
-       <p style="color:#7f8c8d; font-size: 12px;">Enable real-time discussion between consumers and authorities.</p>
-     </div>
-   </div>
- </div>
  """

- # Create a Gradio interface
  with gr.Blocks() as demo:
      gr.HTML(html_content)
-     chatbot = gr.ChatInterface(fn=gradio_chat)

- # Launch the interface
  demo.launch()
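Both the old respond() above and the rewritten gradio_chat() below hand Gradio-style {"role", "content"} dicts straight to start_chat, while google.generativeai chat sessions expect history entries keyed by "role" ("user"/"model") and "parts", so the second turn would fail without a conversion. A minimal sketch of that conversion, assuming the same gemini-1.5-flash model; the helper name to_gemini_history is illustrative, not part of app.py:

import google.generativeai as genai

def to_gemini_history(messages):
    # Map Gradio-style {"role", "content"} dicts to Gemini's {"role", "parts"} entries.
    return [
        {"role": "user" if m["role"] == "user" else "model", "parts": [m["content"]]}
        for m in messages
    ]

# Resume a chat from an existing Gradio-style transcript.
model = genai.GenerativeModel("gemini-1.5-flash")
chat = model.start_chat(history=to_gemini_history([
    {"role": "user", "content": "Is metanil yellow a common adulterant?"},
    {"role": "assistant", "content": "It is sometimes used to colour turmeric and pulses."},
]))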
  import os
+ import kaggle
+ import google.generativeai as gemini
+ import gradio as gr

+ # Ensure that the Kaggle API is authenticated correctly
+ try:
+     kaggle.api.authenticate()
+ except Exception as e:
+     print(f"Kaggle API authentication failed: {str(e)}")

+ # Ensure that the Google Gemini API key is set properly
+ gemini_api_key = os.getenv("GEMINI_API_KEY")
+ if not gemini_api_key:
+     print("Error: GEMINI_API_KEY environment variable is not set.")
+ else:
+     gemini.configure(api_key=gemini_api_key)

+ # Function to handle the chat interaction
  def gradio_chat(usertxt, chat_history):
+     try:
+         # Convert the stored Gradio-style history into the role/parts format Gemini expects
+         gemini_history = [
+             {"role": "user" if m["role"] == "user" else "model", "parts": [m["content"]]}
+             for m in chat_history
+         ]
+         # Initialize a chat session with the previous history (same model as the old version)
+         chat = gemini.GenerativeModel("gemini-1.5-flash").start_chat(history=gemini_history)
+         # Send the user message to the Gemini model
+         response = chat.send_message(usertxt)
+         # Append the user message and the assistant's reply to the chat history
+         chat_history.append({"role": "user", "content": usertxt})
+         chat_history.append({"role": "assistant", "content": response.text})
+         return chat_history, chat_history
+     except Exception as e:
+         error_message = f"Error occurred: {str(e)}"
+         chat_history.append({"role": "assistant", "content": error_message})
+         return chat_history, chat_history
+
+ # HTML content for the Gradio interface (customize as needed)
  html_content = """
+ <h1>Food Safety Inspection Hub Prototype</h1>
+ <p>Chat with our AI-powered assistant to report food safety concerns and interact with authorities.</p>
  """

+ # Define the Gradio interface
  with gr.Blocks() as demo:
      gr.HTML(html_content)
+     chatbot = gr.Chatbot(type="messages")  # messages format accepts the role/content dicts built in gradio_chat
+     user_input = gr.Textbox(placeholder="Enter your message here...")
+     chat_history = gr.State([])
+     submit_btn = gr.Button("Submit")
+
+     # When the submit button is clicked, trigger the chat function
+     submit_btn.click(gradio_chat, inputs=[user_input, chat_history], outputs=[chatbot, chat_history])

+ # Launch the Gradio interface
  demo.launch()
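The rewrite also drops the system instruction and the Kaggle adulteration-dataset grounding that the previous version attached to the model. If that behaviour is still wanted, a minimal sketch of reattaching it, following the old version's kagglehub/pandas approach (untested here; the resulting model would replace the bare GenerativeModel call inside gradio_chat):

import os
import kagglehub
import pandas as pd
import google.generativeai as genai

# Download the dataset and load the first file, as the old version of app.py did.
path = kagglehub.dataset_download("fahmidachowdhury/food-adulteration-dataset")
data = pd.read_csv(os.path.join(path, os.listdir(path)[0]))

# Re-create the data-grounded assistant with a system instruction.
system_instruction = (
    "You are a public assistant who specializes in food safety. "
    "Answer questions using this data: " + data.to_json()
)
FoodSafetyAssistant = genai.GenerativeModel(
    "gemini-1.5-flash",
    system_instruction=system_instruction,
)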