Reshmarb committed on
Commit fbd8044 · Parent(s): 0f110d5

project added

Files changed (1): app.py (+20 -400)
app.py CHANGED
@@ -1,268 +1,4 @@
- # from groq import Groq
- # import gradio as gr
- # from gtts import gTTS
- # import uuid
- # import base64
- # from io import BytesIO
- # import os
- # import logging
- # import spacy
- # from transformers import pipeline
-
- # # Set up logger
- # logger = logging.getLogger(__name__)
- # logger.setLevel(logging.DEBUG)
- # console_handler = logging.StreamHandler()
- # file_handler = logging.FileHandler('chatbot_log.log')
- # formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
- # console_handler.setFormatter(formatter)
- # file_handler.setFormatter(formatter)
- # logger.addHandler(console_handler)
- # logger.addHandler(file_handler)
-
- # # Initialize Groq Client
- # #client = Groq(api_key=os.getenv("GROQ_API_KEY_2"))
-
- # client = Groq(
- #     api_key="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",
- # )
-
- # # Initialize spaCy NLP model for named entity recognition (NER)
- # nlp = spacy.load("en_core_web_sm")
-
- # # Initialize sentiment analysis model using Hugging Face
- # sentiment_analyzer = pipeline("sentiment-analysis")
-
- # # Function to preprocess user input for better NLP understanding
- # def preprocess_input(user_input):
- #     # Clean up text (remove unnecessary characters, standardize)
- #     user_input = user_input.strip().lower()
- #     return user_input
-
- # # Function for sentiment analysis (optional)
- # def analyze_sentiment(user_input):
- #     result = sentiment_analyzer(user_input)
- #     return result[0]['label']  # Positive, Negative, or Neutral
-
- # # Function to extract medical entities from input using NER
-
- # symptoms = [
- #     "fever", "cough", "headache", "nausea", "pain", "fatigue", "dizziness",
- #     "shortness of breath", "sore throat", "runny nose", "congestion", "diarrhea",
- #     "vomiting", "chills", "sweating", "loss of appetite", "insomnia",
- #     "itching", "rash", "swelling", "bleeding", "burning sensation",
- #     "weakness", "tingling", "numbness", "muscle cramps", "joint pain",
- #     "blurred vision", "double vision", "dry eyes", "sensitivity to light",
- #     "difficulty breathing", "palpitations", "chest pain", "back pain",
- #     "stomach ache", "abdominal pain", "weight loss", "weight gain",
- #     "frequent urination", "difficulty urinating", "anxiety", "depression",
- #     "irritability", "confusion", "memory loss", "bruising"
- # ]
- # diseases = [
- #     "diabetes", "cancer", "asthma", "flu", "pneumonia", "hypertension",
- #     "arthritis", "bronchitis", "migraine", "stroke", "heart attack",
- #     "coronary artery disease", "tuberculosis", "malaria", "dengue",
- #     "hepatitis", "anemia", "thyroid disease", "eczema", "psoriasis",
- #     "osteoporosis", "parkinson's", "alzheimer's", "depression",
- #     "anxiety disorder", "schizophrenia", "epilepsy", "bipolar disorder",
- #     "chronic kidney disease", "liver cirrhosis", "HIV", "AIDS",
- #     "covid-19", "cholera", "smallpox", "measles", "mumps",
- #     "rubella", "whooping cough", "obesity", "GERD", "IBS",
- #     "celiac disease", "ulcerative colitis", "Crohn's disease",
- #     "sleep apnea", "hypothyroidism", "hyperthyroidism"
- # ]
-
- # # Function to extract medical entities
- # def extract_medical_entities(user_input):
- #     user_input = preprocess_input(user_input)
- #     medical_entities = []
- #     for word in user_input.split():
- #         if word in symptoms or word in diseases:
- #             medical_entities.append(word)
- #     return medical_entities
- # # def extract_medical_entities(user_input):
- # #     doc = nlp(user_input)
- # #     medical_entities = [ent.text for ent in doc.ents if ent.label_ == "SYMPTOM" or ent.label_ == "DISEASE"]
- # #     print(medical_entities)
- # #     print("This is doc", doc)
- # #     return medical_entities
-
- # # Function to encode the image
- # def encode_image(uploaded_image):
- #     try:
- #         logger.debug("Encoding image...")
- #         buffered = BytesIO()
- #         uploaded_image.save(buffered, format="PNG")
- #         logger.debug("Image encoding complete.")
- #         return base64.b64encode(buffered.getvalue()).decode("utf-8")
- #     except Exception as e:
- #         logger.error(f"Error encoding image: {e}")
- #         raise
-
- # # Initialize messages
- # def initialize_messages():
- #     return [{"role": "system",
- #              "content": '''You are Dr. HealthBuddy, a professional, empathetic,
- # and knowledgeable virtual doctor chatbot. Your purpose is to provide health information,
- # symptom guidance, and lifestyle tips using the uploaded dataset as a reference for common
- # symptoms and associated conditions.
-
- # Utilize the dataset to provide information about symptoms and possible conditions for educational purposes.
- # If a symptom matches data in the dataset, offer users relevant insights, and suggest general management strategies.
- # Clearly communicate that you are not a substitute for professional medical advice.
- # Encourage users to consult a licensed healthcare provider for any severe or persistent health issues.
- # Maintain a friendly and understanding tone in all responses.
- # Examples:
-
- # User: "I have skin rash and itching. What could it be?"
- # Response: "According to the data, skin rash and itching are common symptoms of conditions like fungal infections.
- # You can try keeping the affected area dry and clean, and using over-the-counter antifungal creams.
- # If the rash persists or worsens, please consult a dermatologist."
-
- # User: "What might cause nodal skin eruptions?"
- # Response: "Nodal skin eruptions could be linked to conditions such as fungal infections.
- # It's best to monitor the symptoms and avoid scratching.
- # For a proper diagnosis, consider visiting a healthcare provider.'''}]
-
- # messages = initialize_messages()
-
- # def customLLMBot(user_input, uploaded_image, chat_history):
- #     try:
- #         global messages
- #         logger.info("Processing input...")
-
- #         # Preprocess the user input
- #         user_input = preprocess_input(user_input)
-
- #         # Analyze sentiment (Optional)
- #         sentiment = analyze_sentiment(user_input)
- #         logger.info(f"Sentiment detected: {sentiment}")
-
- #         # Extract medical entities (Optional)
- #         medical_entities = extract_medical_entities(user_input)
- #         logger.info(f"Extracted medical entities: {medical_entities}")
-
- #         # Append user input to the chat history
- #         chat_history.append(("user", user_input))
-
- #         if uploaded_image is not None:
- #             # Encode the image to base64
- #             base64_image = encode_image(uploaded_image)
-
- #             logger.debug(f"Image received, size: {len(base64_image)} bytes")
-
- #             # Create a message for the image prompt
- #             messages_image = [
- #                 {
- #                     "role": "user",
- #                     "content": [
- #                         {"type": "text", "text": "What's in this image?"},
- #                         {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{base64_image}"}}
- #                     ]
- #                 }
- #             ]
-
- #             logger.info("Sending image to Groq API for processing...")
- #             response = client.chat.completions.create(
- #                 model="llama-3.2-11b-vision-preview",
- #                 messages=messages_image,
- #             )
- #             logger.info("Image processed successfully.")
- #         else:
- #             # Process text input
- #             logger.info("Processing text input...")
- #             messages.append({
- #                 "role": "user",
- #                 "content": user_input
- #             })
- #             response = client.chat.completions.create(
- #                 model="llama-3.2-11b-vision-preview",
- #                 messages=messages,
- #             )
- #             logger.info("Text processed successfully.")
-
- #         # Extract the reply
- #         LLM_reply = response.choices[0].message.content
- #         logger.debug(f"LLM reply: {LLM_reply}")
-
- #         # Append the bot's response to the chat history
- #         chat_history.append(("bot", LLM_reply))
- #         messages.append({"role": "assistant", "content": LLM_reply})
-
- #         # Generate audio for response
- #         audio_file = f"response_{uuid.uuid4().hex}.mp3"
- #         tts = gTTS(LLM_reply, lang='en')
- #         tts.save(audio_file)
- #         logger.info(f"Audio response saved as {audio_file}")
-
- #         # Return chat history and audio file
- #         return chat_history, audio_file
-
- #     except Exception as e:
- #         logger.error(f"Error in customLLMBot function: {e}")
- #         return [("user", user_input or "Image uploaded"), ("bot", f"An error occurred: {e}")], None
-
- # # Gradio Interface
- # def chatbot_ui():
- #     with gr.Blocks() as demo:
- #         gr.Markdown("# Healthcare Chatbot Doctor")
-
- #         # State for user chat history
- #         chat_history = gr.State([])
-
- #         # Layout for chatbot and input box alignment
- #         with gr.Row():
- #             with gr.Column(scale=3):  # Main column for chatbot
- #                 chatbot = gr.Chatbot(label="Responses", elem_id="chatbot")
- #                 user_input = gr.Textbox(
- #                     label="Ask a health-related question",
- #                     placeholder="Describe your symptoms...",
- #                     elem_id="user-input",
- #                     lines=1,
- #                 )
- #             with gr.Column(scale=1):  # Side column for image and buttons
- #                 uploaded_image = gr.Image(label="Upload an Image", type="pil")
- #                 submit_btn = gr.Button("Submit")
- #                 clear_btn = gr.Button("Clear")
- #                 audio_output = gr.Audio(label="Audio Response")
-
- #         # Define actions
- #         def handle_submit(user_query, image, history):
- #             logger.info("User submitted a query.")
- #             response, audio = customLLMBot(user_query, image, history)
- #             return response, audio, None, "", history  # Clear the image after submission
-
- #         # Submit on pressing Enter key
- #         user_input.submit(
- #             handle_submit,
- #             inputs=[user_input, uploaded_image, chat_history],
- #             outputs=[chatbot, audio_output, uploaded_image, user_input, chat_history],
- #         )
-
- #         # Submit on button click
- #         submit_btn.click(
- #             handle_submit,
- #             inputs=[user_input, uploaded_image, chat_history],
- #             outputs=[chatbot, audio_output, uploaded_image, user_input, chat_history],
- #         )
-
- #         # Action for clearing all fields
- #         clear_btn.click(
- #             lambda: ([], "", None, []),
- #             inputs=[],
- #             outputs=[chatbot, user_input, uploaded_image, chat_history],
- #         )
-
- #     return demo
-
- # # Launch the interface
- # #chatbot_ui().launch(server_name="0.0.0.0", server_port=7860)
-
- # chatbot_ui().launch(server_name="localhost", server_port=7860)
-
-
+ # Import necessary libraries
from groq import Groq
import gradio as gr
from gtts import gTTS
@@ -274,15 +10,14 @@ import logging
import spacy
from transformers import pipeline
import torch
- from PIL import Image
+ import cv2
+ import numpy as np
from torchvision import transforms
import pathlib
- import cv2  # Import OpenCV
- import numpy as np

- # # Pathlib adjustment for Windows compatibility
- # temp = pathlib.PosixPath
- # pathlib.PosixPath = pathlib.WindowsPath
+ # Pathlib adjustment for Windows compatibility
+ temp = pathlib.PosixPath
+ pathlib.PosixPath = pathlib.WindowsPath

# Set up logger
logger = logging.getLogger(__name__)
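The pathlib alias enabled above is a common workaround for loading a YOLOv5 checkpoint pickled on a different OS: torch.load() fails because the checkpoint stores a concrete PosixPath or WindowsPath. A guarded sketch of the same idea, assuming that is the intent here (which direction to alias depends on where the weights were saved):

    # Sketch only: alias the Path type the checkpoint pickled but the host lacks.
    import pathlib
    import platform

    if platform.system() == "Windows":
        pathlib.PosixPath = pathlib.WindowsPath   # weights pickled on Linux/macOS
    else:
        pathlib.WindowsPath = pathlib.PosixPath   # weights pickled on Windows

As written in the diff, the assignment runs unconditionally, including on Linux hosts such as a Hugging Face Space, where rebinding PosixPath can itself break path handling.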
@@ -295,24 +30,14 @@ file_handler.setFormatter(formatter)
logger.addHandler(console_handler)
logger.addHandler(file_handler)

- #Initialize Groq Client
+ # Initialize Groq Client
client = Groq(api_key=os.getenv("GROQ_API_KEY_2"))

- # # Initialize Groq Client
- # client = Groq(api_key="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")
-
- # # Initialize spaCy NLP model for named entity recognition (NER)
- # nlp = spacy.load("en_core_web_sm")
-
-
- # Download the spaCy model if it doesn't exist
- try:
-     nlp = spacy.load("en_core_web_sm")
- except OSError:
-     print("Downloading 'en_core_web_sm' model...")
-     spacy.cli.download("en_core_web_sm")
-     nlp = spacy.load("en_core_web_sm")
+ # Initialize Groq Client
+ #client = Groq(api_key="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")
+
+ # Initialize spaCy NLP model for named entity recognition (NER)
+ nlp = spacy.load("en_core_web_sm")

# Initialize sentiment analysis model using Hugging Face
sentiment_analyzer = pipeline("sentiment-analysis")
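With the download fallback removed, spacy.load("en_core_web_sm") raises OSError on any machine where the model was never installed. For reference, a minimal sketch of the fallback this hunk deletes:

    # Sketch: download the spaCy model on first run instead of assuming it exists.
    import spacy

    try:
        nlp = spacy.load("en_core_web_sm")
    except OSError:
        spacy.cli.download("en_core_web_sm")  # one-time download
        nlp = spacy.load("en_core_web_sm")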
@@ -389,36 +114,8 @@ def encode_image(uploaded_image):

# Initialize messages
def initialize_messages():
-     return [{"role": "system",
-              "content": '''You are Dr. SkinCare, a highly experienced and professional virtual dermatologist chatbot with over 40 years of expertise in diagnosing and managing skin conditions. You provide accurate, empathetic, and actionable advice on skin-related concerns, including rashes, acne, infections, and chronic skin diseases. Your goal is to offer clear explanations, practical solutions, and guidance on when to seek in-person care from a dermatologist.
-
- You only respond to skin-related inquiries and strive to provide the best possible guidance. Your responses should include:
- 1. A clear explanation of the possible condition(s) based on the symptoms described.
- 2. Practical, actionable steps for managing the issue at home (if applicable).
- 3. A recommendation to consult a dermatologist for persistent, severe, or unclear symptoms.
-
- Maintain a friendly, professional, and empathetic tone in all interactions. Always emphasize that you are not a substitute for professional medical advice, diagnosis, or treatment.
-
- **Prompt Template:**
- - Input: Patient’s skin-related concerns, including symptoms, questions, or specific issues they mention.
- - Response: Start with a polite acknowledgment of the patient’s concern. Provide a clear, concise explanation of the possible condition(s) and suggest practical, actionable steps. If needed, advise on when to consult a dermatologist.
-
- **Examples:**
-
- - User: "I have a red, itchy rash on my arm. What could it be?"
-   Response: "Hello! A red, itchy rash on your arm could be caused by several conditions, such as eczema, contact dermatitis, or a fungal infection. Try keeping the area clean and moisturized, and avoid scratching. Over-the-counter hydrocortisone cream or antihistamines may help. If the rash persists or worsens, please consult a dermatologist for a proper diagnosis."
-
- - User: "I have small, raised bumps on my face. What might this be?"
-   Response: "Hi there! Small, raised bumps on your face could be due to acne, closed comedones, or even a mild allergic reaction. Avoid using harsh skincare products and consider using a gentle cleanser and non-comedogenic moisturizer. If the bumps don’t improve or spread, it’s best to see a dermatologist for further evaluation."
-
- - User: "I’ve noticed dark patches on my skin. What could cause this?"
-   Response: "Hello! Dark patches on the skin, known as hyperpigmentation, can be caused by sun exposure, hormonal changes, or conditions like melasma. Use a broad-spectrum sunscreen daily and consider skincare products with ingredients like vitamin C or niacinamide. For persistent or concerning patches, consult a dermatologist to rule out underlying conditions."
-
- - User: "I have a mole that looks irregular. Should I be worried?"
-   Response: "Hi. Irregular moles can sometimes be a sign of skin changes that need attention. Keep an eye on the mole for changes in size, shape, or color, and avoid exposing it to excessive sunlight. It’s important to have it checked by a dermatologist to rule out any serious concerns, such as skin cancer."
-
- Always maintain a compassionate tone, provide educational insights, and stress that you are not a substitute for professional medical advice. Encourage users to consult a dermatologist for any serious, persistent, or unclear skin concerns.'''
- }]
+     return [{"role": "system", "content": '''You are Dr. HealthBuddy, a professional, empathetic, and knowledgeable virtual doctor chatbot.'''}]
messages = initialize_messages()

# Function for image prediction using YOLOv5
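The trimmed system prompt still seeds the module-level messages list the same way; each turn then appends a user entry and the assistant reply, mirroring the append calls visible in the customLLMBot listing removed further down. A minimal sketch of that flow (the reply string is a placeholder):

    # Sketch: per-turn growth of the shared conversation state.
    messages = initialize_messages()
    messages.append({"role": "user", "content": "i have a skin rash and itching"})
    reply = "..."  # response.choices[0].message.content in the real flow
    messages.append({"role": "assistant", "content": reply})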
@@ -428,9 +125,13 @@ def predict_image(image):
    if image is None:
        return "Error: No image uploaded.", "No description available."

-     # Convert PIL image to NumPy array (OpenCV format)
+     # Convert PIL image to NumPy array
    image_np = np.array(image)  # Convert PIL image to NumPy array

+     # Handle grayscale images
+     if len(image_np.shape) == 2:  # Grayscale image
+         image_np = cv2.cvtColor(image_np, cv2.COLOR_GRAY2RGB)
+
    # Convert RGB to BGR (OpenCV uses BGR by default)
    image_np = cv2.cvtColor(image_np, cv2.COLOR_RGB2BGR)
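The new branch above guards against 2-D grayscale arrays before the RGB-to-BGR conversion. A self-contained sketch of what those conversions do to array shapes, using dummy data:

    # Sketch with dummy data: how the channel conversions change array shapes.
    import numpy as np
    import cv2

    gray = np.zeros((64, 64), dtype=np.uint8)      # 2-D array: no channel axis
    rgb = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)   # (64, 64) -> (64, 64, 3)
    bgr = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)     # channel reorder, same shape
    print(gray.shape, rgb.shape, bgr.shape)        # (64, 64) (64, 64, 3) (64, 64, 3)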
@@ -439,8 +140,8 @@ def predict_image(image):

    # Transform the image for the model
    transform = transforms.Compose([
-         transforms.ToTensor(),  # Convert image to tensor
-         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),  # Normalize
+         transforms.ToTensor(),
+         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])
    im = transform(image_resized).unsqueeze(0)  # Add batch dimension (BCHW)
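For reference, the same Compose pipeline applied to a dummy input; the 224x224 size here is purely illustrative, since the actual resize happens earlier in predict_image:

    # Sketch: ToTensor turns uint8 HWC into float CHW in [0, 1];
    # Normalize then applies the ImageNet per-channel mean/std.
    import numpy as np
    from torchvision import transforms

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
    ])

    dummy = np.zeros((224, 224, 3), dtype=np.uint8)  # stand-in for image_resized
    im = transform(dummy).unsqueeze(0)               # batch axis -> (1, 3, 224, 224)
    print(im.shape)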
@@ -465,10 +166,6 @@ def predict_image(image):
        prediction_result = f"Predicted Class ID: {predicted_class_id}\nConfidence: {confidence_score:.4f}"
        description = "No description available."

-         # Display the image with OpenCV (optional)
-         cv2.imshow("Processed Image", image_resized)
-         cv2.waitKey(1)  # Wait for 1 ms to display the image
-
        return prediction_result, description

    except Exception as e:
@@ -485,83 +182,6 @@ def get_description(class_name):
    }
    return descriptions.get(class_name.lower(), "No description available.")

- # Custom LLM Bot Function
- def customLLMBot(user_input, uploaded_image, chat_history):
-     try:
-         global messages
-         logger.info("Processing input...")
-
-         # Preprocess the user input
-         user_input = preprocess_input(user_input)
-
-         # Analyze sentiment (Optional)
-         sentiment = analyze_sentiment(user_input)
-         logger.info(f"Sentiment detected: {sentiment}")
-
-         # Extract medical entities (Optional)
-         medical_entities = extract_medical_entities(user_input)
-         logger.info(f"Extracted medical entities: {medical_entities}")
-
-         # Append user input to the chat history
-         chat_history.append(("user", user_input))
-
-         if uploaded_image is not None:
-             # Encode the image to base64
-             base64_image = encode_image(uploaded_image)
-
-             logger.debug(f"Image received, size: {len(base64_image)} bytes")
-
-             # Create a message for the image prompt
-             messages_image = [
-                 {
-                     "role": "user",
-                     "content": [
-                         {"type": "text", "text": "What's in this image?"},
-                         {"type": "image_url", "image_url": {"url": f"data:image/png;base64,{base64_image}"}}
-                     ]
-                 }
-             ]
-
-             logger.info("Sending image to Groq API for processing...")
-             response = client.chat.completions.create(
-                 model="llama-3.2-11b-vision-preview",
-                 messages=messages_image,
-             )
-             logger.info("Image processed successfully.")
-         else:
-             # Process text input
-             logger.info("Processing text input...")
-             messages.append({
-                 "role": "user",
-                 "content": user_input
-             })
-             response = client.chat.completions.create(
-                 model="llama-3.2-11b-vision-preview",
-                 messages=messages,
-             )
-             logger.info("Text processed successfully.")
-
-         # Extract the reply
-         LLM_reply = response.choices[0].message.content
-         logger.debug(f"LLM reply: {LLM_reply}")
-
-         # Append the bot's response to the chat history
-         chat_history.append(("bot", LLM_reply))
-         messages.append({"role": "assistant", "content": LLM_reply})
-
-         # Generate audio for response
-         audio_file = f"response_{uuid.uuid4().hex}.mp3"
-         tts = gTTS(LLM_reply, lang='en')
-         tts.save(audio_file)
-         logger.info(f"Audio response saved as {audio_file}")
-
-         # Return chat history and audio file
-         return chat_history, audio_file
-
-     except Exception as e:
-         logger.error(f"Error in customLLMBot function: {e}")
-         return [("user", user_input or "Image uploaded"), ("bot", f"An error occurred: {e}")], None
-
# Gradio Interface
def chatbot_ui():
    with gr.Blocks() as demo:
@@ -654,7 +274,7 @@ def chatbot_ui():
    return demo

# Launch the interface
- # chatbot_ui().launch(server_name="localhost", server_port=7860)
+ #chatbot_ui().launch(server_name="localhost", server_port=7860)

# Launch the interface
chatbot_ui().launch(server_name="0.0.0.0", server_port=7860)
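Binding to 0.0.0.0 exposes the server on all interfaces, which containerized hosts such as Hugging Face Spaces require; localhost keeps it reachable only from the same machine. A small sketch that picks the address from the environment instead of editing code (it assumes SPACE_ID is set in the Space's runtime, as on current Hugging Face Spaces):

    # Sketch: choose the bind address by environment rather than by editing code.
    import os

    demo = chatbot_ui()
    host = "0.0.0.0" if os.getenv("SPACE_ID") else "localhost"
    demo.launch(server_name=host, server_port=7860)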
 