# Import necessary libraries
from groq import Groq
import gradio as gr
from gtts import gTTS
import uuid
import base64
from io import BytesIO
import os
import logging
import spacy
from transformers import pipeline
import torch
import cv2
import numpy as np
from torchvision import transforms
import pathlib


# Pathlib adjustment for Windows compatibility: checkpoints pickled with
# PosixPath otherwise fail to load on Windows. Only patch on Windows,
# since the override would break pathlib on POSIX systems.
if os.name == "nt":
    pathlib.PosixPath = pathlib.WindowsPath

# Set up logger
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler()
file_handler = logging.FileHandler('chatbot_log.log')
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
console_handler.setFormatter(formatter)
file_handler.setFormatter(formatter)
logger.addHandler(console_handler)
logger.addHandler(file_handler)

# Initialize Groq Client
client = Groq(api_key=os.getenv("GROQ_API_KEY_2"))


# Initialize spaCy NLP model for named entity recognition (NER),
# downloading it on first run if it is not already installed
try:
    nlp = spacy.load("en_core_web_sm")
except OSError:
    logger.info("Downloading 'en_core_web_sm' model...")
    spacy.cli.download("en_core_web_sm")
    nlp = spacy.load("en_core_web_sm")
logger.info("spaCy model loaded successfully.")

# Initialize sentiment analysis model using Hugging Face; pinning the
# pipeline's default checkpoint avoids the "no model was supplied" warning
sentiment_analyzer = pipeline(
    "sentiment-analysis",
    model="distilbert-base-uncased-finetuned-sst-2-english",
)


def load_yolov5_model():
    # Load the custom-trained weights through the official YOLOv5 hub entry
    # point. source='github' fetches the ultralytics/yolov5 repo; to run
    # offline, point the first argument at a local clone and use source='local'
    model = torch.hub.load(
        'ultralytics/yolov5',
        'custom',
        path='models/best.pt',  # relative path to the trained weights
        source='github',
    )
    model.eval()  # inference mode
    return model



# Load the pre-trained YOLOv5 model once at startup
model = load_yolov5_model()
logger.info("YOLOv5 model loaded successfully.")

# Function to preprocess user input for better NLP understanding
def preprocess_input(user_input):
    user_input = user_input.strip().lower()
    return user_input

# Function for sentiment analysis (optional)
def analyze_sentiment(user_input):
    result = sentiment_analyzer(user_input)
    return result[0]['label']
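
# Example: analyze_sentiment("I feel terrible today") typically returns
# "NEGATIVE" with the default SST-2 checkpoint.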

# Keyword lists used to extract medical entities from user input
# (simple dictionary lookup; the spaCy pipeline above is available
# for fuller NER if needed)
symptoms = [
    "fever", "cough", "headache", "nausea", "pain", "fatigue", "dizziness",
    "shortness of breath", "sore throat", "runny nose", "congestion", "diarrhea",
    "vomiting", "chills", "sweating", "loss of appetite", "insomnia", 
    "itching", "rash", "swelling", "bleeding", "burning sensation",
    "weakness", "tingling", "numbness", "muscle cramps", "joint pain",
    "blurred vision", "double vision", "dry eyes", "sensitivity to light",
    "difficulty breathing", "palpitations", "chest pain", "back pain",
    "stomach ache", "abdominal pain", "weight loss", "weight gain",
    "frequent urination", "difficulty urinating", "anxiety", "depression",
    "irritability", "confusion", "memory loss", "bruising"
]
diseases = [
    "diabetes", "cancer", "asthma", "flu", "pneumonia", "hypertension",
    "arthritis", "bronchitis", "migraine", "stroke", "heart attack",
    "coronary artery disease", "tuberculosis", "malaria", "dengue",
    "hepatitis", "anemia", "thyroid disease", "eczema", "psoriasis",
    "osteoporosis", "parkinson's", "alzheimer's", "depression",
    "anxiety disorder", "schizophrenia", "epilepsy", "bipolar disorder",
    "chronic kidney disease", "liver cirrhosis", "HIV", "AIDS",
    "covid-19", "cholera", "smallpox", "measles", "mumps", 
    "rubella", "whooping cough", "obesity", "GERD", "IBS", 
    "celiac disease", "ulcerative colitis", "Crohn's disease", 
    "sleep apnea", "hypothyroidism", "hyperthyroidism"
]

def extract_medical_entities(user_input):
    # Substring matching so that multi-word terms such as
    # "shortness of breath" are detected, not just single tokens
    user_input = preprocess_input(user_input)
    medical_entities = []
    for term in symptoms + diseases:
        if term.lower() in user_input:
            medical_entities.append(term)
    return medical_entities
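
# Example: extract_medical_entities("I have a sore throat and a fever")
# -> ["fever", "sore throat"] (matches follow keyword-list order)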

# Function to encode the image
def encode_image(uploaded_image):
    try:
        logger.debug("Encoding image...")
        buffered = BytesIO()
        uploaded_image.save(buffered, format="PNG")
        logger.debug("Image encoding complete.")
        return base64.b64encode(buffered.getvalue()).decode("utf-8")
    except Exception as e:
        logger.error(f"Error encoding image: {e}")
        raise
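
# Note: the base64 string from encode_image() can be embedded in a data URL,
# e.g. f"data:image/png;base64,{encode_image(img)}", which is the format
# multimodal chat APIs generally expect for inline images.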

# Initialize messages
def initialize_messages():
    return [{"role": "system", "content": '''You are Dr. HealthBuddy, a professional, empathetic, and knowledgeable virtual doctor chatbot.'''}]

messages = initialize_messages()

# Function for image prediction using YOLOv5
def predict_image(image):
    try:
        # Debug: Check if the image is None
        if image is None:
            return "Error: No image uploaded.", "No description available."

        # Convert PIL image to a NumPy array (RGB)
        image_np = np.array(image)

        # Handle grayscale and RGBA inputs
        if image_np.ndim == 2:  # grayscale image
            image_np = cv2.cvtColor(image_np, cv2.COLOR_GRAY2RGB)
        elif image_np.shape[2] == 4:  # RGBA image
            image_np = cv2.cvtColor(image_np, cv2.COLOR_RGBA2RGB)

        # Keep the image in RGB: the ImageNet normalization below assumes
        # RGB channel order, so no BGR conversion is needed on this path

        # Resize the image to match the model's expected input size
        image_resized = cv2.resize(image_np, (224, 224))

        # Transform the image for the model
        transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
        ])
        im = transform(image_resized).unsqueeze(0)  # Add batch dimension (BCHW)

        # Get predictions (this assumes best.pt is a classification
        # checkpoint whose raw output is a [1, num_classes] logits tensor)
        with torch.no_grad():
            output = model(im)

        # Apply softmax to get confidence scores
        probs = torch.nn.functional.softmax(output, dim=1)

        # Get the predicted class and its confidence score
        predicted_class_id = torch.argmax(probs, dim=1).item()
        confidence_score = probs[0, predicted_class_id].item()

        # Get predicted class name if available
        if hasattr(model, 'names'):
            class_name = model.names[predicted_class_id]
            prediction_result = f"Predicted Class: {class_name}\nConfidence: {confidence_score:.4f}"
            description = get_description(class_name)  # Function to get description
        else:
            prediction_result = f"Predicted Class ID: {predicted_class_id}\nConfidence: {confidence_score:.4f}"
            description = "No description available."

        return prediction_result, description

    except Exception as e:
        logger.error(f"Error in image prediction: {e}")
        return f"An error occurred during image prediction: {e}", "No description available."

# Function to get description based on predicted class
def get_description(class_name):
    descriptions = {
        "bcc": "Basal cell carcinoma (BCC) is a type of skin cancer that begins in the basal cells. It often appears as a slightly transparent bump on the skin, though it can take other forms. BCC grows slowly and is unlikely to spread to other parts of the body, but early treatment is important to prevent damage to surrounding tissues.",
        "atopic": "Atopic dermatitis is a chronic skin condition characterized by itchy, inflamed skin. It is common in individuals with a family history of allergies or asthma.",
        "acne": "Acne is a skin condition that occurs when hair follicles become clogged with oil and dead skin cells. It often causes pimples, blackheads, and whiteheads, and is most common among teenagers.",
        # Add more descriptions as needed
    }
    return descriptions.get(class_name.lower(), "No description available.")
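
# NOTE: the Gradio handlers below call `customLLMBot`, which is not defined
# in this file. The following is a minimal sketch of what it presumably does,
# wiring the helpers above (preprocess_input, extract_medical_entities,
# analyze_sentiment, encode_image) to the Groq chat API and gTTS. The model
# name is an assumption; replace it with the one the deployment actually uses.
def customLLMBot(user_query, image, history):
    global messages
    user_query = preprocess_input(user_query)

    # Optional context from the lightweight NLP helpers
    entities = extract_medical_entities(user_query)
    sentiment = analyze_sentiment(user_query) if user_query else "NEUTRAL"
    prompt = user_query
    if entities:
        prompt += f"\n(Detected medical terms: {', '.join(entities)}; sentiment: {sentiment})"
    if image is not None:
        _ = encode_image(image)  # base64 payload, if a vision model is used
        prompt += "\n(The user also attached an image.)"

    # Query the LLM with the running conversation
    messages.append({"role": "user", "content": prompt})
    completion = client.chat.completions.create(
        model="llama-3.1-8b-instant",  # assumed Groq model name
        messages=messages,
    )
    reply = completion.choices[0].message.content
    messages.append({"role": "assistant", "content": reply})

    # Synthesize an audio reply for the gr.Audio component
    audio_path = f"response_{uuid.uuid4().hex}.mp3"
    gTTS(text=reply, lang="en").save(audio_path)

    history.append((user_query, reply))
    return history, audio_path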

# Gradio Interface
def chatbot_ui():
    with gr.Blocks() as demo:
        gr.Markdown("# Healthcare Chatbot Doctor")

        # State for user chat history
        chat_history = gr.State([])

        # Layout for chatbot and input box alignment
        with gr.Row():
            with gr.Column(scale=3):  # Main column for chatbot
                chatbot = gr.Chatbot(label="Responses", elem_id="chatbot")
                user_input = gr.Textbox(
                    label="Ask a health-related question",
                    placeholder="Describe your symptoms...",
                    elem_id="user-input",
                    lines=1,
                )
            with gr.Column(scale=1):  # Side column for image and buttons
                uploaded_image = gr.Image(label="Upload an Image", type="pil")
                submit_btn = gr.Button("Submit")
                clear_btn = gr.Button("Clear")
                audio_output = gr.Audio(label="Audio Response")

        # New section for image prediction (left and right layout)
        with gr.Row():
            # Left side: Upload image
            with gr.Column():
                gr.Markdown("### Upload Image for Prediction")
                prediction_image = gr.Image(label="Upload Image", type="pil")
                predict_btn = gr.Button("Predict")

            # Right side: Prediction result and description
            with gr.Column():
                gr.Markdown("### Prediction Result")
                prediction_output = gr.Textbox(label="Result", interactive=False)

                # Description column
                gr.Markdown("### Description")
                description_output = gr.Textbox(label="Description", interactive=False)

                # Clear button for prediction result (below description box)
                clear_prediction_btn = gr.Button("Clear Prediction")

        # Define actions
        def handle_submit(user_query, image, history):
            logger.info("User submitted a query.")
            response, audio = customLLMBot(user_query, image, history)
            return response, audio, None, "", history

        # Clear prediction result and image
        def clear_prediction():
            return None, "", ""

        # Submit on pressing Enter key
        user_input.submit(
            handle_submit,
            inputs=[user_input, uploaded_image, chat_history],
            outputs=[chatbot, audio_output, uploaded_image, user_input, chat_history],
        )

        # Submit on button click
        submit_btn.click(
            handle_submit,
            inputs=[user_input, uploaded_image, chat_history],
            outputs=[chatbot, audio_output, uploaded_image, user_input, chat_history],
        )

        # Action for clearing all fields
        clear_btn.click(
            lambda: ([], "", None, []),
            inputs=[],
            outputs=[chatbot, user_input, uploaded_image, chat_history],
        )

        # Action for image prediction
        predict_btn.click(
            predict_image,
            inputs=[prediction_image],
            outputs=[prediction_output, description_output],  # Update both outputs
        )

        # Action for clearing prediction result and image
        clear_prediction_btn.click(
            clear_prediction,
            inputs=[],
            outputs=[prediction_image, prediction_output, description_output],
        )

    return demo

# Launch the interface (0.0.0.0 exposes it on all network interfaces;
# use server_name="localhost" to restrict access to this machine)
chatbot_ui().launch(server_name="0.0.0.0", server_port=7860)