import os
import json
import gradio as gr
from ultralytics import YOLO
from PIL import Image, ImageDraw
import torch
from langchain_google_genai.chat_models import ChatGoogleGenerativeAI
# Load the YOLOv8 model
model = YOLO("yolov8n.pt")
# Load credentials (stringified JSON) from environment variable for Gemini
credentials_string = os.environ.get("GOOGLE_APPLICATION_CREDENTIALS")
if not credentials_string:
    raise ValueError("GOOGLE_APPLICATION_CREDENTIALS is not set in the environment!")
# Parse the stringified JSON back to a Python dictionary
credentials = json.loads(credentials_string)
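# Note: because the value is parsed with json.loads, the GOOGLE_APPLICATION_CREDENTIALS
# secret must contain the raw service-account JSON rather than a file path, e.g. (illustrative):
#   GOOGLE_APPLICATION_CREDENTIALS='{"type": "service_account", "project_id": "...", ...}'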
# Save the credentials to a temporary JSON file (required by Google SDKs)
with open("service_account.json", "w") as f:
    json.dump(credentials, f)
# Set the GOOGLE_APPLICATION_CREDENTIALS environment variable to the temporary file
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "service_account.json"
# Initialize Gemini model (chatbot)
llm = ChatGoogleGenerativeAI(model='gemini-1.5-pro')
# Global chat history
chat_history = []
# Chat with the chatbot
def chat_with_bot(message):
    global chat_history
    response = llm.invoke(message).content  # Use Gemini to generate the response
    bot_response = f"Bot: {response}"
    chat_history.append((message, bot_response))
    return chat_history
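# Note: each (message, bot_response) pair appended to chat_history matches the
# tuple-style history that gr.Chatbot renders, so the full list can be returned directly.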
# Analyze an uploaded image with YOLOv8
def analyze_image(image_path):
    global chat_history
    try:
        # Load the image
        image = Image.open(image_path).convert("RGB")
        # Run object detection on the image
        results = model(image)
        # Collect the detection results
        detected_objects = []
        image_draw = image.copy()
        draw = ImageDraw.Draw(image_draw)
        for result in results:
            for box in result.boxes.data:
                x1, y1, x2, y2, score, class_id = box.tolist()
                if score > 0.5:  # Only keep objects with a confidence score > 0.5
                    class_name = model.names[int(class_id)]
                    detected_objects.append(f"{class_name} (score: {score:.2f})")
                    draw.rectangle([x1, y1, x2, y2], outline="red", width=3)
                    draw.text((x1, y1), class_name, fill="red")
        if detected_objects:
            bot_response = f"Objects detected: {', '.join(detected_objects)}."
        else:
            bot_response = "No objects detected."
        chat_history.append(("Uploaded an image for analysis", bot_response))
        return image_draw, chat_history
    except Exception as e:
        error_msg = f"Error processing the image: {str(e)}"
        chat_history.append(("Error during image analysis", error_msg))
        return None, chat_history
# Build the Gradio interface
with gr.Blocks() as demo:
    gr.Markdown("# Ken Chatbot")
    gr.Markdown("Ask me anything or upload an image for analysis!")
    chatbot = gr.Chatbot(elem_id="chatbot")
    msg = gr.Textbox(label="Type your message here...", placeholder="Enter your message...", show_label=False)
    send_btn = gr.Button("Send")
    img_upload = gr.Image(type="filepath", label="Upload an image for analysis")
    img_output = gr.Image(label="Detected Objects")
    msg.submit(chat_with_bot, msg, chatbot)
    send_btn.click(chat_with_bot, msg, chatbot)
    send_btn.click(lambda: "", None, msg)  # Clear the input field
    img_upload.change(analyze_image, img_upload, [img_output, chatbot])

demo.launch()