import os
from pyChatGPT import ChatGPT  # optional: only needed if sending the generated prompt to ChatGPT directly

# Install/upgrade runtime dependencies at startup (Hugging Face Spaces-style setup)
os.system("pip install -U gradio")

import sys
import gradio as gr

# Install detectron2 (prebuilt wheel for CUDA 10.2 / torch 1.9)
os.system(
    "pip install detectron2 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu102/torch1.9/index.html"
)

# Clone Detic (with its submodules) and work from inside the repo
os.system(
    "git clone https://github.com/facebookresearch/Detic.git --recurse-submodules"
)
os.chdir("Detic")

import torch

# Some basic setup:
# Setup detectron2 logger
import detectron2
from detectron2.utils.logger import setup_logger

setup_logger()

# import some common libraries
import numpy as np
import json, cv2, random

# import some common detectron2 utilities
from detectron2 import model_zoo
from detectron2.engine import DefaultPredictor
from detectron2.config import get_cfg
from detectron2.utils.visualizer import Visualizer
from detectron2.data import MetadataCatalog, DatasetCatalog

# Detic libraries
sys.path.insert(0, "third_party/CenterNet2/projects/CenterNet2/")
sys.path.insert(0, "third_party/CenterNet2/")
from centernet.config import add_centernet_config
from detic.config import add_detic_config
from detic.modeling.utils import reset_cls_test

from PIL import Image

# Build the detector and download our pretrained weights
cfg = get_cfg()
add_centernet_config(cfg)
add_detic_config(cfg)
cfg.MODEL.DEVICE = "cpu"  # switch to "cuda" if a GPU is available
cfg.merge_from_file("configs/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml")
cfg.MODEL.WEIGHTS = "https://dl.fbaipublicfiles.com/detic/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.pth"
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5  # set threshold for this model
cfg.MODEL.ROI_BOX_HEAD.ZEROSHOT_WEIGHT_PATH = "rand"
cfg.MODEL.ROI_HEADS.ONE_CLASS_PER_PROPOSAL = True  # for better visualization; set to False to allow all classes per proposal
predictor = DefaultPredictor(cfg)

# Set up the model's vocabulary using built-in datasets

BUILDIN_CLASSIFIER = {
    "lvis": "datasets/metadata/lvis_v1_clip_a+cname.npy",
    "objects365": "datasets/metadata/o365_clip_a+cnamefix.npy",
    "openimages": "datasets/metadata/oid_clip_a+cname.npy",
    "coco": "datasets/metadata/coco_clip_a+cname.npy",
}

BUILDIN_METADATA_PATH = {
    "lvis": "lvis_v1_val",
    "objects365": "objects365_v2_val",
    "openimages": "oid_val_expanded",
    "coco": "coco_2017_val",
}

vocabulary = "lvis"  # one of: 'lvis', 'objects365', 'openimages', 'coco'
metadata = MetadataCatalog.get(BUILDIN_METADATA_PATH[vocabulary])
classifier = BUILDIN_CLASSIFIER[vocabulary]
num_classes = len(metadata.thing_classes)
reset_cls_test(predictor.model, classifier, num_classes)
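
# Optional: Detic also supports a free-form custom vocabulary, following the pattern in
# Detic's own demo code. The sketch below is commented out and not exercised here; the
# module path and signature of build_text_encoder are taken from that demo and may need
# adjusting for other Detic versions.
#
# from detic.modeling.text.text_encoder import build_text_encoder
#
# def get_clip_embeddings(vocabulary, prompt="a "):
#     text_encoder = build_text_encoder(pretrain=True)
#     text_encoder.eval()
#     texts = [prompt + x for x in vocabulary]
#     return text_encoder(texts).detach().permute(1, 0).contiguous().cpu()
#
# custom_metadata = MetadataCatalog.get("__unused")
# custom_metadata.thing_classes = ["laptop", "coffee mug", "headphones"]
# reset_cls_test(
#     predictor.model,
#     get_clip_embeddings(custom_metadata.thing_classes),
#     len(custom_metadata.thing_classes),
# )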



def inference(img, unique_only):
    # cv2.imread returns a BGR array, which is what Detic's DefaultPredictor expects
    im = cv2.imread(img)

    outputs = predictor(im)
    # Visualizer expects RGB, so reverse the channel order before drawing
    v = Visualizer(im[:, :, ::-1], metadata)
    out = v.draw_instance_predictions(outputs["instances"].to("cpu"))

    detected_objects = []
    object_list_str = []

    box_locations = outputs["instances"].pred_boxes
    box_loc_screen = box_locations.tensor.cpu().numpy()
    unique_object_dict = {}
    for i, box_coord in enumerate(box_loc_screen):
        x0, y0, x1, y1 = box_coord
        width = x1 - x0
        height = y1 - y0
        predicted_label = metadata.thing_classes[outputs["instances"].pred_classes[i]]
        detected_objects.append(
            {
                "prediction": predicted_label,
                "x": int(x0),
                "y": int(y0),
                "w": int(width),
                "h": int(height),
            }
        )
        # When "unique objects only" is checked, list each label at most once to keep the prompt short
        if not unique_only or predicted_label not in unique_object_dict:
            object_list_str.append(
                f"{predicted_label} - X: {int(x0)} Y: {int(y0)} Width: {int(width)} Height: {int(height)}"
            )
            unique_object_dict[predicted_label] = 1


    output_str = (
        "Imagine you are a blind but intelligent image captioner who is only given the X,Y coordinates "
        "and width, height of each object in a scene, with no specific attributes of the objects themselves. "
        "Create a description of the scene using the relative positions and sizes of the objects.\n"
    )
    for line in object_list_str:
        output_str += line + "\n"

    return (
        Image.fromarray(np.uint8(out.get_image())).convert("RGB"),
        output_str
    )
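
# Optional: instead of copy/pasting the prompt, it could be sent to ChatGPT directly with the
# pyChatGPT package imported above. A minimal sketch (commented out, not used by this demo),
# assuming pyChatGPT's session-token based ChatGPT(...).send_message(...) interface:
#
# def describe_with_chatgpt(img, session_token):
#     _, prompt = inference(img, unique_only=True)
#     api = ChatGPT(session_token)         # authenticate with a ChatGPT session token
#     response = api.send_message(prompt)  # assumed to return a dict with a "message" key
#     return response["message"]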


with gr.Blocks() as demo:
    gr.Markdown("<div style=\"font-size:22; color: #2f2f2f; text-align: center\"><b>Detic for ChatGPT</b></div> <i>")
    gr.Markdown("<div style=\"font-size:12; color: #6f6f6f; text-align: center\"><i>A duplicated tweak of  <a href=\"https://huggingface.co/spaces/taesiri/DeticChatGPT\">taesiri's Dectic/ChatGPT demo</a></i>")
    gr.Markdown("Use Detic to detect objects in an image and then copy/paste output text into your ChatGPT playground.")
    
    with gr.Column():
        inp = gr.Image(label="Input Image", type="filepath")
        chk = gr.Checkbox(label="Unique objects only? (useful to shorten the ChatGPT input, which speeds up its response and helps avoid timeouts)")
        btn_detic = gr.Button("Run Detic for ChatGPT")
    with gr.Column():
        outviz = gr.Image(label="Visualization", type="pil")
        output_desc = gr.Textbox(label="Description prompt to use in ChatGPT", lines=5)
        # outputjson = gr.JSON(label="Detected Objects")

    btn_detic.click(fn=inference, inputs=[inp, chk], outputs=[outviz, output_desc])

demo.launch()