import gradio as gr
import numpy as np
import torch
from PIL import Image
from segment_anything import SamPredictor, sam_model_registry, SamAutomaticMaskGenerator

from transformers import pipeline

import colorsys

# Settings for the local segment_anything checkpoint used by the commented-out
# loading path below; the demo currently relies on the hosted pipeline instead.
sam_checkpoint = "sam_vit_h_4b8939.pth"
model_type = "vit_h"
device = "cuda" if torch.cuda.is_available() else "cpu"


#sam = sam_model_registry[model_type](checkpoint=sam_checkpoint)
#sam.to(device=device)
#predictor = SamPredictor(sam)
#mask_generator = SamAutomaticMaskGenerator(sam)

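# Hosted SAM checkpoint served through the Hugging Face "mask-generation" pipeline.
# Calling generator(pil_image) returns a dict with "masks" (one boolean mask per
# detected object) and "scores".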
generator = pipeline(model="facebook/sam-vit-base", task="mask-generation", points_per_batch=256)
#image_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"

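# Planned generation path: a SAM-conditioned ControlNet on top of Stable Diffusion,
# loaded via the Flax/JAX diffusers pipelines. Kept commented out until the
# jax/flax dependencies and prompt inputs are wired up.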
# controlnet, controlnet_params = FlaxControlNetModel.from_pretrained(
#     "SAMControlNet/sd-controlnet-sam-seg", dtype=jnp.float32
# )

# pipe, params = FlaxStableDiffusionControlNetPipeline.from_pretrained(
#     "runwayml/stable-diffusion-v1-5",
#     controlnet=controlnet,
#     revision="flax",
#     dtype=jnp.bfloat16,
# )

# params["controlnet"] = controlnet_params
# p_params = replicate(params)


with gr.Blocks() as demo:
    gr.Markdown("# Ahsans version WildSynth: Synthetic Wildlife Data Generation")
    gr.Markdown(
        """
        ## Work in Progress
        ### About
        WildSynth generates synthetic wildlife imagery. Segment Anything (SAM) produces
        segmentation masks for an uploaded image; a SAM-conditioned ControlNet +
        Stable Diffusion generation step is planned but not wired up yet.

        ### How To Use
        Upload an image in the **Input** panel. A segmentation mask is generated
        automatically and shown in the **Mask** panel. The **Submit** and **Clear**
        buttons are placeholders until the generation step is enabled.
        """
    )
    with gr.Row():
        input_img = gr.Image(label="Input", type="pil")
        mask_img = gr.Image(label="Mask", interactive=False)
        output_img = gr.Image(label="Output", interactive=False)

    with gr.Row():
        submit = gr.Button("Submit")
        clear = gr.Button("Clear")

    def generate_mask(image):
        """Run SAM over the uploaded image and return a colour overlay of every mask."""
        outputs = generator(image, points_per_batch=256)
        # Blend each predicted mask into the original image with a random colour.
        # Masks come back as boolean arrays (or CPU tensors) at the input resolution.
        overlay = np.array(image.convert("RGB"), dtype=np.float32) / 255.0
        for mask in outputs["masks"]:
            mask = np.asarray(mask, dtype=bool)
            color = np.random.random(3)
            overlay[mask] = 0.5 * overlay[mask] + 0.5 * color
        return Image.fromarray((overlay * 255).astype(np.uint8), "RGB")

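    # Planned inference and reset handlers for the Flax ControlNet pipeline above;
    # kept commented out until the prompt textboxes and jax/flax imports exist.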
    # def infer(
    #     image, prompts, negative_prompts, num_inference_steps=50, seed=4, num_samples=4
    # ):
    #     try:
    #         rng = jax.random.PRNGKey(int(seed))
    #         num_inference_steps = int(num_inference_steps)
    #         image = Image.fromarray(image, mode="RGB")
    #         num_samples = max(jax.device_count(), int(num_samples))
    #         p_rng = jax.random.split(rng, jax.device_count())

    #         prompt_ids = pipe.prepare_text_inputs([prompts] * num_samples)
    #         negative_prompt_ids = pipe.prepare_text_inputs(
    #             [negative_prompts] * num_samples
    #         )
    #         processed_image = pipe.prepare_image_inputs([image] * num_samples)

    #         prompt_ids = shard(prompt_ids)
    #         negative_prompt_ids = shard(negative_prompt_ids)
    #         processed_image = shard(processed_image)

    #         output = pipe(
    #             prompt_ids=prompt_ids,
    #             image=processed_image,
    #             params=p_params,
    #             prng_seed=p_rng,
    #             num_inference_steps=num_inference_steps,
    #             neg_prompt_ids=negative_prompt_ids,
    #             jit=True,
    #         ).images

    #         del negative_prompt_ids
    #         del processed_image
    #         del prompt_ids

    #         output = output.reshape((num_samples,) + output.shape[-3:])
    #         final_image = [np.array(x * 255, dtype=np.uint8) for x in output]
    #         print(output.shape)
    #         del output

    #     except Exception as e:
    #         print("Error: " + str(e))
    #         final_image = [np.zeros((512, 512, 3), dtype=np.uint8)] * num_samples
    #     finally:
    #         gc.collect()
    #         return final_image

    # def _clear(sel_pix, img, mask, seg, out, prompt, neg_prompt, bg):
    #     img = None
    #     mask = None
    #     seg = None
    #     out = None
    #     prompt = ""
    #     neg_prompt = ""
    #     bg = False
    #     return img, mask, seg, out, prompt, neg_prompt, bg

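    # Regenerate the mask preview whenever a new image is uploaded.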
    input_img.change(
        generate_mask,
        inputs=[input_img],
        outputs=[mask_img],
    )
    # submit.click(
    #     infer,
    #     inputs=[mask_img, prompt_text, negative_prompt_text],
    #     outputs=[output_img],
    # )
    # clear.click(
    #     _clear,
    #     inputs=[
    #         input_img,
    #         mask_img,
    #         output_img,
    #         prompt_text,
    #         negative_prompt_text,
    #     ],
    #     outputs=[
    #         input_img,
    #         mask_img,
    #         output_img,
    #         prompt_text,
    #         negative_prompt_text,
    #     ],
    # )

if __name__ == "__main__":
    demo.queue()
    demo.launch()