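# Real-Time AI-Enhanced Webcam demo (Gradio + Latent Consistency Model).
# Assumed dependencies: pip install gradio torch diffusers transformers accelerate numpy pillow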
import gradio as gr
import torch
import numpy as np
from diffusers import AutoPipelineForImage2Image
from PIL import Image

# Load a Latent Consistency Model (LCM) image-to-image pipeline.
# Note: "radames/Real-Time-Latent-Consistency-Model" is a Hugging Face Space, not a
# model checkpoint, so it cannot be loaded with from_pretrained(); the LCM weights
# commonly used for such real-time demos are "SimianLuo/LCM_Dreamshaper_v7".
device = "cuda" if torch.cuda.is_available() else "cpu"
realtime_pipe = AutoPipelineForImage2Image.from_pretrained(
    "SimianLuo/LCM_Dreamshaper_v7",
    torch_dtype=torch.float16 if device == "cuda" else torch.float32,
).to(device)

def process_frame(frame, prompt="A futuristic landscape"):
    """Process a single RGB frame using the latent consistency model."""
    
    # Gradio delivers webcam frames as RGB numpy arrays; resize to the model's resolution
    image = Image.fromarray(frame).resize((512, 512))
    
    # LCMs need only a handful of denoising steps, which is what makes
    # near-real-time generation feasible; adjust strength/guidance_scale to taste
    result = realtime_pipe(
        prompt=prompt,
        image=image,
        num_inference_steps=4,
        strength=0.5,
        guidance_scale=7.5,
    ).images[0]
    return np.array(result)

def video_stream(frame, prompt):
    """Runs each webcam frame streamed from the browser through the AI model."""
    if frame is None:
        return None, None
    processed = process_frame(frame, prompt)
    # Send the result to both the display image and the editable canvas
    return processed, processed

# Create Gradio App
with gr.Blocks() as demo:
    gr.Markdown("## 🎨 Real-Time AI-Enhanced Webcam using Latent Consistency Model")
    
    with gr.Row():
        webcam_feed = gr.Image(sources=["webcam"], streaming=True, label="Live Webcam")
        processed_image = gr.Image(label="AI-Enhanced Webcam Feed")
        canvas = gr.Image(interactive=True, label="Canvas - Edit Processed Image")
    
    prompt_input = gr.Textbox(label="Real-Time Latent Consistency Model Prompt", value="A futuristic landscape")
    
    # Stream each captured frame through the model as it arrives
    webcam_feed.stream(fn=video_stream, inputs=[webcam_feed, prompt_input], outputs=[processed_image, canvas])

demo.launch(share=True)
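
# share=True creates a temporary public Gradio link. Each frame runs a full img2img
# pass through the diffusion model, so a CUDA GPU is strongly recommended for anything
# close to real-time throughput.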