import gradio as gr
import numpy as np
import cv2

def create_dot_effect(image, dot_size=10, spacing=2, invert=False):
    # Ensure integer parameters (Gradio sliders may pass float values)
    dot_size = int(dot_size)
    spacing = int(spacing)

    # Convert to grayscale if image is color
    if len(image.shape) == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    else:
        gray = image
    
    # Apply adaptive thresholding; this binarizes the image (0 or 255) locally,
    # so each region's mean brightness reflects how much of it is "on"
    gray = cv2.adaptiveThreshold(
        gray,
        255,
        cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        cv2.THRESH_BINARY,
        25,  # Block size
        5    # Constant subtracted from mean
    )
    
    # Create a blank canvas with white background if inverted
    height, width = gray.shape
    canvas = np.zeros_like(gray) if not invert else np.full_like(gray, 255)
    
    # Calculate number of dots based on spacing
    y_dots = range(0, height, dot_size + spacing)
    x_dots = range(0, width, dot_size + spacing)
    
    # Create dots based on brightness
    dot_color = 255 if not invert else 0
    for y in y_dots:
        for x in x_dots:
            # Get the average brightness of the region
            region = gray[y:min(y+dot_size, height), x:min(x+dot_size, width)]
            if region.size > 0:
                brightness = np.mean(region)
                
                # Dynamic dot sizing based on brightness
                relative_brightness = brightness / 255.0
                if invert:
                    relative_brightness = 1 - relative_brightness
                
                # Draw circle with size proportional to brightness
                radius = int((dot_size/2) * relative_brightness)
                if radius > 0:
                    cv2.circle(canvas, 
                             (x + dot_size//2, y + dot_size//2), 
                             radius, 
                             (dot_color), 
                             -1)
    
    return canvas
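
# Optional sanity check for create_dot_effect, never called by the Gradio app.
# A minimal sketch: the synthetic gradient input and the output filename
# "dot_effect_preview.png" are illustrative choices, not part of the original app.
def _preview_dot_effect(output_path="dot_effect_preview.png"):
    # Horizontal grayscale gradient as a stand-in input image
    gradient = np.tile(np.linspace(0, 255, 256, dtype=np.uint8), (256, 1))
    dotted = create_dot_effect(gradient, dot_size=8, spacing=2)
    # Write the result to disk so it can be inspected next to the input
    cv2.imwrite(output_path, dotted)
    return output_path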

def process_video(video_path, dot_size=10, spacing=2, invert=False):
    # Read the video
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return None

    # Get video properties
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    
    # Calculate target dimensions (max 720p for performance)
    max_height = 720
    if frame_height > max_height:
        scale = max_height / frame_height
        frame_width = int(frame_width * scale)
        frame_height = max_height
    
    # Create temporary output file
    output_path = "temp_output.mp4"
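    # Note: 'avc1' (H.264) needs an OpenCV build with an H.264 encoder available;
    # 'mp4v' is a common fallback if the writer fails to open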
    fourcc = cv2.VideoWriter_fourcc(*'avc1')
    out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height), False)

    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:
                break
            
            # Resize frame if needed
            if frame.shape[0] > max_height:
                frame = cv2.resize(frame, (frame_width, frame_height))
            
            # Convert BGR to RGB for processing
            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            
            # Apply dot effect
            dotted_frame = create_dot_effect(frame_rgb, dot_size, spacing, invert)
            
            # Write the frame
            out.write(dotted_frame)

    finally:
        # Ensure resources are released
        cap.release()
        out.release()
    
    return output_path
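
# Optional helper for running process_video outside the Gradio UI (a sketch; the
# default path "input.mp4" is a placeholder, not a file that ships with this app).
def _convert_video_file(video_path="input.mp4", dot_size=8, spacing=2, invert=True):
    result = process_video(video_path, dot_size=dot_size, spacing=spacing, invert=invert)
    if result is None:
        print(f"Could not open video: {video_path}")
    else:
        print(f"Dotted video written to {result}")
    return result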

# Create Gradio interface
with gr.Blocks(title="ChatGPT Ad Maker") as iface:
    gr.Markdown("# ChatGPT Ad Maker")
    gr.Markdown("Convert your image or video into a dotted pattern. Adjust dot size and spacing using the sliders.")
    
    with gr.Tab("Image"):
        image_input = gr.Image(label="Input Image")
        with gr.Row():
            img_dot_size = gr.Slider(minimum=2, maximum=20, value=10, step=1, label="Dot Size")
            img_spacing = gr.Slider(minimum=0, maximum=10, value=2, step=1, label="Dot Spacing")
        image_output = gr.Image(label="Dotted Output")
        image_button = gr.Button("Process Image")
        image_button.click(
            fn=create_dot_effect,
            inputs=[image_input, img_dot_size, img_spacing],
            outputs=image_output
        )
    
    with gr.Tab("Video"):
        video_input = gr.Video(label="Input Video")
        with gr.Row():
            vid_dot_size = gr.Slider(minimum=2, maximum=20, value=10, step=1, label="Dot Size")
            vid_spacing = gr.Slider(minimum=0, maximum=10, value=2, step=1, label="Dot Spacing")
            vid_invert = gr.Checkbox(label="Invert", value=False)
        video_output = gr.Video(label="Dotted Output", format="mp4")
        video_button = gr.Button("Process Video")
        video_button.click(
            fn=process_video,
            inputs=[video_input, vid_dot_size, vid_spacing, vid_invert],
            outputs=video_output
        )

if __name__ == "__main__":
    iface.launch()