# chatgpt-ad-maker.py
import gradio as gr
import numpy as np
import cv2
def create_dot_effect(image, dot_size=10, spacing=2, invert=False):
    """Render a halftone-style dot pattern whose dot radii track local brightness."""
    dot_size, spacing = int(dot_size), int(spacing)  # slider values may arrive as floats
    # Convert to grayscale if the image is color
    if len(image.shape) == 3:
        gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    else:
        gray = image
# Apply adaptive thresholding to improve contrast
    gray = cv2.adaptiveThreshold(
        gray,
        255,
        cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
        cv2.THRESH_BINARY,
        25,  # Block size (must be odd)
        5    # Constant subtracted from the local weighted mean
    )
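    # (Each pixel is thresholded against its 25x25 Gaussian-weighted neighborhood,
    # which preserves detail in bright and dark regions better than a single
    # global threshold.)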
# Create a blank canvas with white background if inverted
height, width = gray.shape
canvas = np.zeros_like(gray) if not invert else np.full_like(gray, 255)
    # Grid of dot positions, stepped by dot size plus spacing
    y_dots = range(0, height, dot_size + spacing)
    x_dots = range(0, width, dot_size + spacing)
# Create dots based on brightness
dot_color = 255 if not invert else 0
for y in y_dots:
for x in x_dots:
# Get the average brightness of the region
region = gray[y:min(y+dot_size, height), x:min(x+dot_size, width)]
if region.size > 0:
brightness = np.mean(region)
# Dynamic dot sizing based on brightness
relative_brightness = brightness / 255.0
if invert:
relative_brightness = 1 - relative_brightness
# Draw circle with size proportional to brightness
radius = int((dot_size/2) * relative_brightness)
if radius > 0:
cv2.circle(canvas,
(x + dot_size//2, y + dot_size//2),
radius,
(dot_color),
-1)
return canvas
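# Usage sketch (not part of the Space UI): applying the effect to a local file
# with OpenCV; "input.jpg" and "dotted.png" are placeholder paths.
#   img = cv2.imread("input.jpg")                    # BGR uint8
#   img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)   # match the RGB input Gradio provides
#   dotted = create_dot_effect(img_rgb, dot_size=8, spacing=1, invert=False)
#   cv2.imwrite("dotted.png", dotted)                # single-channel output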
def process_video(video_path, dot_size=10, spacing=2, invert=False):
    """Apply the dot effect to every frame of a video and write an MP4."""
    # Open the video
    cap = cv2.VideoCapture(video_path)
    if not cap.isOpened():
        return None
    # Get video properties (fall back to 30 fps if the container reports none)
    fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30
    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# Calculate target dimensions (max 720p for performance)
max_height = 720
if frame_height > max_height:
scale = max_height / frame_height
frame_width = int(frame_width * scale)
frame_height = max_height
# Create temporary output file
output_path = "temp_output.mp4"
fourcc = cv2.VideoWriter_fourcc(*'avc1')
out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height), False)
try:
while cap.isOpened():
ret, frame = cap.read()
if not ret:
break
# Resize frame if needed
if frame.shape[0] > max_height:
frame = cv2.resize(frame, (frame_width, frame_height))
# Convert BGR to RGB for processing
frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
# Apply dot effect
dotted_frame = create_dot_effect(frame_rgb, dot_size, spacing, invert)
# Write the frame
out.write(dotted_frame)
finally:
# Ensure resources are released
cap.release()
out.release()
return output_path
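# Usage sketch: converting a local clip outside Gradio; "clip.mp4" is a
# placeholder path, and the function returns "temp_output.mp4" on success or
# None if the input could not be opened.
#   result = process_video("clip.mp4", dot_size=6, spacing=1, invert=True)
#   if result:
#       print(f"Wrote {result}")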
# Create Gradio interface
with gr.Blocks(title="ChatGPT Ad Maker") as iface:
gr.Markdown("# ChatGPT Ad Maker")
gr.Markdown("Convert your image or video into a dotted pattern. Adjust dot size and spacing using the sliders.")
with gr.Tab("Image"):
image_input = gr.Image(label="Input Image")
with gr.Row():
img_dot_size = gr.Slider(minimum=2, maximum=20, value=10, step=1, label="Dot Size")
img_spacing = gr.Slider(minimum=0, maximum=10, value=2, step=1, label="Dot Spacing")
image_output = gr.Image(label="Dotted Output")
image_button = gr.Button("Process Image")
image_button.click(
fn=create_dot_effect,
inputs=[image_input, img_dot_size, img_spacing],
outputs=image_output
)
with gr.Tab("Video"):
video_input = gr.Video(label="Input Video")
with gr.Row():
vid_dot_size = gr.Slider(minimum=2, maximum=20, value=10, step=1, label="Dot Size")
vid_spacing = gr.Slider(minimum=0, maximum=10, value=2, step=1, label="Dot Spacing")
vid_invert = gr.Checkbox(label="Invert", value=False)
video_output = gr.Video(label="Dotted Output", format="mp4")
video_button = gr.Button("Process Video")
video_button.click(
fn=process_video,
inputs=[video_input, vid_dot_size, vid_spacing, vid_invert],
outputs=video_output
)
if __name__ == "__main__":
iface.launch()
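# Launch-option sketch (assuming a local run rather than a Hugging Face Space):
#   iface.launch(share=True)               # temporary public link via Gradio's tunnel
#   iface.launch(server_name="0.0.0.0")    # listen on all network interfaces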