akhaliq (HF staff) committed (verified)
Commit: c138338 · 1 Parent(s): 0f519d6

Update chatgpt-ad-maker.py

Files changed (1):
  1. chatgpt-ad-maker.py (+57 -22)
chatgpt-ad-maker.py CHANGED
@@ -2,22 +2,33 @@ import gradio as gr
 import numpy as np
 import cv2
 
-def create_dot_effect(image, dot_size=10, spacing=2):
+def create_dot_effect(image, dot_size=10, spacing=2, invert=False):
     # Convert to grayscale if image is color
     if len(image.shape) == 3:
         gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
     else:
         gray = image
 
-    # Create a blank canvas
+    # Apply adaptive thresholding to improve contrast
+    gray = cv2.adaptiveThreshold(
+        gray,
+        255,
+        cv2.ADAPTIVE_THRESH_GAUSSIAN_C,
+        cv2.THRESH_BINARY,
+        25,  # Block size
+        5    # Constant subtracted from mean
+    )
+
+    # Create a blank canvas with white background if inverted
     height, width = gray.shape
-    canvas = np.zeros_like(gray)
+    canvas = np.zeros_like(gray) if not invert else np.full_like(gray, 255)
 
     # Calculate number of dots based on spacing
     y_dots = range(0, height, dot_size + spacing)
     x_dots = range(0, width, dot_size + spacing)
 
     # Create dots based on brightness
+    dot_color = 255 if not invert else 0
     for y in y_dots:
         for x in x_dots:
             # Get the average brightness of the region
@@ -25,17 +36,23 @@ def create_dot_effect(image, dot_size=10, spacing=2):
             if region.size > 0:
                 brightness = np.mean(region)
 
-                # Draw circle if the region is bright enough
-                if brightness > 30:  # Threshold can be adjusted
+                # Dynamic dot sizing based on brightness
+                relative_brightness = brightness / 255.0
+                if invert:
+                    relative_brightness = 1 - relative_brightness
+
+                # Draw circle with size proportional to brightness
+                radius = int((dot_size/2) * relative_brightness)
+                if radius > 0:
                     cv2.circle(canvas,
                                (x + dot_size//2, y + dot_size//2),
-                               dot_size//2,
-                               (255),
+                               radius,
+                               (dot_color),
                                -1)
 
     return canvas
 
-def process_video(video_path, dot_size=10, spacing=2):
+def process_video(video_path, dot_size=10, spacing=2, invert=False):
     # Read the video
     cap = cv2.VideoCapture(video_path)
     if not cap.isOpened():
@@ -46,24 +63,41 @@
     frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
     frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
 
-    # Create temporary output file with mp4v codec
+    # Calculate target dimensions (max 720p for performance)
+    max_height = 720
+    if frame_height > max_height:
+        scale = max_height / frame_height
+        frame_width = int(frame_width * scale)
+        frame_height = max_height
+
+    # Create temporary output file
     output_path = "temp_output.mp4"
-    fourcc = cv2.VideoWriter_fourcc(*'avc1')  # Changed from 'mp4v' to 'avc1' (h264 codec)
+    fourcc = cv2.VideoWriter_fourcc(*'avc1')
     out = cv2.VideoWriter(output_path, fourcc, fps, (frame_width, frame_height), False)
 
-    while cap.isOpened():
-        ret, frame = cap.read()
-        if not ret:
-            break
+    try:
+        while cap.isOpened():
+            ret, frame = cap.read()
+            if not ret:
+                break
 
-        # Convert BGR to RGB for processing
-        frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
-        # Apply dot effect
-        dotted_frame = create_dot_effect(frame_rgb, dot_size, spacing)
-        out.write(dotted_frame)
+            # Resize frame if needed
+            if frame.shape[0] > max_height:
+                frame = cv2.resize(frame, (frame_width, frame_height))
+
+            # Convert BGR to RGB for processing
+            frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
+
+            # Apply dot effect
+            dotted_frame = create_dot_effect(frame_rgb, dot_size, spacing, invert)
+
+            # Write the frame
+            out.write(dotted_frame)
 
-    cap.release()
-    out.release()
+    finally:
+        # Ensure resources are released
+        cap.release()
+        out.release()
 
     return output_path
 
@@ -90,11 +124,12 @@ with gr.Blocks(title="ChatGPT Ad Maker") as iface:
         with gr.Row():
            vid_dot_size = gr.Slider(minimum=2, maximum=20, value=10, step=1, label="Dot Size")
            vid_spacing = gr.Slider(minimum=0, maximum=10, value=2, step=1, label="Dot Spacing")
+           vid_invert = gr.Checkbox(label="Invert", value=False)
        video_output = gr.Video(label="Dotted Output", format="mp4")
        video_button = gr.Button("Process Video")
        video_button.click(
            fn=process_video,
-           inputs=[video_input, vid_dot_size, vid_spacing],
+           inputs=[video_input, vid_dot_size, vid_spacing, vid_invert],
            outputs=video_output
        )
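
For reference, a minimal sketch (not part of the commit) of how the updated create_dot_effect could be exercised directly, outside the Gradio UI, assuming the function is defined as in the diff above; the file names "input.jpg" and "dots.png" are hypothetical placeholders:

import cv2

img = cv2.imread("input.jpg")                 # hypothetical input, loaded as BGR
rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)    # the app passes RGB arrays into the effect
dots = create_dot_effect(rgb, dot_size=10, spacing=2, invert=True)  # white canvas, black dots
cv2.imwrite("dots.png", dots)                 # single-channel uint8 canvas

Similarly, process_video("clip.mp4", dot_size=10, spacing=2, invert=False) should return the path of the re-encoded "temp_output.mp4" (again, "clip.mp4" is only a placeholder), provided the local OpenCV build supports the 'avc1' fourcc.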