import cv2
import av
import numpy as np
def resize_aspect_fit(image, dim=(640, 480)):
    h, w = image.shape[:2]
    aspect_ratio = w / h
    target_width, target_height = dim
    target_aspect = target_width / target_height
    if aspect_ratio > target_aspect:
        # Original aspect is wider than target
        new_width = target_width
        new_height = int(target_width / aspect_ratio)
    else:
        # Original aspect is taller than target
        new_height = target_height
        new_width = int(target_height * aspect_ratio)
    resized_image = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_AREA)
    return resized_image
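# Minimal usage sketch for resize_aspect_fit: the whole image is kept, so the result may be
# smaller than the requested size in one dimension (no padding is added here).
#   img = cv2.imread("./images/zoom-background.png")
#   fitted = resize_aspect_fit(img, (640, 480))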
def resize_and_crop(image, dim=(640, 480)):
    h, w = image.shape[:2]
    aspect_ratio = w / h
    target_width, target_height = dim
    target_aspect = target_width / target_height
    if aspect_ratio > target_aspect:
        # Original aspect is wider than target, fit by height
        new_height = target_height
        new_width = int(target_height * aspect_ratio)
    else:
        # Original aspect is taller than target, fit by width
        new_width = target_width
        new_height = int(target_width / aspect_ratio)
    # Resize the image with new dimensions
    resized_image = cv2.resize(image, (new_width, new_height), interpolation=cv2.INTER_AREA)
    # Crop to target dimensions
    x_offset = (new_width - target_width) // 2
    y_offset = (new_height - target_height) // 2
    cropped_image = resized_image[y_offset:y_offset + target_height, x_offset:x_offset + target_width]
    return cropped_image
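# Minimal usage sketch for resize_and_crop: the target size is filled exactly and any overflow
# is centre-cropped away (contrast with resize_aspect_fit, which never crops).
#   img = cv2.imread("./images/zoom-background.png")
#   filled = resize_and_crop(img, (640, 480))
#   assert filled.shape[:2] == (480, 640)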
def overlay_images(background, overlay, x, y):
    """
    Overlay an image with transparency over another image.
    """
    # Check if overlay dimensions fit within the background at the given (x, y) position
    if y + overlay.shape[0] > background.shape[0] or x + overlay.shape[1] > background.shape[1]:
        raise ValueError("Overlay dimensions exceed background dimensions at the specified position.")
    if overlay.shape[2] < 4:
        raise ValueError("Overlay image must have an alpha channel.")
    # Extract the alpha channel from the overlay and create an inverse alpha channel
    alpha = overlay[:, :, 3] / 255.0
    inverse_alpha = 1.0 - alpha
    # Drop the alpha channel (already extracted above) and swap the remaining colour channels
    # so the overlay matches the channel order of the background
    overlay = cv2.cvtColor(overlay[:, :, :3], cv2.COLOR_BGR2RGB)
    # Blend the overlay into the background, channel by channel
    for c in range(3):
        background[y:y + overlay.shape[0], x:x + overlay.shape[1], c] = (
            alpha * overlay[:, :, c]
            + inverse_alpha * background[y:y + overlay.shape[0], x:x + overlay.shape[1], c]
        )
    return background
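# Minimal usage sketch for overlay_images (assumes the overlay PNG keeps its alpha channel,
# hence cv2.IMREAD_UNCHANGED):
#   bg = cv2.cvtColor(cv2.imread("./images/zoom-background.png"), cv2.COLOR_BGR2RGB)
#   fg = cv2.imread("./images/charles.png", cv2.IMREAD_UNCHANGED)
#   composed = overlay_images(bg.copy(), fg, x=bg.shape[1] - fg.shape[1], y=0)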
def transform_frame(user_frame: av.VideoFrame) -> av.VideoFrame:
    # Convert av.VideoFrame to a numpy array in OpenCV's packed BGR format
    user_frame_np = user_frame.to_ndarray(format="bgr24")
    # Load background image
    background = cv2.imread("zoom-background.png")
    # Load bot image (assuming it has an alpha channel for transparency)
    bot_image = cv2.imread("bot-image.png", cv2.IMREAD_UNCHANGED)
    # Resize background to match the user frame height, preserving its aspect ratio
    aspect_ratio = background.shape[1] / background.shape[0]
    new_h = user_frame.height
    new_w = int(new_h * aspect_ratio)
    background_resized = cv2.resize(background, (new_w, new_h))
    # Crop the background if it exceeds the user frame width
    if new_w > user_frame.width:
        crop_x1 = (new_w - user_frame.width) // 2
        crop_x2 = crop_x1 + user_frame.width
        background_resized = background_resized[:, crop_x1:crop_x2, :3]
    # Overlay bot image on the right-hand side
    x_bot = background_resized.shape[1] - bot_image.shape[1]
    y_bot = 0
    background_resized = overlay_images(background_resized, bot_image, x_bot, y_bot)
    # Overlay the user's video frame in the bottom-left corner
    x_user = 0
    y_user = background_resized.shape[0] - user_frame.height
    background_resized[y_user:y_user + user_frame.height, x_user:x_user + user_frame.width, :3] = user_frame_np
    # Convert the final frame back to av.VideoFrame (ensure the cropped view is contiguous)
    output_frame = av.VideoFrame.from_ndarray(np.ascontiguousarray(background_resized), format="bgr24")
    return output_frame
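# Hypothetical usage sketch for transform_frame with a PyAV container (assumes an "input.mp4"
# file and the two images above are present in the working directory):
#   container = av.open("input.mp4")
#   for frame in container.decode(video=0):
#       composed = transform_frame(frame)
#       cv2.imwrite("composed_frame.jpg", composed.to_ndarray(format="bgr24"))
#       break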
def create_charles_frames(background, charles_frames):
    output_frames = []
    # Load background image and convert it from OpenCV's BGR to RGB
    background = cv2.imread(background, cv2.IMREAD_COLOR)
    background = cv2.cvtColor(background, cv2.COLOR_BGR2RGB)
    # Resize background to the target frame size
    background = resize_and_crop(background, (640, 480))
    for bot_image_path in charles_frames:
        bot_image = cv2.imread(bot_image_path, cv2.IMREAD_UNCHANGED)
        # Assert bot image is square
        assert bot_image.shape[0] == bot_image.shape[1]
        # Resize bot image if it is larger than the background image in any direction
        if bot_image.shape[0] > background.shape[0]:
            bot_image = cv2.resize(bot_image, (background.shape[0], background.shape[0]), interpolation=cv2.INTER_AREA)
        # Overlay bot image on the right-hand side
        x_bot = background.shape[1] - bot_image.shape[1]
        y_bot = background.shape[0] - bot_image.shape[0]
        background_with_bot = overlay_images(background.copy(), bot_image, x_bot, y_bot)
        output_frames.append(background_with_bot)
    return output_frames
def test_create_bot_frames():
    frames = create_charles_frames("./images/zoom-background.png", ["./images/charles.png", "./images/charles-open.png"])
    for index, frame in enumerate(frames):
        final_frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
        cv2.imwrite(f"./images/charles_frame_{index}.jpg", final_frame_bgr)
def test_overlay():
    # Load mock user image and convert it from BGR to RGB
    user_image = cv2.imread("./prototypes/person-016.jpg", cv2.IMREAD_COLOR)
    user_image = cv2.cvtColor(user_image, cv2.COLOR_BGR2RGB)
    # Resize to 640x480, cropping as needed
    user_image = resize_and_crop(user_image, (640, 480))
    # Load background image and convert it from BGR to RGB
    background = cv2.imread("./images/zoom-background.png", cv2.IMREAD_COLOR)
    background = cv2.cvtColor(background, cv2.COLOR_BGR2RGB)
    # Resize background to match the user image
    background = resize_and_crop(background, (user_image.shape[1], user_image.shape[0]))
    # Load bot image (assuming it has an alpha channel for transparency)
    bot_image = cv2.imread("./images/charles-open.png", cv2.IMREAD_UNCHANGED)
    # Resize bot image if it is larger than the background image in any direction
    if bot_image.shape[0] > background.shape[0]:
        bot_image = cv2.resize(bot_image, (background.shape[0], background.shape[0]), interpolation=cv2.INTER_AREA)
    # Overlay bot image on the right-hand side
    x_bot = background.shape[1] - bot_image.shape[1]
    y_bot = background.shape[0] - bot_image.shape[0]
    background_with_bot = overlay_images(background.copy(), bot_image, x_bot, y_bot)
    # Overlay the user's frame in the bottom-left corner at 1/4 size
    user_frame = cv2.resize(user_image, (user_image.shape[1] // 4, user_image.shape[0] // 4), interpolation=cv2.INTER_AREA)
    x_user = 0
    y_user = background.shape[0] - user_frame.shape[0]
    final_frame = background_with_bot.copy()
    final_frame[y_user:y_user + user_frame.shape[0], x_user:x_user + user_frame.shape[1]] = user_frame
    # Save the final frame as JPEG (convert back to BGR for OpenCV)
    final_frame_bgr = cv2.cvtColor(final_frame, cv2.COLOR_RGB2BGR)
    cv2.imwrite("./images/final_frame.jpg", final_frame_bgr)
if __name__ == "__main__":
    test_overlay()
    test_create_bot_frames()