Update app.py
"Switch to CPU"
app.py
CHANGED
@@ -1,4 +1,3 @@
-import requests
 import streamlit as st
 import cv2
 import numpy as np
@@ -7,7 +6,35 @@ import torch
 from diffusers import StableDiffusionPipeline
 import io
 
-
+def change_hair_to_blonde(image):
+    # Convert to OpenCV format
+    image = np.array(image)
+    # Convert the image to HSV color space
+    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
+
+    # Define the range for hair color (dark colors)
+    lower_hair = np.array([0, 0, 0])
+    upper_hair = np.array([180, 255, 30])
+
+    # Create a mask for hair
+    mask = cv2.inRange(hsv, lower_hair, upper_hair)
+
+    # Change hair color to blonde (light yellow)
+    hsv[mask > 0] = (30, 255, 200)
+
+    # Convert back to RGB color space
+    image_blonde = cv2.cvtColor(hsv, cv2.COLOR_HSV2RGB)
+    return image_blonde
+
+def add_noise(image):
+    # Convert to OpenCV format
+    image_np = np.array(image)
+    # Generate random noise
+    noise = np.random.normal(0, 25, image_np.shape).astype(np.uint8)
+    # Add noise to the image
+    noisy_image = cv2.add(image_np, noise)
+    return noisy_image
+
 def apply_deepfake(image):
     # Convert PIL image to bytes
     image_bytes = io.BytesIO()
@@ -16,8 +43,8 @@ def apply_deepfake(image):
 
     # Initialize the Stable Diffusion pipeline
     model_id = "CompVis/stable-diffusion-v1-4"
-    pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16)
-    pipe = pipe.to("cuda")
+    pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
+    pipe = pipe.to("cpu")
 
     prompt = "A person with a different face but same pose and background"
 
@@ -41,9 +68,7 @@ if uploaded_file is not None:
         st.image(image, caption='Original Image.', use_column_width=True)
     elif action == 'B':
         # Add noise to the original image
-
-        noise = np.random.normal(0, 25, image_np.shape).astype(np.uint8)
-        noisy_image = cv2.add(image_np, noise)
+        noisy_image = add_noise(image)
         st.image(noisy_image, caption='Image with Noise.', use_column_width=True)
     elif action == 'Deepfake':
         # Apply deepfake transformation