Spaces:
Runtime error
make pil and then convert
app.py
CHANGED
@@ -6,6 +6,7 @@ import torchvision
 import torchvision.transforms as T
 #from torchvision.transforms import v2 as T2
 import cv2
+import PIL
 from PIL import Image
 import numpy as np
 
@@ -13,9 +14,9 @@ output_res = (768,768)
 
 conditioning_image_transforms = T.Compose(
     [
-        #T2.ScaleJitter(target_size=output_res, scale_range=(0.5, 3.0)),
+        #T2.ScaleJitter(target_size=output_res, scale_range=(0.5, 3.0))),
         T.RandomCrop(size=output_res, pad_if_needed=True, padding_mode="symmetric"),
-        T.
+        T.PILToTensor(),
         T.Normalize([0.5], [0.5]),
     ]
 )
@@ -35,8 +36,9 @@ generator = torch.manual_seed(0)
 # inference function takes prompt, negative prompt and image
 def infer(prompt, negative_prompt, image):
     # implement your inference function here
+    inp = PIL.Image.create(image)
 
-    cond_input = conditioning_image_transforms(
+    cond_input = conditioning_image_transforms(image)
     #cond_input = T.ToPILImage(cond_input)
 
     output = pipe(
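Even after this change the Space still shows "Runtime error", and the diff suggests why: PIL.Image.create is not a Pillow API (PILImage.create is a fastai helper; Pillow uses Image.fromarray or Image.open), the freshly built inp is never used (conditioning_image_transforms still receives the raw image), and T.Normalize requires a float tensor while T.PILToTensor produces uint8. Below is a minimal sketch of the preprocessing the commit message ("make pil and then convert") seems to intend, assuming the Gradio input arrives as an HxWxC uint8 numpy array; the preprocess helper and the ConvertImageDtype step are illustrative additions, not part of the original file.

import numpy as np
import torch
import torchvision.transforms as T
from PIL import Image

output_res = (768, 768)

conditioning_image_transforms = T.Compose(
    [
        T.RandomCrop(size=output_res, pad_if_needed=True, padding_mode="symmetric"),
        T.PILToTensor(),                   # PIL image -> uint8 tensor in [0, 255]
        T.ConvertImageDtype(torch.float),  # uint8 -> float32 in [0, 1]; Normalize needs float input
        T.Normalize([0.5], [0.5]),         # map [0, 1] to [-1, 1]
    ]
)

def preprocess(image):
    # "make pil and then convert": build the PIL image first, then transform it
    inp = Image.fromarray(image) if isinstance(image, np.ndarray) else image
    return conditioning_image_transforms(inp)

Inside infer, cond_input = preprocess(image) would then hand pipe a normalized float tensor rather than the raw Gradio array.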