Upload 5 files
- utils/dataset.py +84 -0
- utils/dataset_jichao.py +100 -0
- utils/dataset_mv.py +93 -0
- utils/dataset_stable_hair.py +68 -0
- utils/pipeline.py +0 -0
utils/dataset.py
ADDED
@@ -0,0 +1,84 @@
from torch.utils import data
import os
import torch
import numpy as np
import cv2
import random


class myDataset(data.Dataset):
    """Custom data.Dataset compatible with data.DataLoader."""

    def __init__(self, train_data_dir):
        self.img_path = os.path.join(train_data_dir, "hair")
        self.pose_path = os.path.join(train_data_dir, "pose.npy")
        self.non_hair_path = os.path.join(train_data_dir, "no_hair")
        self.ref_path = os.path.join(train_data_dir, "ref_hair")

        self.lists = os.listdir(self.img_path)
        self.len = len(self.lists)
        self.pose = np.load(self.pose_path)

    def __getitem__(self, index):
        """Returns one data pair (source and target)."""
        # Draw two distinct view indices out of the 120 frames per identity.
        random_number1 = random.randrange(0, 120)
        random_number2 = random.randrange(0, 120)
        while random_number2 == random_number1:
            random_number2 = random.randrange(0, 120)
        name = self.lists[index]

        hair_path = os.path.join(self.img_path, name, str(random_number1) + '.jpg')
        non_hair_path = os.path.join(self.non_hair_path, name, str(random_number2) + '.jpg')
        ref_folder = os.path.join(self.ref_path, name)
        files = [f for f in os.listdir(ref_folder) if f.endswith('.jpg')]
        ref_path = os.path.join(ref_folder, files[0])

        img_hair = cv2.imread(hair_path)
        img_non_hair = cv2.imread(non_hair_path)
        ref_hair = cv2.imread(ref_path)

        img_hair = cv2.cvtColor(img_hair, cv2.COLOR_BGR2RGB)
        img_non_hair = cv2.cvtColor(img_non_hair, cv2.COLOR_BGR2RGB)
        ref_hair = cv2.cvtColor(ref_hair, cv2.COLOR_BGR2RGB)

        img_hair = cv2.resize(img_hair, (512, 512))
        img_non_hair = cv2.resize(img_non_hair, (512, 512))
        ref_hair = cv2.resize(ref_hair, (512, 512))

        # Hair and reference images are scaled to [-1, 1]; the non-hair image
        # stays in [0, 1], presumably for use as a conditioning input.
        img_hair = (img_hair / 255.0) * 2 - 1
        img_non_hair = img_non_hair / 255.0
        ref_hair = (ref_hair / 255.0) * 2 - 1

        # HWC -> CHW
        img_hair = torch.tensor(img_hair).permute(2, 0, 1)
        img_non_hair = torch.tensor(img_non_hair).permute(2, 0, 1)
        ref_hair = torch.tensor(ref_hair).permute(2, 0, 1)

        pose1 = torch.tensor(self.pose[random_number1])
        pose2 = torch.tensor(self.pose[random_number2])

        return {
            'hair_pose': pose1,
            'img_hair': img_hair,
            'bald_pose': pose2,
            'img_non_hair': img_non_hair,
            'ref_hair': ref_hair
        }

    def __len__(self):
        return self.len


if __name__ == "__main__":
    train_dataset = myDataset("./data")
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=1,
        num_workers=1,
    )

    for epoch in range(0, len(train_dataset) + 1):
        for step, batch in enumerate(train_dataloader):
            print("batch[hair_pose]:", batch["hair_pose"])
            print("batch[img_hair]:", batch["img_hair"])
            print("batch[bald_pose]:", batch["bald_pose"])
            print("batch[img_non_hair]:", batch["img_non_hair"])
            print("batch[ref_hair]:", batch["ref_hair"])
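For reference, the directory layout myDataset reads from can be sanity-checked with a short script. This is a minimal sketch inferred from the paths above: the 120-frame count and the hair/no_hair/ref_hair/pose.npy names come from the code, while check_layout and the ./data root are just illustrative.

import os
import numpy as np

def check_layout(root="./data"):
    # pose.npy must cover every view index the dataset can draw (0..119).
    pose = np.load(os.path.join(root, "pose.npy"))
    assert len(pose) >= 120, "pose.npy should have at least 120 entries"

    for name in os.listdir(os.path.join(root, "hair")):
        # hair/ and no_hair/ need one numbered frame per sampled index.
        for sub in ("hair", "no_hair"):
            folder = os.path.join(root, sub, name)
            missing = [i for i in range(120)
                       if not os.path.isfile(os.path.join(folder, f"{i}.jpg"))]
            assert not missing, f"{folder} is missing frames {missing[:5]}"
        # ref_hair/<name>/ needs at least one .jpg (the dataset takes the first).
        refs = [f for f in os.listdir(os.path.join(root, "ref_hair", name))
                if f.endswith(".jpg")]
        assert refs, f"no reference .jpg for {name}"

if __name__ == "__main__":
    check_layout()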
utils/dataset_jichao.py
ADDED
@@ -0,0 +1,100 @@
from torch.utils import data
import os
import torch
import numpy as np
import cv2
import random


class myDataset(data.Dataset):
    """Custom data.Dataset compatible with data.DataLoader."""

    def __init__(self, train_data_dir):
        self.img_path = os.path.join(train_data_dir, "hair")
        self.pose_path = os.path.join(train_data_dir, "pose.npy")
        self.non_hair_path = os.path.join(train_data_dir, "no_hair")
        self.ref_path = os.path.join(train_data_dir, "ref_hair")
        self.lists = os.listdir(self.img_path)
        self.len = len(self.lists)
        self.pose = np.load(self.pose_path)

    def __getitem__(self, index):
        """Returns one data pair (source and target)."""
        # Draw two distinct indices from the 12 sub-sampled views.
        random_number1 = random.randrange(0, 12)
        random_number2 = random.randrange(0, 12)
        while random_number2 == random_number1:
            random_number2 = random.randrange(0, 12)
        name = self.lists[index]

        # Map sub-sampled indices back to frame numbers 0, 10, ..., 110.
        random_number1 = random_number1 * 10
        random_number2 = random_number2 * 10

        hair_path = os.path.join(self.img_path, name, str(random_number1) + '.jpg')
        non_hair_path = os.path.join(self.non_hair_path, name, str(random_number2) + '.jpg')
        ref_folder = os.path.join(self.ref_path, name)

        files = [f for f in os.listdir(ref_folder) if f.endswith('.jpg')]
        ref_path = os.path.join(ref_folder, files[0])
        img_hair = cv2.imread(hair_path)
        img_non_hair = cv2.imread(non_hair_path)
        ref_hair = cv2.imread(ref_path)

        img_hair = cv2.cvtColor(img_hair, cv2.COLOR_BGR2RGB)
        img_non_hair = cv2.cvtColor(img_non_hair, cv2.COLOR_BGR2RGB)
        ref_hair = cv2.cvtColor(ref_hair, cv2.COLOR_BGR2RGB)

        img_hair = cv2.resize(img_hair, (512, 512))
        img_non_hair = cv2.resize(img_non_hair, (512, 512))
        ref_hair = cv2.resize(ref_hair, (512, 512))

        # Scale all three images to [-1, 1] exactly once.
        img_hair = (img_hair / 255.0) * 2 - 1
        img_non_hair = (img_non_hair / 255.0) * 2 - 1
        ref_hair = (ref_hair / 255.0) * 2 - 1

        # HWC -> CHW
        img_hair = torch.tensor(img_hair).permute(2, 0, 1)
        img_non_hair = torch.tensor(img_non_hair).permute(2, 0, 1)
        ref_hair = torch.tensor(ref_hair).permute(2, 0, 1)

        pose1 = torch.tensor(self.pose[random_number1])
        pose2 = torch.tensor(self.pose[random_number2])

        return {
            'hair_pose': pose1,
            'img_hair': img_hair,
            'bald_pose': pose2,
            'img_non_hair': img_non_hair,
            'ref_hair': ref_hair
        }

    def __len__(self):
        return self.len


if __name__ == "__main__":
    train_dataset = myDataset("./data")
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=1,
        num_workers=1,
    )

    for epoch in range(0, len(train_dataset) + 1):
        for step, batch in enumerate(train_dataloader):
            print("batch[hair_pose]:", batch["hair_pose"])
            print("batch[img_hair]:", batch["img_hair"])
            print("batch[bald_pose]:", batch["bald_pose"])
            print("batch[img_non_hair]:", batch["img_non_hair"])
            print("batch[ref_hair]:", batch["ref_hair"])
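The main differences from utils/dataset.py are that all three images are normalized to [-1, 1] and that only a 12-view subset of the 120 frames is used: indices are drawn from 0..11 and multiplied by 10. A one-line illustration of that mapping:

frames = [i * 10 for i in range(12)]
print(frames)  # [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110]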
utils/dataset_mv.py
ADDED
@@ -0,0 +1,93 @@
from torch.utils import data
import os
import torch
import numpy as np
import cv2
import random


class myDataset(data.Dataset):
    """Custom data.Dataset compatible with data.DataLoader."""

    def __init__(self, train_data_dir):
        self.img_path = os.path.join(train_data_dir, "hair")
        self.pose_path = os.path.join(train_data_dir, "pose.npy")
        self.non_hair_path = os.path.join(train_data_dir, "no_hair")
        self.ref_path = os.path.join(train_data_dir, "ref_hair")
        self.lists = os.listdir(self.img_path)
        self.len = len(self.lists)
        self.pose = np.load(self.pose_path)

    def __getitem__(self, index):
        """Returns one data pair (source and target)."""
        # In this multi-view variant the hair and non-hair images come from
        # the same view index, so both share a single pose.
        random_number1 = random.randrange(0, 12)
        random_number2 = random_number1
        name = self.lists[index]

        hair_path = os.path.join(self.img_path, name, str(random_number1) + '.jpg')
        non_hair_path = os.path.join(self.non_hair_path, name, str(random_number2) + '.jpg')
        ref_folder = os.path.join(self.ref_path, name)

        files = [f for f in os.listdir(ref_folder) if f.endswith('.jpg')]
        ref_path = os.path.join(ref_folder, files[0])
        img_hair = cv2.imread(hair_path)
        img_non_hair = cv2.imread(non_hair_path)
        ref_hair = cv2.imread(ref_path)

        img_hair = cv2.cvtColor(img_hair, cv2.COLOR_BGR2RGB)
        img_non_hair = cv2.cvtColor(img_non_hair, cv2.COLOR_BGR2RGB)
        ref_hair = cv2.cvtColor(ref_hair, cv2.COLOR_BGR2RGB)

        img_hair = cv2.resize(img_hair, (512, 512))
        img_non_hair = cv2.resize(img_non_hair, (512, 512))
        ref_hair = cv2.resize(ref_hair, (512, 512))

        # Scale all three images to [-1, 1].
        img_hair = (img_hair / 255.0) * 2 - 1
        img_non_hair = (img_non_hair / 255.0) * 2 - 1
        ref_hair = (ref_hair / 255.0) * 2 - 1

        # HWC -> CHW
        img_hair = torch.tensor(img_hair).permute(2, 0, 1)
        img_non_hair = torch.tensor(img_non_hair).permute(2, 0, 1)
        ref_hair = torch.tensor(ref_hair).permute(2, 0, 1)

        pose1 = torch.tensor(self.pose[random_number1])
        pose2 = torch.tensor(self.pose[random_number2])

        return {
            'hair_pose': pose1,
            'img_hair': img_hair,
            'bald_pose': pose2,
            'img_non_hair': img_non_hair,
            'ref_hair': ref_hair
        }

    def __len__(self):
        return self.len


if __name__ == "__main__":
    train_dataset = myDataset("./data")
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=1,
        num_workers=1,
    )

    for epoch in range(0, len(train_dataset) + 1):
        for step, batch in enumerate(train_dataloader):
            print("batch[hair_pose]:", batch["hair_pose"])
            print("batch[img_hair]:", batch["img_hair"])
            print("batch[bald_pose]:", batch["bald_pose"])
            print("batch[img_non_hair]:", batch["img_non_hair"])
            print("batch[ref_hair]:", batch["ref_hair"])
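Because this variant pins both images to the same view, the two pose entries of any sample should be identical. A quick sketch of that invariant, assuming the same ./data layout as the other datasets:

import torch

ds = myDataset("./data")
sample = ds[0]
# hair and non-hair come from the same view, so their poses must match
assert torch.equal(sample["hair_pose"], sample["bald_pose"])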
utils/dataset_stable_hair.py
ADDED
@@ -0,0 +1,68 @@
from torch.utils import data
import os
import torch
import cv2
import json


class myDataset(data.Dataset):
    """Custom data.Dataset compatible with data.DataLoader."""

    def __init__(self, train_data_dir):
        self.json_path = os.path.join(train_data_dir, "data_jichao.jsonl")
        with open(self.json_path, 'r') as f:
            self.data = [json.loads(line) for line in f]

    def __len__(self):
        """Return the total number of items in the dataset."""
        return len(self.data)

    def __getitem__(self, index):
        """Returns one data pair (source and target)."""
        item = self.data[index]

        img_hair = cv2.imread(item["target"])
        img_non_hair = cv2.imread(item["source"])
        ref_hair = cv2.imread(item["reference"])

        img_hair = cv2.cvtColor(img_hair, cv2.COLOR_BGR2RGB)
        img_non_hair = cv2.cvtColor(img_non_hair, cv2.COLOR_BGR2RGB)
        ref_hair = cv2.cvtColor(ref_hair, cv2.COLOR_BGR2RGB)

        img_hair = cv2.resize(img_hair, (512, 512))
        img_non_hair = cv2.resize(img_non_hair, (512, 512))
        ref_hair = cv2.resize(ref_hair, (512, 512))

        # Scale to [-1, 1].
        img_hair = (img_hair / 255.0) * 2 - 1
        img_non_hair = (img_non_hair / 255.0) * 2 - 1
        ref_hair = (ref_hair / 255.0) * 2 - 1

        # HWC -> CHW
        img_hair = torch.tensor(img_hair).permute(2, 0, 1)
        img_non_hair = torch.tensor(img_non_hair).permute(2, 0, 1)
        ref_hair = torch.tensor(ref_hair).permute(2, 0, 1)

        return {
            'img_hair': img_hair,
            'img_non_hair': img_non_hair,
            'ref_hair': ref_hair
        }


if __name__ == "__main__":
    train_dataset = myDataset("./data")
    train_dataloader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=1,
        num_workers=1,
    )

    for epoch in range(0, len(train_dataset) + 1):
        for step, batch in enumerate(train_dataloader):
            # This dataset returns only the three image keys (no poses).
            print("batch[img_hair]:", batch["img_hair"])
            print("batch[img_non_hair]:", batch["img_non_hair"])
            print("batch[ref_hair]:", batch["ref_hair"])
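Each line of data_jichao.jsonl is a standalone JSON object whose source, target, and reference keys hold image paths. The keys come from the code above; the concrete paths below are placeholders. A minimal sketch of writing such a file:

import json

records = [
    {
        "source": "imgs/0001_bald.jpg",    # non-hair input
        "target": "imgs/0001_hair.jpg",    # hair ground truth
        "reference": "imgs/0001_ref.jpg",  # reference hair image
    },
]
with open("data/data_jichao.jsonl", "w") as f:
    for rec in records:
        f.write(json.dumps(rec) + "\n")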
utils/pipeline.py
ADDED
The diff for this file is too large to render; see the raw diff.