defjam committed on
Commit
9c39f29
·
1 Parent(s): b63d9a9

Initial commit

Browse files
Files changed (4) hide show
  1. conditioning_images.zip +3 -0
  2. images.zip +3 -0
  3. testdataset.py +101 -0
  4. train.jsonl +19 -0
conditioning_images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:74a6dde1abd9534efb52b289b36286cc8ad998df00a3930da2853a5087e197f0
3
+ size 260754
images.zip ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0aa05cfd16da3809b4f6d97a853108370f32aa27a42cb9728ecdc54c327dc4eb
3
+ size 7920242
testdataset.py ADDED
@@ -0,0 +1,101 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pandas as pd
2
+ from huggingface_hub import hf_hub_url
3
+ import datasets
4
+ import os
5
+
6
# --- Dataset card metadata -------------------------------------------------
# Placeholders — fill these in before publishing the dataset card.
_VERSION = datasets.Version("1.0.0")

_DESCRIPTION = "TODO"
_HOMEPAGE = "TODO"
_LICENSE = "TODO"
_CITATION = "TODO"

# Schema of one example: a target image, its conditioning image, and a
# text prompt (the triplet consumed by ControlNet-style training scripts).
_FEATURES = datasets.Features(
    {
        "image": datasets.Image(),
        "conditioning_image": datasets.Image(),
        "text": datasets.Value("string"),
    },
)

# Hub repository hosting the data files. Kept in one constant so the repo
# id is not repeated (and cannot drift) across the three artifact URLs.
_REPO_ID = "defjam/testdataset"

METADATA_URL = hf_hub_url(
    _REPO_ID,
    filename="train.jsonl",
    repo_type="dataset",
)

IMAGES_URL = hf_hub_url(
    _REPO_ID,
    filename="images.zip",
    repo_type="dataset",
)

CONDITIONING_IMAGES_URL = hf_hub_url(
    _REPO_ID,
    filename="conditioning_images.zip",
    repo_type="dataset",
)

_DEFAULT_CONFIG = datasets.BuilderConfig(name="default", version=_VERSION)
40
+
41
+
42
class Fill50k(datasets.GeneratorBasedBuilder):
    """Builder for image / conditioning-image / caption triplets.

    Downloads a JSONL metadata file plus two zip archives from the Hub and
    yields one example per metadata row.

    NOTE(review): the class is named ``Fill50k`` but the repo is
    ``defjam/testdataset`` — presumably copied from the fill50k example
    script. The name is part of the loading-script interface, so it is
    left unchanged here; consider renaming in a follow-up.
    """

    BUILDER_CONFIGS = [_DEFAULT_CONFIG]
    DEFAULT_CONFIG_NAME = "default"

    def _info(self):
        """Return static dataset metadata (features, license, citation, ...)."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=_FEATURES,
            supervised_keys=None,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Download metadata and both image archives; expose a single TRAIN split."""
        metadata_path = dl_manager.download(METADATA_URL)
        images_dir = dl_manager.download_and_extract(IMAGES_URL)
        conditioning_images_dir = dl_manager.download_and_extract(
            CONDITIONING_IMAGES_URL
        )

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples
                gen_kwargs={
                    "metadata_path": metadata_path,
                    "images_dir": images_dir,
                    "conditioning_images_dir": conditioning_images_dir,
                },
            ),
        ]

    def _generate_examples(self, metadata_path, images_dir, conditioning_images_dir):
        """Yield ``(key, example)`` pairs, one per row of the JSONL metadata.

        The relative image path from the metadata doubles as the example
        key, so it must be unique per row.
        """
        metadata = pd.read_json(metadata_path, lines=True)

        for _, row in metadata.iterrows():
            image_path = os.path.join(images_dir, row["image"])
            # `with` instead of bare open(...).read(): the original left
            # the handles for the GC to close, which can exhaust file
            # descriptors when iterating a large dataset.
            with open(image_path, "rb") as f:
                image_bytes = f.read()

            conditioning_image_path = os.path.join(
                conditioning_images_dir, row["conditioning_image"]
            )
            with open(conditioning_image_path, "rb") as f:
                conditioning_image_bytes = f.read()

            yield row["image"], {
                "text": row["text"],
                "image": {
                    "path": image_path,
                    "bytes": image_bytes,
                },
                "conditioning_image": {
                    "path": conditioning_image_path,
                    "bytes": conditioning_image_bytes,
                },
            }
train.jsonl ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {"text": "a rcn coat", "image": "images/1.png", "conditioning_image": "conditioning_images/1.png"}
2
+ {"text": "a rcn coat", "image": "images/2.png", "conditioning_image": "conditioning_images/2.png"}
3
+ {"text": "a rcn coat", "image": "images/3.png", "conditioning_image": "conditioning_images/3.png"}
4
+ {"text": "a rcn coat", "image": "images/4.png", "conditioning_image": "conditioning_images/4.png"}
5
+ {"text": "a rcn coat", "image": "images/5.png", "conditioning_image": "conditioning_images/5.png"}
6
+ {"text": "a rcn coat", "image": "images/6.png", "conditioning_image": "conditioning_images/6.png"}
7
+ {"text": "a rcn coat", "image": "images/7.png", "conditioning_image": "conditioning_images/7.png"}
8
+ {"text": "a rcn coat", "image": "images/8.png", "conditioning_image": "conditioning_images/8.png"}
9
+ {"text": "a rcn coat", "image": "images/9.png", "conditioning_image": "conditioning_images/9.png"}
10
+ {"text": "a rcn coat", "image": "images/10.png", "conditioning_image": "conditioning_images/10.png"}
11
+ {"text": "a rcn coat", "image": "images/11.png", "conditioning_image": "conditioning_images/11.png"}
12
+ {"text": "a rcn coat", "image": "images/12.png", "conditioning_image": "conditioning_images/12.png"}
13
+ {"text": "a rcn coat", "image": "images/13.png", "conditioning_image": "conditioning_images/13.png"}
14
+ {"text": "a rcn coat", "image": "images/14.png", "conditioning_image": "conditioning_images/14.png"}
15
+ {"text": "a rcn coat", "image": "images/15.png", "conditioning_image": "conditioning_images/15.png"}
16
+ {"text": "a rcn coat", "image": "images/16.png", "conditioning_image": "conditioning_images/16.png"}
17
+ {"text": "a rcn coat", "image": "images/17.png", "conditioning_image": "conditioning_images/17.png"}
18
+ {"text": "a rcn coat", "image": "images/18.png", "conditioning_image": "conditioning_images/18.png"}
19
+ {"text": "a rcn coat", "image": "images/19.png", "conditioning_image": "conditioning_images/19.png"}