Gary3410 committed on
Commit 11091e3 · 1 Parent(s): a9248a7

Upload 2 files

Files changed (2)
  1. app.py +29 -10
  2. object_id.json +0 -0
app.py CHANGED
@@ -6,9 +6,12 @@ import trimesh
 import huggingface_hub
 from huggingface_hub import Repository
 import os
+import pickle
+import json
+from tqdm import tqdm
 
 DATASET_REPO_URL = "https://huggingface.co/datasets/Gary3410/object_tmp"
-DATA_FILENAME = "object.npy"
+DATA_FILENAME = "object_tmp.pkl"
 DATA_FILE = os.path.join("data", DATA_FILENAME)
 
 
@@ -101,11 +104,28 @@ def trimesh_to_pc(scene_or_mesh):
 
 processes = multiprocessing.cpu_count()
 
-uids = objaverse.load_uids()
-random_object_uids = random.sample(uids, 100)
+# uids = objaverse.load_uids()
+# random_object_uids = random.sample(uids, 100)
+uids = []
+object_id_tmp_dict = {}
+
+# Parse the JSON file
+with open('object_id.json', 'r') as file:
+    str = file.read()
+    obj_data = json.loads(str)
+
+for ints_key in obj_data.keys():
+    ints_dict_one = obj_data[ints_key]
+    ints_id_dict = ints_dict_one["obj_id"]
+    for ints_one in ints_id_dict.keys():
+        uid_one = ints_id_dict[ints_one]
+        uids.append(uid_one)
+
+uids = list(set(uids))
+
 
 objects = objaverse.load_objects(
-    uids=random_object_uids,
+    uids=uids,
     download_processes=processes
 )
 
@@ -113,19 +133,18 @@ repo = Repository(
     local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token="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"
 )
 
-pc_list = []
-for objaid in random_object_uids:
+
+for objaid in tqdm(uids):
     objamodel = objaverse.load_objects([objaid])[objaid]
     try:
        pc = trimesh_to_pc(trimesh.load(objamodel))
        pc = random_sampling(pc, num_samples=5000)
-       pc_list.append(pc.reshape([1, -1, pc.shape[-1]]))
+       object_id_tmp_dict[objaid] = pc
     except:
        continue
 
-pc_list = numpy.concatenate(pc_list, axis=0)
-
-numpy.save(DATA_FILE, pc_list)
+with open(DATA_FILE, 'wb') as file:
+    pickle.dump(object_id_tmp_dict, file)
 commit_url = repo.push_to_hub()
 
 print("Done")
object_id.json ADDED
The diff for this file is too large to render. See raw diff
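The parsing loop added to app.py implies a two-level layout for object_id.json: each top-level entry carries an "obj_id" dict whose values are objaverse UIDs. The snippet below is a hypothetical illustration of that layout; the keys and UID strings are invented, and only the "obj_id" field is taken from the code.

import json

# Hypothetical example of the expected layout; real keys and UIDs live in object_id.json.
example = {
    "instance_0": {"obj_id": {"0": "uid-aaaa", "1": "uid-bbbb"}},
    "instance_1": {"obj_id": {"0": "uid-bbbb"}},
}

uids = []
for ints_key in example.keys():                # mirrors the loop in app.py
    ints_id_dict = example[ints_key]["obj_id"]
    for ints_one in ints_id_dict.keys():
        uids.append(ints_id_dict[ints_one])

uids = list(set(uids))                         # duplicates across entries collapse here
print(json.dumps(example, indent=2))
print(uids)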