tungvu3196 committed on
Commit
9ee0952
·
verified ·
1 Parent(s): dd617ab

Delete vlm-projects-multi-lang-final.py

Browse files
Files changed (1) hide show
  1. vlm-projects-multi-lang-final.py +0 -97
vlm-projects-multi-lang-final.py DELETED
@@ -1,97 +0,0 @@
1
- # dataset.py
2
- import os
3
- import pandas as pd
4
- import datasets
5
-
6
# Human-readable metadata surfaced through `datasets.DatasetInfo` in `_info`.
_DESCRIPTION = (
    "A multilingual medical imaging dataset with questions and answers, "
    "structured by language."
)
_HOMEPAGE = "https://huggingface.co/datasets/tungvu3196/vlm-projects-multi-lang-final"
_LICENSE = "apache-2.0"
_CITATION = ""

# One builder config is generated per entry; each name doubles as the config
# name and (lower-cased) as the data sub-folder for that language.
LANGUAGES = [
    "English",
    "Vietnamese",
    "French",
    "German",
    "Spanish",
    "Russian",
    "Korean",
    "Mandarin",
    "Japanese",
    "Thai",
    "Indonesian",
    "Malay",
    "Arabic",
    "Hindi",
    "Turkish",
    "Portuguese",
]
16
-
17
class VlmProjectsMultiLangFinal(datasets.GeneratorBasedBuilder):
    """Loader for a multilingual medical-imaging Q&A dataset.

    One :class:`datasets.BuilderConfig` is created per entry in ``LANGUAGES``.
    The config name (e.g. ``"English"``) maps to a lower-cased folder
    (``data/english``) expected to contain ``train.parquet`` and
    ``test.parquet``.
    """

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang_name,
            version=datasets.Version("1.0.0"),
            description=f"Dataset in {lang_name}",
        )
        for lang_name in LANGUAGES
    ]

    # Sensible default so `load_dataset(<repo>)` works without an explicit
    # config; previously the user was forced to pick a language every time.
    DEFAULT_CONFIG_NAME = "English"

    def _info(self):
        """Declare the feature schema shared by every language config."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            features=datasets.Features({
                "A1": datasets.Value("string"),
                "A2": datasets.Value("string"),
                "A3": datasets.Value("string"),
                "A4": datasets.Value("string"),
                "Bbox coordinates normalized (X, Y, W, H)": datasets.Value("string"),
                "Column 9": datasets.Value("float64"),
                "Deliverable": datasets.Value("string"),
                "Doctor": datasets.Value("string"),
                "Google Drive Link": datasets.Value("string"),
                "No.": datasets.Value("int64"),
                "Notes": datasets.Value("string"),
                "Original": datasets.Value("string"),
                "Patient ID": datasets.Value("string"),
                "Q1": datasets.Value("string"),
                "Q2": datasets.Value("string"),
                "Q3": datasets.Value("string"),
                "Q4": datasets.Value("string"),
                "Remove Status": datasets.Value("string"),
                "Slide": datasets.Value("string"),
                "Start date": datasets.Value("float64"),
                "Status": datasets.Value("string"),
                "__index_level_0__": datasets.Value("int64"),
                # These two will render in the Viewer if the underlying files
                # exist in the repo:
                "image": datasets.Image(),  # path or dict -> file in repo
                "image_with_bboxes": datasets.Image(),
                # keep as string/URL if it's not a local file:
                "rotated_link": datasets.Value("string"),
            }),
        )

    def _split_generators(self, dl_manager):
        """Map the config name onto its data folder and emit the two splits.

        The config name ("English") becomes the folder name ("english")
        under ``data_dir`` (default: ``data``).
        """
        lang_dir = self.config.name.lower()
        base = os.path.join(self.config.data_dir or "data", lang_dir)
        return [
            datasets.SplitGenerator(
                name=split_name,
                gen_kwargs={
                    "filepath": os.path.join(base, parquet_name),
                    "base_dir": base,
                },
            )
            for split_name, parquet_name in (
                (datasets.Split.TRAIN, "train.parquet"),
                (datasets.Split.TEST, "test.parquet"),
            )
        ]

    def _generate_examples(self, filepath, base_dir):
        """Yield ``(key, example)`` pairs from one parquet file.

        Repo-relative image paths (e.g. ``"images/xyz.png"``) are rebased
        onto ``base_dir`` so the ``Image`` feature can locate the files;
        URLs pass through untouched (``datasets.Image`` downloads them).
        """
        df = pd.read_parquet(filepath)

        for idx, row in df.iterrows():
            example = row.to_dict()

            for col in ("image", "image_with_bboxes"):
                path = example.get(col)
                # Skip missing/empty cells (pandas yields NaN/None here,
                # which must not be fed to os.path.join).
                if not (isinstance(path, str) and path):
                    continue
                if not path.startswith(("http://", "https://")):
                    # Repo-relative path: rebase onto the dataset files and
                    # normalise separators so this also works on Windows.
                    example[col] = os.path.join(base_dir, path).replace("\\", "/")
                # if it *is* a URL, leave as-is (Image will try to download)

            yield idx, example