Datasets:
QCRI
/

Modalities:
Image
Text
Formats:
parquet
Languages:
Arabic
ArXiv:
Libraries:
Datasets
pandas
License:
Firoj committed on
Commit
263a074
·
verified ·
1 Parent(s): 998ef9a

Upload armeme_loader.py with huggingface_hub

Browse files
Files changed (1) hide show
  1. armeme_loader.py +49 -0
armeme_loader.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import json
3
+ import datasets
4
+ from datasets import Dataset, DatasetDict, load_dataset, Features, Value, Image
5
+
6
# On-disk layout of the ArMeme dataset: meme images live under
# `image_root_dir`, and each split is described by one JSONL annotation file.
image_root_dir = "./"
train_jsonl_file_path = "arabic_memes_categorization_train.jsonl"
dev_jsonl_file_path = "arabic_memes_categorization_dev.jsonl"
test_jsonl_file_path = "arabic_memes_categorization_test.jsonl"

# Column schema shared by every split: string id/text, the decoded image,
# and the image's file path as plain text.
_schema = {
    'id': Value('string'),
    'text': Value('string'),
    'image': Image(),
    'img_path': Value('string'),
}
features = Features(_schema)
19
+
20
# Function to load each dataset split
def load_armeme_split(jsonl_file_path, image_root_dir):
    """Load one ArMeme split from a JSONL file into a `datasets.Dataset`.

    Args:
        jsonl_file_path: Path to the split's JSONL file. Each non-empty line
            is one JSON record; it must carry an 'img_path' key (assumes the
            records also supply every other column in `features` — TODO
            confirm against the released JSONL files).
        image_root_dir: Directory prepended to every record's 'img_path'.

    Returns:
        A `Dataset` typed with the module-level `features` schema.
    """
    data = []

    # Explicit utf-8: the meme text is Arabic and must not depend on the
    # platform's default encoding.
    with open(jsonl_file_path, 'r', encoding='utf-8') as f:
        for line in f:
            line = line.strip()
            if not line:
                # Tolerate blank/trailing lines instead of crashing on them.
                continue
            item = json.loads(line)
            # Update image path so it resolves relative to the image root.
            item['img_path'] = os.path.join(image_root_dir, item['img_path'])
            data.append(item)

    # BUG FIX: `data` is a list of per-row dicts. `Dataset.from_dict` expects
    # a column-oriented mapping of {column: list-of-values} and would fail
    # here; `Dataset.from_list` is the row-oriented constructor.
    dataset = Dataset.from_list(data, features=features)
    return dataset
35
+
36
# Materialize the three splits in a fixed order.
train_dataset, dev_dataset, test_dataset = (
    load_armeme_split(path, image_root_dir)
    for path in (train_jsonl_file_path, dev_jsonl_file_path, test_jsonl_file_path)
)

# Bundle the splits under their canonical names.
dataset_dict = DatasetDict(
    dict(zip(('train', 'dev', 'test'), (train_dataset, dev_dataset, test_dataset)))
)

# Publish the dataset to the Hugging Face Hub.
# NOTE(review): not every `datasets` release accepts a `license` keyword on
# `push_to_hub` — confirm against the installed version.
dataset_dict.push_to_hub("QCRI/ArMeme", license="CC-By-NC-SA-4.0")