Dataset: QCRI/ArMeme
Modalities: Image, Text
Formats: parquet
Languages: Arabic
Libraries: Datasets, pandas

import os
import json

from datasets import Dataset, DatasetDict, Features, Value, Image, ClassLabel


# Define the paths to your dataset
image_root_dir = "./"
train_jsonl_file_path = "arabic_memes_categorization_train.jsonl"
dev_jsonl_file_path = "arabic_memes_categorization_dev.jsonl"
test_jsonl_file_path = "arabic_memes_categorization_test.jsonl"



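# Each JSONL line is expected to provide the four fields read below; an
# illustrative (hypothetical) record, not taken from the actual dataset:
# {"id": "123", "text": "...", "img_path": "images/123.jpg", "class_label": "propaganda"}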
# Load a single dataset split from its JSONL annotation file
def load_armeme_split(jsonl_file_path, image_root_dir):
    ids = []
    texts = []
    image_file_paths = []
    class_labels = []

    # Read the JSONL file; UTF-8 matters for the Arabic meme text
    with open(jsonl_file_path, 'r', encoding='utf-8') as f:
        for line in f:
            item = json.loads(line)
            ids.append(item['id'])
            texts.append(item['text'])
            image_file_paths.append(os.path.join(image_root_dir, item['img_path']))
            class_labels.append(item['class_label'])

    # Assemble the columns; 'image' and 'img_path' start from the same file
    # paths, but the Image() feature below decodes the former into image data
    data_dict = {
        'id': ids,
        'text': texts,
        'image': image_file_paths,
        'img_path': image_file_paths,
        'class_label': class_labels
    }

    # Declare the schema so images are decoded and labels are class-encoded
    features = Features({
        'id': Value('string'),
        'text': Value('string'),
        'image': Image(),
        'img_path': Value('string'),
        'class_label': ClassLabel(names=['not_propaganda', 'propaganda', 'not-meme', 'other'])
    })

    # Build a Hugging Face dataset from the column dictionary
    return Dataset.from_dict(data_dict, features=features)

# Load each split
train_dataset = load_armeme_split(train_jsonl_file_path, image_root_dir)
dev_dataset = load_armeme_split(dev_jsonl_file_path, image_root_dir)
test_dataset = load_armeme_split(test_jsonl_file_path, image_root_dir)

# Create a DatasetDict
dataset_dict = DatasetDict({
    'train': train_dataset,
    'dev': dev_dataset,
    'test': test_dataset
})
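# The DatasetDict keys ('train', 'dev', 'test') become the split names on the Hub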

# Push the dataset to the Hugging Face Hub (requires prior authentication,
# e.g. via `huggingface-cli login`)
dataset_dict.push_to_hub("QCRI/ArMeme")
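
Once the push completes, the dataset can be loaded back from the Hub directly; a minimal sketch, with the repo id and split names as defined in the script above:

from datasets import load_dataset

ds = load_dataset("QCRI/ArMeme")

# 'image' is decoded to a PIL image on access; 'class_label' is an integer
# index that the ClassLabel feature can map back to its string name
example = ds["train"][0]
print(example["text"])
print(ds["train"].features["class_label"].int2str(example["class_label"]))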