Dataset: QCRI/ArMeme
Modalities: Image, Text
Formats: parquet
Languages: Arabic
Libraries: Datasets, pandas
Firoj committed · Commit 257976b · verified · 1 parent: ec33a96

updated script

Files changed (1):
  1. README.md +33 -0
README.md CHANGED
@@ -38,6 +38,39 @@ You can load the dataset using the `datasets` library from Hugging Face:
 from datasets import load_dataset
 
 dataset = load_dataset("QCRI/ArMeme")
+
+# Specify the directory where you want to save the dataset
+output_dir = "./ArMeme/"
+
+# Save the dataset to the specified directory. This will save all splits to the output directory.
+dataset.save_to_disk(output_dir)
+
+# If you want to get the raw images out of the HF dataset format:
+
+from PIL import Image
+import os
+import json
+
+# Directory to save the images
+output_dir = "./ArMeme/"
+os.makedirs(output_dir, exist_ok=True)
+
+# Assuming the images are in a column named "image"
+image_column = "image"
+
+# Iterate over the dataset and save each image
+for split in ['train', 'dev', 'test']:
+    jsonl_path = os.path.join(output_dir, f"arabic_memes_categorization_{split}.jsonl")
+    with open(jsonl_path, 'w', encoding='utf-8') as f:
+        for item in dataset[split]:
+            # Access the image directly as it's already a PIL.Image object
+            image = item[image_column]
+            image_path = os.path.join(output_dir, item['img_path'])
+            # Ensure the directory exists
+            os.makedirs(os.path.dirname(image_path), exist_ok=True)
+            image.save(image_path)
+            del item['image']
+            f.write(json.dumps(item, ensure_ascii=False) + '\n')
 ```
 
 **Language:** Arabic
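If you later want to reload the saved copy, here is a minimal sketch using `datasets.load_from_disk`, assuming the `save_to_disk` step above has already been run with `output_dir = "./ArMeme/"`:

```python
from datasets import load_from_disk

# Reload the dataset previously written with save_to_disk
dataset = load_from_disk("./ArMeme/")

# Splits are available by name, matching the loop in the script above
for split in ['train', 'dev', 'test']:
    print(split, len(dataset[split]))
```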
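Similarly, a small sketch of reading one of the exported JSONL files back with pandas, assuming the export loop above has run and produced `arabic_memes_categorization_train.jsonl` in the same directory:

```python
import pandas as pd

# Each line of the exported file is one JSON record without the image bytes
train_df = pd.read_json("./ArMeme/arabic_memes_categorization_train.jsonl", lines=True)

# img_path points to the image file saved next to the JSONL
print(train_df["img_path"].head())
```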