"""Convert a parquet document/table-parsing dataset into ShareGPT-format JSON, saving page images to disk."""
import os
import re
import sys
import json
import argparse
import random
from multiprocessing import cpu_count
from pathlib import Path

from datasets import load_dataset
from tqdm import tqdm

markdown_prompts = [
    "Please transform the document’s contents into Markdown format.",
    "Extract the core information of the document and present it in markdown form.",
    "Reconstruct the document in markdown format, paying attention to title hierarchy and list usage.",
    "Task: Parse the main body of the document and convert it to markdown. Requirements: Retain the original logical structure, use elements such as titles, lists, and quotes appropriately, and ensure that the output document is clear and easy to read.",
    "Reorganize the document using markdown syntax, ensuring clear structure and logical coherence.",
]

html_table_prompts = [
    "Please encode the table from the image into HTML format.",
    "Render the table in the image as HTML code, please.",
    "Please transform the table from the image into HTML format.",
    "Convert the image’s table data into the HTML structure.",
    "Transform the image’s table into the HTML format, please.",
    "Convert the table found in the image into HTML format.",
]

def get_random_prompt(task_type):
    """Pick a random instruction prompt for the given task type."""
    prompts = {
        "document_parsing": markdown_prompts,
        "table_parsing": html_table_prompts,
    }
    return random.choice(prompts.get(task_type, [""]))

def build_res_batch(item, image_path):
    """Convert one dataset row into a ShareGPT-style record, saving its image to disk."""
    idx, img, gt, attr = item["id"], item["image"], item["gt"], item["attributes"]
    info = json.loads(attr)
    task_type = info.get("task", "unknown")
    doc_type = info.get("document_type", "unknown")
    # Save the page/table image once; skip writing if it is already on disk.
    save_path = os.path.join(image_path, idx + ".png")
    if not os.path.exists(save_path):
        img.save(save_path, quality=100)
    results = {
        "images": [str(Path(save_path).resolve())],
        "conversations": [
            {"from": "human", "value": get_random_prompt(task_type)},
            {"from": "gpt", "value": gt},
        ],
        "attributes": {"document_type": doc_type, "task": task_type},
    }
    if "bbox" in item:
        bbox = item["bbox"]
        # Re-serialize the bbox JSON string (validates it and normalizes formatting); keep empty strings as-is.
        results["bbox"] = (
            json.dumps(json.loads(bbox), ensure_ascii=False) if bbox != "" else ""
        )
    return results

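# A converted record looks roughly like this (values and paths are illustrative, not taken from real data):
# {
#     "images": ["/abs/path/to/images/0001.png"],
#     "conversations": [
#         {"from": "human", "value": "Please transform the document’s contents into Markdown format."},
#         {"from": "gpt", "value": "# Title\n..."}
#     ],
#     "attributes": {"document_type": "report", "task": "document_parsing"},
#     "bbox": "[...]"  # present only when the source dataset has a bbox column
# }
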
def main(args):
    file_dir = args.input
    dataset = load_dataset(
        "parquet",
        data_files=os.path.join(file_dir, "train-*.parquet"),
        split="train",
        cache_dir=file_dir,
    )
    print(dataset)
    os.makedirs(args.image_path, exist_ok=True)
    processed = dataset.map(
        build_res_batch,
        fn_kwargs={"image_path": args.image_path},
        batched=False,
        num_proc=min(32, cpu_count()),
        remove_columns=dataset.column_names,
        desc="Converting to sharegpt format",
    )
    # datasets stores list fields as numpy arrays; convert them back to plain lists before JSON serialization.
    df = processed.to_pandas()
    data = df.to_dict("records")
    for item in data:
        item["images"] = item["images"].tolist()
        item["conversations"] = item["conversations"].tolist()
    with open(args.output, "w", encoding="utf-8") as f:
        json.dump(data, f, ensure_ascii=False, indent=2)

if __name__ == "__main__":

    def parse_args():
        parser = argparse.ArgumentParser(
            description="Convert parquet format to sharegpt format"
        )
        parser.add_argument("--input", type=str, required=True, help="Input directory")
        parser.add_argument(
            "--output", type=str, required=True, help="Output JSON file"
        )
        parser.add_argument(
            "--image_path", type=str, required=True, help="Output image directory"
        )
        return parser.parse_args()

    args = parse_args()
    main(args)
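
# Example invocation (script name and paths are illustrative):
#   python convert_to_sharegpt.py \
#       --input /path/to/parquet_dir \
#       --output /path/to/output.json \
#       --image_path /path/to/images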