add dataset card

app.py CHANGED
```diff
@@ -13,7 +13,7 @@ import requests
 from datasets import Dataset, Features, Value, Sequence
 from datasets.fingerprint import Hasher
 from gradio_huggingfacehub_search import HuggingfaceHubSearch
-from huggingface_hub import InferenceClient
+from huggingface_hub import DatasetCard, InferenceClient
 
 from utils import StringIteratorIO
 
```
```diff
@@ -26,6 +26,7 @@ session = requests.Session()
 empty_dataframe = pd.DataFrame({"1": [], "2": [], "3": []})
 
 NAMESPACE = "dataset-rewriter"
+URL = "https://huggingface.co/spaces/dataset-rewrite/dataset-rewrite"
 
 NUM_ROWS_PREVIEW = 3
 MAX_NUM_ROWS_TO_REWRITE = 1000
```
```diff
@@ -55,6 +56,26 @@ FIND_NEW_NAME = (
     "Here is your first job: rephrase the sentence 'Take this dataset and apply the instruction \"{prompt}\"'"
 )
 
+
+DATASET_CARD_CONTENT = """
+---
+license: mit
+tags:
+- dataset-rewriter
+- synthetic
+---
+
+# {new_dataset}
+
+_Note: This is an AI-generated dataset so its content may be inaccurate or false_
+
+**Source of the data:**
+The dataset was generated using the [Dataset ReWriter]({url}) and {model_id} from the dataset {dataset} and using the prompt '{prompt}':
+- **Original Dataset**: https://huggingface.co/datasets/{dataset}
+- **Model**: https://huggingface.co/{model_id}
+- **More Datasets**: https://huggingface.co/datasets?other=dataset-rewriter
+"""
+
 css = """
 .settings {
     background: transparent;
```
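The new `DATASET_CARD_CONTENT` template combines YAML front matter (an MIT license plus the `dataset-rewriter` and `synthetic` tags, which is what makes generated datasets discoverable via the "More Datasets" link) with a short provenance section filled in through `str.format`. A minimal sketch of how the template and the newly imported `DatasetCard` fit together, assuming the constants above and purely hypothetical dataset, model, and prompt values:

```python
from huggingface_hub import DatasetCard

# Assumes DATASET_CARD_CONTENT and URL from app.py above; every other
# value here is an illustrative placeholder, not taken from the Space.
card_text = DATASET_CARD_CONTENT.format(
    new_dataset="my-dataset-rewritten",
    dataset="user/my-dataset",
    model_id="microsoft/Phi-3-mini-4k-instruct",
    prompt="make the text more formal",
    url=URL,
)

# Upload the rendered markdown as the README of the dataset repo.
DatasetCard(card_text).push_to_hub(
    repo_id="dataset-rewriter/my-dataset-rewritten",  # hypothetical repo
    repo_type="dataset",
    token="hf_...",  # a token with write access
)
```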
```diff
@@ -163,9 +184,7 @@ with gr.Blocks(css=css) as demo:
             prompt=prompt,
         )}]
         response_format = {"type": "json", "value": {"properties": {"data": {"type": "array", "items": format}}, "required": ["data"]}}
-        print(f"Streaming preview of {dataset} with instruction '{prompt}'")
         yield from ijson.items(StringIteratorIO(stream_reponse(messages, response_format=response_format)), "data.item", buf_size=4)
-        print(f"Done streaming preview of {dataset} with instruction '{prompt}'")
 
 
     def stream_rewrite_dataset_row_by_row(dataset: str, rows: list[dict[str, str]], prompt: str, format: str, input_preview_rows: list[dict[str, str]], output_preview_rows: list[dict[str, str]]) -> Iterator[dict[str, str]]:
```
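For context on the preview handler these prints were removed from: it streams the model's JSON answer and parses rows out of it incrementally, since `ijson.items(..., "data.item")` yields each element of the top-level `data` array as soon as it is complete. A self-contained sketch of that parsing pattern, with a fixed `io.BytesIO` buffer standing in for the wrapped token stream:

```python
import io

import ijson

# In the app, StringIteratorIO adapts the model's token iterator into a
# file-like object; a fixed buffer plays that role here.
stream = io.BytesIO(b'{"data": [{"text": "first row"}, {"text": "second row"}]}')

# "data.item" matches each element of the top-level "data" array; a small
# buf_size makes ijson surface completed items as early as possible.
for row in ijson.items(stream, "data.item", buf_size=4):
    print(row)  # {'text': 'first row'}, then {'text': 'second row'}
```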
```diff
@@ -305,7 +324,7 @@ with gr.Blocks(css=css) as demo:
         format = output_format_df.to_dict(orient="records")
         format = {"properties": {x["column"]: json.loads(x["type"]) for x in format}, "required": [x["column"] for x in format]}
         output_rows = []
-        print(f"ReWriting {dataset}")
+        print(f"(preview) ReWriting {dataset} with instruction '{prompt}'")
         yield {rewrite_full_dataset_button: gr.Button(interactive=False), full_dataset_generation_label: gr.Label(visible=False)}
         yield {
             pretty_output_preview: gr.DataFrame(visible=True),
```
```diff
@@ -315,6 +334,7 @@ with gr.Blocks(css=css) as demo:
                 output_rows.append({k: json.dumps(row[k], ensure_ascii=False) for k in output_format_df["column"]})
                 yield {pretty_output_preview: gr.DataFrame(pd.DataFrame(output_rows))}
         yield {rewrite_full_dataset_button: gr.Button(interactive=True)}
+        print(f"(preview) Done ReWriting {dataset} with instruction '{prompt}'")
 
 
     @rewrite_full_dataset_button.click(inputs=[dataset_search, subset_dropdown, split_dropdown, pretty_input_preview, pretty_output_preview, input_prompt, output_format_dataframe, dataset_info_json, select_namespace_dropdown], outputs=[full_dataset_generation_label, full_dataset_generation_success_markdown, pretty_output_preview, pretty_full_dataset_generation_output])
```
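Both the preview handler above and the full-run handler below are Gradio generator callbacks: instead of returning once, they repeatedly yield `{component: update}` dicts so the output table and buttons refresh while the rewrite is still running. A runnable sketch of that pattern, with component names that are assumptions rather than names from the Space:

```python
import time

import gradio as gr
import pandas as pd

with gr.Blocks() as sketch:
    button = gr.Button("Run")
    table = gr.DataFrame()

    def run():
        rows = []
        # Lock the button while the (simulated) job runs.
        yield {button: gr.Button(interactive=False)}
        for i in range(3):
            time.sleep(0.5)  # stand-in for one model call
            rows.append({"step": i})
            yield {table: pd.DataFrame(rows)}  # partial update: only the table
        yield {button: gr.Button(interactive=True)}

    button.click(run, outputs=[button, table])

sketch.launch()
```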
```diff
@@ -325,7 +345,7 @@ with gr.Blocks(css=css) as demo:
         format = {"properties": {x["column"]: json.loads(x["type"]) for x in format}, "required": [x["column"] for x in format]}
         num_examples = dataset_info["splits"][split]["num_examples"]
         total = min(num_examples, MAX_NUM_ROWS_TO_REWRITE)
-        print(f"ReWriting {dataset}")
+        print(f"ReWriting {dataset} with instruction '{prompt}'")
         yield {full_dataset_generation_label: gr.Label({f"⚙️ ReWriting {dataset}": 0.}, visible=True)}
         yield {pretty_full_dataset_generation_output: empty_dataframe}
         yield {
```
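The progress display in this handler piggybacks on `gr.Label`, which renders a `{label: confidence}` dict as bars, so feeding it `current / total` turns the confidence bar into a progress bar. A sketch of the trick in isolation, with made-up labels and timings:

```python
import time

import gradio as gr

def fake_job():
    total = 10
    for done in range(1, total + 1):
        time.sleep(0.2)  # stand-in for rewriting one batch of rows
        # Each dict entry is drawn as a confidence bar, so the fraction
        # doubles as a progress indicator.
        yield {"⚙️ Working": done / total}

demo = gr.Interface(fake_job, inputs=None, outputs=gr.Label())
demo.launch()
```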
```diff
@@ -357,15 +377,17 @@ with gr.Blocks(css=css) as demo:
                 full_dataset_generation_label: gr.Label({f"⚙️ ReWriting {dataset}": current / total}),
                 pretty_full_dataset_generation_output: gr.DataFrame(pd.DataFrame([row for rows in parallel_output_rows for row in rows]))
             }
-        print(f"Done ReWriting {dataset}")
+        print(f"Done ReWriting {dataset} with instruction '{prompt}'")
 
         output_rows = [{k: json.loads(row[k]) for k in output_format_df["column"]} for rows in parallel_output_rows for row in rows]
-        repo_id = namespace + "/" + find_new_name(dataset + (PARTIAL_SUFFIX if num_examples > total else ""), prompt)
+        new_dataset = find_new_name(dataset + (PARTIAL_SUFFIX if num_examples > total else ""), prompt)
+        repo_id = namespace + "/" + new_dataset
         yield {full_dataset_generation_label: gr.Label({f"✅ ReWriting {dataset}": len(output_rows) / total, f"⚙️ Saving to {repo_id}": 0.})}
         token = oauth_token.token if oauth_token else save_dataset_hf_token
         print(f"Saving {repo_id}")
         ds = Dataset.from_list(output_rows)
         ds.push_to_hub(repo_id, config_name=subset, split=split, token=token)
+        DatasetCard(DATASET_CARD_CONTENT.format(new_dataset=new_dataset, dataset=dataset, model_id=model_id, prompt=prompt, url=URL)).push_to_hub(repo_id=repo_id, repo_type="dataset", token=token)
         yield {full_dataset_generation_label: gr.Label({f"✅ ReWriting {dataset}": len(output_rows) / total, f"✅ Saving to {repo_id}": 1.})}
         yield {full_dataset_generation_success_markdown: f"# Open the ReWriten dataset in a new tab: [{repo_id}](https://huggingface.co/datasets/{repo_id})"}
         print(f"Saved {repo_id}")
```
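The save step materializes the rewritten rows with `Dataset.from_list` and uploads them with `push_to_hub`, keeping the source subset and split names; the new `DatasetCard` push lands immediately after it so the repo gets its card in the same run. A sketch of the upload against a hypothetical repo you control:

```python
from datasets import Dataset

# Hypothetical rewritten rows; in the app these are decoded model outputs.
rows = [{"text": "bonjour"}, {"text": "monde"}]

ds = Dataset.from_list(rows)
ds.push_to_hub(
    "your-username/demo-rewritten",  # hypothetical repo id
    config_name="default",           # mirrors the source subset
    split="train",                   # mirrors the source split
    token="hf_...",                  # a token with write access
)
```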