from trafilatura import fetch_url, extract, extract_metadata
from datasets import load_dataset, Features, Value, Sequence
from typing import Dict, List, Any
from trafilatura.settings import DEFAULT_CONFIG
from copy import deepcopy

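# Copy trafilatura's default config and tighten it for bulk crawling:
# a 3-second download timeout and no sleep between requests, so slow or
# dead hosts do not stall the worker pool.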
my_config = deepcopy(DEFAULT_CONFIG)
my_config["DEFAULT"]["DOWNLOAD_TIMEOUT"] = "3"
my_config["DEFAULT"]["SLEEP_TIME"] = "0"

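# Batched filter predicate: keep only live, non-deleted "story" items
# that have a non-empty URL.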
def is_target(batch: Dict[str, List]) -> List[bool]:
    result = []
    for tpe, dead, deleted, url in zip(
        batch["type"], batch["dead"], batch["deleted"], batch["url"]
    ):
        if (
            tpe == "story"
            and dead is None
            and deleted is None
            and url is not None
            and len(url) > 0
        ):
            result.append(True)
        else:
            result.append(False)
    return result

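# Download a single story URL and extract page metadata plus the main
# content as markdown. The status flags record how far each document got;
# the input "url" column is carried over by datasets.map.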
def fetch_one(doc: Dict[str, Any]) -> Dict[str, Any]:
    downloaded = fetch_url(doc["url"], config=my_config)
    result = {
        "id": doc["id"],
        "title": None,
        "author": None,
        "markdown": None,
        "downloaded": False,
        "meta_extracted": False,
        "parsed": False,
        "description": None,
        "filedate": None,
        "date": None,
        "image": None,
        "pagetype": None,
        "hostname": None,
        "sitename": None,
        "categories": None,
        "tags": None,
    }
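    # Only attempt extraction if the download succeeded; metadata and
    # markdown extraction are each optional and tracked separately.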
    if downloaded:
        result["downloaded"] = True
        try:
            raw_meta = extract_metadata(downloaded)
            if raw_meta:
                result["meta_extracted"] = True
                meta = raw_meta.as_dict()
                result["title"] = meta.get("title", None)
                result["author"] = meta.get("author", None)
                result["description"] = meta.get("description", None)
                result["filedate"] = meta.get("filedate", None)
                result["date"] = meta.get("date", None)
                result["image"] = meta.get("image", None)
                result["pagetype"] = meta.get("pagetype", None)
                result["hostname"] = meta.get("hostname", None)
                result["sitename"] = meta.get("sitename", None)
            md = extract(downloaded, output_format="markdown", with_metadata=False)
            if md:
                result["parsed"] = True
                result["markdown"] = md
        except Exception:
            print("failed to extract metadata")
    return result

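# Pipeline: load the HN dump, keep only stories with URLs, shuffle
# (spreading requests across many different hosts), then crawl with
# a large pool of parallel workers and save the result to disk.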
if __name__ == "__main__":
    ds = load_dataset("nixiesearch/hackernews-comments", split="train", num_proc=16)
    ds = ds.filter(is_target, num_proc=32, batched=True, desc="selecting stories")
    ds = ds.select_columns(["id", "url"]).shuffle()
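    # Explicit output schema: all workers return identically typed columns,
    # and fields left as None become nullable values of these types.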
    schema = Features(
        {
            "id": Value("int64"),
            "url": Value("string"),
            "title": Value("string"),
            "author": Value("string"),
            "markdown": Value("string"),
            "downloaded": Value("bool"),
            "meta_extracted": Value("bool"),
            "parsed": Value("bool"),
            "description": Value("string"),
            "filedate": Value("string"),
            "date": Value("string"),
            "image": Value("string"),
            "pagetype": Value("string"),
            "hostname": Value("string"),
            "sitename": Value("string"),
            "categories": Sequence(Value("string")),
            "tags": Sequence(Value("string")),
        }
    )
    ds = ds.map(fetch_one, num_proc=128, desc="downloading", features=schema)
    ds.save_to_disk("/tmp/hnstories")