from trafilatura import fetch_url, extract, extract_metadata
from datasets import load_dataset, Features, Value, Sequence
from typing import Dict, List, Any
from trafilatura.settings import DEFAULT_CONFIG
from copy import deepcopy

# Copy the default trafilatura config and tighten it for bulk crawling:
# a short download timeout and no sleep between requests.
my_config = deepcopy(DEFAULT_CONFIG)
my_config["DEFAULT"]["DOWNLOAD_TIMEOUT"] = "3"
my_config["DEFAULT"]["SLEEP_TIME"] = "0"


def is_target(batch: Dict[str, List]) -> List[bool]:
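    """Batched filter: keep only live, non-deleted stories that have a non-empty URL."""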
    result = []
    for tpe, dead, deleted, url in zip(
        batch["type"], batch["dead"], batch["deleted"], batch["url"]
    ):
        if (
            tpe == "story"
            and dead is None
            and deleted is None
            and url is not None
            and len(url) > 0
        ):
            result.append(True)
        else:
            result.append(False)
    return result


def fetch_one(doc: Dict[str, Any]) -> Dict[str, Any]:
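    """Download a single story URL, then extract page metadata and the main text as markdown.

    The boolean flags (downloaded / meta_extracted / parsed) record how far each row got,
    so failures can be inspected later instead of being silently dropped.
    """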
    downloaded = fetch_url(doc["url"], config=my_config)
    result = {
        "id": doc["id"],
        "title": None,
        "author": None,
        "markdown": None,
        "downloaded": False,
        "meta_extracted": False,
        "parsed": False,
        "description": None,
        "filedate": None,
        "date": None,
        "image": None,
        "pagetype": None,
        "hostname": None,
        "sitename": None,
        "categories": None,
        "tags": None,
    }
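    # fetch_url returns the decoded HTML on success and None on any network error.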
    if downloaded:
        result["downloaded"] = True
        try:
            raw_meta = extract_metadata(downloaded)
            if raw_meta:
                result["meta_extracted"] = True
                meta = raw_meta.as_dict()
                result["title"] = meta.get("title", None)
                result["author"] = meta.get("author", None)
                result["description"] = meta.get("description", None)
                result["filedate"] = meta.get("filedate", None)
                result["date"] = meta.get("date", None)
                result["image"] = meta.get("image", None)
                result["pagetype"] = meta.get("pagetype", None)
                result["hostname"] = meta.get("hostname", None)
                result["sitename"] = meta.get("sitename", None)
                md = extract(downloaded, output_format="markdown", with_metadata=False)
                if md:
                    result["parsed"] = True
                    result["markdown"] = md
        except Exception as err:
            print(f"extraction failed for {doc['url']}: {err}")
    return result


if __name__ == "__main__":
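    # Load the HackerNews items dump and keep only live stories that point at an external URL.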
    ds = load_dataset("nixiesearch/hackernews-comments", split="train", num_proc=16)
    ds = ds.filter(is_target, num_proc=32, batched=True, desc="selecting stories")
    ds = ds.select_columns(["id", "url"]).shuffle()
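    # Declare the output schema explicitly: many columns can be None for every row in a batch,
    # so letting datasets infer the types would be unreliable.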
    schema = Features(
        {
            "id": Value("int64"),
            "url": Value("string"),
            "title": Value("string"),
            "author": Value("string"),
            "markdown": Value("string"),
            "downloaded": Value("bool"),
            "meta_extracted": Value("bool"),
            "parsed": Value("bool"),
            "description": Value("string"),
            "filedate": Value("string"),
            "date": Value("string"),
            "image": Value("string"),
            "pagetype": Value("string"),
            "hostname": Value("string"),
            "sitename": Value("string"),
            "categories": Sequence(Value("string")),
            "tags": Sequence(Value("string")),
        }
    )
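    # Fetching is network-bound, so a high num_proc mostly overlaps time spent waiting on sockets.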
    ds = ds.map(fetch_one, num_proc=128, desc="downloading", features=schema)
    ds.save_to_disk("/tmp/hnstories")