from bs4 import BeautifulSoup
from datasets import load_dataset

def get_titles(file_path):
    # Extract article titles from a local copy of
    # https://en.wikipedia.org/wiki/Wikipedia:Featured_articles
    with open(file_path, 'r', encoding='utf-8') as f:
        html_content = f.read()
    soup = BeautifulSoup(html_content, 'html.parser')
    div = soup.find_all('div', attrs={'class': 'wp-fa-contents'})
    titles = []
    for d in div:
        # Collect the linked article titles inside each featured-articles block
        for a_tag in d.find_all('a'):
            if 'title' in a_tag.attrs:
                titles.append(a_tag['title'])
    return titles


if __name__ == '__main__':
    titles = get_titles('featured.html')
    titles = list(set(titles))
    
    # Get wikipedia dataset
    dataset = load_dataset("graelo/wikipedia", "20230901.en", split="train")
    # Keep only articles whose title appears in the featured list
    dataset = dataset.filter(lambda example: example['title'] in titles, num_proc=64)
    # Report which featured titles were matched, before the title column is dropped
    used_title = [example['title'] for example in dataset]
    non_used_title = [title for title in titles if title not in used_title]
    print(f'Number of used titles: {len(used_title)}')
    print(f'Number of unused titles: {len(non_used_title)}')
    print(non_used_title[:20])
    # Prepend the title as a Markdown heading and drop the separate title column
    dataset = dataset.map(lambda x: {'text': f"# {x['title']}\n\n{x['text']}"}, remove_columns=['title'], num_proc=64)
    # Save dataset to the Hub
    dataset.push_to_hub("LeoLM/wiki_en_featured", private=True)
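
The script pushes the result to a private repo, so loading it back requires Hub authentication with access to LeoLM/wiki_en_featured. A minimal sketch of reading the published dataset, assuming the default "train" split produced by push_to_hub:

from datasets import load_dataset

# Requires `huggingface-cli login` or an HF_TOKEN with access to the private repo
featured = load_dataset("LeoLM/wiki_en_featured", split="train")
print(featured)
print(featured[0]['text'][:200])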