# Tunisian_Language_Dataset/preprocess.py
import re
from collections import Counter
import pandas as pd
from datasketch import MinHash, MinHashLSH
from lingua import Language, LanguageDetectorBuilder
# Initialize variables for statistics
word_count = Counter()
longest_sentence = ""
shortest_sentence = None
total_sentences = 0
all_sentences = []
def tokenize(text):
    """
    Clean and split text into words.
    """
    # Remove punctuation and split by whitespace
    words = re.findall(r'\b\w+\b', text.lower())
    return words
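# Illustrative example (added): tokenize("Ahla bik!") returns ['ahla', 'bik'].
# In Python 3, \w is Unicode-aware, so Arabic-script words are preserved as well.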
# Open the file and process line by line
# Open the file and process it line by line (UTF-8 encoding is assumed here)
with open('tunisian_data.txt', 'r', encoding='utf-8') as file:
    for line in file:
        # Strip leading/trailing whitespace
        line = line.strip()

        # Skip empty lines
        if not line:
            continue

        # Split the line into sentences (using '.', '!', or '?' as delimiters)
        sentences = re.split(r'[.!?]', line)

        for sentence in sentences:
            sentence = sentence.strip()
            if sentence:
                all_sentences.append(sentence)
                total_sentences += 1

                # Update longest and shortest sentences
                if len(sentence) > len(longest_sentence):
                    longest_sentence = sentence
                if shortest_sentence is None or len(sentence) < len(shortest_sentence):
                    shortest_sentence = sentence

                # Tokenize and count words
                words = tokenize(sentence)
                word_count.update(words)
# Get the most common words
most_common_words = word_count.most_common(10)
print(f"Most Common Words: {most_common_words}")
def get_minhash(text, num_perm=128):
    """
    Generate a MinHash for a given text.
    """
    tokens = set(text.split())
    m = MinHash(num_perm=num_perm)
    for token in tokens:
        m.update(token.encode('utf8'))
    return m
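# Note (added): two MinHashes estimate the Jaccard similarity of their token sets,
# e.g. get_minhash("ahla bik").jaccard(get_minhash("ahla bik brother")) is roughly 2/3.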
def minhash_deduplication(docs, threshold=0.8, num_perm=128):
    """
    Remove near-duplicate documents using MinHash LSH.
    """
    lsh = MinHashLSH(threshold=threshold, num_perm=num_perm)
    unique_docs = []
    for i, doc in enumerate(docs):
        m = get_minhash(doc, num_perm=num_perm)
        if not lsh.query(m):  # Keep the document only if no near duplicate is already indexed
            lsh.insert(i, m)
            unique_docs.append(doc)
    return unique_docs
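# In datasketch's MinHashLSH, `threshold` is the approximate Jaccard similarity
# above which two documents are treated as near-duplicates of each other.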
unique_docs = minhash_deduplication(all_sentences, threshold=0.8)
print(f"Number of unique documents: {len(unique_docs)}")
# Language detection
detector = LanguageDetectorBuilder.from_languages(*Language.all()).build()
labels = []
cleaned_text = []
for s in unique_docs:
    lang = detector.detect_language_of(s)
    if lang is None:
        print(f"Could not detect language for sentence: {s}")
    else:
        labels.append(lang.name)
        cleaned_text.append(s)
# Create a DataFrame with the cleaned text
df = pd.DataFrame({'text': cleaned_text})
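# Minimal sketch of persisting the result; attaching the detected language labels and
# the output filename 'tunisian_data_clean.csv' are assumptions, not part of the original script.
df['language'] = labels
df.to_csv('tunisian_data_clean.csv', index=False)
print(f"Saved {len(df)} cleaned sentences to tunisian_data_clean.csv")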