import re
from collections import Counter

import pandas as pd
from datasketch import MinHash, MinHashLSH
from lingua import Language, LanguageDetectorBuilder

# Running statistics gathered over the whole corpus
word_count = Counter()
longest_sentence = ""
shortest_sentence = None
total_sentences = 0
all_sentences = []


def tokenize(text):
    """Clean and split text into words."""
    words = re.findall(r'\b\w+\b', text.lower())
    return words
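
# Illustrative check (hypothetical input):
#   tokenize("Ahla! chniya el a7wel?") -> ['ahla', 'chniya', 'el', 'a7wel']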


# Read the raw corpus; explicit UTF-8 avoids platform-dependent decoding errors.
with open('tunisian_data.txt', 'r', encoding='utf-8') as file:
    for line in file:
        line = line.strip()
        if not line:
            continue

        # Split each line into sentences on terminal punctuation.
        sentences = re.split(r'[.!?]', line)

        for sentence in sentences:
            sentence = sentence.strip()
            if sentence:
                all_sentences.append(sentence)
                total_sentences += 1

                # Track the longest and shortest sentences seen so far.
                if len(sentence) > len(longest_sentence):
                    longest_sentence = sentence
                if shortest_sentence is None or len(sentence) < len(shortest_sentence):
                    shortest_sentence = sentence

                # Update the global word frequencies.
                words = tokenize(sentence)
                word_count.update(words)


most_common_words = word_count.most_common(10)
print(f"Most Common Words: {most_common_words}")


def get_minhash(text, num_perm=128):
    """Generate a MinHash signature for a given text."""
    tokens = set(text.split())
    m = MinHash(num_perm=num_perm)
    for token in tokens:
        m.update(token.encode('utf8'))
    return m
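
# Illustrative check (hypothetical sentences): MinHash signatures of near-identical
# texts should report a high estimated Jaccard similarity, e.g.
#   get_minhash("ahla bik fi tounes").jaccard(get_minhash("ahla bik fi tounes lyoum"))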


def minhash_deduplication(docs, threshold=0.8, num_perm=128):
    """Remove near-duplicate documents using MinHash LSH."""
    lsh = MinHashLSH(threshold=threshold, num_perm=num_perm)
    unique_docs = []

    for i, doc in enumerate(docs):
        m = get_minhash(doc, num_perm=num_perm)
        # Keep the document only if no already-kept document looks like a near-duplicate.
        if not lsh.query(m):
            lsh.insert(i, m)
            unique_docs.append(doc)

    return unique_docs


unique_docs = minhash_deduplication(all_sentences, threshold=0.8)
print(f"Number of unique documents: {len(unique_docs)}")


# Language identification: build a lingua detector over all supported languages.
# Restricting the candidates to an expected subset (e.g. Language.ARABIC,
# Language.FRENCH, Language.ENGLISH) would use less memory and run faster.
detector = LanguageDetectorBuilder.from_languages(*Language.all()).build()
labels = []
cleaned_text = []

for s in unique_docs:
    lang = detector.detect_language_of(s)
    if lang is None:
        print(f"Could not detect language for sentence: {s}")
    else:
        labels.append(lang.name)
        cleaned_text.append(s)


# Assemble the cleaned corpus, keeping the detected language label alongside each sentence.
df = pd.DataFrame({'text': cleaned_text, 'language': labels})
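
# Optional sanity check: distribution of detected languages in the cleaned corpus.
print(df['language'].value_counts())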