File size: 5,103 Bytes
80462dc |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 |
from copy import deepcopy
import pathlib
import srt
import typer
import re
import orjson
app = typer.Typer()  # CLI entry point; commands below register via @app.command().
# Regex fragments used by split_into_sentences() to distinguish sentence-ending
# periods from abbreviations, decimals, acronyms, and domain suffixes.
alphabets = "([A-Za-z])"  # a single ASCII letter
prefixes = "(Mr|St|Mrs|Ms|Dr)[.]"  # honorifics whose trailing dot is not a sentence end
suffixes = "(Inc|Ltd|Jr|Sr|Co)"  # name suffixes that may precede a non-terminal dot
starters = r"(Mr|Mrs|Ms|Dr|Prof|Capt|Cpt|Lt|He\s|She\s|It\s|They\s|Their\s|Our\s|We\s|But\s|However\s|That\s|This\s|Wherever)"  # words that commonly begin a new sentence
acronyms = "([A-Z][.][A-Z][.](?:[A-Z][.])?)"  # 2- or 3-letter dotted acronyms, e.g. U.S. / U.S.A.
websites = "[.](com|net|org|io|gov|edu|me)"  # dot before a TLD is not a sentence end
digits = "([0-9])"  # a single digit (used to protect decimals like 3.14)
multiple_dots = r"\.{2,}"  # ellipses / runs of two or more dots
def split_into_sentences(text: str) -> list[str]:
    """
    Split the text into sentences.

    If the text contains substrings "<prd>" or "<stop>", they would lead
    to incorrect splitting because they are used as markers for splitting.

    :param text: text to be split into sentences
    :type text: str
    :return: list of sentences
    :rtype: list[str]
    """
    # Pad with spaces so boundary patterns (which anchor on whitespace) can
    # match at the very start/end, and flatten newlines into spaces.
    work = f" {text} ".replace("\n", " ")
    # Mask periods that do NOT terminate a sentence with the <prd> marker.
    work = re.sub(prefixes, "\\1<prd>", work)
    work = re.sub(websites, "<prd>\\1", work)
    work = re.sub(f"{digits}[.]{digits}", "\\1<prd>\\2", work)
    # A run of dots (ellipsis) keeps its dots but marks a sentence boundary.
    work = re.sub(
        multiple_dots,
        lambda m: "<prd>" * len(m.group(0)) + "<stop>",
        work,
    )
    if "Ph.D" in work:
        work = work.replace("Ph.D.", "Ph<prd>D<prd>")
    # Single-letter abbreviations ("J. Smith"), acronyms, and name suffixes.
    work = re.sub(rf"\s{alphabets}[.] ", " \\1<prd> ", work)
    work = re.sub(f"{acronyms} {starters}", "\\1<stop> \\2", work)
    work = re.sub(
        f"{alphabets}[.]{alphabets}[.]{alphabets}[.]",
        "\\1<prd>\\2<prd>\\3<prd>",
        work,
    )
    work = re.sub(f"{alphabets}[.]{alphabets}[.]", "\\1<prd>\\2<prd>", work)
    work = re.sub(f" {suffixes}[.] {starters}", " \\1<stop> \\2", work)
    work = re.sub(f" {suffixes}[.]", " \\1<prd>", work)
    work = re.sub(f" {alphabets}[.]", " \\1<prd>", work)
    # Move terminal punctuation outside a closing quote so the quote stays
    # attached to its sentence. (str.replace is a no-op when absent.)
    for inside, moved in ((".”", "”."), ('."', '".'), ('!"', '"!'), ('?"', '"?')):
        work = work.replace(inside, moved)
    # Real sentence terminators become boundaries; masked periods come back.
    for mark in (".", "?", "!"):
        work = work.replace(mark, f"{mark}<stop>")
    work = work.replace("<prd>", ".")
    pieces = [piece.strip() for piece in work.split("<stop>")]
    # The trailing padding always leaves one empty fragment at the end.
    if pieces and pieces[-1] == "":
        pieces.pop()
    return pieces
@app.command()
def srt_folder(folder: pathlib.Path, output_file: pathlib.Path):
    """Convert every .srt file under *folder* into one JSON-lines output file.

    Each line of *output_file* is an orjson document with:
      - "text": the list of cleaned sentences (moved from meta/list_sentences)
      - "meta": {"things": [...]} parsed from the filename stem.

    The stem is expected to look like "<prefix>-<thing; thing; ...>(576p..."
    or "...(1080p..." with a trailing "_<suffix>" segment; files that match
    neither resolution marker are reported and skipped.

    :param folder: directory searched recursively for *.srt files
    :param output_file: destination JSON-lines file (opened in binary mode)
    """
    with open(output_file, "wb") as f:
        for file in folder.rglob("*.srt"):
            if "(576p" in file.stem:
                marker = "(576p"
            elif "(1080p" in file.stem:
                marker = "(1080p"
            else:
                # BUG FIX: the original fell through here with `things_string`
                # unbound, raising NameError on the next statement.
                print(file.stem, "Missing trailing?")
                continue
            # Drop the trailing "_<suffix>" segment, take the part after the
            # first "-", and cut at the resolution marker.
            things_string = (
                "_".join(file.stem.split("_")[:-1]).split("-")[1].split(marker)[0]
            )
            things = [i.strip() for i in things_string.split(";")]
            dict_content = srt_file(file, None, as_dict=True)
            dict_content["meta"]["things"] = things
            # Replace the joined text blob with the per-sentence list.
            del dict_content["text"]
            dict_content["text"] = dict_content["meta"]["list_sentences"]
            del dict_content["meta"]["list_sentences"]
            f.write(orjson.dumps(dict_content) + b"\n")
@app.command()
def srt_file(file: pathlib.Path, output_file: pathlib.Path, as_dict: bool = False):
    """Parse one .srt subtitle file into cleaned, capitalized sentences.

    Caption-credit lines and promotional boilerplate are dropped, narrator
    prefixes are stripped, and bracketed cues like "[ music ]" are removed.

    :param file: the .srt file to read (UTF-8)
    :param output_file: where to write the orjson payload; unused when
        *as_dict* is True (callers then pass None)
    :param as_dict: when True, return the payload instead of writing it
    :return: {"text": <joined sentences>, "meta": {"list_sentences": [...]}}
        when *as_dict* is True, otherwise None
    """
    data = file.read_text(encoding="utf-8")
    sub_lines = list(srt.parse(data))
    pieces = []
    for sub in sub_lines:
        sub_content = sub.content.lower()
        # Drop caption-credit boilerplate entirely.
        if "captions by" in sub_content or "captions paid for" in sub_content:
            continue
        # Strip narrator / speaker-change prefixes.
        # BUG FIX: the original did `split("narrator:")[1]` guarded only by
        # startswith("narrator"), which raised IndexError for content that
        # starts with "narrator" but contains no "narrator:".
        if sub_content.startswith("narrator:"):
            sub_content = sub_content[len("narrator:"):].strip()
        elif sub_content.startswith(">> narrator:"):
            sub_content = sub_content[len(">> narrator:"):].strip()
        if sub_content.startswith(">>"):
            sub_content = sub_content[2:].strip()
        # NOTE(review): `.replace(" ", " ")` below reads as a no-op — it was
        # possibly meant to collapse double spaces or a non-breaking space;
        # confirm against the original intent.
        pieces.append(sub_content.replace("\\N", " ").replace(" ", " "))
    # Join with single spaces and keep the trailing space the original
    # accumulation produced (linear-time join instead of quadratic +=).
    raw_content = " ".join(pieces) + " "
    raw_content = raw_content.replace(" --", "-- ").replace("♪", "").replace(" ", " ")
    sents = split_into_sentences(raw_content)
    # BUG FIX: skip empty fragments — the splitter only removes a trailing
    # empty string, so interior empties made s[0] raise IndexError.
    sents = [s[0].upper() + s[1:] for s in sents if s]
    # Filter out stage cues and audience-mail boilerplate.
    skip_phrases = (
        "have any comments about the show",
        "have any comments,",
        "have any questions about the show",
        "drop us a line at",
    )
    cleaned = []
    for sent in sents:
        sent = re.sub(r"\[ .*? \]", "", sent).strip()
        if not sent.strip("."):
            continue  # nothing left but dots once cues were removed
        if any(phrase in sent for phrase in skip_phrases):
            continue
        cleaned.append(sent)
    sents = cleaned
    payload = {"text": " ".join(sents), "meta": {"list_sentences": sents}}
    if as_dict:
        return payload
    output_file.write_bytes(orjson.dumps(payload))
if __name__ == "__main__":
    # Hand control to typer, which dispatches to the srt_file / srt_folder commands.
    app()
|