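# Streamlit app: redact personal information from PDF, DOCX and TXT files
# using a seq2seq masking model fine-tuned for personal-info removal.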
from transformers import AutoTokenizer
from transformers import AutoModelForSeq2SeqLM
import streamlit as st
import fitz  # PyMuPDF
from docx import Document
import re
import nltk
nltk.download('punkt')  # sentence tokenizer data (newer NLTK releases may also need 'punkt_tab')

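# Split raw text into sentences; the model masks one sentence at a time.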
def sentence_tokenize(text):
    sentences = nltk.sent_tokenize(text)
    return sentences

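# Load the fine-tuned redaction model and its tokenizer once at startup.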
model_dir_large = 'edithram23/Redaction_Personal_info_v1'
tokenizer_large = AutoTokenizer.from_pretrained(model_dir_large)
model_large = AutoModelForSeq2SeqLM.from_pretrained(model_dir_large)

# model_dir_small = 'edithram23/Redaction'
# tokenizer_small = AutoTokenizer.from_pretrained(model_dir_small)
# model_small = AutoModelForSeq2SeqLM.from_pretrained(model_dir_small)

# def small(text, model=model_small, tokenizer=tokenizer_small):
#     inputs = ["Mask Generation: " + text.lower() + '.']
#     inputs = tokenizer(inputs, max_length=256, truncation=True, return_tensors="pt")
#     output = model.generate(**inputs, num_beams=8, do_sample=True, max_length=len(text))
#     decoded_output = tokenizer.batch_decode(output, skip_special_tokens=True)[0]
#     predicted_title = decoded_output.strip()
#     pattern = r'\[.*?\]'
#     redacted_text = re.sub(pattern, '[redacted]', predicted_title)
#     return redacted_text

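# Run the seq2seq model on one sentence and normalise every masked span
# it emits to the literal marker "[redacted]".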
def mask_generation(text, model=model_large, tokenizer=tokenizer_large):
    if len(text) < 90:
        # Pad very short fragments so they read as complete sentences.
        text = text + '.'
        # return small(text)
    inputs = ["Mask Generation: " + text.lower() + '.']
    inputs = tokenizer(inputs, max_length=512, truncation=True, return_tensors="pt")
    # max_length is counted in tokens; the character length of the input is a
    # generous upper bound for regenerating the sentence with masked spans.
    output = model.generate(**inputs, num_beams=8, do_sample=True, max_length=len(text))
    decoded_output = tokenizer.batch_decode(output, skip_special_tokens=True)[0]
    predicted_title = decoded_output.strip()
    # Collapse whatever bracketed label the model produced into one marker.
    redacted_text = re.sub(r'\[.*?\]', '[redacted]', predicted_title)
    return redacted_text

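# Find the words immediately before and after each "[redacted]" marker in the
# model output; these anchors are used to locate the original text to black out.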
def find_surrounding_words(text, target="[redacted]"):
    pattern = re.compile(r'([A-Za-z0-9_@#\$%\^&*\(\)\[\]\{\}\.\,]+)?\s*' + re.escape(target) + r'\s*([A-Za-z0-9_@#\$%\^&*\(\)\[\]\{\}\.\,]+)?')
    results = []
    for match in pattern.finditer(text):
        before, after = match.group(1), match.group(2)

        before_word = before_index = None
        if before:
            # Guard against tokens that are pure punctuation (e.g. ","),
            # which would otherwise leave an empty list and crash.
            before_parts = [part for part in before.split(',') if part.strip()]
            if before_parts:
                before_word = before_parts[0].strip()
                before_index = match.start(1)

        after_word = after_index = None
        if after:
            after_parts = [part for part in after.split(',') if part.strip()]
            if after_parts:
                after_word = after_parts[0].strip()
                after_index = match.start(2)

        # A match flush against the start or end of the text is treated as
        # having no neighbouring word on that side.
        if match.start() == 0:
            before_word = before_index = None
        if match.end() == len(text):
            after_word = after_index = None

        results.append({
            "before_word": before_word,
            "after_word": after_word,
            "before_index": before_index,
            "after_index": after_index,
        })
    return results

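# Black out every occurrence of `text` on the given PDF page.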
def redact_text(page, text):
    text_instances = page.search_for(text)
    for inst in text_instances:
        page.add_redact_annot(inst, fill=(0, 0, 0))
    page.apply_redactions()

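# File readers: extract plain text (the PDF reader also returns the open
# document so it can be redacted in place).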
def read_pdf(file):
    pdf_document = fitz.open(stream=file.read(), filetype="pdf")
    text = ""
    for page in pdf_document:
        text += page.get_text()
    return text, pdf_document

def read_docx(file):
    doc = Document(file)
    text = "\n".join([para.text for para in doc.paragraphs])
    return text

def read_txt(file):
    text = file.read().decode("utf-8")
    return text

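# Dispatch on the MIME type reported by Streamlit's uploader.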
def process_file(file):
    if file.type == "application/pdf":
        return read_pdf(file)
    elif file.type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
        return read_docx(file), None
    elif file.type == "text/plain":
        return read_txt(file), None
    else:
        return "Unsupported file type.", None

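# Streamlit front end: upload a file, redact it, offer the result for download.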
st.title("Redaction")
uploaded_file = st.file_uploader("Upload a file", type=["pdf", "docx", "txt"])

if uploaded_file is not None:
    file_contents, pdf_document = process_file(uploaded_file)
    if pdf_document:
        # Pass 1: for every sentence, ask the model for a masked version, then
        # use the words around each [redacted] marker to locate the original
        # (case-preserving) substring that should be blacked out.
        redacted_text = []
        for page in pdf_document:
            for t in sentence_tokenize(page.get_text()):
                t_lower = t.lower()
                masked = mask_generation(t)
                for w in find_surrounding_words(masked):
                    before_word, after_word = w['before_word'], w['after_word']
                    if before_word is None and after_word is None:
                        # No anchor on either side: treat the whole sentence as masked.
                        redacted_text.append(t)
                    elif w['after_index'] is None:
                        if before_word in t_lower:
                            fi = t_lower.index(before_word) + len(before_word)
                            redacted_text.append(t[fi:])
                    elif w['before_index'] is None:
                        if after_word in t_lower:
                            redacted_text.append(t[:t_lower.index(after_word)])
                    else:
                        if before_word in t_lower and after_word in t_lower:
                            fi = t_lower.index(before_word) + len(before_word)
                            li = t_lower.index(after_word)
                            redacted_text.append(t[fi:li])
        # Pass 2: black out every collected substring on every page.
        for page in pdf_document:
            for snippet in redacted_text:
                if snippet.strip():  # skip empty or whitespace-only spans
                    redact_text(page, snippet)
        output_pdf = "output_redacted.pdf"
        pdf_document.save(output_pdf)

        with open(output_pdf, "rb") as file:
            st.download_button(
                label="Download Processed PDF",
                data=file,
                file_name="processed_file.pdf",
                mime="application/pdf",
            )
    else:
        # DOCX/TXT: mask sentence by sentence and display the combined result.
        sentences = sentence_tokenize(file_contents)
        processed_text = '\n'.join(mask_generation(sentence) for sentence in sentences)
        st.text_area("OUTPUT", processed_text, height=400)
        st.download_button(
            label="Download Processed File",
            data=processed_text,
            file_name="processed_file.txt",
            mime="text/plain",
        )