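"""Streamlit app that redacts personal information from uploaded PDF, DOCX,
and TXT files using two Hugging Face models: a token-classification model to
locate PII in PDFs (blacked out in place) and a seq2seq model that rewrites
plain text with PII spans replaced by a '[redacted]' marker."""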
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer, pipeline
import streamlit as st
import fitz  # PyMuPDF
from docx import Document
import re
import nltk

nltk.download('punkt')
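
# Split raw text into sentences so each one can be fed to the models
# separately; e.g. nltk.sent_tokenize("Hello there. Bye.") returns
# ["Hello there.", "Bye."].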
def sentence_tokenize(text):
    sentences = nltk.sent_tokenize(text)
    return sentences

model_dir_large = 'edithram23/Redaction_Personal_info_v1'
tokenizer_large = AutoTokenizer.from_pretrained(model_dir_large)
model_large = AutoModelForSeq2SeqLM.from_pretrained(model_dir_large)
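
# Seq2seq redactor: the model rewrites a sentence with PII spans wrapped in
# brackets; the regex below then normalises any bracketed span to a single
# literal '[redacted]' tag.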
def mask_generation(text, model=model_large, tokenizer=tokenizer_large):
    # Pad very short inputs (presumably to stabilise generation).
    if len(text) < 90:
        text = text + '.'
    inputs = ["Mask Generation: " + text.lower() + '.']
    inputs = tokenizer(inputs, max_length=512, truncation=True, return_tensors="pt")
    # Note: generate()'s max_length counts tokens; the character count of the
    # input serves here as a generous upper bound on the output length.
    output = model.generate(**inputs, num_beams=8, do_sample=True, max_length=len(text))
    decoded_output = tokenizer.batch_decode(output, skip_special_tokens=True)[0]
    predicted_title = decoded_output.strip()
    # Normalise whatever bracketed tag the model emitted to one literal marker.
    redacted_text = re.sub(r'\[.*?\]', '[redacted]', predicted_title)
    return redacted_text
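
# Hypothetical example (exact output depends on the fine-tuned model):
#   mask_generation("my name is John and I live in Delhi")
#   might return "my name is [redacted] and i live in [redacted]."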

# Use a pipeline as a high-level helper
pipe1 = pipeline("token-classification", model="edithram23/new-bert-v2")
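
# The pipeline emits one dict per sub-word token, e.g.
# {'entity': 'NAME', 'word': '##hn', 'start': 13, 'end': 15, 'score': 0.99};
# combine_words() below merges these fragments back into whole words.

# Black out every occurrence of `text` on a PDF page. Applying a redaction
# annotation in PyMuPDF permanently removes the text underneath it.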
def redact_text(page, text):
    text_instances = page.search_for(text)
    for inst in text_instances:
        page.add_redact_annot(inst, fill=(0, 0, 0))
    page.apply_redactions()

def read_pdf(file):
    pdf_document = fitz.open(stream=file.read(), filetype="pdf")
    text = ""
    for page_num in range(len(pdf_document)):
        page = pdf_document.load_page(page_num)
        text += page.get_text()
    return text, pdf_document
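
# The BERT tokenizer splits rare words into WordPiece fragments prefixed with
# '##'. This helper glues adjacent fragments back together using their
# character offsets, so whole words can be searched for on the PDF page.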
def combine_words(entities):
    combined_entities = []
    current_entity = None
    for entity in entities:
        if current_entity:
            if current_entity['end'] == entity['start']:
                # Combine the words without space
                current_entity['word'] += entity['word'].replace('##', '')
                current_entity['end'] = entity['end']
            elif current_entity['end'] + 1 == entity['start']:
                # Combine the words with a space
                current_entity['word'] += ' ' + entity['word'].replace('##', '')
                current_entity['end'] = entity['end']
            else:
                # Add the previous combined entity to the list
                combined_entities.append(current_entity)
                # Start a new entity
                current_entity = entity.copy()
                current_entity['word'] = current_entity['word'].replace('##', '')
        else:
            # Initialize the first entity
            current_entity = entity.copy()
            current_entity['word'] = current_entity['word'].replace('##', '')
    # Add the last entity
    if current_entity:
        combined_entities.append(current_entity)
    return combined_entities
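
# Worked example (hypothetical offsets): the fragments
#   {'word': 'jo', 'start': 11, 'end': 13} and {'word': '##hn', 'start': 13, 'end': 15}
# touch end-to-start, so they merge into {'word': 'john', 'start': 11, 'end': 15}.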

def read_docx(file):
    doc = Document(file)
    text = "\n".join([para.text for para in doc.paragraphs])
    return text

def read_txt(file):
    text = file.read().decode("utf-8")
    return text

def process_file(file):
    # PDFs keep their fitz document so redactions can be applied in place;
    # the other formats only yield extracted text.
    if file.type == "application/pdf":
        return read_pdf(file)
    elif file.type == "application/vnd.openxmlformats-officedocument.wordprocessingml.document":
        return read_docx(file), None
    elif file.type == "text/plain":
        return read_txt(file), None
    else:
        return "Unsupported file type.", None
st.title("Redaction")

uploaded_file = st.file_uploader("Upload a file", type=["pdf", "docx", "txt"])

if uploaded_file is not None:
    file_contents, pdf_document = process_file(uploaded_file)
    if pdf_document:
        # PDF path: locate PII with the token classifier, then black it out.
        for pg in pdf_document:
            final = []
            text = pg.get_text()
            sentences = sentence_tokenize(text)
            for sentence in sentences:
                entities = combine_words(pipe1(sentence))
                for entity in entities:
                    # Keep only real predictions, skipping stray punctuation.
                    if entity['entity'] != 'none' and len(entity['word']) > 1 and entity['word'] != ', ':
                        final.append(entity['word'])
            for word in final:
                redact_text(pg, word)
        output_pdf = "output_redacted.pdf"
        pdf_document.save(output_pdf)
        with open(output_pdf, "rb") as file:
            st.download_button(
                label="Download Processed PDF",
                data=file,
                file_name="processed_file.pdf",
                mime="application/pdf",
            )
    else:
        # Text path: mask each sentence with the seq2seq model.
        sentences = sentence_tokenize(file_contents)
        processed_text = ''
        for sentence in sentences:
            processed_text += mask_generation(sentence) + '\n'
        st.text_area("OUTPUT", processed_text, height=400)
        st.download_button(
            label="Download Processed File",
            data=processed_text,
            file_name="processed_file.txt",
            mime="text/plain",
        )