# mosel/scripts/flagSuspiciousSingleWord.py
# (from commit 04fcf96 by mgaido91: "[DEV] Add scripts for hallucination detection")
# Copyright 2024 FBK
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
try:
import pandas as pd
except ImportError:
print("Please install the pandas package with 'pip install pandas' and try again.")
exit(1)
import argparse
_VERSION = "1.01"
class ExplicitDefaultsHelpFormatter(argparse.ArgumentDefaultsHelpFormatter):
    """Help formatter that shows '(default: ...)' only for meaningful defaults.

    Identity checks (``is``) are deliberate: a default of 0 or "" must still
    be reported, while None and False are suppressed as noise.
    """

    def _get_help_string(self, action):
        if action.default is not None and action.default is not False:
            return super()._get_help_string(action)
        return action.help
def processColumn(col, counts):
    """Count, in place, the words that appear alone on a line.

    A line contributes iff it consists of exactly one whitespace-delimited
    token; the stripped token is used as the dictionary key.

    Args:
        col: iterable of strings (typically a TSV column).
        counts: dict mapping word -> occurrence count, updated in place.
            (Renamed from ``dict``, which shadowed the builtin.)
    """
    for line in col:
        if len(line.split()) == 1:
            word = line.strip()
            # dict.get with a default replaces the manual in/else branching.
            counts[word] = counts.get(word, 0) + 1
def processTSVfiles(files, counts):
    """Scan the text column of each TSV file and return the most frequent
    single-line word.

    Args:
        files: list of TSV file paths.
        counts: dict mapping word -> occurrence count, updated in place
            across all files. (Renamed from ``dict``, which shadowed the
            builtin.)

    Returns:
        The word with the greatest count ("" if no single-word line exists).

    Note: reads the column name from the module-level ``parsed_args``
    (set in the __main__ section); exits the process on I/O or column errors.
    """
    for tsv in files:
        try:
            # quoting=3 is csv.QUOTE_NONE: treat quotes as literal text.
            inDF = pd.read_csv(tsv, sep='\t', dtype=str, low_memory=False,
                               na_filter=False, quoting=3)
        except IOError:
            # BUG FIX: previously only printed and fell through, then used an
            # undefined (or stale) DataFrame. Abort instead.
            print("Error in opening " + tsv + " file")
            exit(1)
        try:
            col = inDF[parsed_args.column]
        except KeyError:
            print("Error in reading column <" + parsed_args.column + "> in TSV file")
            exit(1)
        processColumn(col, counts)
    return findSuspiciousWord(counts)
def findSuspiciousWord(counts):
    """Return the word with the greatest count in *counts*.

    Ties are broken by insertion order (first inserted wins), matching the
    original manual scan; an empty dict yields "". (Parameter renamed from
    ``dict``, which shadowed the builtin; the hand-rolled argmax loop also
    shadowed ``max``.)
    """
    return max(counts, key=counts.get, default="")
def main(args):
    """
    This script flags (by setting True the corresponding entry of
    the hall_frequent_single_word column) those sentences which consist
    of only one single suspicious word. This word can either be passed
    as a parameter (suspiciousWord option) or found inside the TSV
    input files. In the latter case, it is set to the most frequent
    word in the text included in the files to be inspected.
    The TSV files to inspect can be passed through the
    tsv-SuspiciousWordFiles option. If neither an explicit suspiciousWord
    nor tsv-SuspiciousWordFiles is passed, the tsv-InFile is inspected.

    Relies on the module-level ``parser``, ``tsv_files_specified`` and
    ``contrastive_options`` set in the __main__ section; exits the process
    on any argument or I/O error.
    """
    # word -> count support structure, filled by processTSVfiles.
    counts = {}
    if args.version:
        print(f"Version {_VERSION} of anomalous string detector")
        # NOTE(review): exits with status 1 even for a successful --version
        # query; conventionally this would be 0. Kept for compatibility.
        exit(1)
    if not tsv_files_specified:
        print("--tsv-InFile and --tsv-OutFile are both required")
        parser.print_usage()
        exit(1)
    if contrastive_options:
        print("Either specify SuspiciousWord or SuspiciousWordFiles, both cannot be passed")
        parser.print_usage()
        exit(1)
    # Determine the suspicious word:
    if args.suspiciousWord is not None:
        # passed explicitly as a parameter
        suspiciousWord = args.suspiciousWord.strip()
    elif args.tsv_SuspiciousWordFiles is not None:
        # searched in the TSV files dedicated to that purpose
        suspiciousWord = processTSVfiles(args.tsv_SuspiciousWordFiles, counts)
    else:
        # searched in the input TSV file itself
        suspiciousWord = processTSVfiles([args.tsv_InFile], counts)
    # Open the input TSV file and get the text to process.
    try:
        inDF = pd.read_csv(args.tsv_InFile, sep='\t', dtype=str, low_memory=False,
                           na_filter=False, quoting=3)
    except IOError:
        # BUG FIX: the message referenced an undefined name ('tsv') and
        # execution fell through to use an undefined DataFrame. Report the
        # actual file and abort.
        print("Error in opening " + args.tsv_InFile + " file")
        exit(1)
    try:
        txt = inDF[args.column]
    except KeyError:
        print("Error in reading column <" + args.column + "> in TSV file")
        exit(1)
    # Scan each input line and check whether it consists of only the
    # suspicious word.
    flag = []
    for line in txt:
        if suspiciousWord == line.strip():
            # --quiet suppresses the "(word)" explanation next to True.
            flag.append("True" if args.quiet else "True (" + suspiciousWord + ")")
        else:
            flag.append("False")
    # Add the column to the original DataFrame read from the input TSV file
    # and store the updated DataFrame in the output TSV file.
    inDF['hall_frequent_single_word'] = flag
    inDF.to_csv(args.tsv_OutFile, sep="\t", index=False, quoting=3)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(formatter_class=ExplicitDefaultsHelpFormatter)
    # I/O related arguments
    parser.add_argument(
        '--tsv-InFile', '-i', type=str,
        help="The input TSV file [Mandatory]")
    parser.add_argument(
        '--tsv-OutFile', '-o', type=str,
        help="The output TSV file [Mandatory. If equal to input TSV file, the new column ('suspicious single word') is added to the original file]")
    parser.add_argument(
        '--tsv-SuspiciousWordFiles', '-s', type=str, nargs='+',
        help="The TSV file(s) used to look for the suspicious word [Optional. If not present, the input TSV file is used instead]")
    # Processing arguments:
    parser.add_argument(
        '--column', '-c', default='source',
        help="Column name of the text to process [Optional]")
    parser.add_argument(
        '--suspiciousWord', '-w', type=str,
        help="suspicious word [if not specified, found in other TSV files passed as parameters]")
    # Reporting related arguments
    parser.add_argument(
        '--quiet', '-q', default=False, action='store_true',
        help='Print only True/False, no explanation for True\'s')
    # Get version information:
    parser.add_argument(
        '--version', '-v', action='store_true', default=False,
        help="Print version of the script and exit")
    parsed_args = parser.parse_args()
    # Both input and output TSV paths must be present and non-empty.
    # (Plain attribute access replaces the redundant getattr(obj, 'literal')
    # calls of the original.)
    tsv_files_specified = (
        parsed_args.tsv_InFile is not None
        and len(parsed_args.tsv_InFile) > 0
        and parsed_args.tsv_OutFile is not None
        and len(parsed_args.tsv_OutFile) > 0
    )
    # The two ways of providing the suspicious word are mutually exclusive.
    contrastive_options = (
        parsed_args.tsv_SuspiciousWordFiles is not None
        and parsed_args.suspiciousWord is not None
    )
    main(parsed_args)