"""
Fine-tuning the library models for question answering.
"""
|
import json
import logging
import os
import sys
from dataclasses import dataclass, field
from pathlib import Path
from typing import Optional

import evaluate
import tensorflow as tf
from datasets import load_dataset
from utils_qa import postprocess_qa_predictions

import transformers
from transformers import (
    AutoConfig,
    AutoTokenizer,
    EvalPrediction,
    HfArgumentParser,
    PreTrainedTokenizerFast,
    PushToHubCallback,
    TFAutoModelForQuestionAnswering,
    TFTrainingArguments,
    create_optimizer,
    set_seed,
)
from transformers.utils import CONFIG_NAME, TF2_WEIGHTS_NAME, check_min_version, send_example_telemetry
|
# Will raise an error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version("4.28.0.dev0")

logger = logging.getLogger(__name__)
|
@dataclass
class ModelArguments:
    """
    Arguments pertaining to which model/config/tokenizer we are going to fine-tune from.
    """

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Path to directory to store the pretrained models downloaded from huggingface.co"},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
|
@dataclass
class DataTrainingArguments:
    """
    Arguments pertaining to what data we are going to input our model for training and eval.
    """

    dataset_name: Optional[str] = field(
        default=None, metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default=None, metadata={"help": "The configuration name of the dataset to use (via the datasets library)."}
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a csv or json file)."}
    )
    validation_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input evaluation data file to evaluate on (a csv or json file)."},
    )
    test_file: Optional[str] = field(
        default=None,
        metadata={"help": "An optional input test data file to predict on (a csv or json file)."},
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
    preprocessing_num_workers: Optional[int] = field(
        default=None,
        metadata={"help": "The number of processes to use for the preprocessing."},
    )
    max_seq_length: int = field(
        default=384,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. If False, will pad the samples dynamically when"
                " batching to the maximum length in the batch (which can be faster on GPU but will be slower on TPU)."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    version_2_with_negative: bool = field(
        default=False, metadata={"help": "If true, some of the examples do not have an answer."}
    )
    null_score_diff_threshold: float = field(
        default=0.0,
        metadata={
            "help": (
                "The threshold used to select the null answer: if the best answer has a score that is less than "
                "the score of the null answer minus this threshold, the null answer is selected for this example. "
                "Only useful when `version_2_with_negative=True`."
            )
        },
    )
    doc_stride: int = field(
        default=128,
        metadata={"help": "When splitting up a long document into chunks, how much stride to take between chunks."},
    )
    n_best_size: int = field(
        default=20,
        metadata={"help": "The total number of n-best predictions to generate when looking for an answer."},
    )
    max_answer_length: int = field(
        default=30,
        metadata={
            "help": (
                "The maximum length of an answer that can be generated. This is needed because the start "
                "and end predictions are not conditioned on one another."
            )
        },
    )

    def __post_init__(self):
        if (
            self.dataset_name is None
            and self.train_file is None
            and self.validation_file is None
            and self.test_file is None
        ):
            raise ValueError("Need either a dataset name or a training, validation, or test file.")
        else:
            if self.train_file is not None:
                extension = self.train_file.split(".")[-1]
                assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            if self.validation_file is not None:
                extension = self.validation_file.split(".")[-1]
                assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
            if self.test_file is not None:
                extension = self.test_file.split(".")[-1]
                assert extension in ["csv", "json"], "`test_file` should be a csv or a json file."
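
# A minimal example invocation, for orientation (the model and dataset names here are illustrative):
#
#   python run_qa.py \
#     --model_name_or_path distilbert-base-cased \
#     --dataset_name squad \
#     --do_train --do_eval \
#     --output_dir ./tf-qa-output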
|
class SavePretrainedCallback(tf.keras.callbacks.Callback):
    # Hugging Face models have a save_pretrained() method that saves both the weights and the necessary
    # metadata to allow them to be loaded as a pretrained model in the future. This is a simple Keras
    # callback that saves the model with this method after each epoch.
    def __init__(self, output_dir, **kwargs):
        super().__init__()
        self.output_dir = output_dir

    def on_epoch_end(self, epoch, logs=None):
        self.model.save_pretrained(self.output_dir)
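
# Note: SavePretrainedCallback is not wired into the training run below; it is kept as a lightweight local
# alternative to PushToHubCallback for users who only want per-epoch snapshots on disk
# (e.g. callbacks=[SavePretrainedCallback(output_dir="./snapshots")]).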
|
def main():
    # See all possible arguments in src/transformers/training_args.py, or by passing the --help flag to
    # this script. We keep distinct sets of args for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()

    # Sending telemetry. Tracking the example usage helps the maintainers better allocate resources; the
    # information sent is only what is passed as arguments, along with your Python/framework versions.
    send_example_telemetry("run_qa", model_args, data_args, framework="tensorflow")

    output_dir = Path(training_args.output_dir)
    output_dir.mkdir(parents=True, exist_ok=True)

    # Detect a past checkpoint in output_dir so training can resume from it rather than clobbering it.
    checkpoint = None
    if len(os.listdir(training_args.output_dir)) > 0 and not training_args.overwrite_output_dir:
        if (output_dir / CONFIG_NAME).is_file() and (output_dir / TF2_WEIGHTS_NAME).is_file():
            checkpoint = output_dir
            logger.info(
                f"Checkpoint detected, resuming training from checkpoint in {training_args.output_dir}. To avoid this"
                " behavior, change the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )
        else:
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to continue regardless."
            )
|
    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if training_args.should_log else logging.WARN)

    # Set the verbosity of the Transformers logger (on the main process only).
    if training_args.should_log:
        transformers.utils.logging.set_verbosity_info()
        transformers.utils.logging.enable_default_handler()
        transformers.utils.logging.enable_explicit_format()
    logger.info(f"Training/evaluation parameters {training_args}")
|
    # Set seed before initializing the model.
    set_seed(training_args.seed)
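
    # In distributed training, load_dataset guarantees that only one local process downloads the dataset;
    # downloads are cached (under `cache_dir` if given) and reused on subsequent runs.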
|
    # Get the datasets: you can either provide your own CSV/JSON training and evaluation files, or specify
    # the name of a public dataset on the Hub (https://huggingface.co/datasets/), which will be downloaded
    # automatically.
    if data_args.dataset_name is not None:
        # Downloading and loading a dataset from the hub.
        datasets = load_dataset(
            data_args.dataset_name,
            data_args.dataset_config_name,
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
    else:
        data_files = {}
        if data_args.train_file is not None:
            data_files["train"] = data_args.train_file
            extension = data_args.train_file.split(".")[-1]
        if data_args.validation_file is not None:
            data_files["validation"] = data_args.validation_file
            extension = data_args.validation_file.split(".")[-1]
        if data_args.test_file is not None:
            data_files["test"] = data_args.test_file
            extension = data_args.test_file.split(".")[-1]
        datasets = load_dataset(
            extension,
            data_files=data_files,
            field="data",
            cache_dir=model_args.cache_dir,
            use_auth_token=True if model_args.use_auth_token else None,
        )
|
    # Load pretrained config and tokenizer. The .from_pretrained methods guarantee that only one local
    # process can concurrently download the model and vocabulary.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    tokenizer = AutoTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=True,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )

    # The post-processing in this script relies on token-to-character offset mappings, which only fast
    # tokenizers provide.
    if not isinstance(tokenizer, PreTrainedTokenizerFast):
        raise ValueError(
            "This example script only works for models that have a fast tokenizer. Check out the big table of"
            " models at https://huggingface.co/transformers/index.html#supported-frameworks to find the model"
            " types that meet this requirement"
        )
|
    # Preprocessing the datasets. Preprocessing is slightly different for training and evaluation.
    if training_args.do_train:
        column_names = datasets["train"].column_names
    elif training_args.do_eval:
        column_names = datasets["validation"].column_names
    else:
        column_names = datasets["test"].column_names
    question_column_name = "question" if "question" in column_names else column_names[0]
    context_column_name = "context" if "context" in column_names else column_names[1]
    answer_column_name = "answers" if "answers" in column_names else column_names[2]

    # Padding side determines if we do (question|context) or (context|question).
    pad_on_right = tokenizer.padding_side == "right"
|
    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)

    if data_args.pad_to_max_length or isinstance(training_args.strategy, tf.distribute.TPUStrategy):
        logger.info("Padding all batches to max length because argument was set or we're on TPU.")
        padding = "max_length"
    else:
        padding = False
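
    # Design note: dynamic per-batch padding (padding=False here, handled later when the tf.data pipelines
    # are built) is usually faster on GPU, but TPUs strongly prefer fixed shapes, hence max_length padding
    # when running under a TPUStrategy.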
|
    def prepare_train_features(examples):
        # Some of the questions have lots of whitespace on the left, which is not useful and will make the
        # truncation of the context fail (the tokenized question will take a lot of space). So we remove
        # that left whitespace.
        examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

        # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride.
        # This results in one example possibly giving several features when a context is long, each of those
        # features having a context that overlaps a bit with the context of the previous feature.
        tokenized_examples = tokenizer(
            examples[question_column_name if pad_on_right else context_column_name],
            examples[context_column_name if pad_on_right else question_column_name],
            truncation="only_second" if pad_on_right else "only_first",
            max_length=max_seq_length,
            stride=data_args.doc_stride,
            return_overflowing_tokens=True,
            return_offsets_mapping=True,
            padding=padding,
        )

        # Since one example might give us several features if it has a long context, we need a map from a
        # feature to its corresponding example.
        sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
        # The offset mappings give us a map from token to character position in the original context, which
        # we use to compute the start_positions and end_positions.
        offset_mapping = tokenized_examples.pop("offset_mapping")

        # Let's label those examples!
        tokenized_examples["start_positions"] = []
        tokenized_examples["end_positions"] = []

        for i, offsets in enumerate(offset_mapping):
            # We will label impossible answers with the index of the CLS token.
            input_ids = tokenized_examples["input_ids"][i]
            cls_index = input_ids.index(tokenizer.cls_token_id)

            # Grab the sequence corresponding to that example (to know what is the context and what is the question).
            sequence_ids = tokenized_examples.sequence_ids(i)

            # One example can give several spans; this is the index of the example containing this span of text.
            sample_index = sample_mapping[i]
            answers = examples[answer_column_name][sample_index]

            # If no answers are given, set the cls_index as answer.
            if len(answers["answer_start"]) == 0:
                tokenized_examples["start_positions"].append(cls_index)
                tokenized_examples["end_positions"].append(cls_index)
            else:
                # Start/end character index of the answer in the text.
                start_char = answers["answer_start"][0]
                end_char = start_char + len(answers["text"][0])

                # Start token index of the current span in the text.
                token_start_index = 0
                while sequence_ids[token_start_index] != (1 if pad_on_right else 0):
                    token_start_index += 1

                # End token index of the current span in the text.
                token_end_index = len(input_ids) - 1
                while sequence_ids[token_end_index] != (1 if pad_on_right else 0):
                    token_end_index -= 1

                # Detect if the answer is out of the span (in which case this feature is labeled with the CLS index).
                if not (offsets[token_start_index][0] <= start_char and offsets[token_end_index][1] >= end_char):
                    tokenized_examples["start_positions"].append(cls_index)
                    tokenized_examples["end_positions"].append(cls_index)
                else:
                    # Otherwise move the token_start_index and token_end_index to the two ends of the answer.
                    # Note: we could go after the last offset if the answer is the last word (edge case).
                    while token_start_index < len(offsets) and offsets[token_start_index][0] <= start_char:
                        token_start_index += 1
                    tokenized_examples["start_positions"].append(token_start_index - 1)
                    while offsets[token_end_index][1] >= end_char:
                        token_end_index -= 1
                    tokenized_examples["end_positions"].append(token_end_index + 1)

        return tokenized_examples
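
    # Roughly speaking, consecutive features taken from one long context overlap by `doc_stride` tokens, so
    # any answer shorter than that overlap is guaranteed to appear intact in at least one feature.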
|
    processed_datasets = {}
    if training_args.do_train:
        if "train" not in datasets:
            raise ValueError("--do_train requires a train dataset")
        train_dataset = datasets["train"]
        if data_args.max_train_samples is not None:
            # We will select samples from the whole data if the argument is specified.
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        # Create train features from the dataset.
        train_dataset = train_dataset.map(
            prepare_train_features,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            remove_columns=column_names,
            load_from_cache_file=not data_args.overwrite_cache,
        )
        if data_args.max_train_samples is not None:
            # The number of samples might increase during feature creation, so we select again afterwards.
            max_train_samples = min(len(train_dataset), data_args.max_train_samples)
            train_dataset = train_dataset.select(range(max_train_samples))
        processed_datasets["train"] = train_dataset
|
    def prepare_validation_features(examples):
        # Some of the questions have lots of whitespace on the left, which is not useful and will make the
        # truncation of the context fail (the tokenized question will take a lot of space). So we remove
        # that left whitespace.
        examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

        # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride,
        # exactly as in prepare_train_features above.
        tokenized_examples = tokenizer(
            examples[question_column_name if pad_on_right else context_column_name],
            examples[context_column_name if pad_on_right else question_column_name],
            truncation="only_second" if pad_on_right else "only_first",
            max_length=max_seq_length,
            stride=data_args.doc_stride,
            return_overflowing_tokens=True,
            return_offsets_mapping=True,
            padding=padding,
        )

        # Since one example might give us several features if it has a long context, we need a map from a
        # feature to its corresponding example.
        sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

        # For evaluation, we will need to convert our predictions to substrings of the context, so we keep
        # the corresponding example_id, and we store the offset mappings.
        tokenized_examples["example_id"] = []

        for i in range(len(tokenized_examples["input_ids"])):
            # Grab the sequence corresponding to that example (to know what is the context and what is the question).
            sequence_ids = tokenized_examples.sequence_ids(i)
            context_index = 1 if pad_on_right else 0

            # One example can give several spans; this is the index of the example containing this span of text.
            sample_index = sample_mapping[i]
            tokenized_examples["example_id"].append(examples["id"][sample_index])

            # Set to None the offset_mapping entries that are not part of the context, so it's easy to
            # determine whether a token position is part of the context or not.
            tokenized_examples["offset_mapping"][i] = [
                (o if sequence_ids[k] == context_index else None)
                for k, o in enumerate(tokenized_examples["offset_mapping"][i])
            ]

        return tokenized_examples
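
    # The None entries written above let the post-processing step skip non-context tokens when mapping
    # predicted token spans back to character spans in the original text.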
|
    if training_args.do_eval:
        if "validation" not in datasets:
            raise ValueError("--do_eval requires a validation dataset")
        eval_examples = datasets["validation"]
        if data_args.max_eval_samples is not None:
            # We will select samples from the whole data if the argument is specified.
            max_eval_samples = min(len(eval_examples), data_args.max_eval_samples)
            eval_examples = eval_examples.select(range(max_eval_samples))
        # Create validation features from the dataset.
        eval_dataset = eval_examples.map(
            prepare_validation_features,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            remove_columns=column_names,
            load_from_cache_file=not data_args.overwrite_cache,
        )
        if data_args.max_eval_samples is not None:
            # The number of samples might increase during feature creation, so we select again afterwards.
            max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
            eval_dataset = eval_dataset.select(range(max_eval_samples))
        processed_datasets["validation"] = eval_dataset

    if training_args.do_predict:
        if "test" not in datasets:
            raise ValueError("--do_predict requires a test dataset")
        predict_examples = datasets["test"]
        if data_args.max_predict_samples is not None:
            # We will select samples from the whole data if the argument is specified (capped at the dataset size).
            max_predict_samples = min(len(predict_examples), data_args.max_predict_samples)
            predict_examples = predict_examples.select(range(max_predict_samples))
        # Create test features from the dataset.
        predict_dataset = predict_examples.map(
            prepare_validation_features,
            batched=True,
            num_proc=data_args.preprocessing_num_workers,
            remove_columns=column_names,
            load_from_cache_file=not data_args.overwrite_cache,
        )
        if data_args.max_predict_samples is not None:
            # The number of samples might increase during feature creation, so we select again afterwards.
            max_predict_samples = min(len(predict_dataset), data_args.max_predict_samples)
            predict_dataset = predict_dataset.select(range(max_predict_samples))
        processed_datasets["test"] = predict_dataset
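
    # Note that evaluation and prediction keep two views of the data: the raw examples (for references and
    # original contexts) and the processed features (for model inputs); post-processing needs both.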
|
    # Post-processing: we match the start logits and end logits to answers in the original context.
    def post_processing_function(examples, features, predictions, stage="eval"):
        predictions = postprocess_qa_predictions(
            examples=examples,
            features=features,
            predictions=predictions,
            version_2_with_negative=data_args.version_2_with_negative,
            n_best_size=data_args.n_best_size,
            max_answer_length=data_args.max_answer_length,
            null_score_diff_threshold=data_args.null_score_diff_threshold,
            output_dir=training_args.output_dir,
            prefix=stage,
        )

        # Format the result to the format the metric expects.
        if data_args.version_2_with_negative:
            formatted_predictions = [
                {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
            ]
        else:
            formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

        references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
        return EvalPrediction(predictions=formatted_predictions, label_ids=references)

    metric = evaluate.load("squad_v2" if data_args.version_2_with_negative else "squad")

    def compute_metrics(p: EvalPrediction):
        return metric.compute(predictions=p.predictions, references=p.label_ids)
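
    # For reference, `metric.compute` returns a dict of floats: the "squad" metric reports "exact_match" and
    # "f1", while "squad_v2" adds no-answer-aware variants (e.g. "HasAns_f1").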
|
    with training_args.strategy.scope():
        dataset_options = tf.data.Options()
        dataset_options.experimental_distribute.auto_shard_policy = tf.data.experimental.AutoShardPolicy.OFF
        num_replicas = training_args.strategy.num_replicas_in_sync

        # Load the model, resuming from the detected checkpoint if there is one.
        if checkpoint is None:
            model_path = model_args.model_name_or_path
        else:
            model_path = checkpoint
        model = TFAutoModelForQuestionAnswering.from_pretrained(
            model_path,
            config=config,
            cache_dir=model_args.cache_dir,
            revision=model_args.model_revision,
            use_auth_token=True if model_args.use_auth_token else None,
        )
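
        # Everything that creates variables (the model above and the optimizer below) must be built inside
        # strategy.scope() so that the variables are placed and mirrored correctly across replicas.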
|
        if training_args.do_train:
            training_dataset = model.prepare_tf_dataset(
                processed_datasets["train"],
                shuffle=True,
                batch_size=training_args.per_device_train_batch_size * num_replicas,
                tokenizer=tokenizer,
            )
            training_dataset = training_dataset.with_options(dataset_options)

            num_train_steps = len(training_dataset) * training_args.num_train_epochs
            if training_args.warmup_steps > 0:
                num_warmup_steps = training_args.warmup_steps
            elif training_args.warmup_ratio > 0:
                num_warmup_steps = int(num_train_steps * training_args.warmup_ratio)
            else:
                num_warmup_steps = 0

            optimizer, schedule = create_optimizer(
                init_lr=training_args.learning_rate,
                num_train_steps=num_train_steps,
                num_warmup_steps=num_warmup_steps,
                adam_beta1=training_args.adam_beta1,
                adam_beta2=training_args.adam_beta2,
                adam_epsilon=training_args.adam_epsilon,
                weight_decay_rate=training_args.weight_decay,
                adam_global_clipnorm=training_args.max_grad_norm,
            )

            # No user-specified loss: when labels are passed, the model computes its own QA loss internally
            # and Keras uses it, even though compile() receives no loss argument.
            model.compile(optimizer=optimizer, jit_compile=training_args.xla, metrics=["accuracy"])
        else:
            model.compile(optimizer=None, jit_compile=training_args.xla, metrics=["accuracy"])
            training_dataset = None
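
        # `create_optimizer` returns both the optimizer and its learning-rate schedule; the schedule is
        # already attached to the optimizer, which is why `schedule` is not used again below.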
|
        if training_args.do_eval:
            eval_dataset = model.prepare_tf_dataset(
                processed_datasets["validation"],
                shuffle=False,
                batch_size=training_args.per_device_eval_batch_size * num_replicas,
                tokenizer=tokenizer,
            )
            eval_dataset = eval_dataset.with_options(dataset_options)
        else:
            eval_dataset = None

        if training_args.do_predict:
            predict_dataset = model.prepare_tf_dataset(
                processed_datasets["test"],
                shuffle=False,
                batch_size=training_args.per_device_eval_batch_size * num_replicas,
                tokenizer=tokenizer,
            )
            predict_dataset = predict_dataset.with_options(dataset_options)
        else:
            predict_dataset = None
|
    # If --push_to_hub is passed, figure out a sensible repo name and prepare the model card metadata.
    push_to_hub_model_id = training_args.push_to_hub_model_id
    model_name = model_args.model_name_or_path.split("/")[-1]
    if not push_to_hub_model_id:
        if data_args.dataset_name is not None:
            push_to_hub_model_id = f"{model_name}-finetuned-{data_args.dataset_name}"
        else:
            push_to_hub_model_id = f"{model_name}-finetuned-question-answering"

    model_card_kwargs = {"finetuned_from": model_args.model_name_or_path, "tasks": "question-answering"}
    if data_args.dataset_name is not None:
        model_card_kwargs["dataset_tags"] = data_args.dataset_name
        if data_args.dataset_config_name is not None:
            model_card_kwargs["dataset_args"] = data_args.dataset_config_name
            model_card_kwargs["dataset"] = f"{data_args.dataset_name} {data_args.dataset_config_name}"
        else:
            model_card_kwargs["dataset"] = data_args.dataset_name

    if training_args.push_to_hub:
        callbacks = [
            PushToHubCallback(
                output_dir=training_args.output_dir,
                hub_model_id=push_to_hub_model_id,
                hub_token=training_args.push_to_hub_token,
                tokenizer=tokenizer,
                **model_card_kwargs,
            )
        ]
    else:
        callbacks = []
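
    # PushToHubCallback periodically saves and uploads checkpoints to the Hub during training, and writes a
    # model card from `model_card_kwargs` when training ends.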
|
    if training_args.do_train:
        # model.fit() runs the whole training loop; any callbacks configured above fire as it goes.
        model.fit(training_dataset, epochs=int(training_args.num_train_epochs), callbacks=callbacks)
|
    if training_args.do_eval:
        logger.info("*** Evaluation ***")

        # In this example we compute the QA metrics only at the end of training; if you'd like to compute
        # metrics every epoch that are too complex to be tracked by Keras, you can use the
        # KerasMetricCallback from transformers.keras_callbacks instead.
        eval_predictions = model.predict(eval_dataset)
        if isinstance(eval_predictions.start_logits, tf.RaggedTensor):
            # If predictions are RaggedTensor, we densify them. Since they are logits, padding with 0 is a
            # bad idea: a logit of 0 can end up as quite a high probability, sometimes even the highest in a
            # sample. Instead we pad with a large negative value, which keeps the padding positions masked.
            eval_start_logits = eval_predictions.start_logits.to_tensor(default_value=-1000).numpy()
            eval_end_logits = eval_predictions.end_logits.to_tensor(default_value=-1000).numpy()
        else:
            eval_start_logits = eval_predictions.start_logits
            eval_end_logits = eval_predictions.end_logits

        post_processed_eval = post_processing_function(
            datasets["validation"],
            processed_datasets["validation"],
            (eval_start_logits, eval_end_logits),
        )
        metrics = compute_metrics(post_processed_eval)
        logger.info("Evaluation metrics:")
        # The loop variable is named metric_name so it doesn't shadow the `metric` object that
        # compute_metrics closes over (which would break the prediction block below).
        for metric_name, value in metrics.items():
            logger.info(f"{metric_name}: {value:.3f}")
        if training_args.output_dir is not None:
            output_eval_file = os.path.join(training_args.output_dir, "all_results.json")
            with open(output_eval_file, "w") as writer:
                writer.write(json.dumps(metrics))
|
    if training_args.do_predict:
        logger.info("*** Predict ***")

        test_predictions = model.predict(predict_dataset)
        if isinstance(test_predictions.start_logits, tf.RaggedTensor):
            # Densify RaggedTensor predictions with a large negative value rather than 0, for the same
            # reason as in the evaluation block above.
            test_start_logits = test_predictions.start_logits.to_tensor(default_value=-1000).numpy()
            test_end_logits = test_predictions.end_logits.to_tensor(default_value=-1000).numpy()
        else:
            test_start_logits = test_predictions.start_logits
            test_end_logits = test_predictions.end_logits
        post_processed_test = post_processing_function(
            datasets["test"],
            processed_datasets["test"],
            (test_start_logits, test_end_logits),
        )
        metrics = compute_metrics(post_processed_test)

        logger.info("Test metrics:")
        for metric_name, value in metrics.items():
            logger.info(f"{metric_name}: {value:.3f}")
|
    if training_args.output_dir is not None and not training_args.push_to_hub:
        # If we're not pushing to the Hub, at least save a local copy of the trained model in HF format.
        model.save_pretrained(training_args.output_dir)


if __name__ == "__main__":
    main()