# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning the library models for sequence classification on GLUE (Bert, XLM, XLNet, RoBERTa).""" | |
from __future__ import absolute_import, division, print_function

import argparse
import glob
import logging
import os
import random
import pdb

cwd = os.getcwd()
print(f"Current working dir is {cwd}")

import sys
sys.path.append('./')
pt_path = os.path.join(cwd, 'pytorch_transformers')
sys.path.append(pt_path)
print(f"Pytorch Transformer {pt_path}")
import numpy as np
import torch
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
                              TensorDataset)
from torch.utils.data.distributed import DistributedSampler

from tensorboardX import SummaryWriter
from tqdm import tqdm, trange

from pytorch_transformers import (WEIGHTS_NAME, BertConfig,
                                  BertForSequenceClassification, BertTokenizer,
                                  BertForSequenceClassificationLatentConnector,
                                  RobertaConfig,
                                  RobertaForSequenceClassification,
                                  RobertaTokenizer,
                                  XLMConfig, XLMForSequenceClassification,
                                  XLMTokenizer, XLNetConfig,
                                  XLNetForSequenceClassification,
                                  XLNetTokenizer)
from pytorch_transformers import AdamW, WarmupLinearSchedule

from utils_glue import (compute_metrics, convert_examples_to_features,
                        output_modes, processors)

logger = logging.getLogger(__name__)
ALL_MODELS = sum((tuple(conf.pretrained_config_archive_map.keys())
                  for conf in (BertConfig, XLNetConfig, XLMConfig, RobertaConfig)), ())
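
# Map each --model_type value to its (config, model, tokenizer) classes; only the
# tokenizer is actually used below, to pick the separator token for text pairs.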
MODEL_CLASSES = {
    'bert': (BertConfig, BertForSequenceClassificationLatentConnector, BertTokenizer),
    'xlnet': (XLNetConfig, XLNetForSequenceClassification, XLNetTokenizer),
    'xlm': (XLMConfig, XLMForSequenceClassification, XLMTokenizer),
    'roberta': (RobertaConfig, RobertaForSequenceClassification, RobertaTokenizer),
}


def set_seed(args):
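    """Seed the random, numpy and torch RNGs so runs are reproducible."""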
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)


def load_and_cache_examples(args, task, tokenizer, file_txt, evaluate=False):
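    """Write the training examples of `task` to `file_txt` as plain text, one
    example per line; sentence pairs are joined with the tokenizer's sep token."""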
    if args.local_rank not in [-1, 0] and not evaluate:
        torch.distributed.barrier()  # Make sure only the first process in distributed training processes the dataset; the others will use the cache

    processor = processors[task]()
    output_mode = output_modes[task]
    label_list = processor.get_labels()
    if task in ['mnli', 'mnli-mm'] and args.model_type in ['roberta']:
        # HACK(label indices are swapped in RoBERTa pretrained model)
        label_list[1], label_list[2] = label_list[2], label_list[1]
    examples = processor.get_train_examples(args.data_dir, args.percentage_per_label, args.sample_per_label)

    # Chunyuan: convert examples into text lines here
    # write data in a file.
    for item in examples:
        # pdb.set_trace()
        if item.text_b:
            line = item.text_a + " " + tokenizer.sep_token + " " + item.text_b + "\n"
        else:
            line = item.text_a + " \n"
        file_txt.write(line)
    file_txt.close()

    if args.local_rank == 0 and not evaluate:
        torch.distributed.barrier()  # Release the other processes waiting at the first barrier above


def main():
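    """Parse command-line arguments and dump the training examples of every GLUE
    task under --data_dir into one plain-text file per task in --output_dir."""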
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir", default=None, type=str, required=True,
                        help="The input data dir. Should contain the .tsv files (or other data files) for the task.")
    parser.add_argument("--output_dir", default=None, type=str, required=True,
                        help="The output directory where the model predictions and checkpoints will be written.")
    parser.add_argument('--global_step_eval', type=int, default=661,
                        help="Evaluate the results at the given global step")
parser.add_argument("--model_type", default=None, type=str, required=True, | |
help="Model type selected in the list: " + ", ".join(MODEL_CLASSES.keys())) | |
parser.add_argument("--model_name_or_path", default=None, type=str, required=True, | |
help="Path to pre-trained model or shortcut name selected in the list: " + ", ".join(ALL_MODELS)) | |
## Other parameters | |
parser.add_argument("--config_name", default="", type=str, | |
help="Pretrained config name or path if not the same as model_name") | |
parser.add_argument("--tokenizer_name", default="", type=str, | |
help="Pretrained tokenizer name or path if not the same as model_name") | |
parser.add_argument("--cache_dir", default="", type=str, | |
help="Where do you want to store the pre-trained models downloaded from s3") | |
parser.add_argument("--max_seq_length", default=128, type=int, | |
help="The maximum total input sequence length after tokenization. Sequences longer " | |
"than this will be truncated, sequences shorter will be padded.") | |
parser.add_argument("--do_train", action='store_true', | |
help="Whether to run training.") | |
parser.add_argument("--do_lower_case", action='store_true', | |
help="Set this flag if you are using an uncased model.") | |
parser.add_argument("--percentage_per_label", type=float, default=1.0, | |
help="Set this value (<1.0), if you are using a subset of training dataset.") | |
parser.add_argument("--sample_per_label", type=int, default=-1, | |
help="Set this value, if you are using a subset of training dataset, and a fixed number of samples are specified.") | |
parser.add_argument("--use_freeze", action='store_true', | |
help="Set this flag if you are not updating the model.") | |
parser.add_argument('--logging_steps', type=int, default=50, | |
help="Log every X updates steps.") | |
parser.add_argument('--save_steps', type=int, default=50, | |
help="Save checkpoint every X updates steps.") | |
parser.add_argument("--eval_all_checkpoints", action='store_true', | |
help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number") | |
parser.add_argument("--no_cuda", action='store_true', | |
help="Avoid using CUDA when available") | |
parser.add_argument('--overwrite_output_dir', action='store_true', | |
help="Overwrite the content of the output directory") | |
parser.add_argument('--overwrite_cache', action='store_true', | |
help="Overwrite the cached training and evaluation sets") | |
parser.add_argument('--seed', type=int, default=42, | |
help="random seed for initialization") | |
parser.add_argument("--use_philly", action='store_true', | |
help="Use Philly for computing.") | |
parser.add_argument("--local_rank", type=int, default=-1, | |
help="For distributed training: local_rank") | |
args = parser.parse_args() | |
    if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train and not args.overwrite_output_dir:
        raise ValueError("Output directory ({}) already exists and is not empty. Use --overwrite_output_dir to overwrite it.".format(args.output_dir))
    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device
    # Setup logging
    logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
                        datefmt='%m/%d/%Y %H:%M:%S',
                        level=logging.INFO if args.local_rank in [-1, 0] else logging.WARN)

    # Set seed
    set_seed(args)
    ## Tokenizer
    args.model_type = args.model_type.lower()
    config_class, model_class, tokenizer_class = MODEL_CLASSES[args.model_type]
    tokenizer = tokenizer_class.from_pretrained(args.model_name_or_path, do_lower_case=args.do_lower_case)
# Load pretrained model and tokenizer | |
if args.local_rank not in [-1, 0]: | |
torch.distributed.barrier() # Make sure only the first process in distributed training will download model & vocab | |
if not os.path.isdir(args.output_dir): | |
os.mkdir(args.output_dir) | |
logger.info("Parameters %s", args) | |
    # Prepare the GLUE tasks
    TASK_NAME = ['CoLA', 'SST-2', 'MRPC', 'STS-B', 'QQP', 'MNLI', 'QNLI', 'RTE', 'WNLI']
    parent_path = args.data_dir
    for task_ in TASK_NAME:
        args.data_dir = os.path.join(parent_path, task_)
        args.task_name = task_.lower()
        if args.task_name not in processors:
            raise ValueError("Task not found: %s" % (args.task_name))
        processor = processors[args.task_name]()
        args.output_mode = output_modes[args.task_name]
        args.output_file_name = os.path.join(args.output_dir, f"{args.task_name}.txt")
        logger.info("Dataset input file at %s", args.data_dir)
        logger.info("Dataset output file at %s", args.output_file_name)

        file_txt = open(args.output_file_name, "w", encoding="utf-8")
        load_and_cache_examples(args, args.task_name, tokenizer, file_txt, evaluate=False)
if __name__ == "__main__": | |
main() | |
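
# A hypothetical invocation (the script filename, paths, and model name below are
# placeholders; adjust them to your setup):
#
#   python extract_glue_text.py \
#       --data_dir ./data/glue \
#       --output_dir ./output/glue_text \
#       --model_type bert \
#       --model_name_or_path bert-base-uncased \
#       --do_lower_case
#
# This would write one plain-text file per GLUE task (e.g. cola.txt, sst-2.txt)
# into --output_dir, joining sentence pairs with the BERT tokenizer's [SEP] token.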