import os
import torch
import torch.nn as nn
from transformers import PreTrainedModel, HubertConfig, HubertModel
from transformers.file_utils import (
    WEIGHTS_NAME,
    TF2_WEIGHTS_NAME,
    TF_WEIGHTS_NAME,
    cached_path,
    hf_bucket_url,
    is_remote_url,
)
from transformers.utils import logging
from .configuration_emotion_classifier import EmotionClassifierConfig
logger = logging.get_logger(__name__)


class EmotionClassifierHuBERT(PreTrainedModel):
    config_class = EmotionClassifierConfig

    def __init__(self, config):
        super().__init__(config)
        # Initialize HuBERT without pre-trained weights
        hubert_config = HubertConfig.from_pretrained("facebook/hubert-large-ls960-ft")
        self.hubert = HubertModel(hubert_config)
        self.conv1 = nn.Conv1d(in_channels=1024, out_channels=512, kernel_size=3, padding=1)
        self.conv2 = nn.Conv1d(in_channels=512, out_channels=256, kernel_size=3, padding=1)
        # NOTE: nn.TransformerEncoderLayer defaults to sequence-first input (seq, batch, feature),
        # while forward() feeds it batch-first tensors; consider batch_first=True if retraining.
        self.transformer_encoder = nn.TransformerEncoderLayer(d_model=256, nhead=8)
        self.bilstm = nn.LSTM(input_size=256, hidden_size=config.hidden_size_lstm, num_layers=2, batch_first=True, bidirectional=True)
        self.fc = nn.Linear(config.hidden_size_lstm * 2, config.num_classes)

    def forward(self, x):
        # Extract frame-level speech features from HuBERT with gradients disabled
        with torch.no_grad():
            features = self.hubert(x).last_hidden_state
        # (batch, time, 1024) -> (batch, 1024, time) for the Conv1d layers
        features = features.transpose(1, 2)
        x = torch.relu(self.conv1(features))
        x = torch.relu(self.conv2(x))
        # Back to (batch, time, 256) for the transformer encoder and BiLSTM
        x = x.transpose(1, 2)
        x = self.transformer_encoder(x)
        x, _ = self.bilstm(x)
        # Classify from the last time step of the BiLSTM output
        x = self.fc(x[:, -1, :])
        return x

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
        config = kwargs.pop("config", None)
        state_dict = kwargs.pop("state_dict", None)
        cache_dir = kwargs.pop("cache_dir", None)
        from_tf = kwargs.pop("from_tf", False)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        output_loading_info = kwargs.pop("output_loading_info", False)
        local_files_only = kwargs.pop("local_files_only", False)
        use_auth_token = kwargs.pop("use_auth_token", None)
        revision = kwargs.pop("revision", None)
        mirror = kwargs.pop("mirror", None)

        # Load config if we don't provide a configuration
        if not isinstance(config, EmotionClassifierConfig):
            config_path = config if config is not None else pretrained_model_name_or_path
            config, model_kwargs = cls.config_class.from_pretrained(
                config_path,
                *model_args,
                cache_dir=cache_dir,
                return_unused_kwargs=True,
                force_download=force_download,
                resume_download=resume_download,
                proxies=proxies,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
                revision=revision,
                **kwargs,
            )
        else:
            model_kwargs = kwargs

        # Load model
        if pretrained_model_name_or_path is not None:
            pretrained_model_name_or_path = str(pretrained_model_name_or_path)
            if os.path.isdir(pretrained_model_name_or_path):
                if from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")):
                    # Load from a TF 1.0 checkpoint in priority if from_tf
                    archive_file = os.path.join(pretrained_model_name_or_path, TF_WEIGHTS_NAME + ".index")
                elif from_tf and os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
                    # Load from a TF 2.0 checkpoint in priority if from_tf
                    archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
                elif os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
                    # Load from a PyTorch checkpoint
                    archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
                else:
                    raise EnvironmentError(
                        f"Error no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME, TF_WEIGHTS_NAME + '.index']} found in "
                        f"directory {pretrained_model_name_or_path} or '{pretrained_model_name_or_path}' is not a directory."
                    )
            elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
                archive_file = pretrained_model_name_or_path
            else:
                # Load from URL or cache
                archive_file = hf_bucket_url(
                    pretrained_model_name_or_path,
                    filename=WEIGHTS_NAME,
                    revision=revision,
                    mirror=mirror,
                )
            try:
                # Load from URL or cache
                resolved_archive_file = cached_path(
                    archive_file,
                    cache_dir=cache_dir,
                    force_download=force_download,
                    proxies=proxies,
                    resume_download=resume_download,
                    local_files_only=local_files_only,
                    use_auth_token=use_auth_token,
                )
            except EnvironmentError as err:
                logger.error(err)
                msg = (
                    f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
                    f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
                    f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {WEIGHTS_NAME}, {TF2_WEIGHTS_NAME}, {TF_WEIGHTS_NAME}.\n\n"
                )
                raise EnvironmentError(msg)
            if resolved_archive_file == archive_file:
                logger.info(f"loading weights file {archive_file}")
            else:
                logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}")
        else:
            resolved_archive_file = None

        # Initialize the model
        model = cls(config)

        if state_dict is None:
            try:
                state_dict = torch.load(resolved_archive_file, map_location="cpu")
            except Exception:
                raise OSError(
                    f"Unable to load weights from pytorch checkpoint file for '{pretrained_model_name_or_path}' "
                    f"at '{resolved_archive_file}'"
                )

        # Remove the prefix 'module.' from the keys if present (happens when using DataParallel)
        state_dict = {k.replace("module.", ""): v for k, v in state_dict.items()}

        # Load only the custom model weights, excluding HuBERT
        custom_state_dict = {k: v for k, v in state_dict.items() if not k.startswith('hubert.')}
        missing_keys, unexpected_keys = model.load_state_dict(custom_state_dict, strict=False)

        if len(missing_keys) > 0:
            logger.warning(
                f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
                f"and are newly initialized: {missing_keys}\n"
                f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
            )
        if len(unexpected_keys) > 0:
            logger.warning(
                f"Some weights of the model checkpoint at {pretrained_model_name_or_path} were not used when "
                f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
                f"This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
                f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
                f"This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect to be exactly identical "
                f"(initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
            )

        if output_loading_info:
            loading_info = {"missing_keys": missing_keys, "unexpected_keys": unexpected_keys}
            return model, loading_info

        return model
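

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original module: it assumes a local checkpoint
    # directory containing config.json and pytorch_model.bin for this classifier (the path
    # below is hypothetical). Note that from_pretrained() above deliberately skips 'hubert.*'
    # keys, so the HuBERT backbone stays randomly initialized unless loaded separately.
    checkpoint_dir = "./emotion_classifier_checkpoint"  # hypothetical path
    model = EmotionClassifierHuBERT.from_pretrained(checkpoint_dir)
    model.eval()

    # One second of dummy 16 kHz mono audio; replace with a real waveform tensor.
    waveform = torch.randn(1, 16000)
    with torch.no_grad():
        logits = model(waveform)  # shape: (1, config.num_classes)
    print(logits.argmax(dim=-1))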