import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import torch.nn.functional as F
import torch.optim as optim
from transformers import PretrainedConfig
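

# Configuration for a custom BERT classifier with three labels
# (Neutral / Hawkish / Dovish). Subclassing PretrainedConfig lets the settings
# be serialized and reloaded with save_pretrained() / from_pretrained().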
class BertCustomConfig(PretrainedConfig):
    model_type = "bert"

    def __init__(
        self,
        vocab_size=30873,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=0,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        max_length=512,
| id2label={"0": "Neutral", "1": "Hawkish", "2": "Dovish"}, | |
| label2id={"positive": 1, "negative": 2, "neutral": 0}, | |
        hyperparams=None,
        **kwargs
    ):
        super().__init__(pad_token_id=pad_token_id, **kwargs)
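        # Store every setting as an attribute so it is included in the
        # serialized config (config.json) produced by save_pretrained().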
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
        self.max_length = max_length
        self.id2label = id2label
        self.label2id = label2id
        self.hyperparams = hyperparams
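

# Minimal usage sketch (illustrative, not part of the original class): build the
# config, round-trip it through save_pretrained()/from_pretrained(), and read a
# custom field back. The output path and hyperparams values are placeholders.
if __name__ == "__main__":
    config = BertCustomConfig(hyperparams={"learning_rate": 2e-5, "batch_size": 32})
    config.save_pretrained("./bert_custom_config")  # writes config.json
    reloaded = BertCustomConfig.from_pretrained("./bert_custom_config")
    print(reloaded.id2label)     # {'0': 'Neutral', '1': 'Hawkish', '2': 'Dovish'}
    print(reloaded.hyperparams)  # custom fields survive the round trip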