ailieus committed on
Commit b39f8cb · verified · 1 Parent(s): edaa40f

Upload model

Files changed (3)
  1. config.json +14 -0
  2. model.py +95 -0
  3. model.safetensors +3 -0
config.json ADDED
@@ -0,0 +1,14 @@
+ {
+   "_name_or_path": "./checkpoints/mcqa_model_downsampled_bf16",
+   "architectures": [
+     "MCQHFModel"
+   ],
+   "auto_map": {
+     "AutoConfig": "model.MyCustomConfig",
+     "AutoModelForCausalLM": "model.MCQHFModel"
+   },
+   "model_type": "mcq_hf_model",
+   "name_model": "apple/OpenELM-450M-Instruct",
+   "torch_dtype": "float32",
+   "transformers_version": "4.40.2"
+ }
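The auto_map block is what makes the checkpoint loadable through the Auto classes: with trust_remote_code=True, transformers fetches model.py from the repo, builds MyCustomConfig, and instantiates MCQHFModel with the weights in model.safetensors. A minimal loading sketch, with the hub repo id left as a placeholder since it is not shown in this commit:

    from transformers import AutoConfig, AutoModelForCausalLM

    repo_id = "<user>/<repo>"  # placeholder: the hub repo this commit was pushed to

    # auto_map routes AutoConfig -> model.MyCustomConfig and
    # AutoModelForCausalLM -> model.MCQHFModel; trust_remote_code=True is
    # required so transformers will execute the repo's model.py.
    config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)
    model = AutoModelForCausalLM.from_pretrained(repo_id, trust_remote_code=True)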
model.py ADDED
@@ -0,0 +1,95 @@
+ import torch
+ import torch.nn as nn
+ from transformers import (
+     PreTrainedModel,
+     PretrainedConfig,
+     AutoModelForCausalLM,
+ )
+
+
+ class MCQModel(nn.Module):
+     def __init__(self, name_model):
+         super().__init__()
+         self.model = AutoModelForCausalLM.from_pretrained(
+             name_model,
+             trust_remote_code=True,
+             output_hidden_states=True,
+         )
+         # 4 output classes for 'A', 'B', 'C', 'D'
+         self.classifier = nn.Linear(self.model.config.model_dim, 4)
+
+     def forward(self, input_ids, attention_mask=None, labels=None,
+                 position_ids=None,
+                 past_key_values=None,
+                 inputs_embeds=None,
+                 use_cache=None,
+                 output_attentions=None,
+                 output_hidden_states=None,
+                 return_dict=None,
+                 cache_position=None):
+         outputs = self.model(
+             input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             output_attentions=output_attentions,
+             cache_position=cache_position,
+             use_cache=use_cache,
+             return_dict=return_dict,
+             output_hidden_states=output_hidden_states,
+         )
+
+         # hidden_states is a tuple with every layer's output;
+         # we only need the last layer's representation of the last token.
+         logits = self.classifier(outputs.hidden_states[-1][:, -1, :])
+         outputs.logits = logits
+
+         loss = None
+         if labels is not None:
+             # labels: [batch_size], logits: [batch_size x num_classes]
+             loss_fct = nn.CrossEntropyLoss()
+             loss = loss_fct(logits, labels)
+         outputs["loss"] = loss
+
+         return outputs
+
+
+ class MyCustomConfig(PretrainedConfig):
+     model_type = "mcq_hf_model"
+
+     def __init__(self, name_model="apple/OpenELM-450M-Instruct", **kwargs):
+         super().__init__(**kwargs)
+         self.name_model = name_model
+
+
+ class MCQHFModel(PreTrainedModel):
+     config_class = MyCustomConfig
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = MCQModel(config.name_model)
+         self.lm_head = None  # no generation head; classification happens inside MCQModel
+
+     def forward(self, input_ids, attention_mask=None, labels=None,
+                 position_ids=None,
+                 past_key_values=None,
+                 inputs_embeds=None,
+                 use_cache=None,
+                 output_attentions=None,
+                 output_hidden_states=None,
+                 return_dict=None,
+                 cache_position=None):
+         return self.model(
+             input_ids,
+             labels=labels,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             output_attentions=output_attentions,
+             cache_position=cache_position,
+             use_cache=use_cache,
+             return_dict=return_dict,
+             output_hidden_states=output_hidden_states,
+         )
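Since MCQModel overwrites outputs.logits with the 4-way classifier scores, getting an answer out of the model is just an argmax over that tensor. A rough inference sketch, assuming model was loaded as in the earlier snippet and that a tokenizer compatible with the OpenELM base model is supplied (OpenELM checkpoints do not bundle one); the tokenizer id and prompt format below are placeholders, not defined by this repo:

    import torch
    from transformers import AutoTokenizer

    # Assumption: any tokenizer compatible with apple/OpenELM-450M-Instruct.
    tok = AutoTokenizer.from_pretrained("<compatible-tokenizer>")

    prompt = "Question: ...\nA. ...\nB. ...\nC. ...\nD. ...\nAnswer:"
    enc = tok(prompt, return_tensors="pt")

    model.eval()
    with torch.no_grad():
        out = model(enc["input_ids"], attention_mask=enc["attention_mask"])

    # out.logits has shape [batch_size, 4]; indices 0-3 map to options 'A'-'D'
    prediction = "ABCD"[out.logits.argmax(dim=-1).item()]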
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:52583f7b7cfc861baff10050d6aaaac5d89b52a53fb73c828e8c1a62213c2f18
+ size 1828761528
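The pointer's payload size is consistent with the float32 torch_dtype recorded in config.json: 1,828,761,528 bytes / 4 bytes per parameter = 457,190,382 parameters, roughly the ~450M-parameter OpenELM backbone plus the small 4-way classifier head.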