ailieus committed (verified)
Commit 5210e74 · 1 Parent(s): 9a622ff

Upload model

Files changed (3)
  1. adapter_config.json +32 -0
  2. adapter_model.safetensors +3 -0
  3. model.py +95 -0
adapter_config.json ADDED
@@ -0,0 +1,32 @@
+ {
+   "alpha_pattern": {},
+   "auto_mapping": null,
+   "base_model_name_or_path": "./checkpoints/mcq_hf_model",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layer_replication": null,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "loftq_config": {},
+   "lora_alpha": 32,
+   "lora_dropout": 0.05,
+   "megatron_config": null,
+   "megatron_core": "megatron.core",
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 16,
+   "rank_pattern": {},
+   "revision": null,
+   "target_modules": [
+     "attn.out_proj",
+     "classifier",
+     "ffn.proj_2",
+     "ffn.proj_1",
+     "attn.qkv_proj"
+   ],
+   "task_type": "CAUSAL_LM",
+   "use_dora": false,
+   "use_rslora": false
+ }
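The config above is a standard PEFT LoRA adapter: rank 16, scaling alpha 32, dropout 0.05, applied to the attention projections (attn.qkv_proj, attn.out_proj), the feed-forward projections (ffn.proj_1, ffn.proj_2) and the classifier head. A minimal sketch of how such an adapter is typically attached, assuming the base checkpoint referenced by base_model_name_or_path is available locally, model.py (shown below) is importable, and the adapter files sit in the current directory; all paths here are illustrative:

# Minimal sketch (not part of this commit): attach the LoRA adapter described by
# adapter_config.json to its base model. The base checkpoint path comes from the
# config and is not included in this repository; the adapter directory is assumed
# to be the current working directory.
from peft import PeftModel
from model import MCQHFModel  # custom model class defined in model.py below

base = MCQHFModel.from_pretrained("./checkpoints/mcq_hf_model")
model = PeftModel.from_pretrained(base, ".")  # folder with adapter_config.json + adapter_model.safetensors
model.eval()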
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24b4c7db13193697e22635e38f93429c53087f5661d55ae54fdc474351a6603d
+ size 24985072
model.py ADDED
@@ -0,0 +1,95 @@
+ import torch
+ import torch.nn as nn
+ from transformers import (
+     PreTrainedModel,
+     PretrainedConfig,
+     AutoModelForCausalLM,
+ )
+
+
+ class MCQModel(nn.Module):
+     """Wraps a causal LM backbone and adds a 4-way classification head for MCQ answers."""
+
+     def __init__(self, name_model):
+         super().__init__()
+         self.model = AutoModelForCausalLM.from_pretrained(
+             name_model,
+             trust_remote_code=True,
+             output_hidden_states=True,
+         )
+         # 4 classes, one per answer choice 'A', 'B', 'C', 'D'
+         self.classifier = nn.Linear(self.model.config.model_dim, 4)
+
+     def forward(self, input_ids, attention_mask=None, labels=None,
+                 position_ids=None,
+                 past_key_values=None,
+                 inputs_embeds=None,
+                 use_cache=None,
+                 output_attentions=None,
+                 output_hidden_states=None,
+                 return_dict=None,
+                 cache_position=None):
+
+         outputs = self.model(
+             input_ids,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             output_attentions=output_attentions,
+             cache_position=cache_position,
+             use_cache=use_cache,
+             return_dict=return_dict,
+             output_hidden_states=output_hidden_states,
+         )
+
+         # hidden_states is a tuple with one entry per layer; we only need the
+         # last layer's representation of the last token as the sequence summary.
+         logits = self.classifier(outputs.hidden_states[-1][:, -1, :])
+         outputs.logits = logits
+
+         loss = None
+         if labels is not None:
+             # labels: [batch_size], logits: [batch_size, num_classes]
+             loss_fct = nn.CrossEntropyLoss()
+             loss = loss_fct(logits, labels)
+             outputs["loss"] = loss
+
+         return outputs
+
+
+ class MyCustomConfig(PretrainedConfig):
+     model_type = "mcq_hf_model"
+
+     def __init__(self, name_model="apple/OpenELM-450M-Instruct", **kwargs):
+         super().__init__(**kwargs)
+         self.name_model = name_model
+
+
+ class MCQHFModel(PreTrainedModel):
+     """PreTrainedModel wrapper so the MCQ classifier can be saved and loaded with from_pretrained."""
+
+     config_class = MyCustomConfig
+
+     def __init__(self, config):
+         super().__init__(config)
+         self.model = MCQModel(config.name_model)
+         self.lm_head = None
+
+     def forward(self, input_ids, attention_mask=None, labels=None,
+                 position_ids=None,
+                 past_key_values=None,
+                 inputs_embeds=None,
+                 use_cache=None,
+                 output_attentions=None,
+                 output_hidden_states=None,
+                 return_dict=None,
+                 cache_position=None):
+
+         return self.model(
+             input_ids,
+             labels=labels,
+             attention_mask=attention_mask,
+             position_ids=position_ids,
+             past_key_values=past_key_values,
+             inputs_embeds=inputs_embeds,
+             output_attentions=output_attentions,
+             cache_position=cache_position,
+             use_cache=use_cache,
+             return_dict=return_dict,
+             output_hidden_states=output_hidden_states,
+         )
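model.py defines the custom config/model pair (MyCustomConfig, MCQHFModel) that wraps a causal LM backbone with a 4-way MCQ classification head. A minimal usage sketch, assuming the file is importable locally; the tokenizer choice and example prompt are illustrative assumptions, not part of this commit:

# Minimal usage sketch (not part of this commit). The tokenizer and prompt are
# assumptions for illustration only.
import torch
from transformers import AutoTokenizer
from model import MyCustomConfig, MCQHFModel

config = MyCustomConfig(name_model="apple/OpenELM-450M-Instruct")
model = MCQHFModel(config)  # downloads the OpenELM backbone and adds a 4-way head
model.eval()

# OpenELM ships no tokenizer of its own; the Llama-2 tokenizer is commonly used with it.
tokenizer = AutoTokenizer.from_pretrained("meta-llama/Llama-2-7b-hf")
prompt = "Question: ...\nA) ...\nB) ...\nC) ...\nD) ...\nAnswer:"
inputs = tokenizer(prompt, return_tensors="pt")

with torch.no_grad():
    out = model(input_ids=inputs["input_ids"], attention_mask=inputs["attention_mask"])

print(out.logits.shape)  # torch.Size([1, 4]) -> one score per answer choice A-D

If the model should also be discoverable through the auto classes, the pair can be registered with AutoConfig.register("mcq_hf_model", MyCustomConfig) and AutoModelForCausalLM.register(MyCustomConfig, MCQHFModel).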