paturi1710 committed
Commit f4c90dd
1 Parent(s): 62b6722

Upload model

Files changed (3)
  1. README.md +9 -0
  2. adapter_config.json +130 -0
  3. adapter_model.bin +3 -0
README.md ADDED
@@ -0,0 +1,9 @@
+ ---
+ library_name: peft
+ ---
+ ## Training procedure
+
+ ### Framework versions
+
+
+ - PEFT 0.5.0
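The README records only the PEFT version, so for context: an adapter saved in this layout is typically loaded by wrapping the base model with PEFT. A minimal sketch, assuming peft 0.5.0; the repo id is a placeholder, since the diff does not name the repository this commit belongs to:

```python
from peft import PeftModel
from transformers import Pix2StructForConditionalGeneration, Pix2StructProcessor

# Base checkpoint recorded in adapter_config.json below.
base = Pix2StructForConditionalGeneration.from_pretrained("google/pix2struct-docvqa-base")
processor = Pix2StructProcessor.from_pretrained("google/pix2struct-docvqa-base")

# "paturi1710/pix2struct-docvqa-lora" is a placeholder repo id,
# not stated anywhere in this diff.
model = PeftModel.from_pretrained(base, "paturi1710/pix2struct-docvqa-lora")
model.eval()  # the saved config sets inference_mode to true
```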
adapter_config.json ADDED
@@ -0,0 +1,130 @@
+ {
+   "auto_mapping": {
+     "base_model_class": "Pix2StructForConditionalGeneration",
+     "parent_library": "transformers.models.pix2struct.modeling_pix2struct"
+   },
+   "base_model_name_or_path": "google/pix2struct-docvqa-base",
+   "bias": "lora_only",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 8,
+   "lora_dropout": 0.1,
+   "modules_to_save": [],
+   "peft_type": "LORA",
+   "r": 4,
+   "revision": null,
+   "target_modules": [
+     "decoder.layer.0.self_attention.attention.query",
+     "decoder.layer.0.self_attention.attention.key",
+     "decoder.layer.0.self_attention.attention.value",
+     "decoder.layer.0.encoder_decoder_attention.attention.query",
+     "decoder.layer.0.encoder_decoder_attention.attention.key",
+     "decoder.layer.0.encoder_decoder_attention.attention.value",
+     "encoder.encoder.layer.0.attention.query",
+     "encoder.encoder.layer.0.attention.key",
+     "encoder.encoder.layer.0.attention.value",
+     "decoder.layer.1.self_attention.attention.query",
+     "decoder.layer.1.self_attention.attention.key",
+     "decoder.layer.1.self_attention.attention.value",
+     "decoder.layer.1.encoder_decoder_attention.attention.query",
+     "decoder.layer.1.encoder_decoder_attention.attention.key",
+     "decoder.layer.1.encoder_decoder_attention.attention.value",
+     "encoder.encoder.layer.1.attention.query",
+     "encoder.encoder.layer.1.attention.key",
+     "encoder.encoder.layer.1.attention.value",
+     "decoder.layer.2.self_attention.attention.query",
+     "decoder.layer.2.self_attention.attention.key",
+     "decoder.layer.2.self_attention.attention.value",
+     "decoder.layer.2.encoder_decoder_attention.attention.query",
+     "decoder.layer.2.encoder_decoder_attention.attention.key",
+     "decoder.layer.2.encoder_decoder_attention.attention.value",
+     "encoder.encoder.layer.2.attention.query",
+     "encoder.encoder.layer.2.attention.key",
+     "encoder.encoder.layer.2.attention.value",
+     "decoder.layer.3.self_attention.attention.query",
+     "decoder.layer.3.self_attention.attention.key",
+     "decoder.layer.3.self_attention.attention.value",
+     "decoder.layer.3.encoder_decoder_attention.attention.query",
+     "decoder.layer.3.encoder_decoder_attention.attention.key",
+     "decoder.layer.3.encoder_decoder_attention.attention.value",
+     "encoder.encoder.layer.3.attention.query",
+     "encoder.encoder.layer.3.attention.key",
+     "encoder.encoder.layer.3.attention.value",
+     "decoder.layer.4.self_attention.attention.query",
+     "decoder.layer.4.self_attention.attention.key",
+     "decoder.layer.4.self_attention.attention.value",
+     "decoder.layer.4.encoder_decoder_attention.attention.query",
+     "decoder.layer.4.encoder_decoder_attention.attention.key",
+     "decoder.layer.4.encoder_decoder_attention.attention.value",
+     "encoder.encoder.layer.4.attention.query",
+     "encoder.encoder.layer.4.attention.key",
+     "encoder.encoder.layer.4.attention.value",
+     "decoder.layer.5.self_attention.attention.query",
+     "decoder.layer.5.self_attention.attention.key",
+     "decoder.layer.5.self_attention.attention.value",
+     "decoder.layer.5.encoder_decoder_attention.attention.query",
+     "decoder.layer.5.encoder_decoder_attention.attention.key",
+     "decoder.layer.5.encoder_decoder_attention.attention.value",
+     "encoder.encoder.layer.5.attention.query",
+     "encoder.encoder.layer.5.attention.key",
+     "encoder.encoder.layer.5.attention.value",
+     "decoder.layer.6.self_attention.attention.query",
+     "decoder.layer.6.self_attention.attention.key",
+     "decoder.layer.6.self_attention.attention.value",
+     "decoder.layer.6.encoder_decoder_attention.attention.query",
+     "decoder.layer.6.encoder_decoder_attention.attention.key",
+     "decoder.layer.6.encoder_decoder_attention.attention.value",
+     "encoder.encoder.layer.6.attention.query",
+     "encoder.encoder.layer.6.attention.key",
+     "encoder.encoder.layer.6.attention.value",
+     "decoder.layer.7.self_attention.attention.query",
+     "decoder.layer.7.self_attention.attention.key",
+     "decoder.layer.7.self_attention.attention.value",
+     "decoder.layer.7.encoder_decoder_attention.attention.query",
+     "decoder.layer.7.encoder_decoder_attention.attention.key",
+     "decoder.layer.7.encoder_decoder_attention.attention.value",
+     "encoder.encoder.layer.7.attention.query",
+     "encoder.encoder.layer.7.attention.key",
+     "encoder.encoder.layer.7.attention.value",
+     "decoder.layer.8.self_attention.attention.query",
+     "decoder.layer.8.self_attention.attention.key",
+     "decoder.layer.8.self_attention.attention.value",
+     "decoder.layer.8.encoder_decoder_attention.attention.query",
+     "decoder.layer.8.encoder_decoder_attention.attention.key",
+     "decoder.layer.8.encoder_decoder_attention.attention.value",
+     "encoder.encoder.layer.8.attention.query",
+     "encoder.encoder.layer.8.attention.key",
+     "encoder.encoder.layer.8.attention.value",
+     "decoder.layer.9.self_attention.attention.query",
+     "decoder.layer.9.self_attention.attention.key",
+     "decoder.layer.9.self_attention.attention.value",
+     "decoder.layer.9.encoder_decoder_attention.attention.query",
+     "decoder.layer.9.encoder_decoder_attention.attention.key",
+     "decoder.layer.9.encoder_decoder_attention.attention.value",
+     "encoder.encoder.layer.9.attention.query",
+     "encoder.encoder.layer.9.attention.key",
+     "encoder.encoder.layer.9.attention.value",
+     "decoder.layer.10.self_attention.attention.query",
+     "decoder.layer.10.self_attention.attention.key",
+     "decoder.layer.10.self_attention.attention.value",
+     "decoder.layer.10.encoder_decoder_attention.attention.query",
+     "decoder.layer.10.encoder_decoder_attention.attention.key",
+     "decoder.layer.10.encoder_decoder_attention.attention.value",
+     "encoder.encoder.layer.10.attention.query",
+     "encoder.encoder.layer.10.attention.key",
+     "encoder.encoder.layer.10.attention.value",
+     "decoder.layer.11.self_attention.attention.query",
+     "decoder.layer.11.self_attention.attention.key",
+     "decoder.layer.11.self_attention.attention.value",
+     "decoder.layer.11.encoder_decoder_attention.attention.query",
+     "decoder.layer.11.encoder_decoder_attention.attention.key",
+     "decoder.layer.11.encoder_decoder_attention.attention.value",
+     "encoder.encoder.layer.11.attention.query",
+     "encoder.encoder.layer.11.attention.key",
+     "encoder.encoder.layer.11.attention.value"
+   ],
+   "task_type": null
+ }
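The 108 target_modules follow a regular pattern: the query/key/value projections of the decoder self-attention, the decoder cross-attention, and the encoder self-attention, across all 12 layers. A sketch of how an equivalent LoraConfig could be built programmatically from that pattern (the loop bounds and hyperparameters mirror the JSON above):

```python
from peft import LoraConfig

# Query/key/value projections in three attention blocks per layer,
# for all 12 layers: 12 * 3 * 3 = 108 modules, matching the list above.
target_modules = []
for i in range(12):
    for block in (
        f"decoder.layer.{i}.self_attention.attention",
        f"decoder.layer.{i}.encoder_decoder_attention.attention",
        f"encoder.encoder.layer.{i}.attention",
    ):
        target_modules += [f"{block}.{proj}" for proj in ("query", "key", "value")]

config = LoraConfig(
    r=4,
    lora_alpha=8,
    lora_dropout=0.1,
    bias="lora_only",
    target_modules=target_modules,
)
```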
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a70f1dcdadf3b8ed5f9066b70249fa381afe4276a3bb8f4e94fc84da5691b98f
+ size 2732013
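The weights file is stored as a Git LFS pointer, so the binary itself is fetched on clone or download. A quick check that a downloaded adapter_model.bin matches this pointer (the local path is an assumption):

```python
import hashlib
import os

# Compare the local file against the size and oid in the LFS pointer above.
path = "adapter_model.bin"  # assumed local download location
assert os.path.getsize(path) == 2732013
with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()
assert digest == "a70f1dcdadf3b8ed5f9066b70249fa381afe4276a3bb8f4e94fc84da5691b98f"
```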