Upload AutoEncoder

- config.json +3 -0
- model.safetensors +1 -1
- modeling_autoencoder.py +3 -0
config.json
CHANGED
@@ -7,15 +7,18 @@
     "AutoModel": "modeling_autoencoder.AutoEncoder"
   },
   "bidirectional": false,
+  "bos_token_id": 1,
   "compression_rate": 0.5,
   "dropout_rate": 0.1,
   "embed": false,
+  "eos_token_id": 2,
   "input_dim": 128,
   "latent_dim": 64,
   "layer_types": "linear",
   "max_position": false,
   "model_type": "autoencoder",
   "num_layers": 3,
+  "pad_token_id": 0,
   "torch_dtype": "float32",
   "transformers_version": "4.35.2",
   "vocab_size": false
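The three added keys register the special token IDs directly in the model config. A minimal sketch of how they surface after loading, assuming the custom config class is also exposed through `auto_map` and using a placeholder repo ID:

```python
from transformers import AutoConfig

# Placeholder repo ID; substitute the actual Hub repository for this model.
config = AutoConfig.from_pretrained(
    "user/autoencoder",
    trust_remote_code=True,  # the config/model classes live in modeling_autoencoder.py
)

# The keys added in this commit become ordinary config attributes.
print(config.pad_token_id)  # 0
print(config.bos_token_id)  # 1
print(config.eos_token_id)  # 2
```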
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:359d4bc07fabf4f4a00c60c8a28a185ff7813a185a6f620d42884833c0546e0e
 size 133840
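The weights are stored through Git LFS, so the committed file is only a pointer; the `oid sha256:` line is the hash of the actual 133840-byte blob. A minimal sketch of verifying a locally downloaded copy against that hash (the local path is an assumption):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream in chunks so large checkpoints never need to fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid from the LFS pointer committed here.
expected = "359d4bc07fabf4f4a00c60c8a28a185ff7813a185a6f620d42884833c0546e0e"
print(sha256_of("model.safetensors") == expected)  # True if the download is intact
```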
modeling_autoencoder.py
CHANGED
@@ -116,6 +116,9 @@ class AutoEncoderConfig(PretrainedConfig):
         self.embed = embed
         self.vocab_size = vocab_size
         self.max_position = max_position
+        self.pad_token_id = pad_token_id
+        self.bos_token_id = bos_token_id
+        self.eos_token_id = eos_token_id
 
         if self.embed:
             if not self.vocab_size and isinstance(self.vocab_size, int):