Bo1015 committed · verified
Commit 09b7cd2 · Parent(s): 39fdf5d

Delete configuration_xtrimopglm.py

Files changed (1):
  configuration_xtrimopglm.py  +0 -86
configuration_xtrimopglm.py DELETED
@@ -1,86 +0,0 @@
-from transformers import PretrainedConfig
-
-
-class xTrimoPGLMConfig(PretrainedConfig):
-    model_type = "xTrimoPGLM"
-    def __init__(
-        self,
-        num_layers=72,
-        padded_vocab_size=128,
-        hidden_size=10240,
-        ffn_hidden_size=31744,
-        kv_channels=128,
-        num_attention_heads=80,
-        seq_length=2048,
-        hidden_dropout=0.0,
-        attention_dropout=0.0,
-        layernorm_epsilon=1e-5,
-        initializer_range=0.02,
-        glu_activation='geglu',
-        rmsnorm=False,
-        deepnorm=True,
-        apply_residual_connection_post_layernorm=True,
-        post_layer_norm=True,
-        add_bias_linear=True,
-        add_qkv_bias=True,
-        bias_dropout_fusion=True,
-        multi_query_attention=False,
-        multi_query_group_num=1,
-        apply_query_key_layer_scaling=True,
-        attention_softmax_in_fp32=True,
-        fp32_residual_connection=False,
-        quantization_bit=0,
-        rotary_embedding_2d=True,
-        use_pytorch_sdpa=True,
-        is_causal=False,
-        use_cache=True,
-        moe=False,
-        num_experts=0,
-        experts_per_token=0,
-        untie_head=False,
-        head_num=1,
-        **kwargs
-    ):
-
-        if not deepnorm and apply_residual_connection_post_layernorm:
-            print("Warning: deepnorm is False and apply_residual_connection_post_layernorm is True")
-
-        if deepnorm:
-            apply_residual_connection_post_layernorm = True
-
-        self.num_layers = num_layers
-        self.vocab_size = padded_vocab_size
-        self.padded_vocab_size = padded_vocab_size
-        self.hidden_size = hidden_size
-        self.ffn_hidden_size = ffn_hidden_size
-        self.kv_channels = kv_channels
-        self.num_attention_heads = num_attention_heads
-        self.seq_length = seq_length
-        self.hidden_dropout = hidden_dropout
-        self.attention_dropout = attention_dropout
-        self.layernorm_epsilon = layernorm_epsilon
-        self.glu_activation = glu_activation
-        self.initializer_range = initializer_range
-        self.rmsnorm = rmsnorm
-        self.deepnorm = deepnorm
-        self.apply_residual_connection_post_layernorm = apply_residual_connection_post_layernorm
-        self.post_layer_norm = post_layer_norm
-        self.add_bias_linear = add_bias_linear
-        self.add_qkv_bias = add_qkv_bias
-        self.bias_dropout_fusion = bias_dropout_fusion
-        self.multi_query_attention = multi_query_attention
-        self.multi_query_group_num = multi_query_group_num
-        self.apply_query_key_layer_scaling = apply_query_key_layer_scaling
-        self.attention_softmax_in_fp32 = attention_softmax_in_fp32
-        self.fp32_residual_connection = fp32_residual_connection
-        self.quantization_bit = quantization_bit
-        self.rotary_embedding_2d = rotary_embedding_2d
-        self.is_causal = is_causal
-        self.use_cache = use_cache
-        self.use_pytorch_sdpa = use_pytorch_sdpa
-        self.moe = moe
-        self.num_experts = num_experts
-        self.experts_per_token = experts_per_token
-        self.untie_head = untie_head
-        self.head_num = head_num
-        super().__init__(**kwargs)
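
For reference, the deleted class is a standard transformers PretrainedConfig subclass, so it would normally be consumed either by importing it directly or through the custom-code loading path. The sketch below is a minimal illustration, not part of this commit: it assumes a local copy of the deleted configuration_xtrimopglm.py is importable, and the repo id in the commented-out AutoConfig call is a placeholder, not a real repository.

# Minimal sketch (not from this commit): instantiating the deleted config class,
# assuming a local copy of configuration_xtrimopglm.py is on the import path.
from configuration_xtrimopglm import xTrimoPGLMConfig  # hypothetical local copy

# Defaults mirror the deleted file; the overrides below are purely illustrative.
config = xTrimoPGLMConfig(
    num_layers=72,
    hidden_size=10240,
    num_attention_heads=80,
    seq_length=2048,
)
print(config.model_type)         # "xTrimoPGLM"
print(config.padded_vocab_size)  # 128

# Loading through AutoConfig only works while a repo still ships this file as
# custom code; the repo id below is a placeholder, not an actual Hub repository.
# from transformers import AutoConfig
# config = AutoConfig.from_pretrained("your-org/your-xtrimopglm-repo", trust_remote_code=True)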