prasadsachin committed
Commit d8235aa · verified · 1 Parent(s): f58f17a

Upload folder using huggingface_hub
README.md ADDED
@@ -0,0 +1,25 @@
+ ---
+ library_name: keras-hub
+ pipeline_tag: text-generation
+ ---
+ This is a [`Qwen3` model](https://keras.io/api/keras_hub/models/qwen3) uploaded with the KerasHub library; it can be used with the JAX, TensorFlow, and PyTorch backends.
+ This model targets the `CausalLM` task.
+
+ Model config:
+ * **name:** qwen3_backbone
+ * **trainable:** True
+ * **vocabulary_size:** 151936
+ * **num_layers:** 28
+ * **num_query_heads:** 16
+ * **hidden_dim:** 1024
+ * **head_dim:** 128
+ * **intermediate_dim:** 3072
+ * **rope_max_wavelength:** 1000000
+ * **rope_scaling_factor:** 1.0
+ * **num_key_value_heads:** 8
+ * **layer_norm_epsilon:** 1e-06
+ * **dropout:** 0.0
+ * **tie_word_embeddings:** True
+ * **sliding_window_size:** None
+
+ This model card has been generated automatically and should be completed by the model author. See the [Model Cards documentation](https://huggingface.co/docs/hub/model-cards) for more information.
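
As a usage sketch (not part of the uploaded card): a KerasHub checkpoint laid out like this one can typically be loaded straight from the Hub and sampled from. The repository id below is a placeholder, since the actual path of this upload is not shown in the diff.

```python
import keras_hub

# Placeholder repo id -- substitute the real Hugging Face path of this upload.
preset = "hf://<namespace>/<repo_name>"

# CausalLM.from_preset() dispatches on the registered class in task.json
# ("keras_hub>Qwen3CausalLM") and restores the backbone, tokenizer, and
# preprocessor from the files in this repository.
causal_lm = keras_hub.models.CausalLM.from_preset(preset)

# Generate a short completion; this works on the JAX, TensorFlow, or PyTorch backend.
print(causal_lm.generate("What is Keras?", max_length=128))
```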
assets/tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
assets/tokenizer/vocabulary.json ADDED
The diff for this file is too large to render. See raw diff
 
config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "module": "keras_hub.src.models.qwen3.qwen3_backbone",
+   "class_name": "Qwen3Backbone",
+   "config": {
+     "name": "qwen3_backbone",
+     "trainable": true,
+     "vocabulary_size": 151936,
+     "num_layers": 28,
+     "num_query_heads": 16,
+     "hidden_dim": 1024,
+     "head_dim": 128,
+     "intermediate_dim": 3072,
+     "rope_max_wavelength": 1000000,
+     "rope_scaling_factor": 1.0,
+     "num_key_value_heads": 8,
+     "layer_norm_epsilon": 1e-06,
+     "dropout": 0.0,
+     "tie_word_embeddings": true,
+     "sliding_window_size": null
+   },
+   "registered_name": "keras_hub>Qwen3Backbone"
+ }
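
config.json is a standard Keras 3 serialization record (module / class_name / config / registered_name). As a minimal sketch, assuming a local copy of this repository, the backbone architecture (without weights) could be rebuilt from it via the Keras object registry:

```python
import json

import keras
import keras_hub  # noqa: F401 -- importing registers "keras_hub>Qwen3Backbone"

# Read the serialized backbone description shipped in this repo.
with open("config.json") as f:
    spec = json.load(f)

# Rebuild the architecture; weights would still need to be restored separately
# (they live in model.weights.h5 in this repo).
backbone = keras.saving.deserialize_keras_object(spec)
backbone.summary()
```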
metadata.json ADDED
@@ -0,0 +1,9 @@
+ {
+   "keras_version": "3.10.0.dev2025071003",
+   "keras_hub_version": "0.22.0.dev0",
+   "parameter_count": 596049920,
+   "date_saved": "2025-07-10@17:20:20",
+   "tasks": [
+     "CausalLM"
+   ]
+ }
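
The reported parameter_count can be cross-checked against the hyperparameters in config.json. The sketch below assumes the usual Qwen3 layer layout (grouped-query attention with per-head RMSNorm on queries and keys, a gated MLP with gate/up/down projections, bias-free RMSNorm, and embeddings tied to the output head); under those assumptions the total reproduces 596,049,920 exactly.

```python
# Hyperparameters taken from config.json.
vocab, hidden, layers = 151936, 1024, 28
q_heads, kv_heads, head_dim, inter = 16, 8, 128, 3072

embedding = vocab * hidden                        # token embeddings (tied output head)
attention = (hidden * q_heads * head_dim          # query projection
             + 2 * hidden * kv_heads * head_dim   # key + value projections
             + q_heads * head_dim * hidden        # output projection
             + 2 * head_dim)                      # q-norm + k-norm (RMSNorm scales)
mlp = 3 * hidden * inter                          # gate, up, and down projections
norms = 2 * hidden                                # pre-attention + pre-MLP RMSNorm

total = embedding + layers * (attention + mlp + norms) + hidden  # + final RMSNorm
print(total)  # 596049920
```

At float32 that is roughly 596,049,920 × 4 bytes ≈ 2.38 GB, consistent with the size of model.weights.h5 below.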
model.weights.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6b0f8e9bf7fc503ffab82e5499ed4ce7f7bf278afe3be3e6f91caf7bd8119278
+ size 2385316824
preprocessor.json ADDED
@@ -0,0 +1,69 @@
+ {
+   "module": "keras_hub.src.models.qwen3.qwen3_causal_lm_preprocessor",
+   "class_name": "Qwen3CausalLMPreprocessor",
+   "config": {
+     "name": "qwen3_causal_lm_preprocessor_2",
+     "trainable": true,
+     "dtype": {
+       "module": "keras",
+       "class_name": "DTypePolicy",
+       "config": {
+         "name": "float32"
+       },
+       "registered_name": null
+     },
+     "tokenizer": {
+       "module": "keras_hub.src.models.qwen3.qwen3_tokenizer",
+       "class_name": "Qwen3Tokenizer",
+       "config": {
+         "name": "qwen3_tokenizer",
+         "trainable": true,
+         "dtype": {
+           "module": "keras",
+           "class_name": "DTypePolicy",
+           "config": {
+             "name": "int32"
+           },
+           "registered_name": null
+         },
+         "config_file": "tokenizer.json",
+         "sequence_length": null,
+         "add_prefix_space": false,
+         "unsplittable_tokens": [
+           "<|image_pad|>",
+           "<|endoftext|>",
+           "<tool_response>",
+           "<|vision_start|>",
+           "<|object_ref_end|>",
+           "<|repo_name|>",
+           "<|box_start|>",
+           "<|vision_end|>",
+           "<|quad_end|>",
+           "<|file_sep|>",
+           "<|object_ref_start|>",
+           "<|video_pad|>",
+           "<|fim_prefix|>",
+           "<|fim_suffix|>",
+           "<|fim_middle|>",
+           "<|box_end|>",
+           "</tool_call>",
+           "<tool_call>",
+           "</think>",
+           "<|quad_start|>",
+           "<|im_end|>",
+           "<think>",
+           "<|im_start|>",
+           "<|fim_pad|>",
+           "<|vision_pad|>",
+           "</tool_response>"
+         ]
+       },
+       "registered_name": "keras_hub>Qwen3Tokenizer"
+     },
+     "config_file": "preprocessor.json",
+     "sequence_length": 1024,
+     "add_start_token": true,
+     "add_end_token": true
+   },
+   "registered_name": "keras_hub>Qwen3CausalLMPreprocessor"
+ }
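
For illustration only, the preprocessor serialized above (sequence_length 1024, start and end tokens added) could be exercised on its own roughly as follows; the preset path is again a placeholder.

```python
import keras_hub

# Placeholder preset path -- replace with the actual Hub repo or a local directory.
preset = "hf://<namespace>/<repo_name>"

# Restores the Qwen3CausalLMPreprocessor together with its Qwen3Tokenizer
# from preprocessor.json and tokenizer.json.
preprocessor = keras_hub.models.CausalLMPreprocessor.from_preset(preset)

# For causal LM training, the preprocessor returns model inputs plus shifted
# labels and a sample-weight mask.
x, y, sample_weight = preprocessor(["KerasHub makes sharing Keras models easy."])
print(x["token_ids"].shape)     # (1, 1024) after padding/truncation
print(x["padding_mask"].shape)  # (1, 1024)
```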
task.json ADDED
@@ -0,0 +1,99 @@
+ {
+   "module": "keras_hub.src.models.qwen3.qwen3_causal_lm",
+   "class_name": "Qwen3CausalLM",
+   "config": {
+     "backbone": {
+       "module": "keras_hub.src.models.qwen3.qwen3_backbone",
+       "class_name": "Qwen3Backbone",
+       "config": {
+         "name": "qwen3_backbone",
+         "trainable": true,
+         "vocabulary_size": 151936,
+         "num_layers": 28,
+         "num_query_heads": 16,
+         "hidden_dim": 1024,
+         "head_dim": 128,
+         "intermediate_dim": 3072,
+         "rope_max_wavelength": 1000000,
+         "rope_scaling_factor": 1.0,
+         "num_key_value_heads": 8,
+         "layer_norm_epsilon": 1e-06,
+         "dropout": 0.0,
+         "tie_word_embeddings": true,
+         "sliding_window_size": null
+       },
+       "registered_name": "keras_hub>Qwen3Backbone"
+     },
+     "preprocessor": {
+       "module": "keras_hub.src.models.qwen3.qwen3_causal_lm_preprocessor",
+       "class_name": "Qwen3CausalLMPreprocessor",
+       "config": {
+         "name": "qwen3_causal_lm_preprocessor_2",
+         "trainable": true,
+         "dtype": {
+           "module": "keras",
+           "class_name": "DTypePolicy",
+           "config": {
+             "name": "float32"
+           },
+           "registered_name": null
+         },
+         "tokenizer": {
+           "module": "keras_hub.src.models.qwen3.qwen3_tokenizer",
+           "class_name": "Qwen3Tokenizer",
+           "config": {
+             "name": "qwen3_tokenizer",
+             "trainable": true,
+             "dtype": {
+               "module": "keras",
+               "class_name": "DTypePolicy",
+               "config": {
+                 "name": "int32"
+               },
+               "registered_name": null
+             },
+             "config_file": "tokenizer.json",
+             "sequence_length": null,
+             "add_prefix_space": false,
+             "unsplittable_tokens": [
+               "<|image_pad|>",
+               "<|endoftext|>",
+               "<tool_response>",
+               "<|vision_start|>",
+               "<|object_ref_end|>",
+               "<|repo_name|>",
+               "<|box_start|>",
+               "<|vision_end|>",
+               "<|quad_end|>",
+               "<|file_sep|>",
+               "<|object_ref_start|>",
+               "<|video_pad|>",
+               "<|fim_prefix|>",
+               "<|fim_suffix|>",
+               "<|fim_middle|>",
+               "<|box_end|>",
+               "</tool_call>",
+               "<tool_call>",
+               "</think>",
+               "<|quad_start|>",
+               "<|im_end|>",
+               "<think>",
+               "<|im_start|>",
+               "<|fim_pad|>",
+               "<|vision_pad|>",
+               "</tool_response>"
+             ]
+           },
+           "registered_name": "keras_hub>Qwen3Tokenizer"
+         },
+         "config_file": "preprocessor.json",
+         "sequence_length": 1024,
+         "add_start_token": true,
+         "add_end_token": true
+       },
+       "registered_name": "keras_hub>Qwen3CausalLMPreprocessor"
+     },
+     "name": "qwen3_causal_lm"
+   },
+   "registered_name": "keras_hub>Qwen3CausalLM"
+ }
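
task.json ties the backbone and preprocessor above into a single Qwen3CausalLM task. As a hedged sketch of typical use (placeholder preset path again), the decoding strategy can be changed by recompiling with a different KerasHub sampler before generating:

```python
import keras_hub

causal_lm = keras_hub.models.CausalLM.from_preset("hf://<namespace>/<repo_name>")

# compile(sampler=...) accepts a string name or a keras_hub.samplers instance;
# here we swap in top-k sampling with a mild temperature.
causal_lm.compile(sampler=keras_hub.samplers.TopKSampler(k=40, temperature=0.7))

print(causal_lm.generate("Write a haiku about Keras.", max_length=64))
```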
tokenizer.json ADDED
@@ -0,0 +1,48 @@
+ {
+   "module": "keras_hub.src.models.qwen3.qwen3_tokenizer",
+   "class_name": "Qwen3Tokenizer",
+   "config": {
+     "name": "qwen3_tokenizer",
+     "trainable": true,
+     "dtype": {
+       "module": "keras",
+       "class_name": "DTypePolicy",
+       "config": {
+         "name": "int32"
+       },
+       "registered_name": null
+     },
+     "config_file": "tokenizer.json",
+     "sequence_length": null,
+     "add_prefix_space": false,
+     "unsplittable_tokens": [
+       "<|image_pad|>",
+       "<|endoftext|>",
+       "<tool_response>",
+       "<|vision_start|>",
+       "<|object_ref_end|>",
+       "<|repo_name|>",
+       "<|box_start|>",
+       "<|vision_end|>",
+       "<|quad_end|>",
+       "<|file_sep|>",
+       "<|object_ref_start|>",
+       "<|video_pad|>",
+       "<|fim_prefix|>",
+       "<|fim_suffix|>",
+       "<|fim_middle|>",
+       "<|box_end|>",
+       "</tool_call>",
+       "<tool_call>",
+       "</think>",
+       "<|quad_start|>",
+       "<|im_end|>",
+       "<think>",
+       "<|im_start|>",
+       "<|fim_pad|>",
+       "<|vision_pad|>",
+       "</tool_response>"
+     ]
+   },
+   "registered_name": "keras_hub>Qwen3Tokenizer"
+ }
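
The unsplittable tokens listed above include the ChatML-style markers `<|im_start|>` and `<|im_end|>` that Qwen instruction-tuned checkpoints use to delimit chat turns. Purely as an illustration (no chat template ships with this repo, and the card does not say whether this checkpoint is instruction-tuned), a prompt in that style could be assembled like so:

```python
def format_chatml(user_message, system_message="You are a helpful assistant."):
    """Build a ChatML-style prompt from the special tokens in tokenizer.json."""
    return (
        f"<|im_start|>system\n{system_message}<|im_end|>\n"
        f"<|im_start|>user\n{user_message}<|im_end|>\n"
        f"<|im_start|>assistant\n"
    )

# Because these markers are unsplittable, the tokenizer maps each one to a
# single token id; the prompt would then be passed to causal_lm.generate().
print(format_chatml("Summarize what KerasHub does in one sentence."))
```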