Commit e156b58 (verified)
danielhanchen committed · Parent(s): b7c1571

Add files using upload-large-folder tool

.gitattributes CHANGED
@@ -33,3 +33,11 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ gemma-3-1b-it-Q2_K_L.gguf filter=lfs diff=lfs merge=lfs -text
+ gemma-3-1b-it.BF16.gguf filter=lfs diff=lfs merge=lfs -text
+ gemma-3-1b-it-Q2_K.gguf filter=lfs diff=lfs merge=lfs -text
+ gemma-3-1b-it-Q3_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ gemma-3-1b-it-Q5_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ gemma-3-1b-it-Q4_K_M.gguf filter=lfs diff=lfs merge=lfs -text
+ gemma-3-1b-it-Q6_K.gguf filter=lfs diff=lfs merge=lfs -text
+ gemma-3-1b-it.Q8_0.gguf filter=lfs diff=lfs merge=lfs -text
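The new rules route each GGUF through the git-lfs clean/smudge filters, so the repository stores small pointer files instead of multi-gigabyte blobs. A minimal Python sketch of the same edit, assuming a local checkout and a hypothetical `track_with_lfs` helper (this is not the upload-large-folder tool, just an equivalent in spirit to running `git lfs track <file>` per artifact):

```python
from pathlib import Path

LFS_RULE = "{name} filter=lfs diff=lfs merge=lfs -text"

def track_with_lfs(repo_dir: str, filenames: list[str]) -> None:
    """Append an LFS filter rule to .gitattributes for each file not already tracked."""
    attributes = Path(repo_dir) / ".gitattributes"
    existing = attributes.read_text().splitlines() if attributes.exists() else []
    with attributes.open("a") as fh:
        for name in filenames:
            rule = LFS_RULE.format(name=name)
            if rule not in existing:
                fh.write(rule + "\n")

if __name__ == "__main__":
    track_with_lfs(".", [
        "gemma-3-1b-it-Q2_K_L.gguf",
        "gemma-3-1b-it.BF16.gguf",
        "gemma-3-1b-it-Q2_K.gguf",
        "gemma-3-1b-it-Q3_K_M.gguf",
        "gemma-3-1b-it-Q5_K_M.gguf",
        "gemma-3-1b-it-Q4_K_M.gguf",
        "gemma-3-1b-it-Q6_K.gguf",
        "gemma-3-1b-it.Q8_0.gguf",
    ])
```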
config.json ADDED
@@ -0,0 +1,35 @@
+ {
+ "architectures": [
+ "Gemma3ForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "attn_logit_softcapping": null,
+ "bos_token_id": 2,
+ "cache_implementation": "hybrid",
+ "eos_token_id": 106,
+ "final_logit_softcapping": null,
+ "head_dim": 256,
+ "hidden_activation": "gelu_pytorch_tanh",
+ "hidden_size": 1152,
+ "initializer_range": 0.02,
+ "intermediate_size": 6912,
+ "max_position_embeddings": 32768,
+ "model_type": "gemma3_text",
+ "num_attention_heads": 4,
+ "num_hidden_layers": 26,
+ "num_key_value_heads": 1,
+ "pad_token_id": 0,
+ "query_pre_attn_scalar": 256,
+ "rms_norm_eps": 1e-06,
+ "rope_local_base_freq": 10000,
+ "rope_scaling": null,
+ "rope_theta": 1000000,
+ "sliding_window": 512,
+ "sliding_window_pattern": 6,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.50.0.dev0",
+ "unsloth_fixed": true,
+ "use_cache": true,
+ "vocab_size": 262145
+ }
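For a quick look at the architecture fields added above, the config can be read with the standard library; a minimal sketch, assuming config.json sits in the working directory:

```python
import json

with open("config.json") as fh:
    cfg = json.load(fh)

print("architecture:", cfg["architectures"][0])           # Gemma3ForCausalLM
print("layers:", cfg["num_hidden_layers"])                 # 26
print("hidden size:", cfg["hidden_size"])                  # 1152
print("attention:", cfg["num_attention_heads"], "heads x", cfg["head_dim"], "dims")
print("kv heads:", cfg["num_key_value_heads"])             # 1: all query heads share one KV head
print("context length:", cfg["max_position_embeddings"])   # 32768
print("sliding window:", cfg["sliding_window"],
      "pattern:", cfg["sliding_window_pattern"])           # local/global attention interleave
```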
gemma-3-1b-it-Q2_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d5dac0cec150ea5f83b5c12f3e031f3c7bdddbef2afae098b00dece0333a37fe
+ size 689815488
gemma-3-1b-it-Q2_K_L.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:517550eace390b4a6efa4a20be3431c815ce4e3c9f7eff3013d05168a8c5b46f
+ size 6537440
gemma-3-1b-it-Q3_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d988f418862fc00c930f6c2227ec983a6d35107bf0fca38310d802fa256cfeab
+ size 722417088
gemma-3-1b-it-Q4_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:03c629dcef41c86eff17c2a0d8945b80d7f05d1ecfaa589c4384c85f112d9ba5
+ size 806059200
gemma-3-1b-it-Q5_K_M.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9ed89116a2ea57e73f63e284121f18e630987467bbf4d25f5219ad3c8f781f84
+ size 851346624
gemma-3-1b-it-Q6_K.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e075bc7cb4fe46067a0daf47eda3d9adcd7bd3eab53e2efa80789e5312b38ec0
+ size 1011739584
gemma-3-1b-it.BF16.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a43172069900cb4b0f83dc9f9eb46a8d8cc76c1f7cd4187ace97fae5d22bfa42
+ size 2006575584
gemma-3-1b-it.Q8_0.gguf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:ed80ede9af2d4ee5c53390fe615550f815c40402dd3f06651c3d93e8d0aab8d6
+ size 1069307328
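Each .gguf entry added above is a git-lfs pointer (spec version, sha256 oid, byte size), not the weights themselves; the actual blobs are resolved by `git lfs pull` or the Hub download path. A minimal sketch, with hypothetical file paths, of checking a downloaded GGUF against its pointer:

```python
import hashlib
import os

def parse_lfs_pointer(text: str) -> dict:
    """Parse the 'key value' lines of a git-lfs pointer file into a dict."""
    fields = {}
    for line in text.strip().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    return fields

def verify(pointer_path: str, blob_path: str) -> bool:
    """Return True if the blob's size and sha256 match the pointer's oid/size."""
    with open(pointer_path) as fh:
        pointer = parse_lfs_pointer(fh.read())
    expected_oid = pointer["oid"].removeprefix("sha256:")
    expected_size = int(pointer["size"])

    if os.path.getsize(blob_path) != expected_size:
        return False

    digest = hashlib.sha256()
    with open(blob_path, "rb") as fh:
        for chunk in iter(lambda: fh.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# Hypothetical usage: the pointer text is what this commit records,
# the resolved blob is the downloaded model file.
# verify("pointers/gemma-3-1b-it.Q8_0.gguf", "gemma-3-1b-it.Q8_0.gguf")
```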