Upload folder using huggingface_hub
- adapter_0.pt +3 -0
- adapter_config.json +1 -0
- config.json +1 -0
- hf_model_0001_0.pt +3 -0
- hf_model_0002_0.pt +3 -0
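
For context, all five files can be pushed in a single commit with huggingface_hub's upload_folder API, as the commit message suggests. A minimal sketch, assuming a local checkpoint directory and a placeholder repo id:

from huggingface_hub import HfApi

api = HfApi()
# Upload every file under folder_path in one commit; large .pt files are
# stored via Git LFS, which is why the diffs below show LFS pointers.
api.upload_folder(
    folder_path="./checkpoint",          # placeholder local path
    repo_id="your-username/your-model",  # placeholder repo id
    commit_message="Upload folder using huggingface_hub",
)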
adapter_0.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b4df35252e7247866a6fd278663fdd36d6e6738adce0c18bc91c8bc7b6f4b845
+size 8436346
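
The three added lines are a Git LFS pointer, not the tensor data itself: they record the LFS spec version, the SHA-256 digest, and the byte size of the real file kept in LFS storage. A minimal sketch for verifying a downloaded file against this pointer (the local path is a placeholder):

import hashlib

def verify_lfs_file(path, expected_oid, expected_size):
    # Stream in 1 MiB chunks so multi-GB checkpoints never sit in memory.
    digest = hashlib.sha256()
    size = 0
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            size += len(chunk)
    return digest.hexdigest() == expected_oid and size == expected_size

print(verify_lfs_file(
    "adapter_0.pt",
    "b4df35252e7247866a6fd278663fdd36d6e6738adce0c18bc91c8bc7b6f4b845",
    8436346,
))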
adapter_config.json
ADDED
@@ -0,0 +1 @@
+{"r": 8, "lora_alpha": 16, "target_modules": ["q_proj", "v_proj"]}
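
These are LoRA hyperparameters: rank-8 adapters with scaling factor alpha = 16, injected into the attention query and value projections. A sketch of an equivalent peft LoraConfig, assuming a causal-LM task type (not stated in the uploaded file):

from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                                  # adapter rank, as in adapter_config.json
    lora_alpha=16,                        # scaling factor
    target_modules=["q_proj", "v_proj"],  # attention projections to adapt
    task_type="CAUSAL_LM",                # assumption; not in the uploaded file
)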
config.json
ADDED
@@ -0,0 +1 @@
+{"_name_or_path": "meta-llama/Llama-2-7b-hf", "architectures": ["LlamaForCausalLM"], "bos_token_id": 1, "eos_token_id": 2, "hidden_act": "silu", "hidden_size": 4096, "initializer_range": 0.02, "intermediate_size": 11008, "max_position_embeddings": 4096, "model_type": "llama", "num_attention_heads": 32, "num_hidden_layers": 32, "num_key_value_heads": 32, "pretraining_tp": 1, "rms_norm_eps": 1e-05, "rope_scaling": null, "tie_word_embeddings": false, "torch_dtype": "float16", "transformers_version": "4.31.0.dev0", "use_cache": true, "vocab_size": 32000}
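
This is the stock Llama-2-7b configuration: 32 layers, 32 attention heads, hidden size 4096, float16 weights. A sketch of reading it back with transformers, assuming the repo files were downloaded to a local directory:

from transformers import AutoConfig

config = AutoConfig.from_pretrained("./checkpoint")  # placeholder local path
print(config.model_type, config.hidden_size, config.num_hidden_layers)
# -> llama 4096 32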
hf_model_0001_0.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab67fbe4aa6d64384fef9b5b0ee6754bd39b0b1293b2eec85b7adede2d104e61
+size 9976617636
hf_model_0002_0.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76236673005615863ba2bb7565fbb1dccc580c1162f9c35ea0040d4e917b112a
+size 3500310120
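
As a sanity check, the two shard sizes are consistent with the "torch_dtype": "float16" entry in config.json: dividing the total bytes by 2 bytes per parameter gives roughly the 6.7B parameters of Llama-2-7b.

# ~6.74e9 parameters: matches Llama-2-7b stored as 2-byte float16 values
total_bytes = 9976617636 + 3500310120
print(total_bytes / 2)  # 6738463878.0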