dacorvo HF staff committed on
Commit
ec83404
·
verified ·
1 Parent(s): 4475f92

Synchronizing local compiler cache.

Browse files
neuronxcc-2.16.372.0+4a9b2326/0_REGISTRY/0.1.0.dev1/inference/llama/NousResearch/Hermes-2-Theta-Llama-3-8B/7bae97d51948b959db6b.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"architectures": ["LlamaForCausalLM"], "attention_bias": false, "attention_dropout": 0.0, "bos_token_id": 128000, "eos_token_id": 128003, "head_dim": 128, "hidden_act": "silu", "hidden_size": 4096, "initializer_range": 0.02, "intermediate_size": 14336, "max_position_embeddings": 8192, "mlp_bias": false, "model_type": "llama", "neuron": {"auto_cast_type": "fp16", "batch_size": 4, "checkpoint_id": "NousResearch/Hermes-2-Theta-Llama-3-8B", "checkpoint_revision": "57a73110702e7b05ba3f39fef36297454c680725", "compiler_type": "neuronx-cc", "compiler_version": "2.16.372.0+4a9b2326", "num_cores": 2, "sequence_length": 4096, "task": "text-generation"}, "num_attention_heads": 32, "num_hidden_layers": 32, "num_key_value_heads": 8, "pretraining_tp": 1, "rms_norm_eps": 1e-05, "rope_scaling": null, "rope_theta": 500000.0, "tie_word_embeddings": false, "torch_dtype": "bfloat16", "use_cache": true, "vocab_size": 128256}
neuronxcc-2.16.372.0+4a9b2326/MODULE_850d5a3d0694f6f65d91+613edded/model.neff CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:27b6799e586f81adacc67d18a909f65119305ae72632cdc60671e40362511133
3
  size 6022144
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:0cf2e014dbe2347889acd4d495e12509894548adb07e5f4030bafe5d778f0058
3
  size 6022144
neuronxcc-2.16.372.0+4a9b2326/MODULE_f6b2b8267d631f2f0fad+613edded/model.neff CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:dcadb7a41ff7dbeec379bdbc29573832528c0d58aa23955c3e352fb4284f49a5
3
  size 10415104
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8bf4fec0bfd6ae7c0c12dfe9ceb1b30d9f3af3c0fae83345e05099ad7b95126a
3
  size 10415104