Upload folder using huggingface_hub
Browse files
- README.md +2 -0
- model.safetensors +1 -1
README.md
CHANGED
|
@@ -36,6 +36,7 @@ config.num_attention_heads = 4
|
|
| 36 |
config.num_hidden_layers = 2
|
| 37 |
config.max_window_layers = 1
|
| 38 |
|
|
|
|
| 39 |
model = transformers.AutoModelForCausalLM.from_config(
|
| 40 |
config,
|
| 41 |
trust_remote_code=True,
|
|
@@ -44,6 +45,7 @@ model.generation_config = transformers.GenerationConfig.from_pretrained(
|
|
| 44 |
model_id)
|
| 45 |
model = model.to(torch.bfloat16)
|
| 46 |
|
|
|
|
| 47 |
with torch.no_grad():
|
| 48 |
for p in model.parameters():
|
| 49 |
torch.nn.init.normal_(p)
|
|
|
|
| 36 |
config.num_hidden_layers = 2
|
| 37 |
config.max_window_layers = 1
|
| 38 |
|
| 39 |
+transformers.set_seed(42)
|
| 40 |
model = transformers.AutoModelForCausalLM.from_config(
|
| 41 |
config,
|
| 42 |
trust_remote_code=True,
|
|
|
|
| 45 |
model_id)
|
| 46 |
model = model.to(torch.bfloat16)
|
| 47 |
|
| 48 |
+transformers.set_seed(42)
|
| 49 |
with torch.no_grad():
|
| 50 |
for p in model.parameters():
|
| 51 |
torch.nn.init.normal_(p)
|
model.safetensors
CHANGED
|
@@ -1,3 +1,3 @@
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
-oid sha256:
|
| 3 |
size 4871288
|
|
|
|
| 1 |
version https://git-lfs.github.com/spec/v1
|
| 2 |
+oid sha256:de519377d9979c8d2faf6c299702790a64c7eb402b8e2c93814634197514ea0c
|
| 3 |
size 4871288
|