Upload folder using huggingface_hub
- training_checkpoints/checkpoint-643/config.json +63 -0
- training_checkpoints/checkpoint-643/model.safetensors +3 -0
- training_checkpoints/checkpoint-643/optimizer.pt +3 -0
- training_checkpoints/checkpoint-643/rng_state.pth +3 -0
- training_checkpoints/checkpoint-643/scheduler.pt +3 -0
- training_checkpoints/checkpoint-643/trainer_state.json +50 -0
- training_checkpoints/checkpoint-643/training_args.bin +3 -0
training_checkpoints/checkpoint-643/config.json
ADDED
@@ -0,0 +1,63 @@
+{
+  "_name_or_path": "openaccess-ai-collective/tiny-mistral",
+  "architectures": [
+    "MistralForSequenceClassification"
+  ],
+  "attention_dropout": 0.0,
+  "bos_token_id": 1,
+  "dropout_p": 0.1,
+  "eos_token_id": 2,
+  "hidden_act": "silu",
+  "hidden_size": 512,
+  "id2label": {
+    "0": "Issue",
+    "1": "Court Discourse",
+    "2": "Conclusion",
+    "3": "Precedent Analysis",
+    "4": "Section Analysis",
+    "5": "Argument by Petitioner",
+    "6": "Fact",
+    "7": "Argument by Respondent",
+    "8": "Ratio",
+    "9": "Appellant",
+    "10": "Respondent",
+    "11": "Argument by Appellant",
+    "12": "Petitioner",
+    "13": "Judge",
+    "14": "Argument by Defendant"
+  },
+  "initializer_range": 0.02,
+  "intermediate_size": 14336,
+  "label2id": {
+    "Appellant": 9,
+    "Argument by Appellant": 11,
+    "Argument by Defendant": 14,
+    "Argument by Petitioner": 5,
+    "Argument by Respondent": 7,
+    "Conclusion": 2,
+    "Court Discourse": 1,
+    "Fact": 6,
+    "Issue": 0,
+    "Judge": 13,
+    "Petitioner": 12,
+    "Precedent Analysis": 3,
+    "Ratio": 8,
+    "Respondent": 10,
+    "Section Analysis": 4
+  },
+  "max_position_embeddings": 32768,
+  "model_type": "mistral",
+  "num_attention_heads": 16,
+  "num_hidden_layers": 8,
+  "num_key_value_heads": 4,
+  "pad_token_id": 32000,
+  "problem_type": "single_label_classification",
+  "rms_norm_eps": 1e-05,
+  "rope_theta": 10000.0,
+  "sliding_window": 4096,
+  "tie_word_embeddings": false,
+  "torch_dtype": "float32",
+  "transformers_version": "4.39.3",
+  "use_cache": true,
+  "vocab_size": 32001
+}
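The config above defines a 15-label MistralForSequenceClassification head on the tiny-mistral backbone. As a minimal sketch (not part of this commit), assuming the checkpoint-643 folder has been downloaded to a local path (the path below is an assumption), it could be loaded with transformers roughly like this:

# Sketch only: assumes `transformers` is installed and the checkpoint folder
# from this commit has been fetched locally (hypothetical path below).
from transformers import AutoConfig, AutoModelForSequenceClassification

ckpt_dir = "training_checkpoints/checkpoint-643"  # hypothetical local copy

config = AutoConfig.from_pretrained(ckpt_dir)
print(config.num_labels)   # 15 rhetorical-role labels
print(config.id2label)     # {0: "Issue", 1: "Court Discourse", ...}

# Backbone and classification-head weights come from model.safetensors.
model = AutoModelForSequenceClassification.from_pretrained(ckpt_dir)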
training_checkpoints/checkpoint-643/model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a10f3cab1e2ffe87455f3790ab0900af88cd41a4a89865b9068d37bff0f3018d
+size 791226496
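The three lines above are a Git LFS pointer, not the weights themselves: the actual model.safetensors is identified by its SHA-256 oid and size in bytes. A minimal sketch, assuming the real file has been downloaded locally (hypothetical path), for checking it against the recorded oid:

# Sketch: verify a downloaded file against the sha256 oid in the LFS pointer.
import hashlib

EXPECTED_OID = "a10f3cab1e2ffe87455f3790ab0900af88cd41a4a89865b9068d37bff0f3018d"

sha = hashlib.sha256()
with open("model.safetensors", "rb") as f:  # hypothetical local path
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        sha.update(chunk)

assert sha.hexdigest() == EXPECTED_OID, "checksum mismatch"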
training_checkpoints/checkpoint-643/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9809d21b7830814eb1a4e9771f318d36a69c8d580f9950e69ed8705c281e1347
+size 1582499834
training_checkpoints/checkpoint-643/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2e155718eaa0de9768e57816e6e6874ba931ec4489afb1c2874db91c97f7a96
+size 14244
training_checkpoints/checkpoint-643/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ad5fcde2bc2cfd97977cd562ef7c24c5f2c9ec5897b502e41c50cc5d48884a6
+size 1064
training_checkpoints/checkpoint-643/trainer_state.json
ADDED
@@ -0,0 +1,50 @@
+{
+  "best_metric": 0.45467871431664447,
+  "best_model_checkpoint": "tiny-mistral/checkpoint-643",
+  "epoch": 1.0,
+  "eval_steps": 500,
+  "global_step": 643,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.78,
+      "grad_norm": 13.069233894348145,
+      "learning_rate": 4.7413167444271646e-05,
+      "loss": 1.4479,
+      "step": 500
+    },
+    {
+      "epoch": 1.0,
+      "eval_accuracy": 0.6498838109992254,
+      "eval_f1_macro": 0.45467871431664447,
+      "eval_f1_micro": 0.6498838109992254,
+      "eval_f1_weighted": 0.6214028764904677,
+      "eval_loss": 1.118202805519104,
+      "eval_macro_fpr": 0.039027492204575735,
+      "eval_macro_sensitivity": 0.4743680961365506,
+      "eval_macro_specificity": 0.9731278674075015,
+      "eval_precision": 0.6258370043663808,
+      "eval_precision_macro": 0.4712458606651466,
+      "eval_recall": 0.6498838109992254,
+      "eval_recall_macro": 0.4743680961365506,
+      "eval_runtime": 52.8829,
+      "eval_samples_per_second": 24.412,
+      "eval_steps_per_second": 3.063,
+      "eval_weighted_fpr": 0.03705525495982948,
+      "eval_weighted_sensitivity": 0.6498838109992254,
+      "eval_weighted_specificity": 0.9470342001132973,
+      "step": 643
+    }
+  ],
+  "logging_steps": 500,
+  "max_steps": 9645,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 15,
+  "save_steps": 500,
+  "total_flos": 2863522174205952.0,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
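trainer_state.json records the Trainer's progress at global step 643 (end of epoch 1 of 15); note that best_metric equals the eval_f1_macro of that eval pass. A minimal sketch, assuming the checkpoint directory is available locally (hypothetical path), for reading those metrics back:

# Sketch: pull the epoch-1 eval metrics out of trainer_state.json.
import json

with open("training_checkpoints/checkpoint-643/trainer_state.json") as f:
    state = json.load(f)

evals = [entry for entry in state["log_history"] if "eval_f1_macro" in entry]
latest = evals[-1]
print(latest["eval_f1_macro"])   # 0.4546..., equal to state["best_metric"]
print(latest["eval_accuracy"])   # 0.6498...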
training_checkpoints/checkpoint-643/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ae13135576265f1180bcf8daa3dbfb279a0746197d617e38954842ee54fec7e1
+size 4920