Saving train state of step 5
distil-whisper/events.out.tfevents.1715073503.server02.1431090.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce53bdcb28f245a46122283081f5c6c2f3e33cca3de217f4fa873fc9b8143fec
+size 88
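The added events.out.tfevents file is a TensorBoard event log stored through Git LFS: the repository keeps only the pointer above (LFS spec version, SHA-256 object id, and size in bytes), while the payload itself lives in LFS storage. Once the real file has been pulled locally, its logged scalars can be read with TensorBoard's EventAccumulator. A minimal sketch follows; the local path reuses the file name from this commit, and the "train/loss" tag is an assumption, not something recorded here:

# Minimal sketch: inspect a TensorBoard event file after pulling it from Git LFS.
# The local path and the "train/loss" tag are assumptions, not taken from this commit.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

event_file = "distil-whisper/events.out.tfevents.1715073503.server02.1431090.0"
accumulator = EventAccumulator(event_file)
accumulator.Reload()  # parse the event records from disk

print(accumulator.Tags()["scalars"])             # scalar tags that were logged
for event in accumulator.Scalars("train/loss"):  # hypothetical tag name
    print(event.step, event.value)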
run_distillation.py
CHANGED
@@ -874,7 +874,7 @@ def main():
         raw_datasets["eval"] = load_dataset(
             dataset_dict["name"],
             dataset_dict["config"],
-            split=dataset_dict["split"],
+            split=dataset_dict["split"],
             cache_dir=data_args.dataset_cache_dir,
             token=model_args.token,
             streaming=data_args.streaming,
@@ -1595,9 +1595,8 @@ def main():
         eval_preds = []
         eval_labels = []
         eval_start = time.time()
-
-
-        validation_dataloader = DataLoader(
+
+        validation_dataloader = DataLoader(
             vectorized_datasets[eval_split],
             collate_fn=data_collator,
             batch_size=per_device_eval_batch_size,
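Both hunks in run_distillation.py sit on the evaluation path: the first touches the split= argument of the load_dataset call for the eval set, and the second tidies the blank lines before the per-split validation DataLoader is created. For orientation only, here is a minimal, self-contained sketch of building and consuming an evaluation DataLoader in the same shape as the second hunk; the toy dataset, the collate function, and the batch size are placeholders, not code from the script:

# Minimal sketch of an evaluation DataLoader in the shape of the second hunk.
# The toy dataset, collate function, and batch size are placeholders/assumptions.
import torch
from torch.utils.data import DataLoader, Dataset


class ToyEvalSet(Dataset):
    """Stand-in for vectorized_datasets[eval_split]: yields feature/label pairs."""

    def __len__(self):
        return 8

    def __getitem__(self, idx):
        return {"input_features": torch.randn(4), "labels": torch.tensor(idx % 2)}


def collate(batch):
    # Stand-in for the script's data_collator: stack the per-example tensors.
    return {
        "input_features": torch.stack([item["input_features"] for item in batch]),
        "labels": torch.stack([item["labels"] for item in batch]),
    }


validation_dataloader = DataLoader(
    ToyEvalSet(),
    collate_fn=collate,
    batch_size=2,  # plays the role of per_device_eval_batch_size
    drop_last=False,
)

for batch in validation_dataloader:
    # In the real script each batch would be fed to the model to collect
    # eval_preds and eval_labels; here we only confirm the batch shapes.
    print(batch["input_features"].shape, batch["labels"].shape)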