jooni22 committed
Commit 2617270 · verified · Parent(s): 3afae7a

Update README.md

Files changed (1): README.md (+214, −3)

README.md CHANGED
---
license: apache-2.0
---
### Label info:
```
0: "fragment",
1: "statement",
2: "question",
3: "command",
4: "rhetorical question",
5: "rhetorical command",
6: "intonation-dependent utterance"
```

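### Example usage:
A minimal inference sketch (not part of the original commit): it assumes the fine-tuned checkpoint is available at `./roberta_base_stock`, the output directory used in the training code below; substitute the Hub repo id to load remotely. Because `id2label` is stored in the model config, the pipeline returns the label names listed above.
```python
from transformers import pipeline

# Hypothetical local path; replace with the model's Hub repo id if needed.
classifier = pipeline("text-classification", model="./roberta_base_stock")

print(classifier("Could you pass the salt?"))
# -> e.g. [{'label': 'command', 'score': 0.97}]  (illustrative output)
```
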
### Training process:
```
{'loss': 1.8008, 'grad_norm': 7.2770233154296875, 'learning_rate': 1e-05, 'epoch': 0.03}
{'loss': 0.894, 'grad_norm': 27.84651756286621, 'learning_rate': 2e-05, 'epoch': 0.06}
{'loss': 0.6504, 'grad_norm': 30.617990493774414, 'learning_rate': 3e-05, 'epoch': 0.09}
{'loss': 0.5939, 'grad_norm': 34.73934555053711, 'learning_rate': 4e-05, 'epoch': 0.12}
{'loss': 0.5786, 'grad_norm': 6.585583209991455, 'learning_rate': 5e-05, 'epoch': 0.15}
{'eval_loss': 0.5915874242782593, 'eval_accuracy': 0.8297766749379653, 'eval_f1': 0.8315132136625163, 'eval_precision': 0.8410462605264737, 'eval_recall': 0.8297766749379653, 'eval_runtime': 265.1144, 'eval_samples_per_second': 22.801, 'eval_steps_per_second': 1.426, 'epoch': 0.15}
{'loss': 0.5928, 'grad_norm': 10.66515064239502, 'learning_rate': 4.8276456394346784e-05, 'epoch': 0.18}
{'loss': 0.5611, 'grad_norm': 3.804234266281128, 'learning_rate': 4.655291278869355e-05, 'epoch': 0.21}
{'loss': 0.5151, 'grad_norm': 8.275078773498535, 'learning_rate': 4.4829369183040333e-05, 'epoch': 0.24}
{'loss': 0.4696, 'grad_norm': 2.44854474067688, 'learning_rate': 4.310582557738711e-05, 'epoch': 0.26}
{'loss': 0.5183, 'grad_norm': 8.534456253051758, 'learning_rate': 4.138228197173389e-05, 'epoch': 0.29}
{'eval_loss': 0.5429911017417908, 'eval_accuracy': 0.8415219189412738, 'eval_f1': 0.8231674368620022, 'eval_precision': 0.8383674385161947, 'eval_recall': 0.8415219189412738, 'eval_runtime': 268.1016, 'eval_samples_per_second': 22.547, 'eval_steps_per_second': 1.41, 'epoch': 0.29}
{'loss': 0.4802, 'grad_norm': 10.636425018310547, 'learning_rate': 3.965873836608066e-05, 'epoch': 0.32}
{'loss': 0.4877, 'grad_norm': 6.05213737487793, 'learning_rate': 3.793519476042744e-05, 'epoch': 0.35}
{'loss': 0.5093, 'grad_norm': 5.5984015464782715, 'learning_rate': 3.621165115477422e-05, 'epoch': 0.38}
{'loss': 0.496, 'grad_norm': 7.945780277252197, 'learning_rate': 3.4488107549120996e-05, 'epoch': 0.41}
{'loss': 0.5005, 'grad_norm': 5.778200626373291, 'learning_rate': 3.276456394346777e-05, 'epoch': 0.44}
{'eval_loss': 0.41184064745903015, 'eval_accuracy': 0.8684863523573201, 'eval_f1': 0.8635611747282996, 'eval_precision': 0.8629771033516368, 'eval_recall': 0.8684863523573201, 'eval_runtime': 270.0108, 'eval_samples_per_second': 22.388, 'eval_steps_per_second': 1.4, 'epoch': 0.44}
{'loss': 0.4436, 'grad_norm': 4.413114070892334, 'learning_rate': 3.1041020337814545e-05, 'epoch': 0.47}
{'loss': 0.4899, 'grad_norm': 18.563016891479492, 'learning_rate': 2.9317476732161327e-05, 'epoch': 0.5}
{'loss': 0.4637, 'grad_norm': 26.92985725402832, 'learning_rate': 2.7593933126508105e-05, 'epoch': 0.53}
{'loss': 0.4387, 'grad_norm': 7.494612693786621, 'learning_rate': 2.5870389520854876e-05, 'epoch': 0.56}
{'loss': 0.4401, 'grad_norm': 20.5152530670166, 'learning_rate': 2.4146845915201654e-05, 'epoch': 0.59}
{'eval_loss': 0.42229706048965454, 'eval_accuracy': 0.8663358147229115, 'eval_f1': 0.859666580414163, 'eval_precision': 0.8638930298685418, 'eval_recall': 0.8663358147229115, 'eval_runtime': 272.7465, 'eval_samples_per_second': 22.163, 'eval_steps_per_second': 1.386, 'epoch': 0.59}
{'loss': 0.4289, 'grad_norm': 10.1361665725708, 'learning_rate': 2.2423302309548433e-05, 'epoch': 0.62}
{'loss': 0.4193, 'grad_norm': 8.068666458129883, 'learning_rate': 2.0699758703895207e-05, 'epoch': 0.65}
{'loss': 0.4038, 'grad_norm': 8.713869094848633, 'learning_rate': 1.8976215098241985e-05, 'epoch': 0.68}
{'loss': 0.4073, 'grad_norm': 12.182595252990723, 'learning_rate': 1.7252671492588764e-05, 'epoch': 0.71}
{'loss': 0.4095, 'grad_norm': 13.43953800201416, 'learning_rate': 1.5529127886935542e-05, 'epoch': 0.74}
{'eval_loss': 0.3974127173423767, 'eval_accuracy': 0.8726220016542597, 'eval_f1': 0.8677290061110087, 'eval_precision': 0.8672987137526573, 'eval_recall': 0.8726220016542597, 'eval_runtime': 270.2975, 'eval_samples_per_second': 22.364, 'eval_steps_per_second': 1.398, 'epoch': 0.74}
{'loss': 0.3473, 'grad_norm': 16.423139572143555, 'learning_rate': 1.3805584281282317e-05, 'epoch': 0.76}
{'loss': 0.3982, 'grad_norm': 6.357703685760498, 'learning_rate': 1.2082040675629095e-05, 'epoch': 0.79}
{'loss': 0.3286, 'grad_norm': 4.977189064025879, 'learning_rate': 1.0358497069975871e-05, 'epoch': 0.82}
{'loss': 0.3712, 'grad_norm': 4.068944454193115, 'learning_rate': 8.634953464322648e-06, 'epoch': 0.85}
{'loss': 0.345, 'grad_norm': 6.266202926635742, 'learning_rate': 6.911409858669425e-06, 'epoch': 0.88}
{'eval_loss': 0.3740645945072174, 'eval_accuracy': 0.8822167080231597, 'eval_f1': 0.8780706451391699, 'eval_precision': 0.877925468669178, 'eval_recall': 0.8822167080231597, 'eval_runtime': 270.0795, 'eval_samples_per_second': 22.382, 'eval_steps_per_second': 1.4, 'epoch': 0.88}
{'loss': 0.4049, 'grad_norm': 10.76927375793457, 'learning_rate': 5.187866253016201e-06, 'epoch': 0.91}
{'loss': 0.3919, 'grad_norm': 12.331282615661621, 'learning_rate': 3.4643226473629783e-06, 'epoch': 0.94}
{'loss': 0.3576, 'grad_norm': 8.6154203414917, 'learning_rate': 1.7407790417097554e-06, 'epoch': 0.97}
{'loss': 0.3544, 'grad_norm': 10.01504135131836, 'learning_rate': 1.723543605653223e-08, 'epoch': 1.0}
{'train_runtime': 7076.4012, 'train_samples_per_second': 7.688, 'train_steps_per_second': 0.481, 'train_loss': 0.5087223172678522, 'epoch': 1.0}
100%|██████████████████████████████████████| 3401/3401 [1:57:56<00:00, 2.08s/it]
Training completed. Model saved.
```

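To sanity-check a run like the one above, the logged dicts can be parsed back out of a captured console log. A minimal sketch, assuming the lines were saved to a file named `train_log.txt` (hypothetical name):
```python
import ast

train_loss, eval_f1 = [], []
with open("train_log.txt") as f:
    for line in f:
        line = line.strip()
        if not line.startswith("{"):
            continue  # skip the tqdm bar and status lines
        record = ast.literal_eval(line)  # each log line is a Python dict literal
        if "eval_f1" in record:
            eval_f1.append((record["epoch"], record["eval_f1"]))
        elif "loss" in record:
            train_loss.append((record["epoch"], record["loss"]))

print(train_loss[-1])  # (1.0, 0.3544)
print(eval_f1[-1])     # (0.88, 0.8780706451391699)
```
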
### Classification Report:
```
                                precision    recall  f1-score   support

                      fragment       0.95      0.92      0.94       597
                     statement       0.84      0.91      0.87      1811
                      question       0.95      0.94      0.94      1786
                       command       0.88      0.91      0.90      1296
           rhetorical question       0.73      0.62      0.67       174
            rhetorical command       0.86      0.56      0.68       108
intonation-dependent utterance       0.57      0.38      0.46       273

                      accuracy                           0.88      6045
                     macro avg       0.83      0.75      0.78      6045
                  weighted avg       0.88      0.88      0.88      6045

Predictions saved
```
### Train code:
```python
import pandas as pd
from sklearn.model_selection import train_test_split
from transformers import (
    RobertaTokenizerFast,
    RobertaForSequenceClassification,
    Trainer,
    TrainingArguments,
    EarlyStoppingCallback
)
from datasets import Dataset
import torch
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, classification_report

# Load and prepare data
train_df = pd.read_csv("./train_fix_v1.csv")
test_df = pd.read_csv("./test_fix_v1.csv")

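# Assumed CSV schema (not shown in the commit): a "text" column, tokenized
# below, and an integer "label" column in 0-6, which Trainer reads as the
# classification target.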
# Convert to Dataset objects
train_dataset = Dataset.from_pandas(train_df)
test_dataset = Dataset.from_pandas(test_df)

# Initialize tokenizer and model
model_name = "FacebookAI/roberta-base"
tokenizer = RobertaTokenizerFast.from_pretrained(model_name)
model = RobertaForSequenceClassification.from_pretrained(
    model_name,
    num_labels=7,
    id2label={
        0: "fragment",
        1: "statement",
        2: "question",
        3: "command",
        4: "rhetorical question",
        5: "rhetorical command",
        6: "intonation-dependent utterance"
    },
    label2id={
        "fragment": 0,
        "statement": 1,
        "question": 2,
        "command": 3,
        "rhetorical question": 4,
        "rhetorical command": 5,
        "intonation-dependent utterance": 6
    }
)

# Tokenize function
def tokenize_function(examples):
    return tokenizer(examples["text"], padding="max_length", truncation=True, max_length=512)

# Tokenize datasets
tokenized_train = train_dataset.map(tokenize_function, batched=True)
tokenized_test = test_dataset.map(tokenize_function, batched=True)

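# Padding every example to max_length=512 keeps things simple but wastes
# compute on short utterances; dynamic per-batch padding with
# transformers.DataCollatorWithPadding would be a drop-in alternative.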
# Compute metrics function for evaluation
def compute_metrics(pred):
    labels = pred.label_ids
    preds = pred.predictions.argmax(-1)
    precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='weighted')
    acc = accuracy_score(labels, preds)
    return {
        'accuracy': acc,
        'f1': f1,
        'precision': precision,
        'recall': recall
    }

# Training arguments
training_args = TrainingArguments(
    output_dir="./roberta_base_stock",
    num_train_epochs=1,  # single epoch here; early stopping (below) would cut longer runs short
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    warmup_steps=500,
    weight_decay=0.01,
    logging_dir='./logs',
    logging_steps=100,
    evaluation_strategy="steps",
    eval_steps=500,
    save_strategy="steps",
    save_steps=500,
    load_best_model_at_end=True,
    metric_for_best_model="f1",
    learning_rate=5e-05,
)

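# With load_best_model_at_end=True, recent transformers versions require
# save_steps to be a round multiple of eval_steps; both are 500 here, so the
# best-F1 checkpoint is restored when training ends.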
# Initialize Trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_train,
    eval_dataset=tokenized_test,
    compute_metrics=compute_metrics,
    callbacks=[EarlyStoppingCallback(early_stopping_patience=3)]
)

# Train the model
trainer.train()

# Save the fine-tuned model
model.save_pretrained("./roberta_base_stock")
tokenizer.save_pretrained("./roberta_base_stock")

print("Training completed. Model saved.")

# Evaluate the model on the test set
print("Evaluating model on test set...")
test_results = trainer.evaluate(tokenized_test)

print("Test set evaluation results:")
for key, value in test_results.items():
    print(f"{key}: {value}")

# Perform predictions on the test set
test_predictions = trainer.predict(tokenized_test)

# Get predicted labels
predicted_labels = np.argmax(test_predictions.predictions, axis=1)
true_labels = test_predictions.label_ids

# Print classification report
print("\nClassification Report:")
print(classification_report(true_labels, predicted_labels,
                            target_names=list(model.config.id2label.values())))

# Optional: Save predictions to CSV
test_df['predicted_label'] = predicted_labels
test_df.to_csv("./roberta_base_stock/test_predictions.csv", index=False)
print("Predictions saved")
```
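
A possible follow-up, sketched under the assumption that the test CSV keeps its integer `label` column alongside the added `predicted_label`: rebuild the confusion matrix from the saved predictions file to see which classes get confused (the report above suggests "intonation-dependent utterance" is the weakest).
```python
import pandas as pd
from sklearn.metrics import confusion_matrix

# The "label" column name is an assumption; adjust to the actual CSV schema.
df = pd.read_csv("./roberta_base_stock/test_predictions.csv")
print(confusion_matrix(df["label"], df["predicted_label"]))
```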