
jooni22/custom-dst-roberta-base
Dataset preview (label: int64, values 0-6; text: string, 1-337 chars):

label | text |
---|---|
0 | comic book |
0 | It makes sense |
0 | approximately |
0 | Meanwhile |
0 | salty |
0 | boon |
0 | expenditure |
0 | yesternight |
0 | promotion |
0 | runt |
0 | application |
0 | Mom, definitely, definitely |
0 | Oh, that's it |
0 | Showing brochures |
0 | kimbap |
0 | Finished |
0 | university |
0 | knob |
0 | participation |
0 | army |
0 | Chacha |
0 | housework |
0 | Packed all the time |
0 | just |
0 | distinction |
0 | match |
0 | letter |
0 | similar |
0 | doctor |
0 | okay |
0 | female soldier |
0 | alley |
0 | competitiveness |
0 | even |
0 | earthly |
0 | drama |
0 | Like this |
0 | There won't be |
0 | kneading |
0 | seat |
0 | Arrived now |
0 | fried rice |
0 | that |
0 | participant |
0 | chopsticks |
0 | ensign |
0 | wealth |
0 | some |
0 | story |
0 | too |
0 | pronunciation |
0 | also |
0 | station |
0 | announcer |
0 | north |
0 | step |
0 | Just the two of us |
0 | ham |
0 | doctor |
0 | mistake |
0 | teeth |
0 | ant |
0 | Proud |
0 | first |
0 | who |
0 | movie star |
0 | girlfriend |
0 | butter |
0 | disastrous |
0 | we |
0 | entirely |
0 | I |
0 | story |
0 | indoor |
0 | simply |
0 | assignment |
0 | seat |
0 | many |
0 | circumference |
0 | once |
0 | What is that |
0 | Bad Noomi |
0 | at least |
0 | sister-in-law |
0 | direction |
0 | area |
0 | cancer |
0 | bell |
0 | stop |
0 | international |
0 | hi |
0 | tutoring |
0 | wall |
0 | sash |
0 | severance pay |
0 | up and down |
0 | snack |
0 | a moment ago |
0 | here |
0 | gold medal |
0: "fragment",
1: "statement",
2: "question",
3: "command",
4: "rhetorical question",
5: "rhetorical command",
6: "intonation-dependent utterance"
{'loss': 1.8008, 'grad_norm': 7.2770233154296875, 'learning_rate': 1e-05, 'epoch': 0.03}
{'loss': 0.894, 'grad_norm': 27.84651756286621, 'learning_rate': 2e-05, 'epoch': 0.06}
{'loss': 0.6504, 'grad_norm': 30.617990493774414, 'learning_rate': 3e-05, 'epoch': 0.09}
{'loss': 0.5939, 'grad_norm': 34.73934555053711, 'learning_rate': 4e-05, 'epoch': 0.12}
{'loss': 0.5786, 'grad_norm': 6.585583209991455, 'learning_rate': 5e-05, 'epoch': 0.15}
{'eval_loss': 0.5915874242782593, 'eval_accuracy': 0.8297766749379653, 'eval_f1': 0.8315132136625163, 'eval_precision': 0.8410462605264737, 'eval_recall': 0.8297766749379653, 'eval_runtime': 265.1144, 'eval_samples_per_second': 22.801, 'eval_steps_per_second': 1.426, 'epoch': 0.15}
{'loss': 0.5928, 'grad_norm': 10.66515064239502, 'learning_rate': 4.8276456394346784e-05, 'epoch': 0.18}
{'loss': 0.5611, 'grad_norm': 3.804234266281128, 'learning_rate': 4.655291278869355e-05, 'epoch': 0.21}
{'loss': 0.5151, 'grad_norm': 8.275078773498535, 'learning_rate': 4.4829369183040333e-05, 'epoch': 0.24}
{'loss': 0.4696, 'grad_norm': 2.44854474067688, 'learning_rate': 4.310582557738711e-05, 'epoch': 0.26}
{'loss': 0.5183, 'grad_norm': 8.534456253051758, 'learning_rate': 4.138228197173389e-05, 'epoch': 0.29}
{'eval_loss': 0.5429911017417908, 'eval_accuracy': 0.8415219189412738, 'eval_f1': 0.8231674368620022, 'eval_precision': 0.8383674385161947, 'eval_recall': 0.8415219189412738, 'eval_runtime': 268.1016, 'eval_samples_per_second': 22.547, 'eval_steps_per_second': 1.41, 'epoch': 0.29}
{'loss': 0.4802, 'grad_norm': 10.636425018310547, 'learning_rate': 3.965873836608066e-05, 'epoch': 0.32}
{'loss': 0.4877, 'grad_norm': 6.05213737487793, 'learning_rate': 3.793519476042744e-05, 'epoch': 0.35}
{'loss': 0.5093, 'grad_norm': 5.5984015464782715, 'learning_rate': 3.621165115477422e-05, 'epoch': 0.38}
{'loss': 0.496, 'grad_norm': 7.945780277252197, 'learning_rate': 3.4488107549120996e-05, 'epoch': 0.41}
{'loss': 0.5005, 'grad_norm': 5.778200626373291, 'learning_rate': 3.276456394346777e-05, 'epoch': 0.44}
{'eval_loss': 0.41184064745903015, 'eval_accuracy': 0.8684863523573201, 'eval_f1': 0.8635611747282996, 'eval_precision': 0.8629771033516368, 'eval_recall': 0.8684863523573201, 'eval_runtime': 270.0108, 'eval_samples_per_second': 22.388, 'eval_steps_per_second': 1.4, 'epoch': 0.44}
{'loss': 0.4436, 'grad_norm': 4.413114070892334, 'learning_rate': 3.1041020337814545e-05, 'epoch': 0.47}
{'loss': 0.4899, 'grad_norm': 18.563016891479492, 'learning_rate': 2.9317476732161327e-05, 'epoch': 0.5}
{'loss': 0.4637, 'grad_norm': 26.92985725402832, 'learning_rate': 2.7593933126508105e-05, 'epoch': 0.53}
{'loss': 0.4387, 'grad_norm': 7.494612693786621, 'learning_rate': 2.5870389520854876e-05, 'epoch': 0.56}
{'loss': 0.4401, 'grad_norm': 20.5152530670166, 'learning_rate': 2.4146845915201654e-05, 'epoch': 0.59}
{'eval_loss': 0.42229706048965454, 'eval_accuracy': 0.8663358147229115, 'eval_f1': 0.859666580414163, 'eval_precision': 0.8638930298685418, 'eval_recall': 0.8663358147229115, 'eval_runtime': 272.7465, 'eval_samples_per_second': 22.163, 'eval_steps_per_second': 1.386, 'epoch': 0.59}
{'loss': 0.4289, 'grad_norm': 10.1361665725708, 'learning_rate': 2.2423302309548433e-05, 'epoch': 0.62}
{'loss': 0.4193, 'grad_norm': 8.068666458129883, 'learning_rate': 2.0699758703895207e-05, 'epoch': 0.65}
{'loss': 0.4038, 'grad_norm': 8.713869094848633, 'learning_rate': 1.8976215098241985e-05, 'epoch': 0.68}
{'loss': 0.4073, 'grad_norm': 12.182595252990723, 'learning_rate': 1.7252671492588764e-05, 'epoch': 0.71}
{'loss': 0.4095, 'grad_norm': 13.43953800201416, 'learning_rate': 1.5529127886935542e-05, 'epoch': 0.74}
{'eval_loss': 0.3974127173423767, 'eval_accuracy': 0.8726220016542597, 'eval_f1': 0.8677290061110087, 'eval_precision': 0.8672987137526573, 'eval_recall': 0.8726220016542597, 'eval_runtime': 270.2975, 'eval_samples_per_second': 22.364, 'eval_steps_per_second': 1.398, 'epoch': 0.74}
{'loss': 0.3473, 'grad_norm': 16.423139572143555, 'learning_rate': 1.3805584281282317e-05, 'epoch': 0.76}
{'loss': 0.3982, 'grad_norm': 6.357703685760498, 'learning_rate': 1.2082040675629095e-05, 'epoch': 0.79}
{'loss': 0.3286, 'grad_norm': 4.977189064025879, 'learning_rate': 1.0358497069975871e-05, 'epoch': 0.82}
{'loss': 0.3712, 'grad_norm': 4.068944454193115, 'learning_rate': 8.634953464322648e-06, 'epoch': 0.85}
{'loss': 0.345, 'grad_norm': 6.266202926635742, 'learning_rate': 6.911409858669425e-06, 'epoch': 0.88}
{'eval_loss': 0.3740645945072174, 'eval_accuracy': 0.8822167080231597, 'eval_f1': 0.8780706451391699, 'eval_precision': 0.877925468669178, 'eval_recall': 0.8822167080231597, 'eval_runtime': 270.0795, 'eval_samples_per_second': 22.382, 'eval_steps_per_second': 1.4, 'epoch': 0.88}
{'loss': 0.4049, 'grad_norm': 10.76927375793457, 'learning_rate': 5.187866253016201e-06, 'epoch': 0.91}
{'loss': 0.3919, 'grad_norm': 12.331282615661621, 'learning_rate': 3.4643226473629783e-06, 'epoch': 0.94}
{'loss': 0.3576, 'grad_norm': 8.6154203414917, 'learning_rate': 1.7407790417097554e-06, 'epoch': 0.97}
{'loss': 0.3544, 'grad_norm': 10.01504135131836, 'learning_rate': 1.723543605653223e-08, 'epoch': 1.0}
{'train_runtime': 7076.4012, 'train_samples_per_second': 7.688, 'train_steps_per_second': 0.481, 'train_loss': 0.5087223172678522, 'epoch': 1.0}
100%|██████████| 3401/3401 [1:57:56<00:00, 2.08s/it]
Training completed. Model saved.
class | precision | recall | f1-score | support
---|---|---|---|---
fragment | 0.95 | 0.92 | 0.94 | 597
statement | 0.84 | 0.91 | 0.87 | 1811
question | 0.95 | 0.94 | 0.94 | 1786
command | 0.88 | 0.91 | 0.90 | 1296
rhetorical question | 0.73 | 0.62 | 0.67 | 174
rhetorical command | 0.86 | 0.56 | 0.68 | 108
intonation-dependent utterance | 0.57 | 0.38 | 0.46 | 273
accuracy | | | 0.88 | 6045
macro avg | 0.83 | 0.75 | 0.78 | 6045
weighted avg | 0.88 | 0.88 | 0.88 | 6045
Predictions saved
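Note the gap between the macro and weighted averages: the three low-support classes (rhetorical question, rhetorical command, intonation-dependent utterance) pull the macro F1 down to 0.78, while the support-weighted F1 stays at 0.88. A minimal sketch of the two averaging modes, using toy labels rather than the actual test set:

from sklearn.metrics import f1_score

# Toy example: class 1 dominates, class 6 is rare and often misclassified.
y_true = [1] * 90 + [6] * 10
y_pred = [1] * 90 + [1] * 6 + [6] * 4

print(f1_score(y_true, y_pred, average="macro"))     # dragged down by the rare class
print(f1_score(y_true, y_pred, average="weighted"))  # dominated by the frequent class

The full fine-tuning script follows.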
import pandas as pd
from sklearn.model_selection import train_test_split
from transformers import (
    RobertaTokenizerFast,
    RobertaForSequenceClassification,
    Trainer,
    TrainingArguments,
    EarlyStoppingCallback
)
from datasets import Dataset
import torch
import numpy as np
from sklearn.metrics import accuracy_score, precision_recall_fscore_support, classification_report
# Load and prepare data
train_df = pd.read_csv("./train_fix_v1.csv")
test_df = pd.read_csv("./test_fix_v1.csv")
# Convert to Dataset objects
train_dataset = Dataset.from_pandas(train_df)
test_dataset = Dataset.from_pandas(test_df)
# Initialize tokenizer and model
model_name = "FacebookAI/roberta-base"
tokenizer = RobertaTokenizerFast.from_pretrained(model_name)
model = RobertaForSequenceClassification.from_pretrained(
    model_name,
    num_labels=7,
    id2label={
        0: "fragment",
        1: "statement",
        2: "question",
        3: "command",
        4: "rhetorical question",
        5: "rhetorical command",
        6: "intonation-dependent utterance"
    },
    label2id={
        "fragment": 0,
        "statement": 1,
        "question": 2,
        "command": 3,
        "rhetorical question": 4,
        "rhetorical command": 5,
        "intonation-dependent utterance": 6
    }
)
# Tokenize function
def tokenize_function(examples):
    return tokenizer(examples["text"], padding="max_length", truncation=True, max_length=512)
# Tokenize datasets
tokenized_train = train_dataset.map(tokenize_function, batched=True)
tokenized_test = test_dataset.map(tokenize_function, batched=True)
# Compute metrics function for evaluation
def compute_metrics(pred):
    labels = pred.label_ids
    preds = pred.predictions.argmax(-1)
    precision, recall, f1, _ = precision_recall_fscore_support(labels, preds, average='weighted')
    acc = accuracy_score(labels, preds)
    return {
        'accuracy': acc,
        'f1': f1,
        'precision': precision,
        'recall': recall
    }
# Training arguments
training_args = TrainingArguments(
    output_dir="./roberta_base_stock",
    num_train_epochs=1,  # set to 10, but with early stopping
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    warmup_steps=500,
    weight_decay=0.01,
    logging_dir='./logs',
    logging_steps=100,
    evaluation_strategy="steps",
    eval_steps=500,
    save_strategy="steps",
    save_steps=500,
    load_best_model_at_end=True,
    metric_for_best_model="f1",
    learning_rate=5e-05,
)
# Initialize Trainer
trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_train,
    eval_dataset=tokenized_test,
    compute_metrics=compute_metrics,
    callbacks=[EarlyStoppingCallback(early_stopping_patience=3)]
)
# Train the model
trainer.train()
# Save the fine-tuned model
model.save_pretrained("./roberta_base_stock")
tokenizer.save_pretrained("./roberta_base_stock")
print("Training completed. Model saved.")
# Evaluate the model on the test set
print("Evaluating model on test set...")
test_results = trainer.evaluate(tokenized_test)
print("Test set evaluation results:")
for key, value in test_results.items():
    print(f"{key}: {value}")
# Perform predictions on the test set
test_predictions = trainer.predict(tokenized_test)
# Get predicted labels
predicted_labels = np.argmax(test_predictions.predictions, axis=1)
true_labels = test_predictions.label_ids
# Print classification report
print("\nClassification Report:")
print(classification_report(true_labels, predicted_labels,
                            target_names=list(model.config.id2label.values())))
# Optional: Save predictions to CSV
test_df['predicted_label'] = predicted_labels
test_df.to_csv("./roberta_base_stock/test_predictions.csv", index=False)
print("Predictions saved")