# ==============================================
# Stephen Model Fine-Tuning Script (LoRA + PEFT)
# Clean + Debug-Enhanced for Grad & Deprecation Warnings
# ==============================================
# NOTE: "!pip install" is notebook/Colab syntax; if running as a plain Python script, install these packages from a shell first.
!pip install -q "transformers>=4.44.0" "datasets" "peft>=0.12.0" accelerate bitsandbytes sentencepiece huggingface_hub
import os
from datetime import datetime
from huggingface_hub import login, whoami
from datasets import load_dataset
from transformers import (
AutoTokenizer, AutoModelForCausalLM, TrainingArguments,
Trainer, DataCollatorForLanguageModeling, BitsAndBytesConfig
)
from peft import LoraConfig, get_peft_model
from peft import prepare_model_for_kbit_training
import torch
# ==============================================
# Logging helper
# ==============================================
def log(msg):
    print(f"[{datetime.now().strftime('%H:%M:%S')}] {msg}")
# ==============================================
# 1. Hugging Face Login
# ==============================================
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("❌ HF_TOKEN environment variable not set.")
log("Logging into Hugging Face...")
login(token=HF_TOKEN, add_to_git_credential=True)
log(f"Logged in as: {whoami()['name']} ✅")
# ==============================================
# 2. Load Dataset
# ==============================================
dataset_name = "dgtalbug/stephen-dataset" # CHANGE THIS
data_file = "stephen.jsonl" # CHANGE THIS
log(f"Loading dataset: {dataset_name}/{data_file} ...")
dataset = load_dataset(dataset_name, data_files=data_file, split="train")
log(f"Dataset loaded — {len(dataset)} rows")
log(f"First example: {dataset[0]}")
# ==============================================
# 3. Load Base Model & Tokenizer
# ==============================================
base_model = "dgtalbug/stable-code-instruct-3b" # CHANGE THIS
log(f"Loading base model: {base_model}...")
tokenizer = AutoTokenizer.from_pretrained(
    base_model,
    token=HF_TOKEN,
    use_fast=True
)
# Reuse EOS as the pad token if none is defined (needed for padding="max_length" below)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token
# ✅ Quantization config
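# 8-bit weights roughly halve model memory vs fp16; llm_int8_threshold controls which
# outlier activations are kept in fp16 by the LLM.int8() scheme.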
bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    llm_int8_threshold=6.0
)
try:
    model = AutoModelForCausalLM.from_pretrained(
        base_model,
        token=HF_TOKEN,
        device_map="auto",
        torch_dtype=torch.float16,
        trust_remote_code=True,
        return_dict=True,
        quantization_config=bnb_config
    )
except Exception as e:
    log(f"⚠️ Quantized load failed: {e} — falling back to fp16.")
    model = AutoModelForCausalLM.from_pretrained(
        base_model,
        token=HF_TOKEN,
        device_map="auto",
        torch_dtype=torch.float16,
        trust_remote_code=True,
        return_dict=True
    )
log("Base model loaded ✅")
# ==============================================
# 4. LoRA Config
# ==============================================
# log("Configuring LoRA...")
# lora_config = LoraConfig(
# r=16,
# lora_alpha=32,
# target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],
# lora_dropout=0.05,
# bias="none",
# task_type="CAUSAL_LM"
# )
# model = get_peft_model(model, lora_config)
# # ✅ Ensure LoRA params require grad
# for name, param in model.named_parameters():
# if "lora" in name:
# param.requires_grad = True
# else:
# param.requires_grad = False
# # ✅ Sanity check: see how many params are trainable
# model.print_trainable_parameters()
# log("LoRA config applied ✅")
log("Configuring LoRA...")
# Prepare the quantized model for k-bit training: freezes the base weights, upcasts norm layers,
# and enables gradient checkpointing with input grads (important for bitsandbytes)
model = prepare_model_for_kbit_training(model)
# LoRA config
lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],  # adjust if needed for other architectures
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM"
)
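# r=16 low-rank adapters with scaling lora_alpha / r = 32 / 16 = 2; only these adapter weights are trained.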
# Apply LoRA adapters
model = get_peft_model(model, lora_config)
# Gradient checkpointing is enabled in TrainingArguments; disable the KV cache to avoid the
# "use_cache=True is incompatible with gradient checkpointing" warning during training.
model.config.use_cache = False
# Double-check that the LoRA parameters actually require gradients
trainable_params = [name for name, param in model.named_parameters() if param.requires_grad]
if not trainable_params:
    raise RuntimeError("❌ No parameters set to require gradients! LoRA not applied correctly.")
log(f"✅ Found {len(trainable_params)} trainable parameters.")
log(f"First 20 trainable params: {trainable_params[:20]}")
# Print PEFT/LoRA summary
model.print_trainable_parameters()
# ==============================================
# 5. Tokenize Dataset
# ==============================================
log("Tokenizing dataset...")
first_row = dataset[0]
if "text" in first_row:
    text_key = "text"
elif "prompt" in first_row:
    text_key = "prompt"
else:
    text_key = list(first_row.keys())[0]
log(f"Using text key: '{text_key}'")
def tokenize_fn(example):
    # With batched=True, example[text_key] is a list of strings
    tokenized = tokenizer(example[text_key], truncation=True, padding="max_length", max_length=512)
    tokenized["labels"] = tokenized["input_ids"].copy()  # causal LM: labels mirror input_ids so loss/grads exist
    return tokenized
tokenized_dataset = dataset.map(tokenize_fn, batched=True, remove_columns=dataset.column_names)
log("Tokenization complete ✅")
log(f"Tokenized sample: {tokenized_dataset[0]}")
# ==============================================
# 6. Data Collator
# ==============================================
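# With mlm=False the collator batches for causal LM: it rebuilds labels from input_ids and masks
# pad positions to -100. Note: pad_token == eos_token here, so EOS positions are excluded from the loss too.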
data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
# ==============================================
# 7. Training Arguments
# ==============================================
output_dir = "./stephen-lora"
log("Preparing training arguments...")
training_args = TrainingArguments(
    output_dir=output_dir,
    overwrite_output_dir=True,
    per_device_train_batch_size=8,
    gradient_accumulation_steps=2,
    gradient_checkpointing=True,
    warmup_steps=50,
    num_train_epochs=3,
    max_steps=-1,
    learning_rate=1e-4,
    lr_scheduler_type="cosine",
    fp16=True,
    optim="adamw_torch",
    logging_dir="./logs",
    logging_steps=20,
    save_strategy="epoch",
    save_total_limit=2,
    push_to_hub=True,
    hub_model_id="dgtalbug/stephen",  # target repo for the push (otherwise defaults to the output_dir name)
    hub_token=HF_TOKEN,
    hub_strategy="end",
    ddp_find_unused_parameters=False,
    label_names=["labels"]
)
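# Effective batch size per optimizer step: 8 (per device) x 2 (gradient accumulation) = 16 sequences per GPU.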
log("Training arguments ready ✅")
# ==============================================
# 8. Debugging Helper Hooks
# ==============================================
def debug_batch(batch):
    log(f"🔍 Debug batch keys: {list(batch.keys())}")
    log(f"🔍 First input_ids: {batch['input_ids'][0][:10]}")
    log(f"🔍 First labels: {batch['labels'][0][:10]}")
    # Labels are integer tensors, so requires_grad is expected to be False; gradients flow through the logits.
    log(f"🔍 labels.requires_grad? {batch['labels'].requires_grad}")
# ==============================================
# 9. Custom Trainer (safe + debug)
# ==============================================
class SafeTrainer(Trainer):
    def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
        # Debug batch content once
        if self.state.global_step == 0:
            debug_batch(inputs)
        if "labels" not in inputs:
            inputs["labels"] = inputs["input_ids"].clone()
        outputs = model(**inputs)
        loss = outputs.get("loss") if isinstance(outputs, dict) else outputs[0]
        return (loss, outputs) if return_outputs else loss
log("Initializing Trainer...")
trainer = SafeTrainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset,
    data_collator=data_collator
)
log("Trainer initialized ✅")
# ==============================================
# 10. Train & Push
# ==============================================
trainable_params = [n for n, p in model.named_parameters() if p.requires_grad]
log(f"Trainable params count: {len(trainable_params)}")
log(f"First 20 trainable params: {trainable_params[:20]}")
last_ckpt = None
if os.path.isdir(output_dir):
    checkpoints = [d for d in os.listdir(output_dir) if d.startswith("checkpoint-")]
    if checkpoints:
        # Sort numerically so e.g. checkpoint-1000 beats checkpoint-900 (lexical sort would not)
        last_ckpt = os.path.join(output_dir, max(checkpoints, key=lambda d: int(d.split("-")[-1])))
if last_ckpt and os.path.isdir(last_ckpt):
    log(f"Resuming from checkpoint: {last_ckpt}")
    trainer.train(resume_from_checkpoint=last_ckpt)
else:
    log("No checkpoint found — starting fresh training.")
    trainer.train()
log("Training completed ✅")
try:
    log("Pushing fine-tuned model to Hugging Face Hub...")
    # Target repo and token come from TrainingArguments (hub_model_id / hub_token)
    trainer.push_to_hub(commit_message="Stephen LoRA fine-tune")
    log("Model pushed to: https://huggingface.co/dgtalbug/stephen ✅")
except Exception as e:
    log(f"⚠️ Push to hub failed: {e}")