!pip install -q "transformers>=4.44.0" "datasets" "peft>=0.12.0" accelerate bitsandbytes sentencepiece huggingface_hub |
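
# Fine-tune dgtalbug/stable-code-instruct-3b on dgtalbug/stephen-dataset with 8-bit
# quantization and LoRA adapters, then push the trained adapter to the Hugging Face Hub.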
|
import os
from datetime import datetime

import torch
from datasets import load_dataset
from huggingface_hub import login, whoami
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training
from transformers import (
    AutoTokenizer, AutoModelForCausalLM, TrainingArguments,
    Trainer, DataCollatorForLanguageModeling, BitsAndBytesConfig
)
|
def log(msg):
    print(f"[{datetime.now().strftime('%H:%M:%S')}] {msg}")
|
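# Authenticate with the Hugging Face Hub using the HF_TOKEN environment variable.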
HF_TOKEN = os.getenv("HF_TOKEN")
if not HF_TOKEN:
    raise ValueError("❌ HF_TOKEN environment variable not set.")

log("Logging into Hugging Face...")
login(token=HF_TOKEN, add_to_git_credential=True)
log(f"Logged in as: {whoami()['name']} ✅")
|
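# Load the fine-tuning dataset from the Hub.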
dataset_name = "dgtalbug/stephen-dataset"
data_file = "stephen.jsonl"

log(f"Loading dataset: {dataset_name}/{data_file} ...")
dataset = load_dataset(dataset_name, data_files=data_file, split="train")
log(f"Dataset loaded — {len(dataset)} rows")
log(f"First example: {dataset[0]}")
|
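# Load the tokenizer and the base model in 8-bit, with an fp16 fallback if quantized loading fails.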
base_model = "dgtalbug/stable-code-instruct-3b"
log(f"Loading base model: {base_model}...")

tokenizer = AutoTokenizer.from_pretrained(
    base_model,
    token=HF_TOKEN,
    use_fast=True
)
if tokenizer.pad_token is None:
    tokenizer.pad_token = tokenizer.eos_token

bnb_config = BitsAndBytesConfig(
    load_in_8bit=True,
    llm_int8_threshold=6.0
)

try:
    model = AutoModelForCausalLM.from_pretrained(
        base_model,
        token=HF_TOKEN,
        device_map="auto",
        torch_dtype=torch.float16,
        trust_remote_code=True,
        return_dict=True,
        quantization_config=bnb_config
    )
except Exception as e:
    log(f"⚠️ Quantized load failed: {e} — falling back to fp16.")
    model = AutoModelForCausalLM.from_pretrained(
        base_model,
        token=HF_TOKEN,
        device_map="auto",
        torch_dtype=torch.float16,
        trust_remote_code=True,
        return_dict=True
    )

log("Base model loaded ✅")
|
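# Prepare the quantized model for training and attach LoRA adapters to the attention projections.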
log("Configuring LoRA...")

model = prepare_model_for_kbit_training(model)

lora_config = LoraConfig(
    r=16,
    lora_alpha=32,
    target_modules=["q_proj", "v_proj", "k_proj", "o_proj"],
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM"
)

model = get_peft_model(model, lora_config)

trainable_params = [name for name, param in model.named_parameters() if param.requires_grad]
if not trainable_params:
    raise RuntimeError("❌ No parameters set to require gradients! LoRA not applied correctly.")

log(f"✅ Found {len(trainable_params)} trainable parameters.")
log(f"First 20 trainable params: {trainable_params[:20]}")
model.print_trainable_parameters()
|
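# Tokenize the dataset for causal language modeling (labels mirror input_ids).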
log("Tokenizing dataset...")

first_row = dataset[0]
if "text" in first_row:
    text_key = "text"
elif "prompt" in first_row:
    text_key = "prompt"
else:
    text_key = list(first_row.keys())[0]

log(f"Using text key: '{text_key}'")

def tokenize_fn(example):
    tokenized = tokenizer(example[text_key], truncation=True, padding="max_length", max_length=512)
    tokenized["labels"] = tokenized["input_ids"].copy()
    return tokenized

tokenized_dataset = dataset.map(tokenize_fn, batched=True, remove_columns=dataset.column_names)
log("Tokenization complete ✅")
log(f"Tokenized sample: {tokenized_dataset[0]}")

data_collator = DataCollatorForLanguageModeling(tokenizer, mlm=False)
|
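# Training configuration; the Hub repo for the final push is set via hub_model_id.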
output_dir = "./stephen-lora"
log("Preparing training arguments...")

training_args = TrainingArguments(
    output_dir=output_dir,
    overwrite_output_dir=True,
    per_device_train_batch_size=8,
    gradient_accumulation_steps=2,
    gradient_checkpointing=True,
    warmup_steps=50,
    num_train_epochs=3,
    max_steps=-1,
    learning_rate=1e-4,
    lr_scheduler_type="cosine",
    fp16=True,
    optim="adamw_torch",
    logging_dir="./logs",
    logging_steps=20,
    save_strategy="epoch",
    save_total_limit=2,
    push_to_hub=True,
    hub_model_id="dgtalbug/stephen",  # repo used by trainer.push_to_hub() below
    hub_strategy="end",
    ddp_find_unused_parameters=False,
    label_names=["labels"]
)
log("Training arguments ready ✅")
|
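# Debug helper: inspect the first batch passed to compute_loss.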
def debug_batch(batch):
    log(f"🔍 Debug batch keys: {list(batch.keys())}")
    log(f"🔍 First input_ids: {batch['input_ids'][0][:10]}")
    log(f"🔍 First labels: {batch['labels'][0][:10]}")
    log(f"🔍 labels.requires_grad? {batch['labels'].requires_grad}")
|
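# Trainer subclass that guarantees labels are present so the model returns a loss.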
class SafeTrainer(Trainer):
    def compute_loss(self, model, inputs, return_outputs=False, **kwargs):
        # Log the very first batch for a sanity check.
        if self.state.global_step == 0:
            debug_batch(inputs)
        # Guard: if the collator dropped labels, fall back to input_ids.
        if "labels" not in inputs:
            inputs["labels"] = inputs["input_ids"].clone()
        outputs = model(**inputs)
        loss = outputs.get("loss") if isinstance(outputs, dict) else outputs[0]
        return (loss, outputs) if return_outputs else loss


log("Initializing Trainer...")
trainer = SafeTrainer(
    model=model,
    args=training_args,
    train_dataset=tokenized_dataset,
    data_collator=data_collator
)
log("Trainer initialized ✅")
|
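# Re-check trainable parameters, then train, resuming from the latest checkpoint if one exists.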
trainable_params = [n for n, p in model.named_parameters() if p.requires_grad]
log(f"Trainable params count: {len(trainable_params)}")
log(f"First 20 trainable params: {trainable_params[:20]}")

last_ckpt = None
if os.path.isdir(output_dir):
    checkpoints = [d for d in os.listdir(output_dir) if d.startswith("checkpoint-")]
    if checkpoints:
        # Sort numerically so checkpoint-1000 ranks above checkpoint-900.
        last_ckpt = os.path.join(output_dir, sorted(checkpoints, key=lambda c: int(c.split("-")[-1]))[-1])

if last_ckpt and os.path.isdir(last_ckpt):
    log(f"Resuming from checkpoint: {last_ckpt}")
    trainer.train(resume_from_checkpoint=last_ckpt)
else:
    log("No checkpoint found — starting fresh training.")
    trainer.train()

log("Training completed ✅")
|
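# Push the fine-tuned adapter to the Hub.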
try:
    log("Pushing fine-tuned model to Hugging Face Hub...")
    trainer.push_to_hub(token=HF_TOKEN)  # pushes to hub_model_id ("dgtalbug/stephen") from TrainingArguments
    log("Model pushed to: https://huggingface.co/dgtalbug/stephen ✅")
except Exception as e:
    log(f"⚠️ Push to hub failed: {e}")
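

# Optional follow-up (a minimal sketch, not executed as part of training): load the pushed
# adapter for a quick generation check. This assumes the push above succeeded and the adapter
# lives at "dgtalbug/stephen"; the prompt below is only an illustrative example.
#
# from peft import PeftModel
#
# base = AutoModelForCausalLM.from_pretrained(
#     base_model, token=HF_TOKEN, device_map="auto",
#     torch_dtype=torch.float16, trust_remote_code=True
# )
# ft_model = PeftModel.from_pretrained(base, "dgtalbug/stephen", token=HF_TOKEN)
# prompt = "Write a Python function that reverses a string."
# inputs = tokenizer(prompt, return_tensors="pt").to(ft_model.device)
# output_ids = ft_model.generate(**inputs, max_new_tokens=128)
# print(tokenizer.decode(output_ids[0], skip_special_tokens=True))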