|
|
|
"""Gemma3_(4B).ipynb |
|
|
|
Automatically generated by Colab. |
|
|
|
Original file is located at |
|
https://colab.research.google.com/github/unslothai/notebooks/blob/main/nb/Gemma3_(4B).ipynb |
|
|
|
To run this, press "*Runtime*" and press "*Run all*" on a **free** Tesla T4 Google Colab instance! |
|
<div class="align-center"> |
|
<a href="https://unsloth.ai/"><img src="https://github.com/unslothai/unsloth/raw/main/images/unsloth%20new%20logo.png" width="115"></a> |
|
<a href="https://discord.gg/unsloth"><img src="https://github.com/unslothai/unsloth/raw/main/images/Discord button.png" width="145"></a> |
|
<a href="https://docs.unsloth.ai/"><img src="https://github.com/unslothai/unsloth/blob/main/images/documentation%20green%20button.png?raw=true" width="125"></a></a> Join Discord if you need help + ⭐ <i>Star us on <a href="https://github.com/unslothai/unsloth">Github</a> </i> ⭐ |
|
</div> |
|
|
|
To install Unsloth on your own computer, follow the installation instructions in our documentation [here](https://docs.unsloth.ai/get-started/installing-+-updating).
|
|
|
You will learn how to do [data prep](#Data), how to [train](#Train), how to [run the model](#Inference), & [how to save it](#Save) |
|
|
|
### News |
|
|
|
**Read our [Gemma 3 blog](https://unsloth.ai/blog/gemma3) for what's new in Unsloth and our [Reasoning blog](https://unsloth.ai/blog/r1-reasoning) on how to train reasoning models.** |
|
|
|
Visit our docs for all our [model uploads](https://docs.unsloth.ai/get-started/all-our-models) and [notebooks](https://docs.unsloth.ai/get-started/unsloth-notebooks). |
|
|
|
### Installation |
|
""" |
|
"""### Unsloth |
|
|
|
`FastModel` supports loading nearly any model now! This includes Vision and Text models! |
|
""" |
|
|
|
from unsloth import FastModel |
|
import torch |
|
|
|
# 4-bit pre-quantized models Unsloth provides for faster downloads and fewer out-of-memory errors.
fourbit_models = [
|
|
|
"unsloth/gemma-3-1b-it-unsloth-bnb-4bit", |
|
"unsloth/gemma-3-4b-it-unsloth-bnb-4bit", |
|
"unsloth/gemma-3-12b-it-unsloth-bnb-4bit", |
|
"unsloth/gemma-3-27b-it-unsloth-bnb-4bit", |
|
|
|
|
|
"unsloth/Llama-3.1-8B", |
|
"unsloth/Llama-3.2-3B", |
|
"unsloth/Llama-3.3-70B", |
|
"unsloth/mistral-7b-instruct-v0.3", |
|
"unsloth/Phi-4", |
|
] |
|
|
|
model, tokenizer = FastModel.from_pretrained(
    model_name = "unsloth/gemma-3-4b-it",
    max_seq_length = 8192,    # Choose any for long context
    load_in_4bit = False,     # 4-bit quantization to reduce memory use
    load_in_8bit = False,     # A bit more accurate, but uses 2x the memory of 4-bit
    full_finetuning = False,  # Must be False here, since we add LoRA adapters below
)
|
|
|
"""We now add LoRA adapters so we only need to update a small amount of parameters!""" |
|
|
|
model = FastModel.get_peft_model(
    model,
    finetune_vision_layers     = False, # Turn off for text-only finetuning
    finetune_language_layers   = True,  # Should leave on
    finetune_attention_modules = True,
    finetune_mlp_modules       = True,

    r = 64,           # Larger = higher capacity, but might overfit
    lora_alpha = 32,  # Scaling factor; alpha >= r is often recommended
    lora_dropout = 0.1,
    bias = "none",
    random_state = 3407,
)
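
"""As an optional check (using PEFT's convenience method, not part of the original notebook), we can confirm that only a small fraction of parameters will be updated:"""

model.print_trainable_parameters()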
|
|
|
"""<a name="Data"></a> |
|
### Data Prep |
|
We now use the `Gemma-3` format for conversation style finetunes. We use the [FourOhFour/RP_Phase](https://huggingface.co/datasets/FourOhFour/RP_Phase) dataset in ShareGPT style. Gemma-3 renders multi-turn conversations like below:
|
|
|
``` |
|
<bos><start_of_turn>user |
|
Hello!<end_of_turn> |
|
<start_of_turn>model |
|
Hey there!<end_of_turn> |
|
``` |
|
|
|
We use our `get_chat_template` function to get the correct chat template. We support `zephyr, chatml, mistral, llama, alpaca, vicuna, vicuna_old, phi3, llama3, phi4, qwen2.5, gemma3` and more. |
|
""" |
|
|
|
from unsloth.chat_templates import get_chat_template |
|
tokenizer = get_chat_template( |
|
tokenizer, |
|
chat_template = "gemma-3", |
|
) |
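
"""As a quick illustration (a toy conversation of our own, not part of the dataset), the template now renders a two-turn exchange exactly as shown above:"""

print(tokenizer.apply_chat_template(
    [
        {"role": "user",  "content": "Hello!"},
        {"role": "model", "content": "Hey there!"},
    ],
    tokenize = False,  # return the formatted string rather than token ids
))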
|
from datasets import load_dataset |
|
dataset = load_dataset("FourOhFour/RP_Phase", split = "train") |
|
|
|
"""We now use `standardize_data_formats` to try converting datasets to the correct format for finetuning purposes!""" |
|
from unsloth.chat_templates import standardize_data_formats |
|
dataset = standardize_data_formats(dataset) |
|
|
|
"""Let's see how row 100 looks like!""" |
|
dataset[100] |
|
|
|
"""We validate and fix conversations to ensure proper role alternation""" |
|
def validate_and_fix_conversations(examples):
    valid_convs = []
    for conv in examples["conversations"]:
        prev_role = None
        fixed_conv = []
        for turn in conv:
            role = turn.get("role", "").lower()
            # Normalize role names to Gemma-3's "user" / "model" convention first,
            # so the duplicate-turn check below compares like with like
            if role in ["assistant", "bot", "chatbot"]:
                role = "model"
            elif role in ["human", "usr"]:
                role = "user"
            # Skip consecutive turns from the same speaker so roles alternate
            if role == prev_role:
                continue
            fixed_conv.append({"role": role, "content": turn.get("content", "")})
            prev_role = role
        # Only keep conversations that start with a user turn
        if fixed_conv and fixed_conv[0]["role"] == "user":
            valid_convs.append(fixed_conv)
    return {"conversations": valid_convs}
|
|
|
|
|
dataset = dataset.map(
    validate_and_fix_conversations,
    batched = True,
    remove_columns = dataset.column_names,  # rows may be filtered out, so drop the old columns
)
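
"""As a quick, purely illustrative sanity check, here is the helper applied to a hypothetical toy batch (consecutive same-speaker turns are dropped and role names are normalized):"""

toy_batch = {
    "conversations": [[
        {"role": "human", "content": "Hi!"},
        {"role": "assistant", "content": "Hello!"},
        {"role": "assistant", "content": "How can I help?"},
    ]]
}
print(validate_and_fix_conversations(toy_batch))
# Expected: one conversation starting with a "user" turn, keeping only the first "model" reply.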
|
|
|
"""We now have to apply the chat template for `Gemma-3` onto the conversations, and save it to `text`""" |
|
def apply_chat_template(examples):
    # tokenize = False so we store formatted strings (not token ids) in "text",
    # which is what SFTConfig's dataset_text_field expects
    texts = tokenizer.apply_chat_template(examples["conversations"], tokenize = False)
    return { "text" : texts }
|
|
|
dataset = dataset.map(apply_chat_template, batched = True) |
|
|
|
"""Let's see how the chat template did! Notice `Gemma-3` default adds a `<bos>`!""" |
|
dataset[100]["text"] |
|
|
|
"""<a name="Train"></a> |
|
### Train the model |
|
Now let's use Huggingface TRL's `SFTTrainer`! More docs here: [TRL SFT docs](https://huggingface.co/docs/trl/sft_trainer). We train for 2 full epochs here; for a quick test run, set `max_steps = 60` in `SFTConfig` and remove `num_train_epochs` instead.
|
""" |
|
|
|
from trl import SFTTrainer, SFTConfig |
|
trainer = SFTTrainer( |
|
model = model, |
|
tokenizer = tokenizer, |
|
train_dataset = dataset, |
|
eval_dataset = None, |
|
    args = SFTConfig(
        dataset_text_field = "text",
        per_device_train_batch_size = 2,
        gradient_accumulation_steps = 4,  # Effective batch size = 2 * 4 = 8
        warmup_steps = 35,
        num_train_epochs = 2,
        learning_rate = 1e-5,
        logging_steps = 1,
        optim = "paged_adamw_8bit",
        weight_decay = 0.02,
        lr_scheduler_type = "linear",
        seed = 3407,
        report_to = "wandb",  # Requires a Weights & Biases login; use "none" to disable logging
    ),
|
) |
|
|
|
"""We also use Unsloth's `train_on_completions` method to only train on the assistant outputs and ignore the loss on the user's inputs. This helps increase accuracy of finetunes!""" |
|
|
|
from unsloth.chat_templates import train_on_responses_only |
|
trainer = train_on_responses_only( |
|
trainer, |
|
instruction_part = "<start_of_turn>user\n", |
|
response_part = "<start_of_turn>model\n", |
|
) |
|
|
|
"""Let's verify masking the instruction part is done! Let's print the 100th row again:""" |
|
|
|
tokenizer.decode(trainer.train_dataset[100]["input_ids"]) |
|
|
|
"""Now let's print the masked out example - you should see only the answer is present:""" |
|
|
|
tokenizer.decode([tokenizer.pad_token_id if x == -100 else x for x in trainer.train_dataset[100]["labels"]]).replace(tokenizer.pad_token, " ") |
|
|
|
|
|
# Show current memory stats before training
gpu_stats = torch.cuda.get_device_properties(0)
|
start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3) |
|
max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3) |
|
print(f"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.") |
|
print(f"{start_gpu_memory} GB of memory reserved.") |
|
|
|
"""Let's train the model! To resume a training run, set `trainer.train(resume_from_checkpoint = True)`""" |
|
|
|
trainer_stats = trainer.train() |
|
|
|
|
|
# Show final memory and time stats
used_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
|
used_memory_for_lora = round(used_memory - start_gpu_memory, 3) |
|
used_percentage = round(used_memory / max_memory * 100, 3) |
|
lora_percentage = round(used_memory_for_lora / max_memory * 100, 3) |
|
print(f"{trainer_stats.metrics['train_runtime']} seconds used for training.") |
|
print( |
|
f"{round(trainer_stats.metrics['train_runtime']/60, 2)} minutes used for training." |
|
) |
|
print(f"Peak reserved memory = {used_memory} GB.") |
|
print(f"Peak reserved memory for training = {used_memory_for_lora} GB.") |
|
print(f"Peak reserved memory % of max memory = {used_percentage} %.") |
|
print(f"Peak reserved memory for training % of max memory = {lora_percentage} %.") |