Finetuning with Unsloth: BeerBot#
Source for this notebook: https://colab.research.google.com/drive/1T5-zKWM_5OD21QHwXHiV9ixTRR7k3iB9?usp=sharing
%pip install unsloth tf-keras
Model#
We will use a 7B model from Mistral; in its 4-bit quantized form it is small enough to run on most regular laptops. https://ollama.com/library/mistral
from unsloth import FastLanguageModel
import torch
max_seq_length = 2048 # Choose any! We auto support RoPE Scaling internally!
dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
load_in_4bit = True # Use 4bit quantization to reduce memory usage. Can be False.
model, tokenizer = FastLanguageModel.from_pretrained(
# model_name = "unsloth/Mistral-Nemo-Instruct-2407-bnb-4bit", # More models at https://huggingface.co/unsloth
# model_name = 'unsloth/mistral-7b-instruct-v0.3-bnb-4bit',
model_name = 'unsloth/mistral-7b-v0.3',
max_seq_length = max_seq_length,
dtype = dtype,
load_in_4bit = load_in_4bit,
# token = "hf_...", # use one if using gated models like meta-llama/Llama-2-7b-hf
)
We now add LoRA adapters so we only need to update 1 to 10% of all parameters!
model = FastLanguageModel.get_peft_model(
model,
r = 16, # Choose any number > 0 ! Suggested 8, 16, 32, 64, 128
target_modules = ["q_proj", "k_proj", "v_proj", "o_proj",
"gate_proj", "up_proj", "down_proj",],
lora_alpha = 16,
lora_dropout = 0, # Supports any, but = 0 is optimized
bias = "none", # Supports any, but = "none" is optimized
# [NEW] "unsloth" uses 30% less VRAM, fits 2x larger batch sizes!
use_gradient_checkpointing = "unsloth", # True or "unsloth" for very long context
random_state = 3407,
use_rslora = False, # We support rank stabilized LoRA
loftq_config = None, # And LoftQ
)
Unsloth 2025.3.19 patched 32 layers with 32 QKV layers, 32 O layers and 32 MLP layers.
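As a quick sanity check on the "1 to 10% of all parameters" claim, we can count the trainable parameters with standard PyTorch attributes (not part of the original notebook):
trainable = sum(p.numel() for p in model.parameters() if p.requires_grad)
total = sum(p.numel() for p in model.parameters())
# Note: with 4-bit quantization the frozen weights are stored packed, so the
# total (and hence the percentage) is only a rough figure.
print(f"Trainable: {trainable:,} of {total:,} parameters ({100 * trainable / total:.2f}%)")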
Data#
See the chapter Dataset: Beer Bot
from datasets import load_dataset # https://huggingface.co/docs/datasets/loading
dataset = load_dataset('json', data_files='data/beer_conversations.jsonl', split='train')
dataset
Dataset({
features: ['conversations'],
num_rows: 1297
})
# Inspect the dataset
sample = dataset[0]
sample
{'conversations': [{'role': 'user',
'content': 'Malziges Altbier mit dezentem Hopfenaroma und -geschmack'},
{'role': 'assistant',
'content': '{"Rezeptquelle": "www.maischemalzundmehr.de", "ExportVersion": "2.0", "Name": "Alt 43", "Datum": "14.02.2011", "Sorte": "Altbier", "Autor": "muldengold", "Ausschlagwuerze": 20, "Stammwuerze": 14.5, "Bittere": 25, "Farbe": 35, "Alkohol": 6, "Kurzbeschreibung": "Malziges Altbier mit dezentem Hopfenaroma und -geschmack", "Malze": [{"Name": "Münchner Malz", "Menge": 2.91, "Einheit": "kg"}, {"Name": "Pilsner Malz", "Menge": 1.09, "Einheit": "kg"}, {"Name": "Röstmalz", "Menge": 36, "Einheit": "g"}], "Maischform": "infusion", "Hauptguss": 13.454545454545455, "Einmaischtemperatur": 50, "Rasten": [{"Temperatur": 52, "Zeit": 15}, {"Temperatur": 65, "Zeit": 70}, {"Temperatur": 78, "Zeit": 10}], "Abmaischtemperatur": 78, "Nachguss": 16.727272727272727, "Kochzeit_Wuerze": 70, "Hopfenkochen": [{"Sorte": "Saaz", "Menge": 18, "Alpha": 4.4, "Zeit": 70, "Typ": "Vorderwuerze"}, {"Sorte": "Northern Brewer", "Menge": 9, "Alpha": 10, "Zeit": 60, "Typ": "Standard"}, {"Sorte": "Saaz", "Menge": 9, "Alpha": 4.4, "Zeit": 5, "Typ": "Standard"}], "Hefe": "WYEAST #1007 (German Ale)", "Gaertemperatur": "16", "Endvergaerungsgrad": 74, "Karbonisierung": 5}\n'}]}
sample['conversations']
[{'role': 'user',
'content': 'Malziges Altbier mit dezentem Hopfenaroma und -geschmack'},
{'role': 'assistant',
'content': '{"Rezeptquelle": "www.maischemalzundmehr.de", "ExportVersion": "2.0", "Name": "Alt 43", "Datum": "14.02.2011", "Sorte": "Altbier", "Autor": "muldengold", "Ausschlagwuerze": 20, "Stammwuerze": 14.5, "Bittere": 25, "Farbe": 35, "Alkohol": 6, "Kurzbeschreibung": "Malziges Altbier mit dezentem Hopfenaroma und -geschmack", "Malze": [{"Name": "Münchner Malz", "Menge": 2.91, "Einheit": "kg"}, {"Name": "Pilsner Malz", "Menge": 1.09, "Einheit": "kg"}, {"Name": "Röstmalz", "Menge": 36, "Einheit": "g"}], "Maischform": "infusion", "Hauptguss": 13.454545454545455, "Einmaischtemperatur": 50, "Rasten": [{"Temperatur": 52, "Zeit": 15}, {"Temperatur": 65, "Zeit": 70}, {"Temperatur": 78, "Zeit": 10}], "Abmaischtemperatur": 78, "Nachguss": 16.727272727272727, "Kochzeit_Wuerze": 70, "Hopfenkochen": [{"Sorte": "Saaz", "Menge": 18, "Alpha": 4.4, "Zeit": 70, "Typ": "Vorderwuerze"}, {"Sorte": "Northern Brewer", "Menge": 9, "Alpha": 10, "Zeit": 60, "Typ": "Standard"}, {"Sorte": "Saaz", "Menge": 9, "Alpha": 4.4, "Zeit": 5, "Typ": "Standard"}], "Hefe": "WYEAST #1007 (German Ale)", "Gaertemperatur": "16", "Endvergaerungsgrad": 74, "Karbonisierung": 5}\n'}]
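Each assistant message is itself a recipe serialized as JSON. A quick validation pass over the whole dataset (a sketch, assuming every conversation ends with an assistant message, as in the sample above):
import json

invalid = 0
for example in dataset:
    reply = example["conversations"][-1]["content"]
    try:
        json.loads(reply)
    except json.JSONDecodeError:
        invalid += 1
print(f"{invalid} of {len(dataset)} assistant replies are not valid JSON")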
Format the data with the matching chat template#
https://docs.unsloth.ai/basics/chat-templates
To finetune a Mistral model, we have to use the Mistral chat template.
from unsloth.chat_templates import get_chat_template
tokenizer = get_chat_template(
tokenizer,
chat_template = "mistral",
)
# This function merges the messages of each conversation into a single text
# using the chat template. The next step adds these texts to the dataset.
def formatting_prompts_func(examples):
convos = examples["conversations"]
texts = [tokenizer.apply_chat_template(convo, tokenize = False, add_generation_prompt = False) for convo in convos]
return { "text" : texts, }
# Convert it
# This will add the column 'text' to the dataset.
dataset = dataset.map(formatting_prompts_func, batched = True,)
sample = dataset[0]
sample.keys()
dict_keys(['conversations', 'text'])
sample['text']
'<s>[INST] Malziges Altbier mit dezentem Hopfenaroma und -geschmack [/INST]{"Rezeptquelle": "www.maischemalzundmehr.de", "ExportVersion": "2.0", "Name": "Alt 43", "Datum": "14.02.2011", "Sorte": "Altbier", "Autor": "muldengold", "Ausschlagwuerze": 20, "Stammwuerze": 14.5, "Bittere": 25, "Farbe": 35, "Alkohol": 6, "Kurzbeschreibung": "Malziges Altbier mit dezentem Hopfenaroma und -geschmack", "Malze": [{"Name": "Münchner Malz", "Menge": 2.91, "Einheit": "kg"}, {"Name": "Pilsner Malz", "Menge": 1.09, "Einheit": "kg"}, {"Name": "Röstmalz", "Menge": 36, "Einheit": "g"}], "Maischform": "infusion", "Hauptguss": 13.454545454545455, "Einmaischtemperatur": 50, "Rasten": [{"Temperatur": 52, "Zeit": 15}, {"Temperatur": 65, "Zeit": 70}, {"Temperatur": 78, "Zeit": 10}], "Abmaischtemperatur": 78, "Nachguss": 16.727272727272727, "Kochzeit_Wuerze": 70, "Hopfenkochen": [{"Sorte": "Saaz", "Menge": 18, "Alpha": 4.4, "Zeit": 70, "Typ": "Vorderwuerze"}, {"Sorte": "Northern Brewer", "Menge": 9, "Alpha": 10, "Zeit": 60, "Typ": "Standard"}, {"Sorte": "Saaz", "Menge": 9, "Alpha": 4.4, "Zeit": 5, "Typ": "Standard"}], "Hefe": "WYEAST #1007 (German Ale)", "Gaertemperatur": "16", "Endvergaerungsgrad": 74, "Karbonisierung": 5}\n</s>'
This matches Mistral's chat template. Compare the relevant excerpt of the Ollama template at https://ollama.com/library/mistral/blobs/491dfa501e59:
[INST] {{ if .System }}{{ .System }}
{{ end }}{{ .Prompt }}[/INST]
{{- end }} {{ .Response }}
{{- if .Response }}</s>
{{- end }}
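Two quick checks on the formatted dataset, not part of the original notebook: that the Mistral markers are present in the sample, and that the longest formatted conversation fits within max_seq_length.
assert sample["text"].startswith("<s>[INST] ") and "[/INST]" in sample["text"]
assert sample["text"].rstrip().endswith("</s>")

# Token counts per formatted text (add_special_tokens=False avoids a second BOS);
# all of them should stay below max_seq_length = 2048.
lengths = [len(tokenizer(text, add_special_tokens=False)["input_ids"]) for text in dataset["text"]]
print(f"Longest conversation: {max(lengths)} tokens")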
Train the model#
Now let's use Hugging Face TRL's SFTTrainer (see the TRL SFT docs: https://huggingface.co/docs/trl/sft_trainer). We train for only 60 steps to speed things up; for a full run, set num_train_epochs = 1 and lift the step limit with max_steps = None. Unsloth also supports TRL's DPOTrainer.
from trl import SFTTrainer
from transformers import TrainingArguments, DataCollatorForSeq2Seq
from unsloth import is_bfloat16_supported
trainer = SFTTrainer(
model = model,
tokenizer = tokenizer,
train_dataset = dataset,
dataset_text_field = "text",
max_seq_length = max_seq_length,
data_collator = DataCollatorForSeq2Seq(tokenizer = tokenizer),
dataset_num_proc = 2,
packing = False, # Can make training 5x faster for short sequences.
args = TrainingArguments(
per_device_train_batch_size = 2,
gradient_accumulation_steps = 4,
warmup_steps = 5,
# num_train_epochs = 1, # Set this for 1 full training run.
max_steps = 60,
learning_rate = 2e-4,
fp16 = not is_bfloat16_supported(),
bf16 = is_bfloat16_supported(),
logging_steps = 1,
optim = "adamw_8bit",
weight_decay = 0.01,
lr_scheduler_type = "linear",
seed = 3407,
output_dir = "outputs",
report_to = "none", # Use this for WandB etc
),
)
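With these settings, the effective batch size and the share of the dataset seen in 60 steps work out as follows (simple arithmetic, not measured):
effective_batch_size = 2 * 4               # per_device_train_batch_size * gradient_accumulation_steps
examples_seen = effective_batch_size * 60  # 480 conversations in 60 steps
print(f"About {examples_seen / 1297:.0%} of the 1297 conversations, i.e. well under one epoch")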
#@title Show current memory stats
gpu_stats = torch.cuda.get_device_properties(0)
start_gpu_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
max_memory = round(gpu_stats.total_memory / 1024 / 1024 / 1024, 3)
print(f"GPU = {gpu_stats.name}. Max memory = {max_memory} GB.")
print(f"{start_gpu_memory} GB of memory reserved.")
GPU = NVIDIA A100-SXM4-80GB MIG 2g.20gb. Max memory = 19.5 GB.
7.0 GB of memory reserved.
trainer_stats = trainer.train()
#@title Show final memory and time stats
used_memory = round(torch.cuda.max_memory_reserved() / 1024 / 1024 / 1024, 3)
used_memory_for_lora = round(used_memory - start_gpu_memory, 3)
used_percentage = round(used_memory / max_memory * 100, 3)
lora_percentage = round(used_memory_for_lora / max_memory * 100, 3)
print(f"{trainer_stats.metrics['train_runtime']} seconds used for training.")
print(f"{round(trainer_stats.metrics['train_runtime']/60, 2)} minutes used for training.")
print(f"Peak reserved memory = {used_memory} GB.")
print(f"Peak reserved memory for training = {used_memory_for_lora} GB.")
print(f"Peak reserved memory % of max memory = {used_percentage} %.")
print(f"Peak reserved memory for training % of max memory = {lora_percentage} %.")
536.9347 seconds used for training.
8.95 minutes used for training.
Peak reserved memory = 7.0 GB.
Peak reserved memory for training = 0.0 GB.
Peak reserved memory % of max memory = 35.897 %.
Peak reserved memory for training % of max memory = 0.0 %.
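trainer_stats is the TrainOutput returned by trainer.train(); besides the runtime used above, its metrics also include the average training loss:
print(f"Final training loss: {trainer_stats.metrics['train_loss']:.4f}")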
Inference#
Let's run the model! You can change the user message to any beer description you like.
from unsloth.chat_templates import get_chat_template
tokenizer = get_chat_template(
tokenizer,
chat_template = "mistral",
)
FastLanguageModel.for_inference(model) # Enable native 2x faster inference
messages = [
{"role": "user", "content": "Kristallklares, würziges Brauwasser aus dem Main, Qualitätsmalz aus der Rhein-Main-Region und Aromahopfen aus der S-Bahn-Haltestelle Marktplatz machen KI-Lab-Bier so wohlschmeckend."},
]
inputs = tokenizer.apply_chat_template(
messages,
tokenize = True,
add_generation_prompt = True, # Must add for generation
return_tensors = "pt",
).to("cuda")
from transformers import TextStreamer
text_streamer = TextStreamer(tokenizer, skip_prompt = True)
_ = model.generate(input_ids = inputs, streamer = text_streamer, max_new_tokens = 768,
use_cache = True, temperature = 1.5, min_p = 0.1)
{"Rezeptquelle": "www.maischemalzundmehr.de", "ExportVersion": "2.0", "Name": "KI-Lab-Bier", "Datum": "12.02.2019", "Sorte": "Pale Ale", "Autor": "KI-Lab", "Ausschlagwuerze": 20, "Stammwuerze": 12.5, "Bittere": 30, "Farbe": 10, "Alkohol": 5.2, "Kurzbeschreibung": "Kristallklares, w\u00fcrziges Brauwasser aus dem Main, Qualit\u00e4tsmalz aus der Rhein-Main-Region und Aromahopfen aus der S-Bahn-Haltestelle Marktplatz machen KI-Lab-Bier so wohlschmeckend.", "Malze": [{"Name": "Pilsner Malz", "Menge": 3.2, "Einheit": "kg"}, {"Name": "Weizenmalz hell", "Menge": 1.2, "Einheit": "kg"}, {"Name": "Carahell", "Menge": 120, "Einheit": "g"}], "Maischform": "infusion", "Hauptguss": 14.4, "Einmaischtemperatur": 57, "Rasten": [{"Temperatur": 57, "Zeit": 10}, {"Temperatur": 63, "Zeit": 60}, {"Temperatur": 72, "Zeit": 10}], "Abmaischtemperatur": 78, "Nachguss": 14.4, "Kochzeit_Wuerze": 90, "Hopfenkochen": [{"Sorte": "Cascade", "Menge": 12, "Alpha": 6.5, "Zeit": 90, "Typ": "Vorderwuerze"}, {"Sorte": "Cascade", "Menge": 12, "Alpha": 6.5, "Zeit": 70, "Typ": "Standard"}, {"Sorte": "Cascade", "Menge": 12, "Alpha": 6.5, "Zeit": 5, "Typ": "Standard"}, {"Sorte": "Cascade", "Menge": 12, "Alpha": 6.5, "Zeit": 0, "Typ": "Whirlpool"}], "Stopfhopfen": [{"Sorte": "Cascade", "Menge": 12}], "Hefe": "Safale US-05", "Gaertemperatur": "18", "Endvergaerungsgrad": 75, "Karbonisierung": 5, "Anmerkung_Autor": "Das Bier wurde mit dem KI-Lab-Brauwasser gebraut. Das Brauwasser wurde mit dem KI-Lab-Wasserfilter aufbereitet. Das Wasser wurde mit dem KI-Lab-Wasseranalyseger\u00e4t aufbereitet. Das Malz wurde mit dem KI-
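The streamed output above is cut off at max_new_tokens. One way to check whether the model produces a complete, valid recipe is to generate without the streamer and try to parse the result; this sketch reuses the inputs tensor from above and assumes a budget of 1024 new tokens is enough for a full recipe:
import json

outputs = model.generate(input_ids = inputs, max_new_tokens = 1024, use_cache = True)
generated = tokenizer.decode(outputs[0][inputs.shape[-1]:], skip_special_tokens = True)
try:
    recipe = json.loads(generated)
    print("Valid recipe JSON with fields:", list(recipe.keys())[:5], "...")
except json.JSONDecodeError as err:
    print("Output is not valid JSON:", err)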
Saving and loading finetuned models#
To save the final model as LoRA adapters, either use Hugging Face's push_to_hub for an online save or save_pretrained for a local save.
[NOTE] This ONLY saves the LoRA adapters, not the full model. To export merged 16-bit weights or GGUF, see the Unsloth documentation at https://docs.unsloth.ai/.
model.save_pretrained("lora_model_beer") # Local saving
tokenizer.save_pretrained("lora_model_beer")
# model.push_to_hub("your_name/lora_model", token = "...") # Online saving
# tokenizer.push_to_hub("your_name/lora_model", token = "...") # Online saving
('lora_model_beer/tokenizer_config.json',
'lora_model_beer/special_tokens_map.json',
'lora_model_beer/tokenizer.model',
'lora_model_beer/added_tokens.json',
'lora_model_beer/tokenizer.json')
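To use the adapters later, the same Unsloth loading call from the top of this notebook can point at the saved directory (a sketch mirroring that earlier code):
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    model_name = "lora_model_beer",  # the local adapter directory saved above
    max_seq_length = max_seq_length,
    dtype = dtype,
    load_in_4bit = load_in_4bit,
)
FastLanguageModel.for_inference(model)  # enable Unsloth's faster inference path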