"""
|
||
2025.6.1
|
||
2025.6.2
|
||
4.52.4
|
||
0.18.2
|
||
__UNSLOTH_VERSIONING__
|
||
"""
|
||
from torch import Tensor
|
||
import torch
|
||
import torch.nn as nn
|
||
from torch.nn import functional as F
|
||
from trl.trainer.grpo_trainer import (Any, AutoModelForCausalLM, AutoModelForSequenceClassification, AutoTokenizer, DataLoader, Dataset, FSDP, GRPOConfig, GRPOTrainer, GenerationConfig, IterableDataset, Optional, PeftConfig, PreTrainedModel, PreTrainedTokenizerBase, RepeatSampler, RewardFunc, Sampler, SyncRefModelCallback, Trainer, TrainerCallback, Union, VLLMClient, _ForwardRedirection, apply_chat_template, broadcast_object_list, create_reference_model, datasets, defaultdict, deque, disable_dropout_in_model, gather, gather_object, generate_model_card, get_comet_experiment_url, identity, is_conversational, is_datasets_available, is_deepspeed_zero3_enabled, is_liger_kernel_available, is_peft_available, is_peft_model, is_rich_available, is_vllm_available, is_wandb_available, maybe_apply_chat_template, nanmax, nanmin, nanstd, nn, nullcontext, os, pad, partial, prepare_deepspeed, prepare_fsdp, print_prompt_completions_sample, profiling_context, profiling_decorator, seed_worker, set_seed, shuffle_tensor_dict, split_tensor_dict, textwrap, torch, transformers, unwrap_model_for_generation, version, warnings, Any, FSDP, Union, apply_chat_template, broadcast_object_list, gather, gather_object, is_conversational, maybe_apply_chat_template, nanstd, nn, nullcontext, os, pad, profiling_context, torch, unwrap_model_for_generation, warnings, os, torch, transformers, FSDP, nn, os, GRPOTrainer, Trainer, gather, os, torch)
|
||
|
||
|
||
import os
|
||
from typing import *
|
||
from dataclasses import dataclass, field
|
||
from packaging.version import Version
|
||
import torch
|
||
import numpy as np
|
||
from contextlib import nullcontext
|
||
from torch.nn import functional as F
|
||
from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling as TransformersDataCollatorForLanguageModeling
|
||
|
||
torch_compile_options = {
    "epilogue_fusion"   : True,
    "max_autotune"      : False,
    "shape_padding"     : True,
    "trace.enabled"     : False,
    "triton.cudagraphs" : False,
}

@torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options)
def selective_log_softmax(logits, index):
    logits = logits.to(torch.float32)
    selected_logits = torch.gather(logits, dim = -1, index = index.unsqueeze(-1)).squeeze(-1)
    # Loop to reduce peak memory consumption:
    # logsumexp_values = torch.stack([torch.logsumexp(lg, dim=-1) for lg in logits])
    logsumexp_values = torch.logsumexp(logits, dim = -1)
    per_token_logps = selected_logits - logsumexp_values # log_softmax(x_i) = x_i - logsumexp(x)
    return per_token_logps
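
# A minimal usage sketch (the shapes here are assumptions for illustration): given
# `logits` of shape (batch, seq_len, vocab_size) and integer `index` of shape
# (batch, seq_len), the function returns per-token log-probabilities of shape
# (batch, seq_len) without materializing a full (batch, seq_len, vocab_size)
# log-softmax tensor.
#
#   logits = torch.randn(2, 5, 1000)
#   token_ids = torch.randint(0, 1000, (2, 5))
#   per_token_logps = selective_log_softmax(logits, token_ids)  # shape (2, 5)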

def grpo_compute_loss(
    ref_logits,
    new_logits,
    old_logits,
    input_ids,
    mask,
    beta,
    advantages,
    **kwargs
):
    # Set defaults for optional arguments
    loss_type             = kwargs.get("loss_type", "bnpo")
    epsilon_low           = kwargs.get("epsilon_low", 0.2)
    epsilon_high          = kwargs.get("epsilon_high", 0.2)
    max_completion_length = kwargs.get("max_completion_length", 8192)
    delta                 = kwargs.get("delta", None)

    # All Unsloth Zoo code licensed under LGPLv3
    new_logits = new_logits.to(torch.float32)
    input_ids  = input_ids.unsqueeze(-1)

    # x_i - logsumexp(x_i)
    with torch.no_grad():
        if beta != 0.0:
            assert ref_logits is not None, "ref_logits should not be None when beta != 0.0"
            ref_logits = ref_logits.to(torch.float32)
            ref_x = torch.gather(ref_logits, dim = -1, index = input_ids).squeeze(-1)
            ref   = ref_x - torch.logsumexp(ref_logits, dim = -1)
        if old_logits is not None:
            old_x = torch.gather(old_logits, dim = -1, index = input_ids).squeeze(-1)
            old   = old_x - torch.logsumexp(old_logits, dim = -1)
    new_x = torch.gather(new_logits, dim = -1, index = input_ids).squeeze(-1)
    new   = new_x - torch.logsumexp(new_logits, dim = -1)

    # Reverse KL
    # Note that this is a low variance, low bias estimator for the KL divergence, as used in the GRPO paper
    if beta != 0.0:
        kl_i = torch.exp(ref - new) - (ref - new) - 1.0
    else:
        kl_i = 0.0 # Set to 0 so it does not affect the downstream computation
    # Full correct reverse KL divergence?? Missing term maybe?
    # kl_i = torch.exp(new) * kl_i

    # Below is forward KL (normal KL)
    # kl_i = torch.exp(old) * (old - new)
    if old_logits is not None:
        coef_1 = torch.exp(new - old)
    else:
        coef_1 = torch.exp(new - new.detach())
    coef_2 = torch.clamp(coef_1, 1 - epsilon_low, 1 + epsilon_high)

    if delta is not None:
        loss_1 = torch.clamp(coef_1, max = delta) * advantages.unsqueeze(1)
    else:
        loss_1 = coef_1 * advantages.unsqueeze(1)

    # Must detach - otherwise gradients are not propagated correctly!
    # exp(x - x) == 1
    # loss_i = torch.exp(new - new.detach()) * advantages.unsqueeze(1)

    loss_2 = coef_2 * advantages.unsqueeze(1)
    loss_i = -torch.min(loss_1, loss_2)
    if beta != 0.0:
        loss_i = loss_i + beta * kl_i

    mask = mask.to(torch.float32)
    n_mask_per_reward = mask.sum(1)

    # https://github.com/huggingface/trl/blob/main/trl/trainer/grpo_trainer.py#L1363-L1370
    if loss_type == "grpo":
        loss = ((loss_i * mask).sum(-1) / mask.sum(-1).clamp(min = 1.0)).mean()
    elif loss_type == "bnpo":
        loss = (loss_i * mask).sum() / mask.sum().clamp(min = 1.0)
    elif loss_type == "dr_grpo":
        loss = (loss_i * mask).sum() / (loss_i.size(0) * max_completion_length)
    else:
        raise ValueError(f"Unknown loss type: {loss_type}")

    # loss = (loss_i * mask).sum() / mask.sum()

    # Also compute metrics, which are folded in
    with torch.inference_mode():
        completion_length = n_mask_per_reward.mean()
        mean_kl_per_reward = (kl_i * mask).sum(1) / n_mask_per_reward
        mean_kl = mean_kl_per_reward.mean()
    pass

    return loss, completion_length, mean_kl
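
# A worked sketch of calling grpo_compute_loss directly (all shapes and values are
# illustrative assumptions: B completions, T completion tokens, V vocab entries).
# Per token this computes the PPO-style clipped objective
#   -min(r * A, clip(r, 1 - eps_low, 1 + eps_high) * A)
# with r the new/old probability ratio, plus beta times the k3 KL estimator
# exp(ref - new) - (ref - new) - 1 when beta != 0.
#
#   B, T, V = 4, 16, 1000
#   new_logits = torch.randn(B, T, V, requires_grad = True)
#   ref_logits = torch.randn(B, T, V)
#   ids        = torch.randint(0, V, (B, T))
#   mask       = torch.ones(B, T)
#   advantages = torch.randn(B)
#   loss, completion_length, mean_kl = grpo_compute_loss(
#       ref_logits, new_logits, None, ids, mask, 0.04, advantages,
#   )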

class UnslothEfficientGRPO(torch.autograd.Function):
    # All Unsloth Zoo code licensed under LGPLv3
    @staticmethod
    def forward(ctx, _new_hidden_states, _old_hidden_states, _ref_hidden_states, lm_head, _input_ids, _mask, _advantages, beta, scaler = None, n_chunks = 1, extra_kwargs = None):
        if extra_kwargs is None:
            extra_kwargs = {}
        def compute_loss(new_hidden_states, old_hidden_states, ref_hidden_states, input_ids, mask, advantages, scaling):
            new_logits = torch.matmul(new_hidden_states, lm_head.t())
            new_logits = new_logits[:, :-1, :] # exclude the last logit: it corresponds to the next token pred
            with torch.no_grad():
                ref_logits = torch.matmul(ref_hidden_states, lm_head.t())
                ref_logits = ref_logits[:, :-1, :] # exclude the last logit: it corresponds to the next token pred
                old_logits = None
                if old_hidden_states is not None:
                    old_logits = torch.matmul(old_hidden_states, lm_head.t())
                    old_logits = old_logits[:, :-1, :] # exclude the last logit: it corresponds to the next token pred
            # if old_hidden_states is not None:
            #     old_logits = torch.matmul(old_hidden_states, lm_head.t()) # last logit already excluded
            #     old_logits = old_logits[:, :-1, :] # exclude the last logit: it corresponds to the next token pred
            # else:
            #     old_logits = None
            loss, completion_length, mean_kl = grpo_compute_loss(
                ref_logits, new_logits, old_logits, input_ids, mask, beta, advantages, **extra_kwargs
            )

            # Scale loss if needed for mixed precision training
            scaled_loss = loss * scaling
            # Must return loss.detach() in the aux output, otherwise autograd uses 2x VRAM
            return scaled_loss, (loss.detach(), completion_length, mean_kl,)
        pass

        device = _new_hidden_states.device
        grad_inputs = torch.empty_like(_new_hidden_states)
        accumulated_loss              = torch.zeros(1, device = device)
        accumulated_completion_length = torch.zeros(1, device = device)
        accumulated_mean_kl           = torch.zeros(1, device = device)

        def accumulate_chunk(new_hidden_states_j, old_hidden_states_j, ref_hidden_states_j, input_ids_j, mask_j, advantages_j, scaling):
            (chunk_grad_input,), (chunk_loss, (unscaled_loss, chunk_completion_length, chunk_mean_kl,)) = torch.func.grad_and_value(
                compute_loss,
                argnums = (0,),
                has_aux = True,
            )(new_hidden_states_j, old_hidden_states_j, ref_hidden_states_j, input_ids_j, mask_j, advantages_j, scaling)
            accumulated_loss             .add_(unscaled_loss)
            accumulated_completion_length.add_(chunk_completion_length)
            accumulated_mean_kl          .add_(chunk_mean_kl)
            return chunk_grad_input
        pass

        accumulate_chunk = torch.compile(
            accumulate_chunk,
            fullgraph = True,
            options = torch_compile_options,
        )

        grad_inputs_chunks = torch.chunk(grad_inputs,        chunks = n_chunks, dim = 0)
        new_hidden_states  = torch.chunk(_new_hidden_states, chunks = n_chunks, dim = 0)
        if _old_hidden_states is not None:
            old_hidden_states = torch.chunk(_old_hidden_states, chunks = n_chunks, dim = 0)
        else:
            old_hidden_states = [None] * n_chunks
        ref_hidden_states = torch.chunk(_ref_hidden_states, chunks = n_chunks, dim = 0)
        input_ids         = torch.chunk(_input_ids,         chunks = n_chunks, dim = 0)
        mask              = torch.chunk(_mask,               chunks = n_chunks, dim = 0)
        advantages        = torch.chunk(_advantages,         chunks = n_chunks, dim = 0)

        # Get mixed precision scaling if seen
        scaling = scaler.get_scale() if scaler is not None else 1.0

        # Force torch.compile to use dynamic shapes for the seqlen dim
        mark_dynamic = lambda x: torch._dynamo.mark_dynamic(x, 1)

        for (grad_inputs_j, new_hidden_states_j, old_hidden_states_j, ref_hidden_states_j, input_ids_j, mask_j, advantages_j,) in \
            zip(grad_inputs_chunks, new_hidden_states, old_hidden_states, ref_hidden_states, input_ids, mask, advantages):

            mark_dynamic(new_hidden_states_j)
            mark_dynamic(ref_hidden_states_j)
            if old_hidden_states_j is not None:
                mark_dynamic(old_hidden_states_j)
            mark_dynamic(input_ids_j)
            mark_dynamic(mask_j)

            grad_inputs_j.copy_(accumulate_chunk(new_hidden_states_j, old_hidden_states_j, ref_hidden_states_j, input_ids_j, mask_j, advantages_j, scaling))
        pass

        grad_inputs                  .div_(n_chunks)
        accumulated_loss             .div_(n_chunks)
        accumulated_completion_length.div_(n_chunks)
        accumulated_mean_kl          .div_(n_chunks)
        ctx.save_for_backward(grad_inputs)
        return (
            accumulated_loss,
            accumulated_completion_length,
            accumulated_mean_kl,
        )
    pass

    @staticmethod
    def backward(ctx, grad_output, dcompletion_length, dmean_kl):
        (grad_input,) = ctx.saved_tensors
        return (grad_input, None, None, None, None, None, None, None, None, None, None)
    pass
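
# Note (an observation, not new behavior): backward() hands back the gradients
# accumulated during forward() without multiplying by grad_output, so it assumes
# the returned loss is the final scalar that .backward() is called on (upstream
# gradient of 1.0), which is how grpo_accumulated_loss below uses it.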

def grpo_accumulated_loss(
    trainer,
    input_ids,
    logits_to_keep,
    completion_mask,
    advantages,
    old_hidden_states,
    n_chunks = -1,
    **kwargs,
):
    # All Unsloth Zoo code licensed under LGPLv3
    bsz, qlen = input_ids.shape

    # Find the closest divisor of bsz to use as the number of chunks
    factors = [i for i in range(1, bsz + 1) if bsz % i == 0]
    if n_chunks == -1: n_chunks = bsz
    n_chunks = factors[min(np.searchsorted(factors, n_chunks), len(factors) - 1)]

    mixed_dtype = torch.float16 if os.environ.get('ACCELERATE_MIXED_PRECISION', 'fp16') == 'fp16' else torch.bfloat16
    os.environ["UNSLOTH_RETURN_HIDDEN_STATES"] = "1"

    completion_input_ids = input_ids[:, -logits_to_keep:]
    lm_head = trainer.model.get_output_embeddings().weight

    with torch.amp.autocast(device_type = "cuda", dtype = mixed_dtype):
        with torch.inference_mode(), trainer.accelerator.unwrap_model(trainer.model, keep_fp32_wrapper = False).disable_adapter():
            ref_hidden_states = trainer.model(input_ids = input_ids, logits_to_keep = logits_to_keep + 1).logits
        pass

        new_hidden_states = trainer.model(input_ids = input_ids, logits_to_keep = logits_to_keep + 1).logits

        loss, completion_length, mean_kl = UnslothEfficientGRPO.apply(
            new_hidden_states, old_hidden_states, ref_hidden_states, lm_head,
            completion_input_ids, completion_mask, advantages, trainer.beta,
            trainer.accelerator.scaler,
            n_chunks, kwargs, # pass kwargs as a dict
        )

        return loss, completion_length, mean_kl

        # Old, non-chunked code path (unreachable; kept for reference). It predates
        # the current grpo_compute_loss signature, so old_logits plays the role of
        # ref_logits here and no separate old policy is passed.
        new_logits = torch.matmul(new_hidden_states, lm_head.t())
        new_logits = new_logits[:, :-1, :] # exclude the last logit: it corresponds to the next token pred
        old_logits = torch.matmul(old_hidden_states, lm_head.t())
        old_logits = old_logits[:, :-1, :] # exclude the last logit: it corresponds to the next token pred
        loss, completion_length, mean_kl = grpo_compute_loss(
            old_logits, new_logits, None, completion_input_ids, completion_mask, trainer.beta, advantages,
        )
        return loss, completion_length, mean_kl
pass
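
# Worked example of the divisor selection above (values are illustrative): with
# bsz = 6 and a requested n_chunks = 4, factors = [1, 2, 3, 6] and
# np.searchsorted(factors, 4) = 3, so n_chunks becomes factors[3] = 6; the batch
# is then processed in 6 chunks of 1 sequence each.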

# Identical to grpo_compute_loss above, but compiled as a standalone kernel.
@torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options)
def grpo_compute_loss_slow(
    ref_logits,
    new_logits,
    old_logits,
    input_ids,
    mask,
    beta,
    advantages,
    **kwargs
):
    # Set defaults for optional arguments
    loss_type             = kwargs.get("loss_type", "bnpo")
    epsilon_low           = kwargs.get("epsilon_low", 0.2)
    epsilon_high          = kwargs.get("epsilon_high", 0.2)
    max_completion_length = kwargs.get("max_completion_length", 8192)
    delta                 = kwargs.get("delta", None)

    # All Unsloth Zoo code licensed under LGPLv3
    new_logits = new_logits.to(torch.float32)
    input_ids  = input_ids.unsqueeze(-1)

    # x_i - logsumexp(x_i)
    with torch.no_grad():
        if beta != 0.0:
            assert ref_logits is not None, "ref_logits should not be None when beta != 0.0"
            ref_logits = ref_logits.to(torch.float32)
            ref_x = torch.gather(ref_logits, dim = -1, index = input_ids).squeeze(-1)
            ref   = ref_x - torch.logsumexp(ref_logits, dim = -1)
        if old_logits is not None:
            old_x = torch.gather(old_logits, dim = -1, index = input_ids).squeeze(-1)
            old   = old_x - torch.logsumexp(old_logits, dim = -1)
    new_x = torch.gather(new_logits, dim = -1, index = input_ids).squeeze(-1)
    new   = new_x - torch.logsumexp(new_logits, dim = -1)

    # Reverse KL
    # Note that this is a low variance, low bias estimator for the KL divergence, as used in the GRPO paper
    if beta != 0.0:
        kl_i = torch.exp(ref - new) - (ref - new) - 1.0
    else:
        kl_i = 0.0 # Set to 0 so it does not affect the downstream computation
    # Full correct reverse KL divergence?? Missing term maybe?
    # kl_i = torch.exp(new) * kl_i

    # Below is forward KL (normal KL)
    # kl_i = torch.exp(old) * (old - new)
    if old_logits is not None:
        coef_1 = torch.exp(new - old)
    else:
        coef_1 = torch.exp(new - new.detach())
    coef_2 = torch.clamp(coef_1, 1 - epsilon_low, 1 + epsilon_high)

    if delta is not None:
        loss_1 = torch.clamp(coef_1, max = delta) * advantages.unsqueeze(1)
    else:
        loss_1 = coef_1 * advantages.unsqueeze(1)

    # Must detach - otherwise gradients are not propagated correctly!
    # exp(x - x) == 1
    # loss_i = torch.exp(new - new.detach()) * advantages.unsqueeze(1)

    loss_2 = coef_2 * advantages.unsqueeze(1)
    loss_i = -torch.min(loss_1, loss_2)
    if beta != 0.0:
        loss_i = loss_i + beta * kl_i

    mask = mask.to(torch.float32)
    n_mask_per_reward = mask.sum(1)

    # https://github.com/huggingface/trl/blob/main/trl/trainer/grpo_trainer.py#L1363-L1370
    if loss_type == "grpo":
        loss = ((loss_i * mask).sum(-1) / mask.sum(-1).clamp(min = 1.0)).mean()
    elif loss_type == "bnpo":
        loss = (loss_i * mask).sum() / mask.sum().clamp(min = 1.0)
    elif loss_type == "dr_grpo":
        loss = (loss_i * mask).sum() / (loss_i.size(0) * max_completion_length)
    else:
        raise ValueError(f"Unknown loss type: {loss_type}")

    # loss = (loss_i * mask).sum() / mask.sum()

    # Also compute metrics, which are folded in
    with torch.inference_mode():
        completion_length = n_mask_per_reward.mean()
        mean_kl_per_reward = (kl_i * mask).sum(1) / n_mask_per_reward
        mean_kl = mean_kl_per_reward.mean()
    pass

    return loss, completion_length, mean_kl

def vLLMSamplingParams(**kwargs):
    from vllm import SamplingParams
    sampling_params = SamplingParams(**kwargs)
    sampling_params._set_kwargs = kwargs
    return sampling_params
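
# A minimal usage sketch (argument values are illustrative): the returned object
# can be handed to UnslothGRPOConfig below via `vllm_sampling_params`.
#
#   sampling_params = vLLMSamplingParams(min_p = 0.1, seed = 3407)
#   training_args = UnslothGRPOConfig(vllm_sampling_params = sampling_params)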

@dataclass
class UnslothGRPOConfig(GRPOConfig):
    """

    Configuration class for the [`GRPOTrainer`].

    Only the parameters specific to GRPO training are listed here. For details on other parameters, refer to the
    [`~transformers.TrainingArguments`] documentation.

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.

    Parameters:
        > Parameters that control the model and reference model

        model_init_kwargs (`str`, `dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Keyword arguments for [`~transformers.AutoModelForCausalLM.from_pretrained`], used when the `model`
            argument of the [`GRPOTrainer`] is provided as a string.
        disable_dropout (`bool`, *optional*, defaults to `False`):
            Whether to disable dropout in the model. This is useful for training with a reference model, as it
            prevents the model from generating different logprobs for the same input.

        > Parameters that control the data preprocessing

        remove_unused_columns (`bool`, *optional*, defaults to `False`):
            Whether to only keep the column `"prompt"` in the dataset. If you use a custom reward function that
            requires any column other than `"prompts"` and `"completions"`, you should keep this set to `False`.
        max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
            Maximum length of the prompt. If the prompt is longer than this value, it will be truncated from the
            left.
        num_generations (`int` or `None`, *optional*, defaults to `8`):
            Number of generations per prompt to sample. The effective batch size (num_processes *
            per_device_batch_size * gradient_accumulation_steps) must be evenly divisible by this value.
        max_completion_length (`int` or `None`, *optional*, defaults to `256`):
            Maximum length of the generated completion.
        ds3_gather_for_generation (`bool`, *optional*, defaults to `True`):
            This setting applies to DeepSpeed ZeRO-3. If enabled, the policy model weights are gathered for
            generation, improving generation speed. However, disabling this option allows training models that
            exceed the VRAM capacity of a single GPU, albeit at the cost of slower generation. Disabling this
            option is not compatible with vLLM generation.
        shuffle_dataset (`bool`, *optional*, defaults to `True`):
            Whether to shuffle the training dataset.

        > Parameters that control generation

        generation_batch_size (`int` or `None`, *optional*, defaults to `None`):
            Batch size to use for generation. If `None`, it defaults to the effective training batch size:
            `per_device_train_batch_size * num_processes * gradient_accumulation_steps`.
        steps_per_generation (`int` or `None`, *optional*, defaults to `None`):
            Number of optimization steps per generation. If `None`, it defaults to `gradient_accumulation_steps`.
        temperature (`float`, defaults to `1.0`):
            Temperature for sampling. The higher the temperature, the more random the completions.
        top_p (`float`, *optional*, defaults to `1.0`):
            Float that controls the cumulative probability of the top tokens to consider. Must be in (0, 1]. Set to
            `1.0` to consider all tokens.
        top_k (`int` or `None`, *optional*, defaults to `None`):
            Number of highest probability vocabulary tokens to keep for top-k-filtering. If `None`, top-k-filtering
            is disabled and all tokens are considered.
        min_p (`float` or `None`, *optional*, defaults to `None`):
            Minimum token probability, which will be scaled by the probability of the most likely token. It must be
            a value between `0.0` and `1.0`. Typical values are in the `0.01-0.2` range.
        repetition_penalty (`float`, *optional*, defaults to `1.0`):
            Float that penalizes new tokens based on whether they appear in the prompt and the generated text so
            far. Values > `1.0` encourage the model to use new tokens, while values < `1.0` encourage the model to
            repeat tokens.
        cache_implementation (`str` or `None`, *optional*, defaults to `None`):
            Implementation of the cache method for faster generation when `use_vllm` is set to `False`.

        > Parameters that control generation acceleration powered by vLLM

        use_vllm (`bool`, *optional*, defaults to `False`):
            Whether to use vLLM for generating completions. If set to `True`, the trainer will use vLLM for
            generation instead of the default `model.generate()`. Requires `vllm` to be installed.
        vllm_mode (`str`, *optional*, defaults to `"server"`):
            Mode to use for vLLM integration when `use_vllm` is set to `True`. Must be one of `"server"` or
            `"colocate"`.

            - `"server"`: The trainer will send generation requests to a separate vLLM server. Make sure a TRL vLLM
              server is running (start with `trl vllm-serve`).
            - `"colocate"`: vLLM will run in the same process and share the training GPUs. This avoids the need for
              a separate server but may cause resource contention with training.
        vllm_guided_decoding_regex (`str` or `None`, *optional*, defaults to `None`):
            Regex for vLLM guided decoding. If `None` (default), guided decoding is disabled.

        > Parameters that control the vLLM server (only used when `vllm_mode` is `"server"`)

        vllm_server_base_url (`str` or `None`, *optional*, defaults to `None`):
            Base URL for the vLLM server (e.g., `"http://localhost:8000"`). If provided, `vllm_server_host` and
            `vllm_server_port` are ignored.
        vllm_server_host (`str`, *optional*, defaults to `"0.0.0.0"`):
            Host of the vLLM server to connect to. Ignored if `vllm_server_base_url` is provided.
        vllm_server_port (`int`, *optional*, defaults to `8000`):
            Port of the vLLM server to connect to. Ignored if `vllm_server_base_url` is provided.
        vllm_server_timeout (`float`, *optional*, defaults to `240.0`):
            Total timeout duration in seconds to wait for the vLLM server to be up. If the server is not up after
            the timeout, a `ConnectionError` is raised.

        > Parameters that control colocated vLLM execution (only used when `vllm_mode` is `"colocate"`)

        vllm_gpu_memory_utilization (`float`, *optional*, defaults to `0.3`):
            Control the GPU memory utilization for vLLM. This setting only applies when `vllm_mode` is set to
            `"colocate"`. If you are using `vllm_mode="server"`, this parameter must be passed separately when
            launching the vLLM server via the `--vllm_gpu_memory_utilization` flag.
        vllm_tensor_parallel_size (`int`, *optional*, defaults to `1`):
            Control the tensor parallel size for vLLM. This setting only applies when `vllm_mode` is set to
            `"colocate"`. If you are using `vllm_mode="server"`, this parameter must be passed separately when
            launching the vLLM server via the `--vllm_tensor_parallel_size` flag.

        > Parameters that control the training

        learning_rate (`float`, *optional*, defaults to `1e-6`):
            Initial learning rate for [`AdamW`] optimizer. The default value replaces that of
            [`~transformers.TrainingArguments`].
        beta (`float`, *optional*, defaults to `0.04`):
            KL coefficient. If `0.0`, the reference model is not loaded, reducing memory usage and improving
            training speed, but may be numerically unstable for long training runs.
        num_iterations (`int`, *optional*, defaults to `1`):
            Number of iterations per batch (denoted as μ in the algorithm).
        epsilon (`float`, *optional*, defaults to `0.2`):
            Epsilon value for clipping.
        delta (`float` or `None`, *optional*, defaults to `None`):
            Enables the upper clipping bound in two-sided GRPO loss when set to a float. If `None` (default),
            standard GRPO clipping is used. Recommended to be greater than `1 + ε` when enabled. This method is
            introduced in the [INTELLECT-2 tech report](https://huggingface.co/papers/2505.07291).
        epsilon_high (`float` or `None`, *optional*, defaults to `None`):
            Upper-bound epsilon value for clipping. If not specified, it defaults to the same value as the
            lower-bound specified in argument `epsilon`. Paper [DAPO](https://huggingface.co/papers/2503.14476)
            recommends `0.28`.
        reward_weights (`list[float]` or `None`, *optional*, defaults to `None`):
            Weights for each reward function. Must match the number of reward functions. If `None`, all rewards are
            weighted equally with weight `1.0`.
        scale_rewards (`bool`, *optional*, defaults to `True`):
            Whether to scale the rewards by dividing them by their standard deviation. If `True` (default), the
            rewards are normalized by the standard deviation, ensuring they have unit variance. If `False`, no
            scaling is applied. The [Dr. GRPO paper](https://huggingface.co/papers/2503.20783) recommends not
            scaling the rewards, as scaling by the standard deviation introduces a question-level difficulty bias.
        loss_type (`str`, *optional*, defaults to `"bnpo"`):
            Specifies the loss formulation to use. Supported values are:

            - `"grpo"`: Aggregates token-level losses by normalizing over sequence length. Not recommended due to
              length bias—this approach tends to prefer shorter completions with positive advantages and longer
              ones with negative advantages.
            - `"bnpo"`: Aggregates token-level losses by normalizing over the number of active tokens in the local
              batch. Note that normalization is performed over the local batch only, so results may slightly vary
              depending on the local batch size, despite a constant effective batch size. When using
              `per_device_train_batch_size==1`, the loss is equivalent to the GRPO loss.
            - `"dr_grpo"`: Aggregates token-level losses by normalizing with a global constant. This method was
              introduced in the [Dr. GRPO paper](https://huggingface.co/papers/2503.20783) to eliminate length
              bias. The value of the constant corresponds to `max_completion_length`.
        mask_truncated_completions (`bool`, *optional*, defaults to `False`):
            When enabled, truncated completions are excluded from the loss calculation, preventing them from being
            incorrectly penalized and introducing noise during training. According to the
            [DAPO](https://huggingface.co/papers/2503.14476) paper, this is a good practice for training stability.
        sync_ref_model (`bool`, *optional*, defaults to `False`):
            Whether to synchronize the reference model with the active model every `ref_model_sync_steps` steps,
            using the `ref_model_mixup_alpha` parameter. This synchronization originates from the
            [TR-DPO](https://huggingface.co/papers/2404.09656) paper.
        ref_model_mixup_alpha (`float`, *optional*, defaults to `0.6`):
            α parameter from the [TR-DPO](https://huggingface.co/papers/2404.09656) paper, which controls the mix
            between the current policy and the previous reference policy during updates. The reference policy is
            updated according to the equation: `π_ref = α * π_θ + (1 - α) * π_ref_prev`. To use this parameter, you
            must set `sync_ref_model=True`.
        ref_model_sync_steps (`int`, *optional*, defaults to `512`):
            τ parameter from the [TR-DPO](https://huggingface.co/papers/2404.09656) paper, which determines how
            frequently the current policy is synchronized with the reference policy. To use this parameter, you
            must set `sync_ref_model=True`.
        use_liger_loss (`bool`, *optional*, defaults to `False`):
            Whether to use the Liger GRPO loss.

        > Parameters that control the logging

        log_completions (`bool`, *optional*, defaults to `False`):
            Whether to log a sample of (prompt, completion) pairs every `logging_steps` steps. If `rich` is
            installed, it prints the sample. If `wandb` logging is enabled, it logs it to `wandb`.
        num_completions_to_print (`int` or `None`, *optional*, defaults to `None`):
            Number of completions to print with `rich`. If `None`, all completions are logged.
        wandb_log_unique_prompts (`bool`, *optional*, defaults to `False`):
            Whether to log unique prompts in wandb. If `True`, only unique prompts are logged. If `False`, all
            prompts are logged.

    """
    vllm_sampling_params: Optional[Any] = field(
        default = None,
        metadata = {'help': 'vLLM SamplingParams'},
    )
    unsloth_num_chunks : Optional[int] = field(
        default = -1,
        metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'},
    )
    def __init__(
        self,
        output_dir = None,
        overwrite_output_dir = None,
        do_train = False,
        do_eval = False,
        do_predict = False,
        eval_strategy = 'no',
        prediction_loss_only = False,
        per_device_train_batch_size = 4,
        per_device_eval_batch_size = 4,
        per_gpu_train_batch_size = None,
        per_gpu_eval_batch_size = None,
        gradient_accumulation_steps = 2,
        eval_accumulation_steps = 2,
        eval_delay = 0,
        torch_empty_cache_steps = 250,
        learning_rate = 5e-05,
        weight_decay = 0.01,
        adam_beta1 = 0.9,
        adam_beta2 = 0.999,
        adam_epsilon = 1e-08,
        max_grad_norm = 1.0,
        num_train_epochs = 3.0,
        max_steps = -1,
        lr_scheduler_type = 'linear',
        warmup_ratio = 0.1,
        warmup_steps = 0,
        log_level = 'passive',
        log_level_replica = 'warning',
        log_on_each_node = True,
        logging_dir = None,
        logging_strategy = 'steps',
        logging_first_step = False,
        logging_steps = 1,
        logging_nan_inf_filter = False,
        save_strategy = 'steps',
        save_steps = 500,
        save_total_limit = None,
        save_safetensors = True,
        save_on_each_node = False,
        save_only_model = False,
        restore_callback_states_from_checkpoint = False,
        no_cuda = False,
        use_cpu = False,
        use_mps_device = False,
        seed = 3407,
        data_seed = 3407,
        jit_mode_eval = False,
        use_ipex = False,
        bf16 = False,
        fp16 = False,
        fp16_opt_level = 'O1',
        half_precision_backend = 'auto',
        bf16_full_eval = False,
        fp16_full_eval = False,
        tf32 = None,
        local_rank = -1,
        ddp_backend = None,
        tpu_num_cores = None,
        tpu_metrics_debug = False,
        debug = '',
        dataloader_drop_last = False,
        eval_steps = None,
        dataloader_num_workers = 0,
        dataloader_prefetch_factor = None,
        past_index = -1,
        run_name = None,
        disable_tqdm = None,
        remove_unused_columns = False,
        label_names = None,
        load_best_model_at_end = False,
        metric_for_best_model = None,
        greater_is_better = None,
        ignore_data_skip = False,
        fsdp = '',
        fsdp_min_num_params = 0,
        fsdp_config = None,
        fsdp_transformer_layer_cls_to_wrap = None,
        accelerator_config = None,
        deepspeed = None,
        label_smoothing_factor = 0.0,
        optim = 'adamw_8bit',
        optim_args = None,
        adafactor = False,
        group_by_length = False,
        length_column_name = 'length',
        report_to = None,
        ddp_find_unused_parameters = None,
        ddp_bucket_cap_mb = None,
        ddp_broadcast_buffers = None,
        dataloader_pin_memory = True,
        dataloader_persistent_workers = False,
        skip_memory_metrics = True,
        use_legacy_prediction_loop = False,
        push_to_hub = False,
        resume_from_checkpoint = None,
        hub_model_id = None,
        hub_strategy = 'every_save',
        hub_token = None,
        hub_private_repo = None,
        hub_always_push = False,
        gradient_checkpointing = False,
        gradient_checkpointing_kwargs = None,
        include_inputs_for_metrics = False,
        eval_do_concat_batches = True,
        fp16_backend = 'auto',
        push_to_hub_model_id = None,
        push_to_hub_organization = None,
        push_to_hub_token = None,
        mp_parameters = '',
        auto_find_batch_size = False,
        full_determinism = False,
        torchdynamo = None,
        ray_scope = 'last',
        ddp_timeout = 1800,
        torch_compile = False,
        torch_compile_backend = None,
        torch_compile_mode = None,
        include_tokens_per_second = False,
        include_num_input_tokens_seen = False,
        neftune_noise_alpha = None,
        optim_target_modules = None,
        batch_eval_metrics = False,
        eval_on_start = False,
        use_liger_kernel = False,
        eval_use_gather_object = False,
        average_tokens_across_devices = False,
        model_init_kwargs = None,
        disable_dropout = False,
        max_prompt_length = 512,
        num_generations = 8,
        max_completion_length = 256,
        ds3_gather_for_generation = True,
        shuffle_dataset = True,
        generation_batch_size = None,
        steps_per_generation = None,
        temperature = 1.0,
        top_p = 1.0,
        top_k = None,
        min_p = None,
        repetition_penalty = 1.0,
        cache_implementation = None,
        use_vllm = False,
        vllm_server_base_url = None,
        vllm_mode = 'colocate',
        vllm_guided_decoding_regex = None,
        vllm_server_host = '0.0.0.0',
        vllm_server_port = 8000,
        vllm_server_timeout = 240.0,
        vllm_gpu_memory_utilization = 0.3,
        vllm_tensor_parallel_size = 1,
        beta = 0.04,
        num_iterations = 1,
        epsilon = 0.2,
        delta = None,
        epsilon_high = None,
        reward_weights = None,
        scale_rewards = True,
        loss_type = 'bnpo',
        mask_truncated_completions = False,
        sync_ref_model = False,
        ref_model_mixup_alpha = 0.6,
        ref_model_sync_steps = 512,
        use_liger_loss = False,
        log_completions = False,
        num_completions_to_print = None,
        wandb_log_unique_prompts = False,
        vllm_sampling_params = None,
        unsloth_num_chunks = -1,
        **kwargs,
    ):
        if learning_rate < 1e-7: raise FloatingPointError(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! Consider increasing it, otherwise gradient updates will be close to 0!')
        if learning_rate > 1: raise OverflowError(f'Unsloth: Your learning rate of `{learning_rate}` is much too large (> 1)! Consider decreasing it to 1e-1, otherwise gradient updates will explode!')
        if output_dir is None and save_strategy == 'steps' and save_steps == 500:
            output_dir = 'unsloth_training_checkpoints'
            save_strategy = 'no'
        # Normalize the loss type to its canonical lowercase spelling
        if loss_type.lower() == 'dr_grpo':
            loss_type = 'dr_grpo'
        elif loss_type.lower() == 'dapo':
            loss_type = 'dapo'
        if loss_type.lower() == 'dr_grpo':
            if scale_rewards is None:
                scale_rewards = True
            elif scale_rewards is True:
                print('The Dr GRPO paper recommends setting `scale_rewards` to False! Will override. Set it to `None` to keep it as `True`.')
                scale_rewards = False
        elif loss_type.lower() == 'dapo':
            print('The DAPO paper recommends `mask_truncated_completions = True`')
            print('The DAPO paper recommends `epsilon_high = 0.28`')
            mask_truncated_completions = True
            epsilon_high = 0.28

        if (per_device_train_batch_size // num_generations) * num_generations != per_device_train_batch_size:
            print(f'Unsloth: We now expect `per_device_train_batch_size` to be a multiple of `num_generations`.\nWe will change the batch size of {per_device_train_batch_size} to the `num_generations` of {num_generations}')
            per_device_train_batch_size = num_generations
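        # For example (illustrative values): per_device_train_batch_size = 4 with
        # num_generations = 8 is not a multiple of 8, so the batch size is coerced to 8.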

        super().__init__(
            output_dir = output_dir,
            overwrite_output_dir = overwrite_output_dir,
            do_train = do_train,
            do_eval = do_eval,
            do_predict = do_predict,
            eval_strategy = eval_strategy,
            prediction_loss_only = prediction_loss_only,
            per_device_train_batch_size = per_device_train_batch_size,
            per_device_eval_batch_size = per_device_eval_batch_size,
            per_gpu_train_batch_size = per_gpu_train_batch_size,
            per_gpu_eval_batch_size = per_gpu_eval_batch_size,
            gradient_accumulation_steps = gradient_accumulation_steps,
            eval_accumulation_steps = eval_accumulation_steps,
            eval_delay = eval_delay,
            torch_empty_cache_steps = torch_empty_cache_steps,
            learning_rate = learning_rate,
            weight_decay = weight_decay,
            adam_beta1 = adam_beta1,
            adam_beta2 = adam_beta2,
            adam_epsilon = adam_epsilon,
            max_grad_norm = max_grad_norm,
            num_train_epochs = num_train_epochs,
            max_steps = max_steps,
            lr_scheduler_type = lr_scheduler_type,
            warmup_ratio = warmup_ratio,
            warmup_steps = warmup_steps,
            log_level = log_level,
            log_level_replica = log_level_replica,
            log_on_each_node = log_on_each_node,
            logging_dir = logging_dir,
            logging_strategy = logging_strategy,
            logging_first_step = logging_first_step,
            logging_steps = logging_steps,
            logging_nan_inf_filter = logging_nan_inf_filter,
            save_strategy = save_strategy,
            save_steps = save_steps,
            save_total_limit = save_total_limit,
            save_safetensors = save_safetensors,
            save_on_each_node = save_on_each_node,
            save_only_model = save_only_model,
            restore_callback_states_from_checkpoint = restore_callback_states_from_checkpoint,
            no_cuda = no_cuda,
            use_cpu = use_cpu,
            use_mps_device = use_mps_device,
            seed = seed,
            data_seed = data_seed,
            jit_mode_eval = jit_mode_eval,
            use_ipex = use_ipex,
            bf16 = bf16,
            fp16 = fp16,
            fp16_opt_level = fp16_opt_level,
            half_precision_backend = half_precision_backend,
            bf16_full_eval = bf16_full_eval,
            fp16_full_eval = fp16_full_eval,
            tf32 = tf32,
            local_rank = local_rank,
            ddp_backend = ddp_backend,
            tpu_num_cores = tpu_num_cores,
            tpu_metrics_debug = tpu_metrics_debug,
            debug = debug,
            dataloader_drop_last = dataloader_drop_last,
            eval_steps = eval_steps,
            dataloader_num_workers = dataloader_num_workers,
            dataloader_prefetch_factor = dataloader_prefetch_factor,
            past_index = past_index,
            run_name = run_name,
            disable_tqdm = disable_tqdm,
            remove_unused_columns = remove_unused_columns,
            label_names = label_names,
            load_best_model_at_end = load_best_model_at_end,
            metric_for_best_model = metric_for_best_model,
            greater_is_better = greater_is_better,
            ignore_data_skip = ignore_data_skip,
            fsdp = fsdp,
            fsdp_min_num_params = fsdp_min_num_params,
            fsdp_config = fsdp_config,
            fsdp_transformer_layer_cls_to_wrap = fsdp_transformer_layer_cls_to_wrap,
            accelerator_config = accelerator_config,
            deepspeed = deepspeed,
            label_smoothing_factor = label_smoothing_factor,
            optim = optim,
            optim_args = optim_args,
            adafactor = adafactor,
            group_by_length = group_by_length,
            length_column_name = length_column_name,
            report_to = report_to,
            ddp_find_unused_parameters = ddp_find_unused_parameters,
            ddp_bucket_cap_mb = ddp_bucket_cap_mb,
            ddp_broadcast_buffers = ddp_broadcast_buffers,
            dataloader_pin_memory = dataloader_pin_memory,
            dataloader_persistent_workers = dataloader_persistent_workers,
            skip_memory_metrics = skip_memory_metrics,
            use_legacy_prediction_loop = use_legacy_prediction_loop,
            push_to_hub = push_to_hub,
            resume_from_checkpoint = resume_from_checkpoint,
            hub_model_id = hub_model_id,
            hub_strategy = hub_strategy,
            hub_token = hub_token,
            hub_private_repo = hub_private_repo,
            hub_always_push = hub_always_push,
            gradient_checkpointing = gradient_checkpointing,
            gradient_checkpointing_kwargs = gradient_checkpointing_kwargs,
            include_inputs_for_metrics = include_inputs_for_metrics,
            eval_do_concat_batches = eval_do_concat_batches,
            fp16_backend = fp16_backend,
            push_to_hub_model_id = push_to_hub_model_id,
            push_to_hub_organization = push_to_hub_organization,
            push_to_hub_token = push_to_hub_token,
            mp_parameters = mp_parameters,
            auto_find_batch_size = auto_find_batch_size,
            full_determinism = full_determinism,
            torchdynamo = torchdynamo,
            ray_scope = ray_scope,
            ddp_timeout = ddp_timeout,
            torch_compile = torch_compile,
            torch_compile_backend = torch_compile_backend,
            torch_compile_mode = torch_compile_mode,
            include_tokens_per_second = include_tokens_per_second,
            include_num_input_tokens_seen = include_num_input_tokens_seen,
            neftune_noise_alpha = neftune_noise_alpha,
            optim_target_modules = optim_target_modules,
            batch_eval_metrics = batch_eval_metrics,
            eval_on_start = eval_on_start,
            use_liger_kernel = use_liger_kernel,
            eval_use_gather_object = eval_use_gather_object,
            average_tokens_across_devices = average_tokens_across_devices,
            model_init_kwargs = model_init_kwargs,
            disable_dropout = disable_dropout,
            max_prompt_length = max_prompt_length,
            num_generations = num_generations,
            max_completion_length = max_completion_length,
            ds3_gather_for_generation = ds3_gather_for_generation,
            shuffle_dataset = shuffle_dataset,
            generation_batch_size = generation_batch_size,
            steps_per_generation = steps_per_generation,
            temperature = temperature,
            top_p = top_p,
            top_k = top_k,
            min_p = min_p,
            repetition_penalty = repetition_penalty,
            cache_implementation = cache_implementation,
            use_vllm = use_vllm,
            vllm_server_base_url = vllm_server_base_url,
            vllm_mode = vllm_mode,
            vllm_guided_decoding_regex = vllm_guided_decoding_regex,
            vllm_server_host = vllm_server_host,
            vllm_server_port = vllm_server_port,
            vllm_server_timeout = vllm_server_timeout,
            vllm_gpu_memory_utilization = vllm_gpu_memory_utilization,
            vllm_tensor_parallel_size = vllm_tensor_parallel_size,
            beta = beta,
            num_iterations = num_iterations,
            epsilon = epsilon,
            delta = delta,
            epsilon_high = epsilon_high,
            reward_weights = reward_weights,
            scale_rewards = scale_rewards,
            loss_type = loss_type,
            mask_truncated_completions = mask_truncated_completions,
            sync_ref_model = sync_ref_model,
            ref_model_mixup_alpha = ref_model_mixup_alpha,
            ref_model_sync_steps = ref_model_sync_steps,
            use_liger_loss = use_liger_loss,
            log_completions = log_completions,
            num_completions_to_print = num_completions_to_print,
            wandb_log_unique_prompts = wandb_log_unique_prompts,
            **kwargs,
        )
        self.vllm_sampling_params = vllm_sampling_params
        self.unsloth_num_chunks = unsloth_num_chunks
    pass
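
# A minimal construction sketch (values are illustrative, not recommendations):
#
#   training_args = UnslothGRPOConfig(
#       output_dir = "outputs",
#       learning_rate = 5e-6,
#       num_generations = 8,
#       per_device_train_batch_size = 8,
#       max_prompt_length = 512,
#       max_completion_length = 256,
#       loss_type = "bnpo",
#   )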

class _UnslothGRPOTrainer(Trainer):
    """"""

    _tag_names = ["trl", "grpo"]

    def __init__(
        self,
        model: Union[str, PreTrainedModel],
        reward_funcs: Union[RewardFunc, list[RewardFunc]],
        args: Optional[GRPOConfig] = None,
        train_dataset: Optional[Union[Dataset, IterableDataset]] = None,
        eval_dataset: Optional[Union[Dataset, IterableDataset, dict[str, Union[Dataset, IterableDataset]]]] = None,
        processing_class: Optional[PreTrainedTokenizerBase] = None,
        reward_processing_classes: Optional[Union[PreTrainedTokenizerBase, list[PreTrainedTokenizerBase]]] = None,
        callbacks: Optional[list[TrainerCallback]] = None,
        optimizers: tuple[Optional[torch.optim.Optimizer], Optional[torch.optim.lr_scheduler.LambdaLR]] = (None, None),
        peft_config: Optional["PeftConfig"] = None,
    ):

        # If the model already carries an Unsloth vLLM engine, force colocated vLLM generation
        if hasattr(model, 'vllm_engine') and hasattr(args, 'use_vllm'):
            if not getattr(args, 'use_vllm', False):
                args.use_vllm = True
                args.vllm_mode = 'colocate'
        # Args
        if args is None:
            model_name = model if isinstance(model, str) else model.config._name_or_path
            model_name = model_name.split("/")[-1]
            args = GRPOConfig(f"{model_name}-GRPO")

        # Models
        # Trained model
        model_init_kwargs = args.model_init_kwargs or {}
        if isinstance(model, str):
            model_id = model
            torch_dtype = model_init_kwargs.get("torch_dtype")
            if isinstance(torch_dtype, torch.dtype) or torch_dtype == "auto" or torch_dtype is None:
                pass # torch_dtype is already a torch.dtype or "auto" or None
            elif isinstance(torch_dtype, str): # it's a str, but not "auto"
                torch_dtype = getattr(torch, torch_dtype)
                model_init_kwargs["torch_dtype"] = torch_dtype
            else:
                raise ValueError(
                    "Invalid `torch_dtype` passed to `GRPOConfig`. Expected either 'auto' or a string representing "
                    f"a `torch.dtype` (e.g., 'float32'), but got {torch_dtype}."
                )
            # Disable caching if gradient checkpointing is enabled (not supported)
            model_init_kwargs["use_cache"] = (
                False if args.gradient_checkpointing else model_init_kwargs.get("use_cache")
            )
            model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs)
        else:
            model_id = model.config._name_or_path
            if args.model_init_kwargs is not None:
                raise ValueError(
                    "You passed `model_init_kwargs` to the `GRPOConfig`, but your model is already instantiated. "
                    "This argument can only be used when the `model` argument is a string."
                )

        # Dead branch left by the code generator: the original TRL PEFT wrapping is bypassed here.
        if False:
            if not is_peft_available():
                raise ImportError("PEFT is required to use `peft_config`. Run `pip install peft`.")
            model = model

        # Enable gradient checkpointing if requested
        if args.gradient_checkpointing:
            model = self._enable_gradient_checkpointing(model, args)

        # Processing class
        if processing_class is None:
            processing_class = AutoTokenizer.from_pretrained(model.config._name_or_path, padding_side="left")
        if processing_class.pad_token is None:
            processing_class.pad_token = processing_class.eos_token

        # Reward functions
        if not isinstance(reward_funcs, list):
            reward_funcs = [reward_funcs]
        self.reward_func_names = []
        for i, reward_func in enumerate(reward_funcs):
            if isinstance(reward_func, str):
                reward_funcs[i] = AutoModelForSequenceClassification.from_pretrained(
                    reward_func, num_labels=1, **model_init_kwargs
                )
            if isinstance(reward_funcs[i], nn.Module): # Use Module over PretrainedModel for compat w/ compiled models
                self.reward_func_names.append(reward_funcs[i].config._name_or_path.split("/")[-1])
            else:
                self.reward_func_names.append(reward_funcs[i].__name__)
        self.reward_funcs = reward_funcs

        # Reward weights
        if args.reward_weights is not None:
            if len(args.reward_weights) != len(reward_funcs):
                raise ValueError(
                    f"Number of reward weights ({len(args.reward_weights)}) must match number of reward "
                    f"functions ({len(reward_funcs)})"
                )
            self.reward_weights = torch.tensor(args.reward_weights, dtype=torch.float32)
        else:
            self.reward_weights = torch.ones(len(reward_funcs), dtype=torch.float32)

        # Reward processing class
        if reward_processing_classes is None:
            reward_processing_classes = [None] * len(reward_funcs)
        elif not isinstance(reward_processing_classes, list):
            reward_processing_classes = [reward_processing_classes]
        else:
            if len(reward_processing_classes) != len(reward_funcs):
                raise ValueError("The number of reward processing classes must match the number of reward functions.")

        for i, (reward_processing_class, reward_func) in enumerate(zip(reward_processing_classes, reward_funcs)):
            if isinstance(reward_func, PreTrainedModel):
                if reward_processing_class is None:
                    reward_processing_class = AutoTokenizer.from_pretrained(reward_func.config._name_or_path)
                if reward_processing_class.pad_token_id is None:
                    reward_processing_class.pad_token = reward_processing_class.eos_token
                # The reward model computes the reward for the latest non-padded token in the input sequence.
                # So it's important to set the pad token ID to the padding token ID of the processing class.
                reward_func.config.pad_token_id = reward_processing_class.pad_token_id
                reward_processing_classes[i] = reward_processing_class
        self.reward_processing_classes = reward_processing_classes

        # Training arguments
        self.max_prompt_length = args.max_prompt_length
        self.max_completion_length = args.max_completion_length # = |o_i| in the GRPO paper
        self.num_generations = args.num_generations # = G in the GRPO paper
        self.temperature = args.temperature
        self.top_p = args.top_p
        self.top_k = args.top_k
        self.min_p = args.min_p
        self.repetition_penalty = args.repetition_penalty
        self.use_vllm = args.use_vllm
        self.vllm_mode = args.vllm_mode
        self.vllm_gpu_memory_utilization = args.vllm_gpu_memory_utilization # only applies to colocation mode
        self.vllm_tensor_parallel_size = args.vllm_tensor_parallel_size # only applies to colocation mode
        self.use_liger_loss = args.use_liger_loss
        self.loss_type = args.loss_type
        self.scale_rewards = args.scale_rewards
        self.mask_truncated_completions = args.mask_truncated_completions

        # Datasets
        self.shuffle_dataset = args.shuffle_dataset

        if (
            isinstance(train_dataset, IterableDataset)
            or isinstance(eval_dataset, IterableDataset)
            or (
                isinstance(eval_dataset, dict) and any(isinstance(ds, IterableDataset) for ds in eval_dataset.values())
            )
        ):
            # See https://github.com/huggingface/trl/issues/3213
            raise NotImplementedError(
                "Iterable datasets are not yet supported in GRPOTrainer. Please use a standard dataset instead."
            )

        # Multi-step
        self.num_iterations = args.num_iterations # = 𝜇 in the GRPO paper
        self.epsilon_low = args.epsilon
        self.epsilon_high = args.epsilon_high if args.epsilon_high is not None else args.epsilon
        # Tracks the number of iterations (forward + backward passes), including those within a grad accum cycle
        self._step = 0
        # Buffer the batch to reuse generated outputs across multiple updates. For more details, see
        # `_get_train_sampler` and `_prepare_inputs`.
        self._buffered_inputs = None

        # The trainer estimates the number of FLOPs (floating-point operations) using the number of elements in the
        # input tensor associated with the key "input_ids". However, in GRPO, the sampled data does not include the
        # "input_ids" key. Instead, the available key is "prompt". As a result, the trainer issues the warning:
        # "Could not estimate the number of tokens of the input, floating-point operations will not be computed." To
        # suppress this warning, we set the "estimate_tokens" key in the model's "warnings_issued" dictionary to
        # True. This acts as a flag to indicate that the warning has already been issued.
        model.warnings_issued["estimate_tokens"] = True

        super().__init__(
            model=model,
            args=args,
            data_collator=identity, # No data collation is needed in GRPO
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            processing_class=processing_class,
            callbacks=callbacks,
            optimizers=optimizers,
        )

        # Reference model
        self.beta = args.beta
        if self.beta == 0.0:
            # If beta is 0.0, the reference model is not needed
            self.ref_model = None
        elif is_deepspeed_zero3_enabled() or self.is_fsdp_enabled:
            self.ref_model = AutoModelForCausalLM.from_pretrained(model_id, **model_init_kwargs)
        elif is_peft_model(model):
            # If PEFT is used, the reference model is not needed since the adapter can be disabled
            # to revert to the initial model.
            self.ref_model = None
        else:
            # If PEFT configuration is not provided, create a reference model based on the initial model.
            self.ref_model = create_reference_model(model)

        # Disable dropout in the models
        if args.disable_dropout:
            disable_dropout_in_model(model)
            if self.ref_model is not None:
                disable_dropout_in_model(self.ref_model)

        # Liger loss
        if self.use_liger_loss:
            if not is_liger_kernel_available():
                raise ImportError(
                    "Liger is required to use `liger_loss` as the GRPO loss. Run `pip install liger-kernel`."
                )
            # Imported here because the symbol is not in the import list at the top of this file
            # (path as used by TRL's GRPO trainer)
            from liger_kernel.chunked_loss import LigerFusedLinearGRPOLoss
            # Redirect the model.module forward to the model forward to ensure pre-forward hooks are called
            self._forward_redirection = _ForwardRedirection()

            self.liger_grpo_loss = LigerFusedLinearGRPOLoss(
                beta=self.beta,
                epsilon_low=self.epsilon_low,
                epsilon_high=self.epsilon_high,
                temperature=self.temperature,
                use_ref_model=self.beta != 0.0,
                loss_type=self.loss_type,
                max_completion_length=self.max_completion_length,
            )

        # Initialize the metrics
        self._metrics = {"train": defaultdict(list), "eval": defaultdict(list)}
        self._total_train_tokens = 0
        self.log_completions = args.log_completions
        self.wandb_log_unique_prompts = args.wandb_log_unique_prompts
        self.num_completions_to_print = args.num_completions_to_print
        # maxlen is set to the total number of forward passes per step. This value of `maxlen` ensures we log only
        # the final optimization step.
        maxlen = self.accelerator.num_processes * args.per_device_train_batch_size * args.steps_per_generation
        self._textual_logs = {
            "prompt": deque(maxlen=maxlen),
            "completion": deque(maxlen=maxlen),
            "rewards": defaultdict(lambda: deque(maxlen=maxlen)),
            "advantages": deque(maxlen=maxlen),
        }

        # Ensure each process receives a unique seed to prevent duplicate completions when generating with
        # transformers if num_generations exceeds per_device_train_batch_size. We could skip it if we use vLLM, but
        # it's safer to set it in all cases.
        set_seed(args.seed, device_specific=True)

        if self.use_vllm:
            if not is_vllm_available():
                raise ImportError(
                    "vLLM is not available and `use_vllm` is set to True. Please install vLLM with "
                    "`pip install vllm` to use it."
                )

            if self.vllm_mode == "server" and self.accelerator.is_main_process:
                if args.vllm_server_base_url is not None:
                    base_url = args.vllm_server_base_url
                else:
                    base_url = f"http://{args.vllm_server_host}:{args.vllm_server_port}"
                self.vllm_client = VLLMClient(base_url=base_url, connection_timeout=args.vllm_server_timeout)
                self.vllm_client.init_communicator()

            elif self.vllm_mode == "colocate":
                if self.accelerator.num_processes % self.vllm_tensor_parallel_size != 0:
                    raise ValueError(
                        f"vllm_tensor_parallel_size ({self.vllm_tensor_parallel_size}) must divide world size "
                        f"({self.accelerator.num_processes}) evenly."
                    )

                if self.vllm_tensor_parallel_size > 1:
                    # Create consecutive rank subgroups for tensor parallelism,
                    # e.g. [0, 1] and [2, 3] for TP size 2 on a world size of 4
                    self.tp_group, _ = torch.distributed.new_subgroups_by_enumeration(
                        [
                            list(range(i * self.vllm_tensor_parallel_size, (i + 1) * self.vllm_tensor_parallel_size))
                            for i in range(self.accelerator.num_processes // self.vllm_tensor_parallel_size)
                        ]
                    )

                self.llm = model.vllm_engine

            self.guided_decoding_regex = args.vllm_guided_decoding_regex

            self._last_loaded_step = -1
            self.accelerator.wait_for_everyone()
        else:
            self.generation_config = GenerationConfig(
                max_new_tokens=self.max_completion_length,
                do_sample=True,
                pad_token_id=processing_class.pad_token_id,
                bos_token_id=processing_class.bos_token_id,
                eos_token_id=processing_class.eos_token_id,
                temperature=self.temperature,
                top_p=self.top_p,
                top_k=self.top_k,
                min_p=self.min_p,
                repetition_penalty=self.repetition_penalty,
                cache_implementation=args.cache_implementation,
            )

        # Gradient accumulation requires scaled loss. Normally, loss scaling in the parent class depends on whether
        # the model accepts loss-related kwargs. Since we compute our own loss, this check is irrelevant. We set
        # self.model_accepts_loss_kwargs to False to enable scaling.
        self.model_accepts_loss_kwargs = False

        # Add tags to the model
        self.model.add_model_tags(self._tag_names)

        if self.ref_model is not None:
            if self.is_deepspeed_enabled:
                self.ref_model = prepare_deepspeed(self.ref_model, self.accelerator)
            elif self.is_fsdp_enabled:
                self.ref_model = prepare_fsdp(self.ref_model, self.accelerator)
            else:
                self.ref_model = self.accelerator.prepare_model(self.ref_model, evaluation_mode=True)

        if args.sync_ref_model:
            self.add_callback(SyncRefModelCallback(ref_model=self.ref_model, accelerator=self.accelerator))

        for i, reward_func in enumerate(self.reward_funcs):
            if isinstance(reward_func, PreTrainedModel):
                if self.is_deepspeed_enabled:
                    self.reward_funcs[i] = prepare_deepspeed(reward_func, self.accelerator)
                else:
                    # set device placement to True to make `prepare_model` move `reward_func` to device when using fsdp
                    self.reward_funcs[i] = self.accelerator.prepare_model(
                        reward_func, evaluation_mode=True, device_placement=True
                    )
|
||
|
||
    def _set_signature_columns_if_needed(self):
        # If `self.args.remove_unused_columns` is True, non-signature columns are removed.
        # By default, this method sets `self._signature_columns` to the model's expected inputs.
        # In GRPOTrainer, we preprocess data, so using the model's signature columns doesn't work.
        # Instead, we set them to the columns expected by the `training_step` method, hence the override.
        if self._signature_columns is None:
            self._signature_columns = ["prompt"]

    # This method overrides `Trainer.get_train_dataloader` to support our custom batching strategy.
    # Instead of returning a standard per-step batch (i.e., `per_device_batch_size`), our dataloader loads a
    # *generation* batch (i.e., `per_device_batch_size × steps_per_generation`). This allows us to generate completions
    # once every `steps_per_generation` steps, rather than once per accumulation step, which is significantly more
    # efficient. The only change from the original implementation is multiplying the batch size by
    # `steps_per_generation`. Thus, `_prepare_inputs` is called with this *generation* batch, and it handles the
    # splitting internally.
    # Maintenance note: This method is a copy-paste of the original `Trainer.get_train_dataloader` with only one line
    # modified. As a result, some parts of the method aren't relevant to GRPO, but we keep them to stay one line
    # apart from the super method, ensuring easier maintenance in the future.
    def get_train_dataloader(self):
        if self.train_dataset is None:
            raise ValueError("Trainer: training requires a train_dataset.")

        train_dataset = self.train_dataset
        data_collator = self.data_collator
        if is_datasets_available() and isinstance(train_dataset, datasets.Dataset):
            train_dataset = self._remove_unused_columns(train_dataset, description="training")
        else:
            data_collator = self._get_collator_with_removed_columns(data_collator, description="training")

        dataloader_params = {
            "batch_size": self._train_batch_size * self.args.steps_per_generation,  # < this is the change
            "collate_fn": data_collator,
            "num_workers": self.args.dataloader_num_workers,
            "pin_memory": self.args.dataloader_pin_memory,
            "persistent_workers": self.args.dataloader_persistent_workers,
        }

        if not isinstance(train_dataset, torch.utils.data.IterableDataset):
            dataloader_params["sampler"] = self._get_train_sampler()
            dataloader_params["drop_last"] = self.args.dataloader_drop_last
            if version.parse(transformers.__version__) >= version.parse("4.52.0"):
                # from transformers 4.52.0, `seed_worker` requires the `num_workers` and `rank` arguments
                dataloader_params["worker_init_fn"] = partial(
                    seed_worker, num_workers=self.args.dataloader_num_workers, rank=self.args.process_index
                )
            else:
                dataloader_params["worker_init_fn"] = seed_worker
            dataloader_params["prefetch_factor"] = self.args.dataloader_prefetch_factor

        return self.accelerator.prepare(DataLoader(train_dataset, **dataloader_params))
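        # With the figure in `_get_train_sampler` (per_device_train_batch_size=3, steps_per_generation=4),
        # each device therefore loads 3 * 4 = 12 prompts per generation cycle from this dataloader.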

    def _get_train_sampler(self, dataset: Optional[Dataset] = None) -> Sampler:
        # Returns a sampler that
        # 1. ensures each prompt is repeated across multiple processes. This guarantees that identical prompts are
        #    distributed to different GPUs, allowing rewards to be computed and normalized correctly within each prompt
        #    group. Using the same seed across processes ensures consistent prompt assignment, preventing discrepancies
        #    in group formation.
        # 2. repeats the batch multiple times to allow reusing generations across multiple updates. Refer to
        #    _prepare_inputs to see how the generations are stored and reused.

        # In the following figure, the values are the prompt indices. The first row shows the first sampled batch, the
        # second row shows the second sampled batch, and so on.
        #
        #                                            |    GPU 0    |    GPU 1    |
        #
        #                  global_step   step         <-───>  num_generations=2
        #                                             <-───────────>  per_device_train_batch_size=3
        #  grad_accum    ▲  ▲   0          0           0   0   1   1   2   2   <- Generate for the first `steps_per_generation` (prompts 0 to 11); store the completions; use the first slice to compute the loss
        #     =2         ▼  |   0          1           3   3   4   4   5   5   <- Take the stored generations and use the second slice to compute the loss
        #                   |
        #                   |   1          2           6   6   7   7   8   8   <- Take the stored generations and use the third slice to compute the loss
        #  steps_per_gen=4  ▼   1          3           9   9  10  10  11  11   <- Take the stored generations and use the fourth slice to compute the loss
        #
        #                       2          4          12  12  13  13  14  14   <- Generate for the second `steps_per_generation` (prompts 12 to 23); store the completions; use the first slice to compute the loss
        #                       2          5          15  15  16  16  17  17   <- Take the stored generations and use the second slice to compute the loss
        #  ...
        if dataset is None:
            dataset = self.train_dataset
        return RepeatSampler(
            data_source=dataset,
            mini_repeat_count=self.num_generations,
            batch_size=self.args.generation_batch_size // self.num_generations,
            repeat_count=self.num_iterations * self.args.steps_per_generation,
            shuffle=self.shuffle_dataset,
            seed=self.args.seed,
        )
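        # `mini_repeat_count` places the `num_generations` copies of each prompt next to each other, and
        # `repeat_count` replays the whole generation batch `num_iterations * steps_per_generation` times,
        # matching how `_prepare_inputs` slices and reuses the buffered completions.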

    def _get_eval_sampler(self, eval_dataset) -> Sampler:
        # See _get_train_sampler for an explanation of the sampler.
        return RepeatSampler(
            data_source=eval_dataset,
            mini_repeat_count=self.num_generations,
            seed=self.args.seed,
        )

    def _enable_gradient_checkpointing(self, model: PreTrainedModel, args: GRPOConfig) -> PreTrainedModel:
        """Enables gradient checkpointing for the model."""
        # Ensure use_cache is disabled
        model.config.use_cache = False

        # Enable gradient checkpointing on the base model for PEFT
        if is_peft_model(model):
            model.base_model.gradient_checkpointing_enable()
        # Enable gradient checkpointing for non-PEFT models
        else:
            model.gradient_checkpointing_enable()

        gradient_checkpointing_kwargs = args.gradient_checkpointing_kwargs or {}
        use_reentrant = (
            "use_reentrant" not in gradient_checkpointing_kwargs or gradient_checkpointing_kwargs["use_reentrant"]
        )

        if use_reentrant:
            model.enable_input_require_grads()

        return model
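    # With reentrant checkpointing, gradients only flow into a checkpointed segment if its inputs require
    # grad; `enable_input_require_grads` marks the embedding outputs accordingly, which matters when the
    # embeddings themselves are frozen (e.g. LoRA training).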

    @profiling_decorator
    def _get_last_hidden_state(self, unwrapped_model, input_ids, attention_mask, logits_to_keep=None):
        if is_peft_model(unwrapped_model):
            unwrapped_model = unwrapped_model.base_model.model
        last_hidden_state = unwrapped_model.model(input_ids=input_ids, attention_mask=attention_mask).last_hidden_state
        last_hidden_state = last_hidden_state[:, :-1, :]  # (B, L-1, H)
        if logits_to_keep is not None:
            last_hidden_state = last_hidden_state[:, -logits_to_keep:, :]  # (B, logits_to_keep, H)
        return last_hidden_state

    # Get the per-token log probabilities for the completions for the model and the reference model
    def _get_per_token_logps(self, model, input_ids, attention_mask, logits_to_keep, calc_logprob_flag = None):
        if os.environ.get('UNSLOTH_USE_NEW_MODEL', '0') == '0' and not calc_logprob_flag:
            return None  # Unsloth efficient GRPO
        # Otherwise, calculate normally:
        if not hasattr(self, '_autocast_dtype'):
            self._autocast_dtype = torch.float16 if os.environ.get('ACCELERATE_MIXED_PRECISION', 'fp16') == 'fp16' else torch.bfloat16
            if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1': self._autocast_dtype = torch.float16

        os.environ["UNSLOTH_RETURN_HIDDEN_STATES"] = "1"
        with torch.amp.autocast(device_type = 'cuda', dtype = self._autocast_dtype):
            # We add 1 to `logits_to_keep` because the last logit of the sequence is excluded later
            hidden_states = model(input_ids=input_ids, attention_mask=attention_mask, logits_to_keep=logits_to_keep + 1).logits
            # logits = logits[:, :-1, :]  # (B, L-1, V), exclude the last logit: it corresponds to the next token pred
            return hidden_states
            # For transformers<=4.48, the logits_to_keep argument isn't supported, so here we drop logits ourselves.
            # See https://github.com/huggingface/trl/issues/2770
            # input_ids = input_ids[:, -logits_to_keep:]
            # logits = logits[:, -logits_to_keep:]
            # logps = selective_log_softmax(logits, input_ids)

            # row_indices, col_indices = torch.where(logps < -20)

            # # Method 1: Check if tensors have elements
            # if len(row_indices) > 0 and len(col_indices) > 0:
            #     breakpoint()  # Breakpoint triggered here
            #     print("Found high values!")
            # return logps  # compute logprobs for the input tokens
        pass
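    # Note: with UNSLOTH_RETURN_HIDDEN_STATES=1, the `.logits` attribute above actually carries the final
    # hidden states, so the lm_head projection and log-softmax can be deferred and done chunk-by-chunk
    # inside the accumulated loss, avoiding materializing the full (B, L, V) logits tensor.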

    def _sync_fsdp_params_to_vllm(self, module: nn.Module, prefix: str = "", visited=None):
        """Memory-efficient post-order traversal of FSDP modules to extract full parameters and sync with vLLM."""
        if visited is None:
            visited = set()

        for child_name, child_module in module.named_children():
            child_prefix = f"{prefix}.{child_name}" if prefix else child_name
            self._sync_fsdp_params_to_vllm(
                child_module, prefix=child_prefix, visited=visited
            )  # recurse into the child

        if isinstance(module, FSDP):
            with FSDP.summon_full_params(module, recurse=False, writeback=False):
                for param_name, param in module.named_parameters():
                    full_name = f"{prefix}.{param_name}" if prefix else param_name
                    for extra in ("_fsdp_wrapped_module.", "_checkpoint_wrapped_module."):
                        full_name = full_name.replace(extra, "")

                    if full_name in visited:
                        continue  # skip FSDP subtrees already traversed
                    visited.add(full_name)

                    if self.vllm_mode == "server" and self.accelerator.is_main_process:
                        self.vllm_client.update_named_param(full_name, param.data)
                    elif self.vllm_mode == "colocate":
                        pass
        pass

    def _move_model_to_vllm(self, *args, **kwargs): return None

    @profiling_decorator
    def _prepare_inputs(
        self, generation_batch: dict[str, Union[torch.Tensor, Any]]
    ) -> dict[str, Union[torch.Tensor, Any]]:
        # Prepares inputs for model training/evaluation by managing completion generation and batch handling.
        # During training:
        #   - Receives the local generation batch (per-GPU batch size × steps per generation)
        #     from the modified training dataloader instead of the standard local batch
        #   - Generates completions once for the entire generation batch and splits it into batches of size
        #     `per_device_train_batch_size`
        #   - Buffers these completions and returns the appropriate slice for the current accumulation step
        #   - Optimizes by regenerating completions only periodically (every steps_per_generation * num_iterations)
        # During evaluation:
        #   - The input is treated as a standard local batch (no accumulation, no multiple iterations)
        #   - Completions are generated for each batch without buffering or reuse
        # Returns a single local batch in both cases.

        mode = "train" if self.model.training else "eval"
        if mode == "train":
            generate_every = self.args.steps_per_generation * self.num_iterations
            if self._step % generate_every == 0 or self._buffered_inputs is None:
                # self._buffered_inputs=None can occur when resuming from a checkpoint
                generation_batch = self._generate_and_score_completions(generation_batch)
                generation_batch = shuffle_tensor_dict(generation_batch)
                self._buffered_inputs = split_tensor_dict(generation_batch, self.args.steps_per_generation)
            inputs = self._buffered_inputs[self._step % self.args.steps_per_generation]
            self._step += 1
        else:
            # In evaluation, there is neither batch grouping for generation, nor multiple iterations, hence
            # local generation batch == local eval batch
            inputs = self._generate_and_score_completions(generation_batch)
        return inputs
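    # Example: with steps_per_generation=4 and num_iterations=2, generate_every = 8, so completions are
    # regenerated at steps 0, 8, 16, ...; each of the 4 buffered slices is consumed twice (at steps k and k + 4).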

    def _generate_and_score_completions(
        self, inputs: list[dict[str, Union[torch.Tensor, Any]]]
    ) -> dict[str, Union[torch.Tensor, Any]]:
        device = self.accelerator.device
        mode = "train" if self.model.training else "eval"

        prompts = [x["prompt"] for x in inputs]
        prompts_text = [maybe_apply_chat_template(example, self.processing_class)["prompt"] for example in inputs]
        prompt_inputs = self.processing_class(
            text=prompts_text, return_tensors="pt", padding=True, padding_side="left", add_special_tokens=False
        )
        prompt_inputs = super()._prepare_inputs(prompt_inputs)
        prompt_ids, prompt_mask = prompt_inputs["input_ids"], prompt_inputs["attention_mask"]

        if self.max_prompt_length is not None:
            prompt_ids = prompt_ids[:, -self.max_prompt_length :]
            prompt_mask = prompt_mask[:, -self.max_prompt_length :]

        # Generate completions using either vLLM or regular generation
        if self.use_vllm:
            # First, update the vLLM weights if needed
            if self.state.global_step != self._last_loaded_step:
                self._move_model_to_vllm()
                self._last_loaded_step = self.state.global_step

            # Generate completions using vLLM: gather all prompts and use them in a single call in the main process
            if self.vllm_mode == "server":
                all_prompts_text = gather_object(prompts_text)
                if self.accelerator.is_main_process:
                    # Since 'prompts' contains 'num_generations' duplicates, we first take unique prompts, and generate
                    # num_generations outputs for each one. This is faster than generating outputs for each duplicate
                    # prompt individually.
                    ordered_set_of_prompts = all_prompts_text[:: self.num_generations]
                    with profiling_context(self, "vLLM.generate"):
                        completion_ids = self.vllm_client.generate(
                            prompts=ordered_set_of_prompts,
                            n=self.num_generations,
                            repetition_penalty=self.repetition_penalty,
                            temperature=self.temperature,
                            top_p=self.top_p,
                            top_k=-1 if self.top_k is None else self.top_k,
                            min_p=0.0 if self.min_p is None else self.min_p,
                            max_tokens=self.max_completion_length,
                            guided_decoding_regex=self.guided_decoding_regex,
                        )
                else:
                    completion_ids = [None] * len(all_prompts_text)
                # Broadcast the completions from the main process to all processes, ensuring each process receives its
                # corresponding slice.
                completion_ids = broadcast_object_list(completion_ids, from_process=0)
                process_slice = slice(
                    self.accelerator.process_index * len(prompts),
                    (self.accelerator.process_index + 1) * len(prompts),
                )
                completion_ids = completion_ids[process_slice]

            # Generate completions using colocated vLLM instances: each device holds a vLLM copy and works on its own batch of prompts
            elif self.vllm_mode == "colocate":
                if self.guided_decoding_regex:
                    guided_decoding = GuidedDecodingParams(backend="outlines", regex=self.guided_decoding_regex)
                else:
                    guided_decoding = None
                sampling_params = SamplingParams(
                    n=1,  # vLLM on each GPU generates only 1 in colocate mode
                    repetition_penalty=self.repetition_penalty,
                    temperature=self.temperature,
                    top_p=self.top_p,
                    top_k=-1 if self.top_k is None else self.top_k,
                    min_p=0.0 if self.min_p is None else self.min_p,
                    max_tokens=self.max_completion_length,
                    guided_decoding=guided_decoding,
                )

                if self.vllm_tensor_parallel_size > 1:
                    # Gather prompts from all ranks in the TP group and flatten.
                    # Each rank starts with its own prompts; after gathering, all ranks see the full group set.
                    orig_size = len(prompts_text)
                    gathered_prompts = [None for _ in range(self.vllm_tensor_parallel_size)]
                    torch.distributed.all_gather_object(gathered_prompts, prompts_text, group=self.tp_group)
                    all_prompts_text = [p for sublist in gathered_prompts for p in sublist]
                else:
                    all_prompts_text = prompts_text

                with profiling_context(self, "vLLM.generate"):
                    all_outputs = self.llm.generate(
                        all_prompts_text,
                        sampling_params=sampling_params,
                        use_tqdm=False,
                        lora_request = self.model.load_lora('grpo_trainer_lora_model', load_tensors = True),
                    )

                completion_ids = [output.token_ids for outputs in all_outputs for output in outputs.outputs]

                if self.vllm_tensor_parallel_size > 1:
                    # Slice completions for this rank within its TP group.
                    # Each rank generates all outputs; we keep only our share.
                    local_rank_in_group = torch.distributed.get_rank(group=self.tp_group)
                    tp_slice = slice(local_rank_in_group * orig_size, (local_rank_in_group + 1) * orig_size)
                    completion_ids = completion_ids[tp_slice]

            # Pad the completions, and concatenate them with the prompts
            completion_ids = [torch.tensor(ids, device=device) for ids in completion_ids]
            completion_ids = pad(completion_ids, padding_value=self.processing_class.pad_token_id)
            prompt_completion_ids = torch.cat([prompt_ids, completion_ids], dim=1)
        else:
            # Regular generation path
            with unwrap_model_for_generation(
                self.model_wrapped, self.accelerator, gather_deepspeed3_params=self.args.ds3_gather_for_generation
            ) as unwrapped_model:
                with (
                    FSDP.summon_full_params(self.model_wrapped, recurse=False)
                    if self.is_fsdp_enabled
                    else nullcontext()
                ):
                    prompt_completion_ids = unwrapped_model.generate(
                        prompt_ids, attention_mask=prompt_mask, generation_config=self.generation_config
                    )

            # Compute prompt length and extract completion ids
            prompt_length = prompt_ids.size(1)
            prompt_ids = prompt_completion_ids[:, :prompt_length]
            completion_ids = prompt_completion_ids[:, prompt_length:]

        # Mask everything after the first EOS token
        is_eos = completion_ids == self.processing_class.eos_token_id
        eos_idx = torch.full((is_eos.size(0),), is_eos.size(1), dtype=torch.long, device=device)
        eos_idx[is_eos.any(dim=1)] = is_eos.int().argmax(dim=1)[is_eos.any(dim=1)]
        sequence_indices = torch.arange(is_eos.size(1), device=device).expand(is_eos.size(0), -1)
        completion_mask = (sequence_indices <= eos_idx.unsqueeze(1)).int()

        # Convert tensor to a list of lists of token IDs. This will be passed to the reward function, avoiding the need
        # to re-tokenize completions if the reward is computed from tokens.
        completion_ids_list = [
            [id.item() for id, m in zip(row, mask_row) if m] for row, mask_row in zip(completion_ids, completion_mask)
        ]

        # Sum along the sequence dimension (dim=1) to get completion length per sequence, used for logging
        completion_lengths = completion_mask.sum(1)

        # If mask_truncated_completions is enabled, zero out truncated completions in completion_mask
        if self.mask_truncated_completions:
            truncated_completions = ~is_eos.any(dim=1)
            completion_mask = completion_mask * (~truncated_completions).unsqueeze(1).int()

        # Concatenate prompt_mask with completion_mask for logit computation
        attention_mask = torch.cat([prompt_mask, completion_mask], dim=1)  # (B, P+C)

        logits_to_keep = completion_ids.size(1)  # we only need to compute the logits for the completion tokens
        batch_size = self.args.per_device_train_batch_size if mode == "train" else self.args.per_device_eval_batch_size

        with torch.no_grad():
            # When using num_iterations == 1 and steps_per_generation <= gradient_accumulation_steps,
            # old_per_token_logps == per_token_logps, so we can skip its computation here and use
            # per_token_logps.detach() instead.
            if self.num_iterations > 1 or self.args.steps_per_generation > self.args.gradient_accumulation_steps:
                old_per_token_logps = self._get_per_token_logps(
                    self.model, prompt_completion_ids, attention_mask, logits_to_keep, batch_size
                )
            else:
                old_per_token_logps = None

        # Decode the generated completions
        completions_text = self.processing_class.batch_decode(completion_ids, skip_special_tokens=True)
        if is_conversational(inputs[0]):
            completions = []
            for prompt, completion in zip(prompts, completions_text):
                bootstrap = prompt.pop()["content"] if prompt[-1]["role"] == "assistant" else ""
                completions.append([{"role": "assistant", "content": bootstrap + completion}])
        else:
            completions = completions_text

        rewards_per_func = torch.zeros(len(prompts), len(self.reward_funcs), device=device)

        # Repeat all input columns (except "prompt", "completion", and "completion_ids") to match the number of generations
        keys = [key for key in inputs[0] if key not in ["prompt", "completion", "completion_ids"]]
        reward_kwargs = {key: [example[key] for example in inputs] for key in keys}

        for i, (reward_func, reward_processing_class, reward_func_name) in enumerate(
            zip(self.reward_funcs, self.reward_processing_classes, self.reward_func_names)
        ):
            with profiling_context(self, reward_func_name):
                if isinstance(reward_func, nn.Module):  # Module (not just PreTrainedModel) for compat with compiled models
                    if is_conversational(inputs[0]):
                        messages = [{"messages": p + c} for p, c in zip(prompts, completions)]
                        texts = [apply_chat_template(x, reward_processing_class)["text"] for x in messages]
                    else:
                        texts = [p + c for p, c in zip(prompts, completions)]
                    reward_inputs = reward_processing_class(
                        text=texts, return_tensors="pt", padding=True, padding_side="right", add_special_tokens=False
                    )
                    reward_inputs = super()._prepare_inputs(reward_inputs)
                    with torch.inference_mode():
                        rewards_per_func[:, i] = reward_func(**reward_inputs).logits[:, 0]  # Shape (B*G,)
                else:
                    output_reward_func = reward_func(
                        prompts=prompts, completions=completions, completion_ids=completion_ids_list, **reward_kwargs
                    )
                    # Convert None values to NaN
                    output_reward_func = [reward if reward is not None else torch.nan for reward in output_reward_func]

                    rewards_per_func[:, i] = torch.tensor(output_reward_func, dtype=torch.float32, device=device)

        # If all reward functions return None for a given row, issue a detailed warning
        if torch.isnan(rewards_per_func).all(dim=1).any():
            nan_row_idx = torch.isnan(rewards_per_func).all(dim=1).nonzero(as_tuple=True)[0][0]
            row_reward_kwargs = {key: value[nan_row_idx] for key, value in reward_kwargs.items()}
            row_reward_kwargs["prompt"] = prompts[nan_row_idx]
            row_reward_kwargs["completion"] = completions[nan_row_idx]
            warnings.warn(
                f"All reward functions returned None for the following kwargs: {row_reward_kwargs}. "
                "Please ensure that at least one reward function returns a valid reward."
            )

        # Gather the reward per function: this part is crucial, because the rewards are normalized per group and the
        # completions may be distributed across processes
        rewards_per_func = gather(rewards_per_func)

        # Apply weights to each reward function's output and sum
        rewards = (rewards_per_func * self.reward_weights.to(device).unsqueeze(0)).nansum(dim=1)

        # Compute group-wise rewards
        mean_grouped_rewards = rewards.view(-1, self.num_generations).mean(dim=1)
        std_grouped_rewards = rewards.view(-1, self.num_generations).std(dim=1)
        is_std_zero = torch.isclose(std_grouped_rewards, torch.zeros_like(std_grouped_rewards))

        # Normalize the rewards to compute the advantages
        mean_grouped_rewards = mean_grouped_rewards.repeat_interleave(self.num_generations, dim=0)
        std_grouped_rewards = std_grouped_rewards.repeat_interleave(self.num_generations, dim=0)
        advantages = rewards - mean_grouped_rewards
        if self.scale_rewards:
            advantages = advantages / (std_grouped_rewards + 1e-4)
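        # i.e. the GRPO advantage: A_i = r_i - mean(group), optionally divided by (std(group) + 1e-4).
        # E.g. a group of rewards [1.0, 0.0, 0.0, 1.0] gives advantages [0.5, -0.5, -0.5, 0.5] before scaling.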

        # Slice to keep only the local part of the data
        process_slice = slice(
            self.accelerator.process_index * len(prompts),
            (self.accelerator.process_index + 1) * len(prompts),
        )
        all_process_advantages = advantages.clone()  # keep the aggregated advantages for logging
        advantages = advantages[process_slice]

        # Log the metrics
        if mode == "train":
            self.state.num_input_tokens_seen += self.accelerator.gather(attention_mask.sum()).sum().item()
        self._metrics[mode]["num_tokens"] = [self.state.num_input_tokens_seen]

        # Log completion lengths: mean, min, max
        agg_completion_lengths = self.accelerator.gather(completion_lengths)
        self._metrics[mode]["completions/mean_length"].append(agg_completion_lengths.float().mean().item())
        self._metrics[mode]["completions/min_length"].append(agg_completion_lengths.float().min().item())
        self._metrics[mode]["completions/max_length"].append(agg_completion_lengths.float().max().item())

        # Identify sequences that terminated with EOS and log their lengths
        agg_terminated_with_eos = self.accelerator.gather(is_eos.any(dim=1))
        term_completion_lengths = agg_completion_lengths[agg_terminated_with_eos]
        clipped_completions_ratio = 1 - len(term_completion_lengths) / len(agg_completion_lengths)
        self._metrics[mode]["completions/clipped_ratio"].append(clipped_completions_ratio)
        if len(term_completion_lengths) == 0:  # edge case where no terminated sequences are found
            term_completion_lengths = torch.zeros(1, device=device)
        self._metrics[mode]["completions/mean_terminated_length"].append(term_completion_lengths.float().mean().item())
        self._metrics[mode]["completions/min_terminated_length"].append(term_completion_lengths.float().min().item())
        self._metrics[mode]["completions/max_terminated_length"].append(term_completion_lengths.float().max().item())

        # Calculate mean reward per function, but only for samples where the function was applied (non-NaN values)
        for i, reward_func_name in enumerate(self.reward_func_names):
            mean_rewards = torch.nanmean(rewards_per_func[:, i]).item()
            self._metrics[mode][f"rewards/{reward_func_name}/mean"].append(mean_rewards)
            std_rewards = nanstd(rewards_per_func[:, i]).item()
            self._metrics[mode][f"rewards/{reward_func_name}/std"].append(std_rewards)
        self._metrics[mode]["reward"].append(mean_grouped_rewards.mean().item())
        self._metrics[mode]["reward_std"].append(std_grouped_rewards.mean().item())
        self._metrics[mode]["frac_reward_zero_std"].append(is_std_zero.float().mean().item())

        # Log prompt and completion texts
        self._textual_logs["prompt"].extend(gather_object(prompts_text))
        self._textual_logs["completion"].extend(gather_object(completions_text))
        for i, name in enumerate(self.reward_func_names):
            self._textual_logs["rewards"][name].extend(rewards_per_func[:, i].tolist())
        self._textual_logs["advantages"].extend(all_process_advantages.tolist())

        return {
            "prompt_ids": prompt_ids,
            "prompt_mask": prompt_mask,
            "completion_ids": completion_ids,
            "completion_mask": completion_mask,
            "advantages": advantages,
            "old_per_token_logps": old_per_token_logps,
        }
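    # Shapes of the returned batch: prompt_ids/prompt_mask are (B, P), completion_ids/completion_mask are
    # (B, C), advantages is (B,), and old_per_token_logps is (B, C) or None when it can be recomputed from
    # per_token_logps.detach().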

    def compute_liger_loss(self, unwrapped_model, inputs):
        # Compute the per-token log probabilities for the model
        prompt_ids, prompt_mask = inputs["prompt_ids"], inputs["prompt_mask"]
        completion_ids, completion_mask = inputs["completion_ids"], inputs["completion_mask"]
        input_ids = torch.cat([prompt_ids, completion_ids], dim=1)
        attention_mask = torch.cat([prompt_mask, completion_mask], dim=1)
        logits_to_keep = completion_ids.size(1)  # we only need to compute the logits for the completion tokens

        # Compute the KL divergence between the model and the reference model
        ref_per_token_logps = None
        if self.beta != 0.0:
            with torch.no_grad():
                if self.ref_model is not None:
                    ref_per_token_logps = self._get_per_token_logps(
                        self.ref_model, input_ids, attention_mask, logits_to_keep
                    )
                else:
                    with self.accelerator.unwrap_model(self.model).disable_adapter():
                        ref_per_token_logps = self._get_per_token_logps(
                            self.model, input_ids, attention_mask, logits_to_keep
                        )

        # get the last hidden state of the model
        last_hidden_state = self._get_last_hidden_state(unwrapped_model, input_ids, attention_mask, logits_to_keep)

        # compute loss and metrics using liger grpo loss
        loss, metrics = self.liger_grpo_loss(
            _input=last_hidden_state,
            lin_weight=unwrapped_model.lm_head.weight,
            selected_token_ids=completion_ids,
            attention_mask=completion_mask,
            advantages=inputs["advantages"],
            bias=unwrapped_model.lm_head.bias,
            old_per_token_logps=inputs["old_per_token_logps"],
            ref_per_token_logps=ref_per_token_logps,
        )
        # Extract metrics from the liger_grpo_loss output
        # KL divergence is the first metric when beta is non-zero
        mean_kl = metrics[0] if self.beta != 0.0 else None
        clip_ratio = metrics[-1]

        mode = "train" if self.model.training else "eval"
        if self.beta != 0.0:
            self._metrics[mode]["kl"].append(self.accelerator.gather(mean_kl).mean().item())
        self._metrics[mode]["clip_ratio"].append(self.accelerator.gather(clip_ratio).mean().item())
        return loss

    def compute_loss(self, model, inputs, return_outputs = False, num_items_in_batch = None):
        if return_outputs:
            raise ValueError("The GRPOTrainer does not support returning outputs")
        # Compute the per-token log probabilities for the model

        prompt_ids, prompt_mask = inputs["prompt_ids"], inputs["prompt_mask"]
        completion_ids, completion_mask = inputs["completion_ids"], inputs["completion_mask"]
        input_ids = torch.cat([prompt_ids, completion_ids], dim=1)
        bsz, qlen = input_ids.shape
        attention_mask = torch.cat([prompt_mask, completion_mask], dim=1)
        # attention_mask = None
        logits_to_keep = completion_ids.size(1)  # we only need to compute the logits for the completion tokens
        _input_ids = input_ids
        _logits_to_keep = logits_to_keep

        per_token_logps = self._get_per_token_logps(model, input_ids, attention_mask, logits_to_keep)

        # Compute the KL divergence between the model and the reference model
        # _prepare_inputs doesn't return reference log probs anymore. We need to calculate it ourselves.
        # https://github.com/huggingface/trl/blob/05bc43e960396581e458195b8388efe6b82cae1f/trl/trainer/grpo_trainer.py#L1328
        if self.beta != 0.0:
            with torch.inference_mode(), model.disable_adapter():
                ref_per_token_logps = self._get_per_token_logps(model, input_ids, attention_mask, logits_to_keep)
        else:
            ref_per_token_logps = None
        # per_token_kl = torch.exp(ref_per_token_logps - per_token_logps) - (ref_per_token_logps - per_token_logps) - 1

        # x - x.detach() allows for preserving gradients from x
        advantages = inputs["advantages"]
        # per_token_loss = torch.exp(per_token_logps - per_token_logps.detach()) * advantages.unsqueeze(1)
        # per_token_loss = -(per_token_loss - self.beta * per_token_kl)
        # loss = ((per_token_loss * completion_mask).sum(dim=1) / completion_mask.sum(dim=1)).mean()
        if "old_per_token_logps" in inputs:
            old_hidden_states = inputs["old_per_token_logps"]
        else:
            old_hidden_states = None
        input_ids = input_ids[:, -logits_to_keep:]
        if per_token_logps is not None:
            loss, completion_length, mean_kl = grpo_compute_loss_slow(
                ref_per_token_logps, per_token_logps, old_hidden_states, input_ids, completion_mask, self.beta, advantages,
                loss_type = self.args.loss_type,
                epsilon_low = self.epsilon_low, epsilon_high = self.epsilon_high,
                max_completion_length = self.args.max_completion_length,
                delta = self.args.delta,
            )
        else:
            if hasattr(self.args, "loss_type"):
                loss, completion_length, mean_kl = grpo_accumulated_loss(
                    self, _input_ids, logits_to_keep, completion_mask, advantages, old_hidden_states,
                    n_chunks = self.args.unsloth_num_chunks,
                    loss_type = self.args.loss_type,
                    epsilon_low = self.epsilon_low, epsilon_high = self.epsilon_high,
                    max_completion_length = self.args.max_completion_length,
                    delta = self.args.delta,
                )
            else:
                # to ensure backwards compatibility with trl 0.15.2 and maybe even 0.17
                loss, completion_length, mean_kl = grpo_accumulated_loss(
                    self, _input_ids, logits_to_keep, completion_mask, advantages, old_hidden_states,
                    n_chunks = self.args.unsloth_num_chunks,
                )

        # Log the metrics
        # completion_length = self.accelerator.gather_for_metrics(completion_mask.sum(1)).float().mean().item()

        # mean_kl = ((per_token_kl * completion_mask).sum(dim=1) / completion_mask.sum(dim=1)).mean()
        # self._metrics["kl"].append(self.accelerator.gather_for_metrics(mean_kl).mean().item())

        if "train" in self._metrics:
            mode = "eval" if self.control.should_evaluate else "train"
            self._metrics[mode]["completion_length"].append(completion_length.item())
            self._metrics[mode]["kl"].append(mean_kl.item())
        else:
            self._metrics["completion_length"].append(completion_length.item())
            self._metrics["kl"].append(mean_kl.item())
        return loss
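    # Dispatch summary: when `_get_per_token_logps` returns None (Unsloth efficient mode), the chunked
    # `grpo_accumulated_loss` path recomputes log-probabilities from hidden states in `n_chunks` pieces;
    # otherwise the reference TRL-style `grpo_compute_loss_slow` path runs on the full log-probabilities.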

    def _compute_loss(self, model, inputs):
        # Compute the per-token log probabilities for the model
        prompt_ids, prompt_mask = inputs["prompt_ids"], inputs["prompt_mask"]
        completion_ids, completion_mask = inputs["completion_ids"], inputs["completion_mask"]
        input_ids = torch.cat([prompt_ids, completion_ids], dim=1)
        attention_mask = torch.cat([prompt_mask, completion_mask], dim=1)
        logits_to_keep = completion_ids.size(1)  # we only need to compute the logits for the completion tokens

        per_token_logps = self._get_per_token_logps(model, input_ids, attention_mask, logits_to_keep)

        # Compute the KL divergence between the model and the reference model
        if self.beta != 0.0:
            with torch.no_grad():
                if self.ref_model is not None:
                    ref_per_token_logps = self._get_per_token_logps(
                        self.ref_model, input_ids, attention_mask, logits_to_keep
                    )
                else:
                    with self.accelerator.unwrap_model(self.model).disable_adapter():
                        ref_per_token_logps = self._get_per_token_logps(
                            self.model, input_ids, attention_mask, logits_to_keep
                        )
            per_token_kl = (
                torch.exp(ref_per_token_logps - per_token_logps) - (ref_per_token_logps - per_token_logps) - 1
            )

        # Compute the loss
        advantages = inputs["advantages"]
        # When using num_iterations == 1 and steps_per_generation <= gradient_accumulation_steps,
        # old_per_token_logps == per_token_logps, so we can skip its computation
        # (see _generate_and_score_completions) and use per_token_logps.detach() instead.
        old_per_token_logps = (
            per_token_logps.detach() if inputs["old_per_token_logps"] is None else inputs["old_per_token_logps"]
        )
        coef_1 = torch.exp(per_token_logps - old_per_token_logps)
        coef_2 = torch.clamp(coef_1, 1 - self.epsilon_low, 1 + self.epsilon_high)

        # Two-sided clipping
        if self.args.delta is not None:
            coef_1 = torch.clamp(coef_1, max=self.args.delta)

        per_token_loss1 = coef_1 * advantages.unsqueeze(1)
        per_token_loss2 = coef_2 * advantages.unsqueeze(1)
        per_token_loss = -torch.min(per_token_loss1, per_token_loss2)
        if self.beta != 0.0:
            per_token_loss = per_token_loss + self.beta * per_token_kl

        if self.loss_type == "grpo":
            loss = ((per_token_loss * completion_mask).sum(-1) / completion_mask.sum(-1).clamp(min=1.0)).mean()
        elif self.loss_type == "bnpo":
            loss = (per_token_loss * completion_mask).sum() / completion_mask.sum().clamp(min=1.0)
        elif self.loss_type == "dr_grpo":
            loss = (per_token_loss * completion_mask).sum() / (per_token_loss.size(0) * self.max_completion_length)
        else:
            raise ValueError(f"Unknown loss type: {self.loss_type}")

        # Log the metrics
        mode = "train" if self.model.training else "eval"

        if self.beta != 0.0:
            mean_kl = (per_token_kl * completion_mask).sum() / completion_mask.sum()
            self._metrics[mode]["kl"].append(self.accelerator.gather(mean_kl).nanmean().item())

        # Compute the clipped probability ratios
        is_low_clipped = (coef_1 < 1 - self.epsilon_low) & (advantages.unsqueeze(1) < 0)
        is_high_clipped = (coef_1 > 1 + self.epsilon_high) & (advantages.unsqueeze(1) > 0)
        is_region_clipped = is_low_clipped | is_high_clipped

        low_clip = (is_low_clipped * completion_mask).sum() / completion_mask.sum()
        high_clip = (is_high_clipped * completion_mask).sum() / completion_mask.sum()
        clip_ratio = (is_region_clipped * completion_mask).sum() / completion_mask.sum()

        gathered_low_clip = self.accelerator.gather(low_clip)
        self._metrics[mode]["clip_ratio/low_mean"].append(gathered_low_clip.nanmean().item())
        self._metrics[mode]["clip_ratio/low_min"].append(nanmin(gathered_low_clip).item())
        gathered_high_clip = self.accelerator.gather(high_clip)
        self._metrics[mode]["clip_ratio/high_mean"].append(gathered_high_clip.nanmean().item())
        self._metrics[mode]["clip_ratio/high_max"].append(nanmax(gathered_high_clip).item())
        gathered_clip_ratio = self.accelerator.gather(clip_ratio)
        self._metrics[mode]["clip_ratio/region_mean"].append(gathered_clip_ratio.nanmean().item())
        return loss
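    # Loss normalization recap: "grpo" averages per-sequence token means, "bnpo" averages over all unmasked
    # tokens in the batch, and "dr_grpo" divides by the constant (batch_size * max_completion_length), which
    # removes the completion-length bias analyzed in the Dr. GRPO work.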

    def prediction_step(self, model, inputs, prediction_loss_only, ignore_keys: Optional[list[str]] = None):
        inputs = self._prepare_inputs(inputs)
        with torch.no_grad():
            with self.compute_loss_context_manager():
                loss = self.compute_loss(model, inputs)
            loss = loss.mean().detach()
        return loss, None, None

    def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None:
        mode = "train" if self.model.training else "eval"
        metrics = {key: sum(val) / len(val) for key, val in self._metrics[mode].items()}  # average the metrics

        # This method can be called both in training and evaluation. When called in evaluation, the keys in `logs`
        # start with "eval_". We need to add the prefix "eval_" to the keys in `metrics` to match the format.
        if mode == "eval":
            metrics = {f"eval_{key}": val for key, val in metrics.items()}

        logs = {**logs, **metrics}
        if version.parse(transformers.__version__) >= version.parse("4.47.0.dev0"):
            super().log(logs, start_time)
        else:  # transformers<=4.46
            super().log(logs)
        self._metrics[mode].clear()

        if self.accelerator.is_main_process and self.log_completions:
            if is_rich_available():
                print_prompt_completions_sample(
                    self._textual_logs["prompt"],
                    self._textual_logs["completion"],
                    self._textual_logs["rewards"],
                    self._textual_logs["advantages"],
                    self.state.global_step,
                    self.num_completions_to_print,
                )

            if self.args.report_to and "wandb" in self.args.report_to and wandb.run is not None:
                import pandas as pd

                table = {
                    "step": [str(self.state.global_step)] * len(self._textual_logs["prompt"]),
                    "prompt": self._textual_logs["prompt"],
                    "completion": self._textual_logs["completion"],
                    **self._textual_logs["rewards"],
                    "advantage": self._textual_logs["advantages"],
                }
                df = pd.DataFrame(table)
                if self.wandb_log_unique_prompts:
                    df = df.drop_duplicates(subset=["prompt"])
                wandb.log({"completions": wandb.Table(dataframe=df)})

    def create_model_card(
        self,
        model_name: Optional[str] = None,
        dataset_name: Optional[str] = None,
        tags: Union[str, list[str], None] = None,
    ):
        """
        Creates a draft of a model card using the information available to the `Trainer`.

        Args:
            model_name (`str` or `None`, *optional*, defaults to `None`):
                Name of the model.
            dataset_name (`str` or `None`, *optional*, defaults to `None`):
                Name of the dataset used for training.
            tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
                Tags to be associated with the model card.
        """
        if not self.is_world_process_zero():
            return

        if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
            base_model = self.model.config._name_or_path
        else:
            base_model = None

        tags = tags or []
        if isinstance(tags, str):
            tags = [tags]

        if hasattr(self.model.config, "unsloth_version"):
            tags.append("unsloth")

        citation = textwrap.dedent(
            """\
            @article{zhihong2024deepseekmath,
                title  = {{DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models}},
                author = {Zhihong Shao and Peiyi Wang and Qihao Zhu and Runxin Xu and Junxiao Song and Mingchuan Zhang and Y. K. Li and Y. Wu and Daya Guo},
                year   = 2024,
                eprint = {arXiv:2402.03300},
            }
            """
        )

        model_card = generate_model_card(
            base_model=base_model,
            model_name=model_name,
            hub_model_id=self.hub_model_id,
            dataset_name=dataset_name,
            tags=tags,
            wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None,
            comet_url=get_comet_experiment_url(),
            trainer_name="GRPO",
            trainer_citation=citation,
            paper_title="DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models",
            paper_id="2402.03300",
        )

        model_card.save(os.path.join(self.args.output_dir, "README.md"))
class UnslothGRPOTrainer(_UnslothGRPOTrainer):
    """

    Trainer for the Group Relative Policy Optimization (GRPO) method. This algorithm was initially proposed in the
    paper [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).

    Example:

    ```python
    from datasets import load_dataset
    from trl import GRPOTrainer

    dataset = load_dataset("trl-lib/tldr", split="train")

    def reward_func(completions, **kwargs):
        # Dummy reward function that rewards completions with more unique letters.
        return [float(len(set(completion))) for completion in completions]

    trainer = GRPOTrainer(
        model="Qwen/Qwen2-0.5B-Instruct",
        reward_funcs=reward_func,
        train_dataset=dataset,
    )

    trainer.train()
    ```

    Args:
        model (`Union[str, PreTrainedModel]`):
            Model to be trained. Can be either:

            - A string, being the *model id* of a pretrained model hosted inside a model repo on huggingface.co, or
              a path to a *directory* containing model weights saved using
              [`~transformers.PreTrainedModel.save_pretrained`], e.g., `'./my_model_directory/'`. The model is
              loaded using [`~transformers.AutoModelForCausalLM.from_pretrained`] with the keyword arguments
              in `args.model_init_kwargs`.
            - A [`~transformers.PreTrainedModel`] object. Only causal language models are supported.
        reward_funcs (`Union[RewardFunc, list[RewardFunc]]`):
            Reward functions to be used for computing the rewards. To compute the rewards, we call all the reward
            functions with the prompts and completions and sum the rewards. Can be either:

            - A single reward function, such as:
                - A string: The *model ID* of a pretrained model hosted inside a model repo on huggingface.co, or a
                  path to a *directory* containing model weights saved using
                  [`~transformers.PreTrainedModel.save_pretrained`], e.g., `'./my_model_directory/'`. The model is
                  loaded using [`~transformers.AutoModelForSequenceClassification.from_pretrained`] with
                  `num_labels=1` and the keyword arguments in `args.model_init_kwargs`.
                - A [`~transformers.PreTrainedModel`] object: Only sequence classification models are supported.
                - A custom reward function: The function is provided with the prompts and the generated completions,
                  plus any additional columns in the dataset. It should return a list of rewards. Custom reward
                  functions can also return `None` when the reward is not applicable to those samples. This is useful
                  for multi-task training where different reward functions apply to different types of samples. When a
                  reward function returns `None` for a sample, that reward function is excluded from the reward
                  calculation for that sample. For more details, see
                  [Using a custom reward function](#using-a-custom-reward-function).
            - A list of reward functions, where each item can independently be any of the above types. Mixing
              different types within the list (e.g., a string model ID and a custom reward function) is allowed.
        args ([`GRPOConfig`], *optional*, defaults to `None`):
            Configuration for this trainer. If `None`, a default configuration is used.
        train_dataset ([`~datasets.Dataset`] or [`~datasets.IterableDataset`]):
            Dataset to use for training. It must include a column `"prompt"`. Any additional columns in the dataset
            are ignored. The format of the samples can be either:

            - [Standard](dataset_formats#standard): Each sample contains plain text.
            - [Conversational](dataset_formats#conversational): Each sample contains structured messages (e.g., role
              and content).
        eval_dataset ([`~datasets.Dataset`], [`~datasets.IterableDataset`] or `dict[str, Union[Dataset, IterableDataset]]`):
            Dataset to use for evaluation. It must meet the same requirements as `train_dataset`.
        processing_class ([`~transformers.PreTrainedTokenizerBase`], *optional*, defaults to `None`):
            Processing class used to process the data. The padding side must be set to "left". If `None`, the
            processing class is loaded from the model's name with [`~transformers.AutoTokenizer.from_pretrained`]. A
            padding token, `processing_class.pad_token`, must be set. If the processing class has not set a padding
            token, `processing_class.eos_token` will be used as the default.
        reward_processing_classes (`Union[PreTrainedTokenizerBase, list[PreTrainedTokenizerBase]]`, *optional*, defaults to `None`):
            Processing classes corresponding to the reward functions specified in `reward_funcs`. Can be either:

            - A single processing class: Used when `reward_funcs` contains only one reward function.
            - A list of processing classes: Must match the order and length of the reward functions in `reward_funcs`.

            If set to `None`, or if an element of the list corresponding to a [`~transformers.PreTrainedModel`] is
            `None`, the tokenizer for the model is automatically loaded using
            [`~transformers.AutoTokenizer.from_pretrained`]. For elements in `reward_funcs` that are custom reward
            functions (not [`~transformers.PreTrainedModel`]), the corresponding entries in
            `reward_processing_classes` are ignored.
        callbacks (list of [`~transformers.TrainerCallback`], *optional*, defaults to `None`):
            List of callbacks to customize the training loop. Will add those to the list of default callbacks
            detailed [here](https://huggingface.co/docs/transformers/main_classes/callback).

            If you want to remove one of the default callbacks used, use the [`~transformers.Trainer.remove_callback`]
            method.
        optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`, *optional*, defaults to `(None, None)`):
            A tuple containing the optimizer and the scheduler to use. Will default to an instance of [`AdamW`] on
            your model and a scheduler given by [`get_linear_schedule_with_warmup`] controlled by `args`.
        peft_config ([`~peft.PeftConfig`], *optional*, defaults to `None`):
            PEFT configuration used to wrap the model. If `None`, the model is not wrapped.

    """
    def __init__(
        self,
        model,
        reward_funcs,
        args = None,
        train_dataset = None,
        eval_dataset = None,
        processing_class = None,
        reward_processing_classes = None,
        callbacks = None,
        peft_config = None,
        **kwargs
    ):
        if args is None: args = UnslothGRPOConfig()
        use_bf16 = getattr(args, 'bf16', False)
        use_fp16 = getattr(args, 'fp16', False)
        force_float32 = False
        if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1':
            print('Unsloth: Switching to float32 training since model cannot work with float16')
            force_float32 = True
        mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')
        dtype = getattr(model.config, 'torch_dtype', None)
        if dtype is None: dtype = model.get_input_embeddings().dtype
        from unsloth_zoo.utils import _get_dtype
        dtype = _get_dtype(dtype)
        float16 = dtype == torch.float16
        if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')
        if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')
        if force_float32:
            args.fp16 = False
            args.bf16 = False
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
        elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':
            args.fp16 = float16
            args.bf16 = not float16
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'
        if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no':
            args.eval_strategy = 'steps'
            if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1
        ga_steps = getattr(args, 'gradient_accumulation_steps', None)
        if ga_steps is not None and ga_steps > 1:
            from transformers import __version__ as transformers_version
            if Version(transformers_version) <= Version('4.45.2'):
                print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n'
                      '`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')
        if getattr(args, 'eval_strategy', 'no') != 'no':
            eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)
            if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size
            if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps
        fp16_full_eval = getattr(args, 'fp16_full_eval', False)
        bf16_full_eval = getattr(args, 'bf16_full_eval', False)
        if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True
        if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False
        if force_float32:
            args.bf16_full_eval = False
            args.fp16_full_eval = False
        elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':
            args.bf16_full_eval = True
            args.fp16_full_eval = False
        elif not bf16_full_eval and not fp16_full_eval:
            args.bf16_full_eval = args.bf16
            args.fp16_full_eval = args.fp16
        _output_logits = False
        if locals().get('compute_metrics', None) is not None: _output_logits = True
        if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True
        if _output_logits:
            os.environ['UNSLOTH_RETURN_LOGITS'] = '1'
        if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):
            pass
        else:
            model_max_seq_length = getattr(model, 'max_seq_length', None)
            args_max_seq_length = getattr(args, 'max_seq_length', None)
            if args_max_seq_length is None and model_max_seq_length is not None:
                max_seq_length = model.max_seq_length
                if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length
        if model is not None and hasattr(model, 'for_training'):
            model.for_training()
        if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'
        if 'processing_class' in locals():
            if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'
            if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right'
        other_metrics = []
        if not isinstance(reward_funcs, list): _reward_funcs = [reward_funcs]
        else: _reward_funcs = reward_funcs
        for reward_func in _reward_funcs:
            try:
                reward_func_name = reward_func.__name__
                other_metrics.append(f'rewards/{reward_func_name}/mean')
                other_metrics.append(f'rewards/{reward_func_name}/std')
            except: pass

        from unsloth_zoo.logging_utils import PatchRLStatistics
        PatchRLStatistics('grpo_trainer', other_metrics)

        super().__init__(
            model = model,
            reward_funcs = reward_funcs,
            args = args,
            train_dataset = train_dataset,
            eval_dataset = eval_dataset,
            processing_class = processing_class,
            reward_processing_classes = reward_processing_classes,
            callbacks = callbacks,
            peft_config = peft_config,
            **kwargs,
        )
        if hasattr(self, 'neftune_hook_handle'):
            self.neftune_hook_handle.remove()
            del self.neftune_hook_handle
        if getattr(args, 'neftune_noise_alpha', None) is not None:
            model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha
        pass

pass