"""
|
|
2025.6.1
|
|
2025.6.2
|
|
4.52.4
|
|
0.18.2
|
|
__UNSLOTH_VERSIONING__
|
|
"""
|
|
from torch import Tensor
import torch
import torch.nn as nn
from torch.nn import functional as F
from trl.trainer.cpo_trainer import (Any, AutoModelForCausalLM, BaseImageProcessor, CPOConfig, CPOTrainer, Callable, DPODataCollatorWithPadding, DataCollator, DataLoader, Dataset, EvalLoopOutput, F, FeatureExtractionMixin, Literal, Optional, PartialState, PeftModel, PreTrainedModel, PreTrainedTokenizerBase, ProcessorMixin, Trainer, TrainerCallback, Union, add_bos_token_if_needed, add_eos_token_if_needed, amp, defaultdict, disable_dropout_in_model, generate_model_card, get_comet_experiment_url, inspect, is_comet_available, is_peft_available, is_torch_fx_proxy, is_wandb_available, log_table_to_comet_experiment, maybe_apply_chat_template, maybe_extract_prompt, nn, np, nullcontext, os, pad_to_length, pd, peft_module_casting_to_bf16, prepare_model_for_kbit_training, random, textwrap, torch, transformers, version, warnings)

import os
from typing import *
from dataclasses import dataclass, field
from packaging.version import Version
import torch
import numpy as np
from contextlib import nullcontext
from torch.nn import functional as F
from transformers import DataCollatorForSeq2Seq, DataCollatorForLanguageModeling as TransformersDataCollatorForLanguageModeling
torch_compile_options = {
    "epilogue_fusion"   : True,
    "max_autotune"      : False,
    "shape_padding"     : True,
    "trace.enabled"     : False,
    "triton.cudagraphs" : False,
}

@torch.compile(dynamic = True, fullgraph = True, options = torch_compile_options,)
def selective_log_softmax(logits, index):
    logits = logits.to(torch.float32)
    selected_logits = torch.gather(logits, dim = -1, index = index.unsqueeze(-1)).squeeze(-1)
    # loop to reduce peak mem consumption
    # logsumexp_values = torch.stack([torch.logsumexp(lg, dim=-1) for lg in logits])
    logsumexp_values = torch.logsumexp(logits, dim = -1)
    per_token_logps = selected_logits - logsumexp_values  # log_softmax(x_i) = x_i - logsumexp(x)
    return per_token_logps
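
# Illustrative sketch (not exercised at import time; toy shapes are arbitrary):
# `selective_log_softmax` returns log_softmax(logits) gathered at `index`
# without materializing the full (batch, seq, vocab) log-softmax tensor, since
# log_softmax(x_i) = x_i - logsumexp(x). The helper below only demonstrates
# the equivalence with the naive formulation.
def _selective_log_softmax_demo():
    logits = torch.randn(2, 5, 11)         # (batch, seq, vocab)
    index  = torch.randint(0, 11, (2, 5))  # token ids to score
    naive  = torch.log_softmax(logits.float(), dim = -1)
    naive  = torch.gather(naive, dim = -1, index = index.unsqueeze(-1)).squeeze(-1)
    fused  = selective_log_softmax(logits, index)
    assert torch.allclose(naive, fused, atol = 1e-5)
    return fused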
@dataclass
class UnslothCPOConfig(CPOConfig):
    """

    Configuration class for the [`CPOTrainer`].

    Using [`~transformers.HfArgumentParser`] we can turn this class into
    [argparse](https://docs.python.org/3/library/argparse#module-argparse) arguments that can be specified on the
    command line.

    Parameters:
        learning_rate (`float`, *optional*, defaults to `1e-6`):
            Initial learning rate for [`AdamW`] optimizer. The default value replaces that of
            [`~transformers.TrainingArguments`].
        max_length (`int` or `None`, *optional*, defaults to `1024`):
            Maximum length of the sequences (prompt + completion) in the batch. This argument is required if you want
            to use the default data collator.
        max_prompt_length (`int` or `None`, *optional*, defaults to `512`):
            Maximum length of the prompt. This argument is required if you want to use the default data collator.
        max_completion_length (`int` or `None`, *optional*, defaults to `None`):
            Maximum length of the completion. This argument is required if you want to use the default data collator
            and your model is an encoder-decoder.
        beta (`float`, *optional*, defaults to `0.1`):
            Parameter controlling the deviation from the reference model. Higher β means less deviation from the
            reference model. For the IPO loss (`loss_type="ipo"`), β is the regularization parameter denoted by τ in
            the [paper](https://huggingface.co/papers/2310.12036).
        label_smoothing (`float`, *optional*, defaults to `0.0`):
            Label smoothing factor. This argument is required if you want to use the default data collator.
        loss_type (`str`, *optional*, defaults to `"sigmoid"`):
            Type of loss to use. Possible values are:

                - `"sigmoid"`: sigmoid loss from the original [DPO](https://huggingface.co/papers/2305.18290) paper.
                - `"hinge"`: hinge loss on the normalized likelihood from the [SLiC](https://huggingface.co/papers/2305.10425) paper.
                - `"ipo"`: IPO loss from the [IPO](https://huggingface.co/papers/2310.12036) paper.
                - `"simpo"`: SimPO loss from the [SimPO](https://huggingface.co/papers/2405.14734) paper.

        disable_dropout (`bool`, *optional*, defaults to `True`):
            Whether to disable dropout in the model.
        cpo_alpha (`float`, *optional*, defaults to `1.0`):
            Weight of the BC regularizer in CPO training.
        simpo_gamma (`float`, *optional*, defaults to `0.5`):
            Target reward margin for the SimPO loss, used only when `loss_type="simpo"`.
        label_pad_token_id (`int`, *optional*, defaults to `-100`):
            Label pad token id. This argument is required if you want to use the default data collator.
        padding_value (`int` or `None`, *optional*, defaults to `None`):
            Padding value to use. If `None`, the padding value of the tokenizer is used.
        truncation_mode (`str`, *optional*, defaults to `"keep_end"`):
            Truncation mode to use when the prompt is too long. Possible values are `"keep_end"` or `"keep_start"`.
            This argument is required if you want to use the default data collator.
        generate_during_eval (`bool`, *optional*, defaults to `False`):
            If `True`, generates and logs completions from the model to W&B or Comet during evaluation.
        is_encoder_decoder (`bool` or `None`, *optional*, defaults to `None`):
            When using the `model_init` argument (callable) to instantiate the model instead of the `model` argument,
            you need to specify if the model returned by the callable is an encoder-decoder model.
        model_init_kwargs (`dict[str, Any]` or `None`, *optional*, defaults to `None`):
            Keyword arguments to pass to `AutoModelForCausalLM.from_pretrained` when instantiating the model from a
            string.
        dataset_num_proc (`int` or `None`, *optional*, defaults to `None`):
            Number of processes to use for processing the dataset.

    """
    vllm_sampling_params: Optional[Any] = field(
        default = None,
        metadata = {'help': 'vLLM SamplingParams'},
    )
    unsloth_num_chunks : Optional[int] = field(
        default = -1,
        metadata = {'help': 'Chunk size to reduce memory usage. -1 is most efficient.'},
    )
    def __init__(
        self,
        output_dir = None,
        overwrite_output_dir = None,
        do_train = False,
        do_eval = False,
        do_predict = False,
        eval_strategy = 'no',
        prediction_loss_only = False,
        per_device_train_batch_size = 4,
        per_device_eval_batch_size = 4,
        per_gpu_train_batch_size = None,
        per_gpu_eval_batch_size = None,
        gradient_accumulation_steps = 2,
        eval_accumulation_steps = 2,
        eval_delay = 0,
        torch_empty_cache_steps = 250,
        learning_rate = 5e-05,
        weight_decay = 0.01,
        adam_beta1 = 0.9,
        adam_beta2 = 0.999,
        adam_epsilon = 1e-08,
        max_grad_norm = 1.0,
        num_train_epochs = 3.0,
        max_steps = -1,
        lr_scheduler_type = 'linear',
        warmup_ratio = 0.1,
        warmup_steps = 0,
        log_level = 'passive',
        log_level_replica = 'warning',
        log_on_each_node = True,
        logging_dir = None,
        logging_strategy = 'steps',
        logging_first_step = False,
        logging_steps = 1,
        logging_nan_inf_filter = False,
        save_strategy = 'steps',
        save_steps = 500,
        save_total_limit = None,
        save_safetensors = True,
        save_on_each_node = False,
        save_only_model = False,
        restore_callback_states_from_checkpoint = False,
        no_cuda = False,
        use_cpu = False,
        use_mps_device = False,
        seed = 3407,
        data_seed = 3407,
        jit_mode_eval = False,
        use_ipex = False,
        bf16 = False,
        fp16 = False,
        fp16_opt_level = 'O1',
        half_precision_backend = 'auto',
        bf16_full_eval = False,
        fp16_full_eval = False,
        tf32 = None,
        local_rank = -1,
        ddp_backend = None,
        tpu_num_cores = None,
        tpu_metrics_debug = False,
        debug = '',
        dataloader_drop_last = False,
        eval_steps = None,
        dataloader_num_workers = 0,
        dataloader_prefetch_factor = None,
        past_index = -1,
        run_name = None,
        disable_tqdm = None,
        remove_unused_columns = True,
        label_names = None,
        load_best_model_at_end = False,
        metric_for_best_model = None,
        greater_is_better = None,
        ignore_data_skip = False,
        fsdp = '',
        fsdp_min_num_params = 0,
        fsdp_config = None,
        fsdp_transformer_layer_cls_to_wrap = None,
        accelerator_config = None,
        deepspeed = None,
        label_smoothing_factor = 0.0,
        optim = 'adamw_8bit',
        optim_args = None,
        adafactor = False,
        group_by_length = False,
        length_column_name = 'length',
        report_to = None,
        ddp_find_unused_parameters = None,
        ddp_bucket_cap_mb = None,
        ddp_broadcast_buffers = None,
        dataloader_pin_memory = True,
        dataloader_persistent_workers = False,
        skip_memory_metrics = True,
        use_legacy_prediction_loop = False,
        push_to_hub = False,
        resume_from_checkpoint = None,
        hub_model_id = None,
        hub_strategy = 'every_save',
        hub_token = None,
        hub_private_repo = None,
        hub_always_push = False,
        gradient_checkpointing = False,
        gradient_checkpointing_kwargs = None,
        include_inputs_for_metrics = False,
        eval_do_concat_batches = True,
        fp16_backend = 'auto',
        push_to_hub_model_id = None,
        push_to_hub_organization = None,
        push_to_hub_token = None,
        mp_parameters = '',
        auto_find_batch_size = False,
        full_determinism = False,
        torchdynamo = None,
        ray_scope = 'last',
        ddp_timeout = 1800,
        torch_compile = False,
        torch_compile_backend = None,
        torch_compile_mode = None,
        include_tokens_per_second = False,
        include_num_input_tokens_seen = False,
        neftune_noise_alpha = None,
        optim_target_modules = None,
        batch_eval_metrics = False,
        eval_on_start = False,
        use_liger_kernel = False,
        eval_use_gather_object = False,
        average_tokens_across_devices = False,
        max_length = 1024,
        max_prompt_length = 512,
        max_completion_length = None,
        beta = 0.1,
        label_smoothing = 0.0,
        loss_type = 'sigmoid',
        disable_dropout = True,
        cpo_alpha = 1.0,
        simpo_gamma = 0.5,
        label_pad_token_id = -100,
        padding_value = None,
        truncation_mode = 'keep_end',
        generate_during_eval = False,
        is_encoder_decoder = None,
        model_init_kwargs = None,
        dataset_num_proc = None,
        vllm_sampling_params = None,
        unsloth_num_chunks = -1,
        **kwargs,
    ):
        if learning_rate < 1e-7: raise FloatingPointError(f'Unsloth: Your learning rate of `{learning_rate}` is too small and less than 1e-7! Consider increasing it, otherwise gradient updates will be close to 0!')
        if learning_rate > 1: raise OverflowError(f'Unsloth: Your learning rate of `{learning_rate}` is way too large (> 1)! Consider decreasing it to 1e-1, otherwise gradient updates will explode!')
        if output_dir is None and save_strategy == 'steps' and save_steps == 500:
            output_dir = 'unsloth_training_checkpoints'
            save_strategy = 'no'
        if dataset_num_proc is None:
            from multiprocessing import cpu_count
            dataset_num_proc = cpu_count()
        super().__init__(
            output_dir = output_dir,
            overwrite_output_dir = overwrite_output_dir,
            do_train = do_train,
            do_eval = do_eval,
            do_predict = do_predict,
            eval_strategy = eval_strategy,
            prediction_loss_only = prediction_loss_only,
            per_device_train_batch_size = per_device_train_batch_size,
            per_device_eval_batch_size = per_device_eval_batch_size,
            per_gpu_train_batch_size = per_gpu_train_batch_size,
            per_gpu_eval_batch_size = per_gpu_eval_batch_size,
            gradient_accumulation_steps = gradient_accumulation_steps,
            eval_accumulation_steps = eval_accumulation_steps,
            eval_delay = eval_delay,
            torch_empty_cache_steps = torch_empty_cache_steps,
            learning_rate = learning_rate,
            weight_decay = weight_decay,
            adam_beta1 = adam_beta1,
            adam_beta2 = adam_beta2,
            adam_epsilon = adam_epsilon,
            max_grad_norm = max_grad_norm,
            num_train_epochs = num_train_epochs,
            max_steps = max_steps,
            lr_scheduler_type = lr_scheduler_type,
            warmup_ratio = warmup_ratio,
            warmup_steps = warmup_steps,
            log_level = log_level,
            log_level_replica = log_level_replica,
            log_on_each_node = log_on_each_node,
            logging_dir = logging_dir,
            logging_strategy = logging_strategy,
            logging_first_step = logging_first_step,
            logging_steps = logging_steps,
            logging_nan_inf_filter = logging_nan_inf_filter,
            save_strategy = save_strategy,
            save_steps = save_steps,
            save_total_limit = save_total_limit,
            save_safetensors = save_safetensors,
            save_on_each_node = save_on_each_node,
            save_only_model = save_only_model,
            restore_callback_states_from_checkpoint = restore_callback_states_from_checkpoint,
            no_cuda = no_cuda,
            use_cpu = use_cpu,
            use_mps_device = use_mps_device,
            seed = seed,
            data_seed = data_seed,
            jit_mode_eval = jit_mode_eval,
            use_ipex = use_ipex,
            bf16 = bf16,
            fp16 = fp16,
            fp16_opt_level = fp16_opt_level,
            half_precision_backend = half_precision_backend,
            bf16_full_eval = bf16_full_eval,
            fp16_full_eval = fp16_full_eval,
            tf32 = tf32,
            local_rank = local_rank,
            ddp_backend = ddp_backend,
            tpu_num_cores = tpu_num_cores,
            tpu_metrics_debug = tpu_metrics_debug,
            debug = debug,
            dataloader_drop_last = dataloader_drop_last,
            eval_steps = eval_steps,
            dataloader_num_workers = dataloader_num_workers,
            dataloader_prefetch_factor = dataloader_prefetch_factor,
            past_index = past_index,
            run_name = run_name,
            disable_tqdm = disable_tqdm,
            remove_unused_columns = remove_unused_columns,
            label_names = label_names,
            load_best_model_at_end = load_best_model_at_end,
            metric_for_best_model = metric_for_best_model,
            greater_is_better = greater_is_better,
            ignore_data_skip = ignore_data_skip,
            fsdp = fsdp,
            fsdp_min_num_params = fsdp_min_num_params,
            fsdp_config = fsdp_config,
            fsdp_transformer_layer_cls_to_wrap = fsdp_transformer_layer_cls_to_wrap,
            accelerator_config = accelerator_config,
            deepspeed = deepspeed,
            label_smoothing_factor = label_smoothing_factor,
            optim = optim,
            optim_args = optim_args,
            adafactor = adafactor,
            group_by_length = group_by_length,
            length_column_name = length_column_name,
            report_to = report_to,
            ddp_find_unused_parameters = ddp_find_unused_parameters,
            ddp_bucket_cap_mb = ddp_bucket_cap_mb,
            ddp_broadcast_buffers = ddp_broadcast_buffers,
            dataloader_pin_memory = dataloader_pin_memory,
            dataloader_persistent_workers = dataloader_persistent_workers,
            skip_memory_metrics = skip_memory_metrics,
            use_legacy_prediction_loop = use_legacy_prediction_loop,
            push_to_hub = push_to_hub,
            resume_from_checkpoint = resume_from_checkpoint,
            hub_model_id = hub_model_id,
            hub_strategy = hub_strategy,
            hub_token = hub_token,
            hub_private_repo = hub_private_repo,
            hub_always_push = hub_always_push,
            gradient_checkpointing = gradient_checkpointing,
            gradient_checkpointing_kwargs = gradient_checkpointing_kwargs,
            include_inputs_for_metrics = include_inputs_for_metrics,
            eval_do_concat_batches = eval_do_concat_batches,
            fp16_backend = fp16_backend,
            push_to_hub_model_id = push_to_hub_model_id,
            push_to_hub_organization = push_to_hub_organization,
            push_to_hub_token = push_to_hub_token,
            mp_parameters = mp_parameters,
            auto_find_batch_size = auto_find_batch_size,
            full_determinism = full_determinism,
            torchdynamo = torchdynamo,
            ray_scope = ray_scope,
            ddp_timeout = ddp_timeout,
            torch_compile = torch_compile,
            torch_compile_backend = torch_compile_backend,
            torch_compile_mode = torch_compile_mode,
            include_tokens_per_second = include_tokens_per_second,
            include_num_input_tokens_seen = include_num_input_tokens_seen,
            neftune_noise_alpha = neftune_noise_alpha,
            optim_target_modules = optim_target_modules,
            batch_eval_metrics = batch_eval_metrics,
            eval_on_start = eval_on_start,
            use_liger_kernel = use_liger_kernel,
            eval_use_gather_object = eval_use_gather_object,
            average_tokens_across_devices = average_tokens_across_devices,
            max_length = max_length,
            max_prompt_length = max_prompt_length,
            max_completion_length = max_completion_length,
            beta = beta,
            label_smoothing = label_smoothing,
            loss_type = loss_type,
            disable_dropout = disable_dropout,
            cpo_alpha = cpo_alpha,
            simpo_gamma = simpo_gamma,
            label_pad_token_id = label_pad_token_id,
            padding_value = padding_value,
            truncation_mode = truncation_mode,
            generate_during_eval = generate_during_eval,
            is_encoder_decoder = is_encoder_decoder,
            model_init_kwargs = model_init_kwargs,
            dataset_num_proc = dataset_num_proc,
            **kwargs,
        )
        self.vllm_sampling_params = vllm_sampling_params
        self.unsloth_num_chunks = unsloth_num_chunks
        pass
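
# Illustrative sketch (hypothetical values, not executed at import time):
# UnslothCPOConfig is a drop-in CPOConfig, so it is constructed the same way;
# `unsloth_num_chunks` and `vllm_sampling_params` are the Unsloth-specific
# additions. The `output_dir` path and hyperparameters below are examples only.
def _cpo_config_demo():
    return UnslothCPOConfig(
        output_dir = "outputs",             # hypothetical path
        per_device_train_batch_size = 2,
        gradient_accumulation_steps = 4,
        learning_rate = 5e-6,
        beta = 0.1,
        loss_type = "sigmoid",
        max_length = 1024,
        max_prompt_length = 512,
        unsloth_num_chunks = -1,            # -1 lets Unsloth choose the chunking
    )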
class _UnslothCPOTrainer(Trainer):
    r""""""

    _tag_names = ["trl", "cpo"]

    def __init__(
        self,
        model: Optional[Union[PreTrainedModel, nn.Module, str]] = None,
        args: Optional[CPOConfig] = None,
        data_collator: Optional[DataCollator] = None,
        train_dataset: Optional[Dataset] = None,
        eval_dataset: Optional[Union[Dataset, dict[str, Dataset]]] = None,
        processing_class: Optional[
            Union[PreTrainedTokenizerBase, BaseImageProcessor, FeatureExtractionMixin, ProcessorMixin]
        ] = None,
        model_init: Optional[Callable[[], PreTrainedModel]] = None,
        callbacks: Optional[list[TrainerCallback]] = None,
        optimizers: tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR] = (None, None),
        preprocess_logits_for_metrics: Optional[Callable[[torch.Tensor, torch.Tensor], torch.Tensor]] = None,
        peft_config: Optional[dict] = None,
        compute_metrics: Optional[Callable[[EvalLoopOutput], dict]] = None,
    ):
        if args.model_init_kwargs is None:
            model_init_kwargs = {}
        elif not isinstance(model, str):
            raise ValueError("You passed model_kwargs to the CPOTrainer. But your model is already instantiated.")
        else:
            model_init_kwargs = args.model_init_kwargs
            torch_dtype = model_init_kwargs.get("torch_dtype")
            if torch_dtype is not None:
                # Convert to `torch.dtype` if an str is passed
                if isinstance(torch_dtype, str) and torch_dtype != "auto":
                    torch_dtype = getattr(torch, torch_dtype)
                if torch_dtype != "auto" and not isinstance(torch_dtype, torch.dtype):
                    raise ValueError(
                        f"Invalid `torch_dtype` passed to the CPOConfig. Expected a string with either `torch.dtype` or 'auto', but got {torch_dtype}."
                    )
                model_init_kwargs["torch_dtype"] = torch_dtype

        if isinstance(model, str):
            model = AutoModelForCausalLM.from_pretrained(model, **model_init_kwargs)

        # Initialize this variable to False. This helps tracking the case when `peft_module_casting_to_bf16`
        # has been called in order to properly call autocast if needed.
        self._peft_has_been_casted_to_bf16 = False

        if not is_peft_available() and peft_config is not None:
            raise ValueError(
                "PEFT is not installed and you passed a `peft_config` in the trainer's kwargs, please install it to use the PEFT models"
            )
        elif is_peft_available() and peft_config is not None:
            # if model is a peft model and we have a peft_config, we merge and unload it first
            if isinstance(model, PeftModel):
                model = model.merge_and_unload()

            if getattr(model, "is_loaded_in_8bit", False) or getattr(model, "is_loaded_in_4bit", False):
                _support_gc_kwargs = hasattr(
                    args, "gradient_checkpointing_kwargs"
                ) and "gradient_checkpointing_kwargs" in list(
                    inspect.signature(prepare_model_for_kbit_training).parameters
                )

                prepare_model_kwargs = {"use_gradient_checkpointing": args.gradient_checkpointing}

                if _support_gc_kwargs:
                    prepare_model_kwargs["gradient_checkpointing_kwargs"] = args.gradient_checkpointing_kwargs

                model = prepare_model_for_kbit_training(model, **prepare_model_kwargs)
            elif getattr(args, "gradient_checkpointing", False):
                # For backward compatibility with older versions of transformers
                if hasattr(model, "enable_input_require_grads"):
                    model.enable_input_require_grads()
                else:

                    def make_inputs_require_grad(module, input, output):
                        output.requires_grad_(True)

                    model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

            # get peft model with the given config
            model = model
            if args.bf16 and getattr(model, "is_loaded_in_4bit", False):
                peft_module_casting_to_bf16(model)
                # If args.bf16 we need to explicitly call `generate` with torch amp autocast context manager
                self._peft_has_been_casted_to_bf16 = True

        # For models that use gradient_checkpointing, we need to attach a hook that enables input
        # to explicitly have `requires_grad=True`, otherwise training will either silently
        # fail or completely fail.
        elif getattr(args, "gradient_checkpointing", False):
            # For backward compatibility with older versions of transformers
            if hasattr(model, "enable_input_require_grads"):
                model.enable_input_require_grads()
            else:

                def make_inputs_require_grad(module, input, output):
                    output.requires_grad_(True)

                model.get_input_embeddings().register_forward_hook(make_inputs_require_grad)

        if args.generate_during_eval and not (is_wandb_available() or is_comet_available()):
            raise ValueError(
                "`generate_during_eval=True` requires Weights and Biases or Comet to be installed."
                " Please install `wandb` or `comet-ml` to resolve."
            )
        if model is not None:
            self.is_encoder_decoder = model.config.is_encoder_decoder
        elif args.is_encoder_decoder is None:
            raise ValueError("When no model is provided, you need to pass the parameter is_encoder_decoder.")
        else:
            self.is_encoder_decoder = args.is_encoder_decoder

        if self.is_encoder_decoder:
            self.decoder_start_token_id = model.config.decoder_start_token_id
            self.pad_token_id = model.config.pad_token_id

        if processing_class is None:
            raise ValueError("processing_class must be specified to tokenize a CPO dataset.")
        if args.max_length is None:
            warnings.warn(
                "`max_length` is not set in the CPOConfig's init;"
                " it will default to `512`, but you should set it yourself in the future.",
                UserWarning,
            )
            max_length = 512
        else:
            max_length = args.max_length
        if args.max_prompt_length is None:
            warnings.warn(
                "`max_prompt_length` is not set in the CPOConfig's init;"
                " it will default to `128`, but you should set it yourself in the future.",
                UserWarning,
            )
            max_prompt_length = 128
        else:
            max_prompt_length = args.max_prompt_length

        if not max_prompt_length < max_length:
            raise ValueError(
                f"max_prompt_length ({max_prompt_length}) should be strictly less than max_length ({max_length})."
            )

        if args.max_completion_length is None and self.is_encoder_decoder:
            warnings.warn(
                "When using an encoder-decoder architecture, you should set `max_completion_length` in the CPOConfig's init;"
                " it will default to `128`, but you should set it yourself in the future.",
                UserWarning,
            )
            max_completion_length = 128
        else:
            max_completion_length = args.max_completion_length
        if data_collator is None:
            data_collator = DPODataCollatorWithPadding(
                pad_token_id=processing_class.pad_token_id,
                label_pad_token_id=args.label_pad_token_id,
                is_encoder_decoder=self.is_encoder_decoder,
            )

            if args.remove_unused_columns:
                args.remove_unused_columns = False
                # warn users
                warnings.warn(
                    "When using DPODataCollatorWithPadding, you should set `remove_unused_columns=False` in your TrainingArguments;"
                    " we have set it for you, but you should do it yourself in the future.",
                    UserWarning,
                )

            self.use_dpo_data_collator = True
        else:
            self.use_dpo_data_collator = False

        # Disable dropout in the model
        if args.disable_dropout:
            disable_dropout_in_model(model)
        self.max_length = max_length
        self.generate_during_eval = args.generate_during_eval
        self.label_pad_token_id = args.label_pad_token_id
        self.padding_value = args.padding_value if args.padding_value is not None else processing_class.pad_token_id
        self.max_prompt_length = max_prompt_length
        self.truncation_mode = args.truncation_mode
        self.max_completion_length = max_completion_length
        self.processing_class = processing_class

        if args.loss_type in ["hinge", "ipo"] and args.label_smoothing > 0:
            warnings.warn(
                f"You are using the {args.loss_type} loss type that does not support label smoothing. The "
                "`label_smoothing` parameter will be ignored. Set `label_smoothing` to `0.0` to remove this warning.",
                UserWarning,
            )
        if args.loss_type == "kto_pair":
            raise ValueError("Support for kto_pair has been removed in CPOTrainer. Please use KTOTrainer.")

        self.beta = args.beta
        self.label_smoothing = args.label_smoothing
        self.loss_type = args.loss_type
        self.cpo_alpha = args.cpo_alpha
        self.aux_loss_enabled = getattr(model.config, "output_router_logits", False)
        self.aux_loss_coef = getattr(model.config, "router_aux_loss_coef", 0.0)
        if self.aux_loss_enabled and self.aux_loss_coef == 0.0:
            warnings.warn(
                "You set `output_router_logits` to `True` in the model config, but `router_aux_loss_coef` is set to "
                "`0.0`, meaning the auxiliary loss will not be used. Either set `router_aux_loss_coef` to a value "
                "greater than `0.0`, or set `output_router_logits` to `False` if you don't want to use the auxiliary "
                "loss.",
                UserWarning,
            )

        if args.loss_type == "simpo":
            self.simpo_gamma = args.simpo_gamma

        self._stored_metrics = defaultdict(lambda: defaultdict(list))

        # The trainer estimates the number of FLOPs (floating-point operations) using the number of elements in the
        # input tensor associated with the key "input_ids". However, in CPO, the sampled data does not include the
        # "input_ids" key. Instead, the available keys are "prompt_input_ids", "chosen_input_ids", and
        # "rejected_input_ids". As a result, the trainer issues the warning: "Could not estimate the number of tokens
        # of the input, floating-point operations will not be computed." To suppress this warning, we set the
        # "estimate_tokens" key in the model's "warnings_issued" dictionary to True. This acts as a flag to indicate
        # that the warning has already been issued.
        model.warnings_issued["estimate_tokens"] = True

        # Compute that only on the main process for faster data processing.
        # see: https://github.com/huggingface/trl/pull/1255
        with PartialState().main_process_first():
            # Extract the prompt if needed, and apply the chat template if needed
            train_dataset = train_dataset.map(maybe_extract_prompt, num_proc=args.dataset_num_proc)
            train_dataset = train_dataset.map(
                maybe_apply_chat_template, fn_kwargs={"tokenizer": processing_class}, num_proc=args.dataset_num_proc
            )
            if eval_dataset is not None:
                eval_dataset = eval_dataset.map(maybe_extract_prompt, num_proc=args.dataset_num_proc)
                eval_dataset = eval_dataset.map(
                    maybe_apply_chat_template,
                    fn_kwargs={"tokenizer": processing_class},
                    num_proc=args.dataset_num_proc,
                )

            # tokenize the dataset
            train_dataset = train_dataset.map(self.tokenize_row, num_proc=args.dataset_num_proc)
            if eval_dataset is not None:
                eval_dataset = eval_dataset.map(self.tokenize_row, num_proc=args.dataset_num_proc)

        super().__init__(
            model=model,
            args=args,
            data_collator=data_collator,
            train_dataset=train_dataset,
            eval_dataset=eval_dataset,
            processing_class=processing_class,
            model_init=model_init,
            compute_metrics=compute_metrics,
            callbacks=callbacks,
            optimizers=optimizers,
            preprocess_logits_for_metrics=preprocess_logits_for_metrics,
        )

        # Gradient accumulation requires scaled loss. Normally, loss scaling in the parent class depends on whether the
        # model accepts loss-related kwargs. Since we compute our own loss, this check is irrelevant. We set
        # self.model_accepts_loss_kwargs to False to enable scaling.
        self.model_accepts_loss_kwargs = False

        # Add tags for models that have been loaded with the correct transformers version
        if hasattr(self.model, "add_model_tags"):
            self.model.add_model_tags(self._tag_names)

        if not hasattr(self, "accelerator"):
            raise AttributeError(
                "Your `Trainer` does not have an `accelerator` object. Consider upgrading `transformers`."
            )
    def build_tokenized_answer(self, prompt, answer):
        """
        Llama tokenizer does not satisfy `enc(a + b) = enc(a) + enc(b)`.
        It does ensure `enc(a + b) = enc(a) + enc(a + b)[len(enc(a)):]`.
        Reference:
            https://github.com/EleutherAI/lm-evaluation-harness/pull/531#issuecomment-1595586257
        """

        full_tokenized = self.processing_class(prompt + answer, add_special_tokens=False)
        prompt_input_ids = self.processing_class(prompt, add_special_tokens=False)["input_ids"]

        answer_input_ids = full_tokenized["input_ids"][len(prompt_input_ids) :]
        answer_attention_mask = full_tokenized["attention_mask"][len(prompt_input_ids) :]

        # Concat tokens to form `enc(a) + enc(a + b)[len(enc(a)):]`
        full_concat_input_ids = np.concatenate([prompt_input_ids, answer_input_ids])

        # Prepare input tokens for token by token comparison
        full_input_ids = np.array(full_tokenized["input_ids"])

        if len(full_input_ids) != len(full_concat_input_ids):
            raise ValueError("Prompt input ids and answer input ids should have the same length.")

        # On some tokenizers, like Llama-2 tokenizer, there are occasions where tokens
        # can be merged together when tokenizing prompt+answer. This could result
        # in the last token from the prompt being different when tokenized on its own
        # vs when done as prompt+answer.
        response_token_ids_start_idx = len(prompt_input_ids)

        # If the tokenized prompt differs from the prompt slice of prompt+answer, the
        # last token has changed due to merging.
        if prompt_input_ids != full_tokenized["input_ids"][:response_token_ids_start_idx]:
            response_token_ids_start_idx -= 1

        prompt_input_ids = full_tokenized["input_ids"][:response_token_ids_start_idx]
        prompt_attention_mask = full_tokenized["attention_mask"][:response_token_ids_start_idx]

        if len(prompt_input_ids) != len(prompt_attention_mask):
            raise ValueError("Prompt input ids and attention mask should have the same length.")

        answer_input_ids = full_tokenized["input_ids"][response_token_ids_start_idx:]
        answer_attention_mask = full_tokenized["attention_mask"][response_token_ids_start_idx:]

        return dict(
            prompt_input_ids=prompt_input_ids,
            prompt_attention_mask=prompt_attention_mask,
            input_ids=answer_input_ids,
            attention_mask=answer_attention_mask,
        )
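
    # Illustrative sketch (hypothetical strings; depends on the tokenizer in use):
    # with a BPE tokenizer, enc("A") + enc("B") can differ from enc("AB") when the
    # boundary tokens merge, so `build_tokenized_answer` splits the *joint*
    # tokenization instead, re-deriving the split point if the boundary merged.
    # The split therefore always reconstructs the full tokenization exactly.
    def _build_tokenized_answer_demo(self, prompt = "Hello wor", answer = "ld!"):
        out = self.build_tokenized_answer(prompt, answer)
        full = self.processing_class(prompt + answer, add_special_tokens=False)["input_ids"]
        assert out["prompt_input_ids"] + out["input_ids"] == full
        return out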
    def tokenize_row(self, feature, model: Optional[Union[PreTrainedModel, nn.Module]] = None) -> dict:
        """Tokenize a single row from a CPO specific dataset.

        At this stage, we don't convert to PyTorch tensors yet; we just handle the truncation
        in case the prompt + chosen or prompt + rejected responses is/are too long. First
        we truncate the prompt; if we're still too long, we truncate the chosen/rejected.

        We also create the labels for the chosen/rejected responses, which are of length equal to
        the sum of the length of the prompt and the chosen/rejected response, with
        label_pad_token_id for the prompt tokens.
        """
        batch = {}
        prompt = feature["prompt"]
        chosen = feature["chosen"]
        rejected = feature["rejected"]

        if not self.is_encoder_decoder:
            # Check issues below for more details
            #  1. https://github.com/huggingface/trl/issues/907
            #  2. https://github.com/EleutherAI/lm-evaluation-harness/pull/531#issuecomment-1595586257
            #  3. https://github.com/LianjiaTech/BELLE/issues/337

            if not isinstance(prompt, str):
                raise ValueError(f"prompt should be an str but got {type(prompt)}")
            prompt_tokens = self.processing_class(prompt, add_special_tokens=False)
            prompt_tokens = {f"prompt_{k}": v for k, v in prompt_tokens.items()}

            if not isinstance(chosen, str):
                raise ValueError(f"chosen should be an str but got {type(chosen)}")
            chosen_tokens = self.build_tokenized_answer(prompt, chosen)

            if not isinstance(rejected, str):
                raise ValueError(f"rejected should be an str but got {type(rejected)}")
            rejected_tokens = self.build_tokenized_answer(prompt, rejected)

            # Last prompt token might get merged by tokenizer and
            # it should not be included for generation if that happens
            prompt_len_input_ids = len(prompt_tokens["prompt_input_ids"])

            chosen_prompt_len_input_ids = len(chosen_tokens["prompt_input_ids"])
            rejected_prompt_len_input_ids = len(rejected_tokens["prompt_input_ids"])
            prompt_len_input_ids = min(chosen_prompt_len_input_ids, rejected_prompt_len_input_ids)

            for k, v in prompt_tokens.items():
                prompt_tokens[k] = v[:prompt_len_input_ids]

            # Make sure prompts only have one different token at most,
            # and length only differs by 1 at most
            num_diff_tokens = sum(
                [a != b for a, b in zip(chosen_tokens["prompt_input_ids"], rejected_tokens["prompt_input_ids"])]
            )
            num_diff_len = abs(chosen_prompt_len_input_ids - rejected_prompt_len_input_ids)
            if num_diff_tokens > 1 or num_diff_len > 1:
                raise ValueError(
                    "Chosen and rejected prompt_input_ids might only differ on the "
                    "last token due to tokenizer merge ops."
                )

            # add BOS token to head of prompt. Avoid adding if it's already there
            prompt_tokens, chosen_tokens, rejected_tokens = add_bos_token_if_needed(
                self.processing_class.bos_token_id,
                prompt_len_input_ids,
                prompt_tokens,
                chosen_prompt_len_input_ids,
                chosen_tokens,
                rejected_prompt_len_input_ids,
                rejected_tokens,
            )

            # add EOS token to end of answer. Avoid adding if it's already there
            chosen_tokens, rejected_tokens = add_eos_token_if_needed(
                self.processing_class.eos_token_id, chosen_tokens, rejected_tokens
            )

            longer_response_length = max(len(chosen_tokens["input_ids"]), len(rejected_tokens["input_ids"]))

            # if combined sequence is too long, truncate the prompt
            for answer_tokens in [chosen_tokens, rejected_tokens, prompt_tokens]:
                if len(answer_tokens["prompt_input_ids"]) + longer_response_length > self.max_length:
                    if self.truncation_mode == "keep_start":
                        for k in ["prompt_input_ids", "prompt_attention_mask"]:
                            answer_tokens[k] = answer_tokens[k][: self.max_prompt_length]
                    elif self.truncation_mode == "keep_end":
                        for k in ["prompt_input_ids", "prompt_attention_mask"]:
                            answer_tokens[k] = answer_tokens[k][-self.max_prompt_length :]
                    else:
                        raise ValueError(f"Unknown truncation mode: {self.truncation_mode}")

            # if that's still too long, truncate the response
            for answer_tokens in [chosen_tokens, rejected_tokens]:
                if len(answer_tokens["prompt_input_ids"]) + longer_response_length > self.max_length:
                    for k in ["input_ids", "attention_mask"]:
                        answer_tokens[k] = answer_tokens[k][: self.max_length - self.max_prompt_length]

            # Create labels
            chosen_sequence_tokens = {
                k: chosen_tokens[f"prompt_{k}"] + chosen_tokens[k] for k in ["input_ids", "attention_mask"]
            }
            rejected_sequence_tokens = {
                k: rejected_tokens[f"prompt_{k}"] + rejected_tokens[k] for k in ["input_ids", "attention_mask"]
            }
            chosen_sequence_tokens["labels"] = chosen_sequence_tokens["input_ids"][:]
            chosen_sequence_tokens["labels"][: len(chosen_tokens["prompt_input_ids"])] = [
                self.label_pad_token_id
            ] * len(chosen_tokens["prompt_input_ids"])
            rejected_sequence_tokens["labels"] = rejected_sequence_tokens["input_ids"][:]
            rejected_sequence_tokens["labels"][: len(rejected_tokens["prompt_input_ids"])] = [
                self.label_pad_token_id
            ] * len(rejected_tokens["prompt_input_ids"])

            for k, toks in {
                "chosen_": chosen_sequence_tokens,
                "rejected_": rejected_sequence_tokens,
                "": prompt_tokens,
            }.items():
                for type_key, tokens in toks.items():
                    if type_key == "token_type_ids":
                        continue
                    batch[f"{k}{type_key}"] = tokens

        else:
            chosen_tokens = self.processing_class(
                chosen, truncation=True, max_length=self.max_completion_length, add_special_tokens=True
            )
            rejected_tokens = self.processing_class(
                rejected, truncation=True, max_length=self.max_completion_length, add_special_tokens=True
            )
            prompt_tokens = self.processing_class(
                prompt, truncation=True, max_length=self.max_prompt_length, add_special_tokens=True
            )

            batch["chosen_labels"] = chosen_tokens["input_ids"]
            batch["rejected_labels"] = rejected_tokens["input_ids"]
            batch["prompt_input_ids"] = prompt_tokens["input_ids"]
            batch["prompt_attention_mask"] = prompt_tokens["attention_mask"]

            if model is not None and hasattr(model, "prepare_decoder_input_ids_from_labels"):
                batch["rejected_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(
                    labels=torch.tensor(batch["rejected_labels"])
                )
                batch["chosen_decoder_input_ids"] = model.prepare_decoder_input_ids_from_labels(
                    labels=torch.tensor(batch["chosen_labels"])
                )

        return batch
    @staticmethod
    def concatenated_inputs(
        batch: dict[str, Union[list, torch.LongTensor]],
        is_encoder_decoder: bool = False,
        label_pad_token_id: int = -100,
        padding_value: int = 0,
        device: Optional[torch.device] = None,
    ) -> dict[str, torch.LongTensor]:
        """Concatenate the chosen and rejected inputs into a single tensor.

        Args:
            batch: A batch of data. Must contain the keys 'chosen_input_ids' and 'rejected_input_ids', which are tensors of shape (batch_size, sequence_length).
            is_encoder_decoder: Whether the model is an encoder-decoder model.
            label_pad_token_id: The label pad token id.
            padding_value: The padding value to use for the concatenated input_ids.
            device: The device for the concatenated inputs.

        Returns:
            A dictionary containing the concatenated inputs under the key 'concatenated_input_ids'.
        """
        concatenated_batch = {}

        if is_encoder_decoder:
            max_length = max(batch["chosen_labels"].shape[1], batch["rejected_labels"].shape[1])
        else:
            max_length = max(batch["chosen_input_ids"].shape[1], batch["rejected_input_ids"].shape[1])

        for k in batch:
            if k.startswith("chosen") and isinstance(batch[k], torch.Tensor):
                if "labels" in k or is_encoder_decoder:
                    pad_value = label_pad_token_id
                elif k.endswith("_input_ids"):
                    pad_value = padding_value
                elif k.endswith("_attention_mask"):
                    pad_value = 0
                concatenated_key = k.replace("chosen", "concatenated")
                concatenated_batch[concatenated_key] = pad_to_length(batch[k], max_length, pad_value=pad_value)
        for k in batch:
            if k.startswith("rejected") and isinstance(batch[k], torch.Tensor):
                if "labels" in k or is_encoder_decoder:
                    pad_value = label_pad_token_id
                elif k.endswith("_input_ids"):
                    pad_value = padding_value
                elif k.endswith("_attention_mask"):
                    pad_value = 0
                concatenated_key = k.replace("rejected", "concatenated")
                concatenated_batch[concatenated_key] = torch.cat(
                    (
                        concatenated_batch[concatenated_key],
                        pad_to_length(batch[k], max_length, pad_value=pad_value),
                    ),
                    dim=0,
                ).to(device=device)

        if is_encoder_decoder:
            concatenated_batch["concatenated_input_ids"] = batch["prompt_input_ids"].repeat(2, 1).to(device=device)
            concatenated_batch["concatenated_attention_mask"] = (
                batch["prompt_attention_mask"].repeat(2, 1).to(device=device)
            )

        return concatenated_batch
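
    # Illustrative sketch (toy tensors, decoder-only case): chosen rows are padded
    # to the longer of the two sequence lengths, then rejected rows are appended
    # along the batch dimension, so rows i and i + batch_size share a prompt.
    @staticmethod
    def _concatenated_inputs_demo():
        batch = {
            "chosen_input_ids":   torch.tensor([[1, 2, 3]]),
            "rejected_input_ids": torch.tensor([[1, 2, 3, 4, 5]]),
        }
        out = _UnslothCPOTrainer.concatenated_inputs(batch, padding_value = 0)
        # Result shape is (2 * batch_size, max_length): chosen first, then rejected.
        assert out["concatenated_input_ids"].shape == (2, 5)
        return out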
    def cpo_loss(
        self,
        policy_chosen_logps: torch.FloatTensor,
        policy_rejected_logps: torch.FloatTensor,
    ) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
        """Compute the CPO loss for a batch of policy and reference model log probabilities.

        Args:
            policy_chosen_logps: Log probabilities of the policy model for the chosen responses. Shape: (batch_size,)
            policy_rejected_logps: Log probabilities of the policy model for the rejected responses. Shape: (batch_size,)

        Returns:
            A tuple of three tensors: (losses, chosen_rewards, rejected_rewards).
            The losses tensor contains the CPO loss for each example in the batch.
            The chosen_rewards and rejected_rewards tensors contain the rewards for the chosen and rejected responses, respectively.
        """
        logits = (policy_chosen_logps - policy_rejected_logps).to(self.accelerator.device)

        # The beta is a temperature parameter for the CPO loss, typically something in the range of 0.1 to 0.5.
        # We ignore the reference model as beta -> 0. The label_smoothing parameter encodes our uncertainty about the labels and
        # calculates a conservative CPO loss.

        if self.loss_type == "simpo":
            gamma_logratios = self.simpo_gamma / self.beta
            logits = logits - gamma_logratios
            # This reduces to Equation 3 from the CPO paper when label_smoothing -> 0.
            losses = (
                -F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing)
                - F.logsigmoid(-self.beta * logits) * self.label_smoothing
            )
        elif self.loss_type == "sigmoid":
            # This reduces to Equation 3 from the CPO paper when label_smoothing -> 0.
            losses = (
                -F.logsigmoid(self.beta * logits) * (1 - self.label_smoothing)
                - F.logsigmoid(-self.beta * logits) * self.label_smoothing
            )
        elif self.loss_type == "hinge":
            losses = torch.relu(1 - self.beta * logits)
        elif self.loss_type == "ipo":
            # eqn (17) of the paper where beta is the regularization parameter for the IPO loss, denoted by tau in the paper.
            losses = (logits - 1 / (2 * self.beta)) ** 2
        else:
            raise ValueError(
                f"Unknown loss type: {self.loss_type}. Should be one of ['sigmoid', 'hinge', 'ipo', 'simpo']"
            )

        chosen_rewards = self.beta * (policy_chosen_logps.to(self.accelerator.device)).detach()
        rejected_rewards = self.beta * (policy_rejected_logps.to(self.accelerator.device)).detach()

        return losses, chosen_rewards, rejected_rewards
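
    # Illustrative sketch (toy log-probs, sigmoid loss only): the per-example
    # loss is -log(sigmoid(beta * (logp_chosen - logp_rejected))), blended with
    # the label-flipped term when label_smoothing > 0. Values are hypothetical.
    @staticmethod
    def _cpo_sigmoid_loss_demo(beta = 0.1, label_smoothing = 0.0):
        chosen_logps   = torch.tensor([-10.0, -5.0])
        rejected_logps = torch.tensor([-12.0, -4.0])
        logits = chosen_logps - rejected_logps  # positive when chosen is preferred
        losses = (
            -F.logsigmoid(beta * logits) * (1 - label_smoothing)
            - F.logsigmoid(-beta * logits) * label_smoothing
        )
        return losses  # smaller where the chosen response already wins the margin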
    @staticmethod
    def get_batch_logps(
        logits: torch.FloatTensor,
        labels: torch.LongTensor,
        average_log_prob: bool = False,
        label_pad_token_id: int = -100,
        is_encoder_decoder: bool = False,
    ) -> torch.FloatTensor:
        """Compute the log probabilities of the given labels under the given logits.

        Args:
            logits: Logits of the model (unnormalized). Shape: (batch_size, sequence_length, vocab_size)
            labels: Labels for which to compute the log probabilities. Label tokens with a value of label_pad_token_id are ignored. Shape: (batch_size, sequence_length)
            average_log_prob: If True, return the average log probability per (non-masked) token. Otherwise, return the sum of the log probabilities of the (non-masked) tokens.
            label_pad_token_id: The label pad token id.
            is_encoder_decoder: Whether the model is an encoder-decoder model.

        Returns:
            A tensor of shape (batch_size,) containing the average/sum log probabilities of the given labels under the given logits.
        """
        if logits.shape[:-1] != labels.shape:
            raise ValueError("Logits (batch and sequence length dim) and labels must have the same shape.")

        if not is_encoder_decoder:
            labels = labels[:, 1:].clone()
            logits = logits[:, :-1, :]
        loss_mask = labels != label_pad_token_id

        # dummy token; we'll ignore the losses on these tokens later
        labels[labels == label_pad_token_id] = 0

        per_token_logps = selective_log_softmax(logits, labels)

        if average_log_prob:
            return (per_token_logps * loss_mask).sum(-1) / loss_mask.sum(-1)
        else:
            return (per_token_logps * loss_mask).sum(-1)
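
    # Illustrative sketch (toy shapes): for decoder-only models the sequence is
    # shifted so position t scores token t + 1, label-padded positions are masked
    # out, and one summed (or averaged, as used for "ipo"/"simpo") log-prob per
    # row is returned.
    @staticmethod
    def _get_batch_logps_demo():
        logits = torch.randn(2, 6, 11)
        labels = torch.randint(0, 11, (2, 6))
        labels[:, :3] = -100  # prompt tokens carry label_pad_token_id
        summed = _UnslothCPOTrainer.get_batch_logps(logits, labels)
        mean   = _UnslothCPOTrainer.get_batch_logps(logits, labels, average_log_prob = True)
        assert summed.shape == mean.shape == (2,)
        return summed, mean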
    def concatenated_forward(
        self, model: nn.Module, batch: dict[str, Union[list, torch.LongTensor]]
    ) -> tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
        """Run the given model on the given batch of inputs, concatenating the chosen and rejected inputs together.

        We do this to avoid doing two forward passes, because it's faster for FSDP.
        """
        concatenated_batch = self.concatenated_inputs(
            batch,
            is_encoder_decoder=self.is_encoder_decoder,
            label_pad_token_id=self.label_pad_token_id,
            padding_value=self.padding_value,
            device=self.accelerator.device,
        )
        len_chosen = batch["chosen_labels"].shape[0]

        model_kwargs = (
            {
                "decoder_input_ids": self._shift_right(concatenated_batch["concatenated_labels"]),
            }
            if self.is_encoder_decoder
            else {}
        )

        if self.aux_loss_enabled:
            model_kwargs["output_router_logits"] = True

        outputs = model(
            concatenated_batch["concatenated_input_ids"],
            attention_mask=concatenated_batch["concatenated_attention_mask"],
            use_cache=False,
            **model_kwargs,
        )
        all_logits = outputs.logits

        def cross_entropy_loss(logits, labels):
            if not self.is_encoder_decoder:
                # Shift so that tokens < n predict n
                logits = logits[..., :-1, :].contiguous()
                labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = nn.CrossEntropyLoss()
            logits = logits.view(-1, logits.shape[-1])
            labels = labels.view(-1)
            # Enable model parallelism
            labels = labels.to(logits.device)
            loss = loss_fct(logits, labels)
            return loss

        labels = concatenated_batch["concatenated_labels"].clone()

        if self.cpo_alpha == 0:
            nll_loss = torch.tensor(0.0).to(self.accelerator.device)
        else:
            nll_loss = cross_entropy_loss(all_logits[:len_chosen], labels[:len_chosen])

        all_logps = self.get_batch_logps(
            all_logits,
            concatenated_batch["concatenated_labels"],
            average_log_prob=self.loss_type in ["ipo", "simpo"],
            is_encoder_decoder=self.is_encoder_decoder,
            label_pad_token_id=self.label_pad_token_id,
        )

        chosen_logps = all_logps[:len_chosen]
        rejected_logps = all_logps[len_chosen:]

        chosen_logits = all_logits[:len_chosen]
        rejected_logits = all_logits[len_chosen:]

        if self.aux_loss_enabled:
            return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, nll_loss, outputs.aux_loss)

        return (chosen_logps, rejected_logps, chosen_logits, rejected_logits, nll_loss)
    def get_batch_loss_metrics(
        self,
        model,
        batch: dict[str, Union[list, torch.LongTensor]],
        train_eval: Literal["train", "eval"] = "train",
    ):
        """Compute the CPO loss and other metrics for the given batch of inputs for train or test."""
        metrics = {}

        forward_output = self.concatenated_forward(model, batch)
        (
            policy_chosen_logps,
            policy_rejected_logps,
            policy_chosen_logits,
            policy_rejected_logits,
            policy_nll_loss,
        ) = forward_output[:5]
        if self.aux_loss_enabled:
            aux_loss = forward_output[5]

        losses, chosen_rewards, rejected_rewards = self.cpo_loss(
            policy_chosen_logps,
            policy_rejected_logps,
        )

        loss = losses.mean() + self.cpo_alpha * policy_nll_loss
        reward_accuracies = (chosen_rewards > rejected_rewards).float()

        prefix = "eval_" if train_eval == "eval" else ""
        metrics[f"{prefix}rewards/chosen"] = self.accelerator.gather_for_metrics(chosen_rewards).mean().item()
        metrics[f"{prefix}rewards/rejected"] = self.accelerator.gather_for_metrics(rejected_rewards).mean().item()
        metrics[f"{prefix}rewards/accuracies"] = self.accelerator.gather_for_metrics(reward_accuracies).mean().item()
        metrics[f"{prefix}rewards/margins"] = (
            self.accelerator.gather_for_metrics(chosen_rewards - rejected_rewards).mean().item()
        )
        metrics[f"{prefix}logps/rejected"] = (
            self.accelerator.gather_for_metrics(policy_rejected_logps).detach().mean().item()
        )
        metrics[f"{prefix}logps/chosen"] = (
            self.accelerator.gather_for_metrics(policy_chosen_logps).detach().mean().item()
        )
        metrics[f"{prefix}logits/rejected"] = (
            self.accelerator.gather_for_metrics(policy_rejected_logits.detach().mean()).mean().item()
        )
        metrics[f"{prefix}logits/chosen"] = (
            self.accelerator.gather_for_metrics(policy_chosen_logits.detach().mean()).mean().item()
        )
        metrics[f"{prefix}nll_loss"] = self.accelerator.gather_for_metrics(policy_nll_loss).detach().mean().item()

        if self.aux_loss_enabled:
            loss += self.aux_loss_coef * aux_loss

        return loss, metrics
    def compute_loss(
        self,
        model: Union[PreTrainedModel, nn.Module],
        inputs: dict[str, Union[torch.Tensor, Any]],
        return_outputs=False,
        num_items_in_batch=None,
    ) -> Union[torch.Tensor, tuple[torch.Tensor, dict[str, torch.Tensor]]]:
        compute_loss_context_manager = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext()

        with compute_loss_context_manager:
            loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="train")

        # force log the metrics
        self.store_metrics(metrics, train_eval="train")

        if return_outputs:
            return (loss, metrics)
        return loss
    def generate_from_model(self, model, batch: dict[str, torch.LongTensor]) -> str:
        """Generate samples from the model and reference model for the given batch of inputs."""

        # If one uses `generate_during_eval` with peft + bf16, we need to explicitly call generate with
        # the torch cuda amp context manager, as some hidden states are silently cast to full precision.
        generate_context_manager = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext()

        with generate_context_manager:
            policy_output = model.generate(
                input_ids=batch["prompt_input_ids"],
                attention_mask=batch["prompt_attention_mask"],
                max_length=self.max_length,
                do_sample=True,
                pad_token_id=self.processing_class.pad_token_id,
            )

        policy_output = pad_to_length(policy_output, self.max_length, self.processing_class.pad_token_id)
        policy_output_decoded = self.processing_class.batch_decode(policy_output, skip_special_tokens=True)

        return policy_output_decoded
    def prediction_step(
        self,
        model: Union[PreTrainedModel, nn.Module],
        inputs: dict[str, Union[torch.Tensor, Any]],
        prediction_loss_only: bool,
        ignore_keys: Optional[list[str]] = None,
    ):
        if ignore_keys is None:
            if hasattr(model, "config"):
                ignore_keys = getattr(model.config, "keys_to_ignore_at_inference", [])
            else:
                ignore_keys = []

        prediction_context_manager = amp.autocast("cuda") if self._peft_has_been_casted_to_bf16 else nullcontext()

        with torch.no_grad(), prediction_context_manager:
            loss, metrics = self.get_batch_loss_metrics(model, inputs, train_eval="eval")

        # force log the metrics
        self.store_metrics(metrics, train_eval="eval")

        if prediction_loss_only:
            return (loss.detach(), None, None)

        # logits for the chosen and rejected samples from model
        logits_dict = {
            "eval_logits/chosen": metrics["eval_logits/chosen"],
            "eval_logits/rejected": metrics["eval_logits/rejected"],
        }
        logits = [v for k, v in logits_dict.items() if k not in ignore_keys]
        logits = torch.tensor(logits, device=self.accelerator.device)
        labels = torch.zeros(logits.shape[0], device=self.accelerator.device)

        return (loss.detach(), logits, labels)
    def store_metrics(self, metrics: dict[str, float], train_eval: Literal["train", "eval"] = "train") -> None:
        for key, value in metrics.items():
            self._stored_metrics[train_eval][key].append(value)
    def evaluation_loop(
        self,
        dataloader: DataLoader,
        description: str,
        prediction_loss_only: Optional[bool] = None,
        ignore_keys: Optional[list[str]] = None,
        metric_key_prefix: str = "eval",
    ) -> EvalLoopOutput:
        """
        Overriding built-in evaluation loop to store metrics for each batch.
        Prediction/evaluation loop, shared by `Trainer.evaluate()` and `Trainer.predict()`.

        Works both with or without labels.
        """

        # Sample and save to game log if requested (for one batch to save time)
        if self.generate_during_eval:
            # Generate random indices within the range of the total number of samples
            num_samples = len(dataloader.dataset)
            random_indices = random.sample(range(num_samples), k=self.args.eval_batch_size)

            # Use dataloader.dataset.select to get the random batch without iterating over the DataLoader
            random_batch_dataset = dataloader.dataset.select(random_indices)
            random_batch = self.data_collator(random_batch_dataset)
            random_batch = self._prepare_inputs(random_batch)

            policy_output_decoded = self.generate_from_model(self.model, random_batch)

            table = pd.DataFrame(
                columns=["Prompt", "Policy"],
                data=[
                    [prompt, pol[len(prompt) :]] for prompt, pol in zip(random_batch["prompt"], policy_output_decoded)
                ],
            )
            if "wandb" in self.args.report_to:
                wandb.log({"game_log": wandb.Table(data=table)})

            if "comet_ml" in self.args.report_to:
                log_table_to_comet_experiment(
                    name="game_log.csv",
                    table=table,
                )

        # Base evaluation
        initial_output = super().evaluation_loop(
            dataloader, description, prediction_loss_only, ignore_keys, metric_key_prefix
        )

        return initial_output
    def log(self, logs: dict[str, float], start_time: Optional[float] = None) -> None:
        """
        Log `logs` on the various objects watching training, including stored metrics.

        Args:
            logs (`dict[str, float]`):
                The values to log.
            start_time (`float` or `None`, *optional*, defaults to `None`):
                Start time of the training.
        """
        # logs either has 'loss' or 'eval_loss'
        train_eval = "train" if "loss" in logs else "eval"
        # Add averaged stored metrics to logs
        for key, metrics in self._stored_metrics[train_eval].items():
            logs[key] = torch.tensor(metrics).mean().item()
        del self._stored_metrics[train_eval]

        if version.parse(transformers.__version__) >= version.parse("4.47.0.dev0"):
            return super().log(logs, start_time)
        else:  # transformers<=4.46
            return super().log(logs)
    def _shift_right(self, input_ids):
        if self.decoder_start_token_id is None:
            raise ValueError(
                "model.config.decoder_start_token_id has to be defined. It is usually set to the pad_token_id."
            )

        # shift inputs to the right
        if is_torch_fx_proxy(input_ids):
            # Item assignment is not supported natively for proxies.
            shifted_input_ids = torch.full(input_ids.shape[:-1] + (1,), self.decoder_start_token_id)
            shifted_input_ids = torch.cat([shifted_input_ids, input_ids[..., :-1]], dim=-1)
        else:
            shifted_input_ids = input_ids.new_zeros(input_ids.shape)
            shifted_input_ids[..., 1:] = input_ids[..., :-1].clone()
            shifted_input_ids[..., 0] = self.decoder_start_token_id

        if self.pad_token_id is None:
            raise ValueError("model.config.pad_token_id has to be defined.")
        # replace possible -100 values in labels by `pad_token_id`
        shifted_input_ids.masked_fill_(shifted_input_ids == -100, self.pad_token_id)

        return shifted_input_ids
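
    # Illustrative sketch (standalone rewrite of the shift above; the token ids
    # are hypothetical, with decoder_start_token_id = 0 and pad_token_id = 0):
    # labels [a, b, c] become decoder inputs [start, a, b], and any -100 label
    # padding carried into the shifted tensor is replaced by the pad token.
    @staticmethod
    def _shift_right_demo():
        decoder_start_token_id, pad_token_id = 0, 0
        labels = torch.tensor([[5, 6, -100]])
        shifted = labels.new_zeros(labels.shape)
        shifted[..., 1:] = labels[..., :-1].clone()
        shifted[..., 0] = decoder_start_token_id
        shifted.masked_fill_(shifted == -100, pad_token_id)
        assert shifted.tolist() == [[0, 5, 6]]
        return shifted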
    def create_model_card(
        self,
        model_name: Optional[str] = None,
        dataset_name: Optional[str] = None,
        tags: Union[str, list[str], None] = None,
    ):
        """
        Creates a draft of a model card using the information available to the `Trainer`.

        Args:
            model_name (`str` or `None`, *optional*, defaults to `None`):
                Name of the model.
            dataset_name (`str` or `None`, *optional*, defaults to `None`):
                Name of the dataset used for training.
            tags (`str`, `list[str]` or `None`, *optional*, defaults to `None`):
                Tags to be associated with the model card.
        """
        if not self.is_world_process_zero():
            return

        if hasattr(self.model.config, "_name_or_path") and not os.path.isdir(self.model.config._name_or_path):
            base_model = self.model.config._name_or_path
        else:
            base_model = None

        tags = tags or []
        if isinstance(tags, str):
            tags = [tags]

        if hasattr(self.model.config, "unsloth_version"):
            tags.append("unsloth")

        citation = textwrap.dedent("""\
        @inproceedings{xu2024contrastive,
            title = {{Contrastive Preference Optimization: Pushing the Boundaries of LLM Performance in Machine Translation}},
            author = {Haoran Xu and Amr Sharaf and Yunmo Chen and Weiting Tan and Lingfeng Shen and Benjamin Van Durme and Kenton Murray and Young Jin Kim},
            year = 2024,
            booktitle = {Forty-first International Conference on Machine Learning, {ICML} 2024, Vienna, Austria, July 21-27, 2024},
            publisher = {OpenReview.net},
            url = {https://openreview.net/forum?id=51iwkioZpn}
        }""")

        model_card = generate_model_card(
            base_model=base_model,
            model_name=model_name,
            hub_model_id=self.hub_model_id,
            dataset_name=dataset_name,
            tags=tags,
            wandb_url=wandb.run.get_url() if is_wandb_available() and wandb.run is not None else None,
            comet_url=get_comet_experiment_url(),
            trainer_name="CPO",
            trainer_citation=citation,
            paper_title="Contrastive Preference Optimization: Pushing the Boundaries of LLM Performance in Machine Translation",
            paper_id="2401.08417",
        )

        model_card.save(os.path.join(self.args.output_dir, "README.md"))

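# Illustrative usage (an assumed call pattern, not from the original source):
# after training, `trainer.create_model_card(model_name="my-cpo-model", tags="cpo")`
# writes a README.md draft with the CPO citation into `args.output_dir`, but only
# on the world-zero process.
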
class UnslothCPOTrainer(_UnslothCPOTrainer):
    """

    Initialize CPOTrainer.

    Args:
        model (`transformers.PreTrainedModel`):
            The model to train, preferably an `AutoModelForCausalLM`.
        args (`CPOConfig`):
            The CPO config arguments to use for training.
        data_collator (`transformers.DataCollator`):
            The data collator to use for training. If None is specified, the default data collator (`DPODataCollatorWithPadding`) will be used,
            which will pad the sequences to the maximum length of the sequences in the batch, given a dataset of paired sequences.
        train_dataset (`datasets.Dataset`):
            The dataset to use for training.
        eval_dataset (`datasets.Dataset`):
            The dataset to use for evaluation.
        processing_class (`PreTrainedTokenizerBase` or `BaseImageProcessor` or `FeatureExtractionMixin` or `ProcessorMixin`, *optional*):
            Processing class used to process the data. If provided, will be used to automatically process the inputs
            for the model, and it will be saved along the model to make it easier to rerun an interrupted training or
            reuse the fine-tuned model.
        model_init (`Callable[[], transformers.PreTrainedModel]`):
            The model initializer to use for training. If None is specified, the default model initializer will be used.
        callbacks (`list[transformers.TrainerCallback]`):
            The callbacks to use for training.
        optimizers (`tuple[torch.optim.Optimizer, torch.optim.lr_scheduler.LambdaLR]`):
            The optimizer and scheduler to use for training.
        preprocess_logits_for_metrics (`Callable[[torch.Tensor, torch.Tensor], torch.Tensor]`):
            The function to use to preprocess the logits before computing the metrics.
        peft_config (`dict`, defaults to `None`):
            The PEFT configuration to use for training. If you pass a PEFT configuration, the model will be wrapped in a PEFT model.
        compute_metrics (`Callable[[EvalPrediction], dict]`, *optional*):
            The function to use to compute the metrics. Must take an `EvalPrediction` and return
            a dictionary mapping strings to metric values.

    """
    def __init__(
        self,
        model = None,
        args = None,
        data_collator = None,
        train_dataset = None,
        eval_dataset = None,
        processing_class = None,
        model_init = None,
        callbacks = None,
        preprocess_logits_for_metrics = None,
        peft_config = None,
        compute_metrics = None,
        **kwargs
    ):
        if args is None: args = UnslothCPOConfig()
        use_bf16 = getattr(args, 'bf16', False)
        use_fp16 = getattr(args, 'fp16', False)
        force_float32 = False
        if os.environ.get('UNSLOTH_FORCE_FLOAT32', '0') == '1':
            print('Unsloth: Switching to float32 training since model cannot work with float16')
            force_float32 = True
        mixed_precision_dtype = os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32')
        # Detect the model's compute dtype so the mixed-precision flags stay consistent with it.
        dtype = getattr(model.config, 'torch_dtype', None)
        if dtype is None: dtype = model.get_input_embeddings().dtype
        from unsloth_zoo.utils import _get_dtype
        dtype = _get_dtype(dtype)
        float16 = dtype == torch.float16
        if not force_float32 and (float16 and use_bf16): raise TypeError('Unsloth: Model is in float16 precision but you want to use bfloat16 precision. Set fp16 to `True` and bf16 to `False`')
        if not force_float32 and (not float16 and use_fp16): raise TypeError('Unsloth: Model is in bfloat16 precision but you want to use float16 precision. Set fp16 to `False` and bf16 to `True`')
        if force_float32:
            args.fp16 = False
            args.bf16 = False
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'no'
        elif (not use_bf16 and not use_fp16) and mixed_precision_dtype == 'float32':
            # Neither precision flag was set, so match the model's own dtype.
            args.fp16 = float16
            args.bf16 = not float16
            os.environ['ACCELERATE_MIXED_PRECISION'] = 'fp16' if float16 else 'bf16'
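        # Illustrative note (an assumption about intended use, not from the original
        # source): setting os.environ['UNSLOTH_FORCE_FLOAT32'] = '1' before
        # constructing the trainer takes the `force_float32` branch above, disabling
        # fp16/bf16 autocast entirely for models that misbehave in half precision.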
        if getattr(args, 'eval_dataset', None) is not None and getattr(args, 'eval_strategy', 'no') == 'no':
            args.eval_strategy = 'steps'
            if getattr(args, 'eval_steps', None) is None: args.eval_steps = 0.1
        ga_steps = getattr(args, 'gradient_accumulation_steps', None)
        if ga_steps is not None and ga_steps > 1:
            from transformers import __version__ as transformers_version
            if Version(transformers_version) <= Version('4.45.2'):
                print('**** Unsloth: Please use our fixed gradient_accumulation_steps by updating transformers, TRL and Unsloth!\n'
                      '`pip install --upgrade --no-cache-dir --force-reinstall --no-deps unsloth transformers trl unsloth_zoo`')
        if getattr(args, 'eval_strategy', 'no') != 'no':
            eval_bsz = getattr(args, 'per_device_eval_batch_size', 8)
            if eval_bsz == 8 and args.per_device_train_batch_size < eval_bsz: args.per_device_eval_batch_size = args.per_device_train_batch_size
            if getattr(args, 'eval_accumulation_steps', None) is None and ga_steps is not None: args.eval_accumulation_steps = ga_steps
        fp16_full_eval = getattr(args, 'fp16_full_eval', False)
        bf16_full_eval = getattr(args, 'bf16_full_eval', False)
        if args.fp16 and bf16_full_eval: args.bf16_full_eval = False; args.fp16_full_eval = True
        if args.bf16 and fp16_full_eval: args.bf16_full_eval = True; args.fp16_full_eval = False
        if force_float32:
            args.bf16_full_eval = False
            args.fp16_full_eval = False
        elif os.environ.get('UNSLOTH_MIXED_PRECISION', 'float32') == 'bfloat16':
            args.bf16_full_eval = True
            args.fp16_full_eval = False
        elif not bf16_full_eval and not fp16_full_eval:
            args.bf16_full_eval = args.bf16
            args.fp16_full_eval = args.fp16
        _output_logits = False
        if locals().get('compute_metrics', None) is not None: _output_logits = True
        if locals().get('preprocess_logits_for_metrics', None) is not None: _output_logits = True
        if _output_logits:
            os.environ['UNSLOTH_RETURN_LOGITS'] = '1'
        if 'max_seq_length' not in locals() and not hasattr(args, 'max_seq_length'):
            pass
        else:
            # Propagate the model's max_seq_length into args when args does not set one.
            model_max_seq_length = getattr(model, 'max_seq_length', None)
            args_max_seq_length = getattr(args, 'max_seq_length', None)
            if args_max_seq_length is None and model_max_seq_length is not None:
                max_seq_length = model.max_seq_length
                if hasattr(args, 'max_seq_length'): args.max_seq_length = max_seq_length
        if model is not None and hasattr(model, 'for_training'):
            model.for_training()
        if 'tokenizer' in locals() and hasattr(tokenizer, 'padding_side'): tokenizer.padding_side = 'right'
        if 'processing_class' in locals():
            if hasattr(processing_class, 'padding_side'): processing_class.padding_side = 'right'
            if hasattr(processing_class, 'tokenizer') and hasattr(processing_class.tokenizer, 'padding_side'): processing_class.tokenizer.padding_side = 'right'
        __tokenizer = processing_class if 'processing_class' in locals() else tokenizer
        from unsloth_zoo.vision_utils import UnslothVisionDataCollator
        if not isinstance(data_collator, UnslothVisionDataCollator):
            # Swap collators so datasets with and without a 'labels' column each get
            # the padding behaviour they expect.
            if isinstance(data_collator, DataCollatorForSeq2Seq) and 'labels' not in train_dataset.column_names:
                data_collator = TransformersDataCollatorForLanguageModeling(__tokenizer, mlm = False, mlm_probability = 0.0)
            elif isinstance(data_collator, TransformersDataCollatorForLanguageModeling) and 'labels' in train_dataset.column_names:
                data_collator = DataCollatorForSeq2Seq(__tokenizer)
        else:
            if hasattr(args, 'remove_unused_columns'): args.remove_unused_columns = False
            if hasattr(args, 'dataset_text_field'): args.dataset_text_field = ''
            if hasattr(args, 'dataset_kwargs'): args.dataset_kwargs = {'skip_prepare_dataset': True}
        if not isinstance(data_collator, UnslothVisionDataCollator):
            # Processor wrappers without a `pad` method delegate to their inner tokenizer.
            if not hasattr(__tokenizer, 'pad') and hasattr(__tokenizer, 'tokenizer'):
                if isinstance(data_collator, DataCollatorForSeq2Seq):
                    data_collator = DataCollatorForSeq2Seq(__tokenizer.tokenizer)
                else:
                    data_collator = TransformersDataCollatorForLanguageModeling(__tokenizer.tokenizer, mlm = False, mlm_probability = 0.0)
        other_metrics = []

        from unsloth_zoo.logging_utils import PatchRLStatistics
        PatchRLStatistics('cpo_trainer', other_metrics)

        super().__init__(
            model = model,
            args = args,
            data_collator = data_collator,
            train_dataset = train_dataset,
            eval_dataset = eval_dataset,
            processing_class = processing_class,
            model_init = model_init,
            callbacks = callbacks,
            preprocess_logits_for_metrics = preprocess_logits_for_metrics,
            peft_config = peft_config,
            compute_metrics = compute_metrics,
            **kwargs)
        if hasattr(self, 'neftune_hook_handle'):
            self.neftune_hook_handle.remove()
        if hasattr(self, 'neftune_hook_handle'): del self.neftune_hook_handle
        if getattr(args, 'neftune_noise_alpha', None) is not None:
            model.get_input_embeddings().neftune_noise_alpha = self.neftune_noise_alpha
        pass
    pass
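
# Illustrative usage sketch (hedged: the model name and dataset below are
# placeholders, and `FastLanguageModel` is assumed from Unsloth's documented
# entry point; it is not defined in this module):
#
#     from unsloth import FastLanguageModel
#     from datasets import load_dataset
#
#     model, tokenizer = FastLanguageModel.from_pretrained("unsloth/llama-3-8b-bnb-4bit")
#     trainer = UnslothCPOTrainer(
#         model = model,
#         args = UnslothCPOConfig(output_dir = "outputs", max_steps = 100),
#         train_dataset = load_dataset("trl-lib/ultrafeedback_binarized", split = "train"),
#         processing_class = tokenizer,
#     )
#     trainer.train()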