FSDP training hits two different errors, cause unclear; the code is basically the example code #657

apachemycat opened this issue May 7, 2024 · 0 comments
The model is meta-Llama-3-8B.
The code is the example config with the following FSDP settings added:

model_wrapper_cfg = dict(
    type='MMFullyShardedDataParallel', cpu_offload=True, use_orig_params=True)

# Specify FSDPStrategy and configure its parameters
size_based_auto_wrap_policy = partial(
    size_based_auto_wrap_policy, min_num_params=1e5)
strategy = dict(
    type='FSDPStrategy',
    model_wrapper=dict(auto_wrap_policy=size_based_auto_wrap_policy))
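
(Side note, not part of the reported config: a minimal plain-PyTorch sketch like the one below, assuming a toy model rather than Llama-3, can be used to check that the size-based wrap policy itself behaves as expected outside of mmengine/xtuner.)

# Sanity-check sketch (assumption: a toy nn.Sequential model, not the real setup).
# Requires torch.distributed to be initialized, e.g. via torchrun, with one GPU per rank.
from functools import partial

import torch.nn as nn
from torch.distributed.fsdp import FullyShardedDataParallel as FSDP
from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy

policy = partial(size_based_auto_wrap_policy, min_num_params=int(1e5))
toy_model = nn.Sequential(*[nn.Linear(512, 512) for _ in range(4)]).cuda()

# Each nn.Linear here has ~262k parameters, above the 1e5 threshold,
# so every block should end up in its own FSDP unit.
wrapped = FSDP(toy_model, auto_wrap_policy=policy, use_orig_params=True)
print(wrapped)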

If the model config contains the quantization_config section, this error is raised:
Must flatten tensors with uniform dtype but got torch.float32 and torch.bfloat16

model = dict(
    type=AutoModelForCausalLM.from_pretrained,
    pretrained_model_name_or_path=pretrained_model_name_or_path,
    trust_remote_code=True,
    torch_dtype=torch.float16,
    quantization_config=dict(
        type=BitsAndBytesConfig,
        load_in_4bit=True,
        load_in_8bit=False,
        llm_int8_threshold=6.0,
        llm_int8_has_fp16_weight=False,
        bnb_4bit_compute_dtype=torch.float16,
        bnb_4bit_use_double_quant=True,
        bnb_4bit_quant_type='nf4'))
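
(For reference, a small diagnostic sketch like the following, reusing the paths from this report, can show where the torch.float32 / torch.bfloat16 mix comes from: after bitsandbytes 4-bit quantization the remaining parameters do not all share one dtype, which is exactly what FSDP's flatten step complains about.)

# Diagnostic sketch: list parameter dtypes of the quantized model before FSDP wraps it.
from collections import Counter

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

pretrained_model_name_or_path = '/models/meta-Llama-3-8B'  # path from the report above

quant_cfg = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
    bnb_4bit_use_double_quant=True,
    bnb_4bit_quant_type='nf4')
m = AutoModelForCausalLM.from_pretrained(
    pretrained_model_name_or_path,
    trust_remote_code=True,
    torch_dtype=torch.float16,
    quantization_config=quant_cfg)

print(Counter(p.dtype for p in m.parameters()))
# More than one dtype in this Counter matches the "uniform dtype" failure above.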
If the quantization_config section is removed,

model = dict(
    type=SupervisedFinetune,
    use_varlen_attn=use_varlen_attn,
    llm=dict(
        type=AutoModelForCausalLM.from_pretrained,
        pretrained_model_name_or_path=pretrained_model_name_or_path,
        trust_remote_code=True,
        torch_dtype=torch.float16),

then this error is raised instead:
05/07 11:02:21 - mmengine - DEBUG - An from_pretrained instance is built from registry, and its implementation can be found in transformers.models.auto.auto_factory
05/07 11:02:21 - mmengine - DEBUG - An LoraConfig instance is built from registry, and its implementation can be found in peft.tuners.lora.config
05/07 11:02:31 - mmengine - DEBUG - An SupervisedFinetune instance is built from registry, and its implementation can be found in xtuner.model.sft
[rank0]: Traceback (most recent call last):
[rank0]: File "/usr/local/lib/python3.10/dist-packages/xtuner/tools/train.py", line 360, in <module>
[rank0]: main()
[rank0]: File "/usr/local/lib/python3.10/dist-packages/xtuner/tools/train.py", line 349, in main
[rank0]: runner = Runner.from_cfg(cfg)
[rank0]: File "/usr/local/lib/python3.10/dist-packages/mmengine/runner/runner.py", line 462, in from_cfg
[rank0]: runner = cls(
[rank0]: File "/usr/local/lib/python3.10/dist-packages/mmengine/runner/runner.py", line 431, in __init__
[rank0]: self.model = self.wrap_model(
[rank0]: File "/usr/local/lib/python3.10/dist-packages/mmengine/runner/runner.py", line 874, in wrap_model
[rank0]: model = model.to(get_device())
[rank0]: File "/usr/local/lib/python3.10/dist-packages/mmengine/model/base_model/base_model.py", line 208, in to
[rank0]: return super().to(*args, **kwargs)
[rank0]: File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1173, in to
[rank0]: return self._apply(convert)
[rank0]: File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 779, in _apply
[rank0]: module._apply(fn)
[rank0]: File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 779, in _apply
[rank0]: module._apply(fn)
[rank0]: File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 779, in _apply
[rank0]: module._apply(fn)
[rank0]: [Previous line repeated 6 more times]
[rank0]: File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 804, in _apply
[rank0]: param_applied = fn(param)
[rank0]: File "/usr/local/lib/python3.10/dist-packages/torch/nn/modules/module.py", line 1159, in convert
[rank0]: return t.to(
[rank0]: RuntimeError: NVML_SUCCESS == r INTERNAL ASSERT FAILED at "../c10/cuda/CUDACachingAllocator.cpp":844, please report a bug to PyTorch.
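
(As a generic first check for the NVML assertion, independent of xtuner, something like the following can be launched with the same torchrun command to verify that every rank sees a healthy CUDA device before the runner calls model.to(get_device()).)

# Diagnostic sketch: per-rank CUDA/NVML check, run under the same torchrun launch.
import os

import torch

local_rank = int(os.environ.get('LOCAL_RANK', '0'))
print(f'rank {local_rank}: cuda available={torch.cuda.is_available()}, '
      f'device count={torch.cuda.device_count()}')
torch.cuda.set_device(local_rank)
# A trivial allocation exercises the CUDA caching allocator that raised the assert.
x = torch.ones(1, device=f'cuda:{local_rank}')
print(f'rank {local_rank}: allocation ok on {torch.cuda.get_device_name(local_rank)}')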

The full config is as follows:

# Copyright (c) OpenMMLab. All rights reserved.

import torch
from datasets import load_dataset
from mmengine.dataset import DefaultSampler
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                            LoggerHook, ParamSchedulerHook)
from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
from peft import LoraConfig
from torch.optim import AdamW
from transformers import TrainingArguments
from transformers import (AutoModelForCausalLM, AutoTokenizer,
                          BitsAndBytesConfig)

from xtuner.dataset import process_hf_dataset
from xtuner.dataset.collate_fns import default_collate_fn
from xtuner.dataset.map_fns import alpaca_map_fn, template_map_fn_factory
from xtuner.engine.hooks import (DatasetInfoHook, EvaluateChatHook,
                                 VarlenAttnArgsToMessageHubHook)
from xtuner.engine.runner import TrainLoop
from xtuner.model import SupervisedFinetune
from xtuner.parallel.sequence import SequenceParallelSampler
from xtuner.utils import PROMPT_TEMPLATE, SYSTEM_TEMPLATE
from torch.distributed.fsdp.wrap import size_based_auto_wrap_policy
from functools import partial
#######################################################################
#                          PART 1  Settings                           #
#######################################################################
model_wrapper_cfg = dict(
    type='MMFullyShardedDataParallel', cpu_offload=True, use_orig_params=True)

# Specify FSDPStrategy and configure its parameters
size_based_auto_wrap_policy = partial(
    size_based_auto_wrap_policy, min_num_params=1e5)
strategy = dict(
    type='FSDPStrategy',
    model_wrapper=dict(auto_wrap_policy=size_based_auto_wrap_policy))

#pretrained_model_name_or_path ='/models/meta-Llama-3-8B'
pretrained_model_name_or_path ='/models/nternlm2-1_8b'

use_varlen_attn = False

# Data

data_files = ['/models/instruct-finetrain.json']

# Data

#alpaca_en_path = 'tatsu-lab/alpaca'
prompt_template = PROMPT_TEMPLATE.default
max_length = 512
pack_to_max_length = True

# parallel

sequence_parallel_size = 1

# Scheduler & Optimizer

batch_size = 8 # per_device
accumulative_counts = 4
accumulative_counts *= sequence_parallel_size
dataloader_num_workers = 0
max_epochs = 100
optim_type = AdamW
lr = 1e-5
betas = (0.9, 0.999)
weight_decay = 0
max_norm = 1 # grad clip
warmup_ratio = 0.01

# Save

save_steps = 20
save_total_limit = 2 # Maximum checkpoints to keep (-1 means unlimited)

# Evaluate the generation performance during the training

evaluation_freq = 20
SYSTEM = SYSTEM_TEMPLATE.alpaca
evaluation_inputs = [
    '请给我介绍五个上海的景点', 'Please tell me five scenic spots in Shanghai'
]

#######################################################################
#                      PART 2  Model & Tokenizer                      #
#######################################################################
tokenizer = dict(
    type=AutoTokenizer.from_pretrained,
    pretrained_model_name_or_path=pretrained_model_name_or_path,
    trust_remote_code=True,
    padding_side='right')

model = dict(
    type=SupervisedFinetune,
    use_varlen_attn=use_varlen_attn,
    llm=dict(
        type=AutoModelForCausalLM.from_pretrained,
        pretrained_model_name_or_path=pretrained_model_name_or_path,
        trust_remote_code=True,
        torch_dtype=torch.float16),
    lora=dict(
        type=LoraConfig,
        r=64,
        lora_alpha=16,
        lora_dropout=0.1,
        bias='none',
        task_type='CAUSAL_LM'))

#######################################################################
#                     PART 3  Dataset & Dataloader                    #
#######################################################################
alpaca_en = dict(
    type=process_hf_dataset,
    dataset=dict(type=load_dataset, path='json', data_files=data_files),
    tokenizer=tokenizer,
    max_length=max_length,
    dataset_map_fn=alpaca_map_fn,
    template_map_fn=dict(
        type=template_map_fn_factory, template=prompt_template),
    remove_unused_columns=True,
    shuffle_before_pack=True,
    pack_to_max_length=pack_to_max_length,
    use_varlen_attn=use_varlen_attn)

sampler = SequenceParallelSampler \
    if sequence_parallel_size > 1 else DefaultSampler
train_dataloader = dict(
    batch_size=batch_size,
    num_workers=dataloader_num_workers,
    dataset=alpaca_en,
    sampler=dict(type=sampler, shuffle=True),
    collate_fn=dict(type=default_collate_fn, use_varlen_attn=use_varlen_attn))

#######################################################################
#                    PART 4  Scheduler & Optimizer                    #
#######################################################################
# optimizer
optim_wrapper = dict(
    type=AmpOptimWrapper,
    optimizer=dict(
        type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
    clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
    accumulative_counts=accumulative_counts,
    loss_scale='dynamic',
    dtype='float16')

# learning policy
# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md  # noqa: E501
auto_scale_lr = dict(base_batch_size=4, enable=True)
param_scheduler = [
    dict(
        type=LinearLR,
        start_factor=1e-5,
        by_epoch=True,
        begin=0,
        end=10,
        convert_to_iter_based=True),
    dict(
        type=CosineAnnealingLR,
        eta_min=0.0,
        by_epoch=True,
        convert_to_iter_based=True)
]

# train, val, test setting
train_cfg = dict(type=TrainLoop, max_epochs=max_epochs)

#######################################################################
#                           PART 5  Runtime                           #
#######################################################################
# Log the dialogue periodically during the training process, optional

#custom_hooks = [
#    dict(type=DatasetInfoHook, tokenizer=tokenizer),
#    dict(
#        type=EvaluateChatHook,
#        tokenizer=tokenizer,
#        every_n_iters=evaluation_freq,
#        evaluation_inputs=evaluation_inputs,
#        system=SYSTEM,
#        prompt_template=prompt_template)
#]

if use_varlen_attn:
    custom_hooks += [dict(type=VarlenAttnArgsToMessageHubHook)]

# configure default hooks
default_hooks = dict(
    # record the time of every iteration.
    timer=dict(type=IterTimerHook),
    # print log every 10 iterations.
    logger=dict(type=LoggerHook, log_metric_by_epoch=False, interval=10),
    # enable the parameter scheduler.
    param_scheduler=dict(type=ParamSchedulerHook),
    # save checkpoint per save_steps.
    checkpoint=dict(
        type=CheckpointHook,
        by_epoch=False,
        interval=save_steps,
        max_keep_ckpts=save_total_limit),
    # set sampler seed in distributed environment.
    sampler_seed=dict(type=DistSamplerSeedHook),
)

# configure environment
env_cfg = dict(
    # whether to enable cudnn benchmark
    cudnn_benchmark=False,
    # set multi process parameters
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    # set distributed parameters
    dist_cfg=dict(backend='nccl'),
)

# set visualizer
visualizer = dict(
    type='Visualizer', vis_backends=[dict(type='TensorboardVisBackend')])

# set log level
log_level = 'DEBUG'

# load from which checkpoint
load_from = None

# whether to resume training from the loaded checkpoint
resume = False

# Defaults to use random seed and disable deterministic
randomness = dict(seed=None, deterministic=False)

# set log processor
log_processor = dict(by_epoch=False)

——————————————————————————
The environment is as follows:

TorchVision: 0.18.0+cu121
OpenCV: 4.9.0
MMEngine: 0.10.4

Runtime environment:
cudnn_benchmark: False
mp_cfg: {'mp_start_method': 'fork', 'opencv_num_threads': 0}
dist_cfg: {'backend': 'nccl'}
seed: 1929161509
deterministic: False
Distributed launcher: pytorch
Distributed training: True
GPU number: 2
