Showing 20 changed files with 2,566 additions and 4 deletions.
54 changes: 54 additions & 0 deletions
xtuner/configs/internlm/internlm2_chat_1_8b/hybrid/function_call.json
@@ -0,0 +1,54 @@
[
    {
        "messages": [
            {
                "role": "user",
                "content": "I want to know today's weather in Shanghai"
            },
            {
                "role": "assistant",
                "content": "Sure, I will search for the weather of Shanghai.",
                "function_call": {
                    "name": "get_current_weather",
                    "parameters": {
                        "location": "Shanghai"
                    }
                }
            },
            {
                "role": "function",
                "name": "get_current_weather",
                "content": "{'temperature': 22}"
            },
            {
                "role": "assistant",
                "content": "The weather in Shanghai is 22 celsius"
            }
        ],
"functions": [ | ||
{ | ||
"name": "get_current_weather", | ||
"description": "Get the current weather in a given location", | ||
"parameters": { | ||
"type": "object", | ||
"properties": { | ||
"location": { | ||
"type": "string", | ||
"description": "The city and state, e.g. San Francisco, CA", | ||
"unit": {"type": "string"} | ||
}, | ||
"required": ["location"] | ||
} | ||
} | ||
} | ||
] | ||
} | ||
|
||
] | ||
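
The record above follows the OpenAI-style function-calling layout: a "messages" list plus a "functions" list whose parameters are described with JSON Schema. The snippet below is a minimal sketch for checking that such a data file parses and carries the expected keys; it assumes the file is saved as function_call.json in the working directory (matching data_dir = './' in the accompanying config).

import json

# Load the dataset file and verify each record has the fields the
# hybrid dataset expects: a message list and a function definition list.
with open('function_call.json', 'r', encoding='utf-8') as f:
    records = json.load(f)

for record in records:
    assert 'messages' in record and 'functions' in record
    roles = [m['role'] for m in record['messages']]
    print(roles)  # e.g. ['user', 'assistant', 'function', 'assistant']
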
204 changes: 204 additions & 0 deletions
xtuner/configs/internlm/internlm2_chat_1_8b/hybrid/internlm2_chat_1_8b_function_call.py
@@ -0,0 +1,204 @@
# Copyright (c) OpenMMLab. All rights reserved.
import torch
from mmengine.dataset import DefaultSampler
from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
                            LoggerHook, ParamSchedulerHook)
from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR, LinearLR
from torch.optim import AdamW
from transformers import AutoModelForCausalLM, AutoTokenizer

from xtuner.dataset.hybrid import HybridDataset, hybrid_collate_fn
from xtuner.dataset.hybrid.mappings import openai_to_raw_training
from xtuner.engine.hooks import DatasetInfoHook
from xtuner.engine.runner import TrainLoop
from xtuner.model import HybridFinetune
from xtuner.types import HybridChatTemplate

#######################################################################
#                          PART 1  Settings                           #
#######################################################################
# Model
llm_name_or_path = 'internlm/internlm2-chat-1_8b'
visual_encoder_name_or_path = 'openai/clip-vit-large-patch14-336'
# Specify the pretrained pth
pretrained_pth = None
# Data
data_dir = './'
data_files = ['function_call.json']
max_length = 2048

# Chat Template
chat_template = dict(
    type=HybridChatTemplate,
    system='<|im_start|>system\n{system}<|im_end|>\n',
    user='<|im_start|>user\n{user}<|im_end|>\n<|im_start|>assistant\n',
    assistant='{assistant}<|im_end|>\n',
    stop_words=['<|im_end|>'],
    image_token='<image>',
    function_call=
    '{assistant}<|action_start|><|plugin|>\n{function_call}<|action_end|><|im_end|>\n',  # noqa: E501, E251
    function_result=
    '<|im_start|>environment name=<|plugin|>\n{function_result}<|im_end|>\n<|im_start|>assistant\n',  # noqa: E501, E251
    functions='<|im_start|>system name=<|plugin|>\n{functions}<|im_end|>\n')
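# Illustrative rendering (an assumption, shown for reference only): under this
# template, the Shanghai example in function_call.json would produce roughly
#   <|im_start|>user
#   I want to know today's weather in Shanghai<|im_end|>
#   <|im_start|>assistant
#   Sure, I will search for the weather of Shanghai.<|action_start|><|plugin|>
#   {"name": "get_current_weather", "parameters": {"location": "Shanghai"}}<|action_end|><|im_end|>
#   <|im_start|>environment name=<|plugin|>
#   {'temperature': 22}<|im_end|>
#   <|im_start|>assistant
#   The weather in Shanghai is 22 celsius<|im_end|>
# The exact serialization of the function call is decided by the
# `openai_to_raw_training` mapping, so the JSON shown above is assumed.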

# Scheduler & Optimizer
batch_size = 1  # per_device
accumulative_counts = 1
dataloader_num_workers = 0
max_epochs = 1
optim_type = AdamW
lr = 2e-4
betas = (0.9, 0.999)
weight_decay = 0
max_norm = 1  # grad clip
warmup_ratio = 0.03

# Save
save_steps = 500
save_total_limit = 2  # Maximum checkpoints to keep (-1 means unlimited)

# Evaluate the generation performance during the training
evaluation_freq = 500
SYSTEM = ''
evaluation_images = 'https://llava-vl.github.io/static/images/view.jpg'
evaluation_inputs = ['请描述一下这张照片', 'Please describe this picture']

#######################################################################
#            PART 2  Model & Tokenizer & Image Processor              #
#######################################################################
tokenizer = dict(
    type=AutoTokenizer.from_pretrained,
    pretrained_model_name_or_path=llm_name_or_path,
    trust_remote_code=True,
    padding_side='right')

model = dict(
    type=HybridFinetune,
    llm=dict(
        type=AutoModelForCausalLM.from_pretrained,
        pretrained_model_name_or_path=llm_name_or_path,
        trust_remote_code=True,
        torch_dtype=torch.float16))

#######################################################################
#                   PART 3  Dataset & Dataloader                      #
#######################################################################
llava_dataset = dict(
    type=HybridDataset,
    data_dir=data_dir,
    data_files=data_files,
    sample_ratio=1,
    tokenizer=tokenizer,
    chat_template=chat_template,
    max_length=max_length,
    pack_to_max_length=True,
    num_workers=dataloader_num_workers,
    mappings=[openai_to_raw_training])

train_dataloader = dict(
    batch_size=batch_size,
    num_workers=dataloader_num_workers,
    dataset=llava_dataset,
    sampler=dict(type=DefaultSampler, shuffle=True),
    collate_fn=dict(type=hybrid_collate_fn))

#######################################################################
#                    PART 4  Scheduler & Optimizer                    #
#######################################################################
# optimizer
optim_wrapper = dict(
    type=AmpOptimWrapper,
    optimizer=dict(
        type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
    clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
    accumulative_counts=accumulative_counts,
    loss_scale='dynamic',
    dtype='float16')

# learning policy
# More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md  # noqa: E501
param_scheduler = [
    dict(
        type=LinearLR,
        start_factor=1e-5,
        by_epoch=True,
        begin=0,
        end=warmup_ratio * max_epochs,
        convert_to_iter_based=True),
    dict(
        type=CosineAnnealingLR,
        eta_min=0.0,
        by_epoch=True,
        begin=warmup_ratio * max_epochs,
        end=max_epochs,
        convert_to_iter_based=True)
]

# train, val, test setting
train_cfg = dict(type=TrainLoop, max_epochs=max_epochs)

#######################################################################
#                           PART 5  Runtime                           #
#######################################################################
# Log the dialogue periodically during the training process, optional
custom_hooks = [
    dict(type=DatasetInfoHook, tokenizer=tokenizer),
    # dict(
    #     type=EvaluateChatHook,
    #     tokenizer=tokenizer,
    #     image_processor=image_processor,
    #     every_n_iters=evaluation_freq,
    #     evaluation_inputs=evaluation_inputs,
    #     evaluation_images=evaluation_images,
    #     system=SYSTEM,
    #     prompt_template=prompt_template)
]

# configure default hooks
default_hooks = dict(
    # record the time of every iteration.
    timer=dict(type=IterTimerHook),
    # print log every 10 iterations.
    logger=dict(type=LoggerHook, log_metric_by_epoch=False, interval=10),
    # enable the parameter scheduler.
    param_scheduler=dict(type=ParamSchedulerHook),
    # save checkpoint per `save_steps`.
    checkpoint=dict(
        type=CheckpointHook,
        by_epoch=False,
        interval=save_steps,
        max_keep_ckpts=save_total_limit),
    # set sampler seed in distributed environment.
    sampler_seed=dict(type=DistSamplerSeedHook),
)

# configure environment
env_cfg = dict(
    # whether to enable cudnn benchmark
    cudnn_benchmark=False,
    # set multi process parameters
    mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
    # set distributed parameters
    dist_cfg=dict(backend='nccl'),
)

# set visualizer
visualizer = None

# set log level
log_level = 'INFO'

# load from which checkpoint
load_from = None

# whether to resume training from the loaded checkpoint
resume = False

# Defaults to use random seed and disable `deterministic`
randomness = dict(seed=None, deterministic=False)

# set log processor
log_processor = dict(by_epoch=False)
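
A quick way to sanity-check a config like this before starting a run is to load it with mmengine's Config API; the sketch below assumes xtuner and its dependencies are installed and that the file lives at the path shown in the diff. Training itself is normally launched through xtuner's train entry point (xtuner train <config>).

from mmengine.config import Config

# Load the training config and inspect a few of the fields defined above.
cfg = Config.fromfile(
    'xtuner/configs/internlm/internlm2_chat_1_8b/hybrid/'
    'internlm2_chat_1_8b_function_call.py')
print(cfg.train_dataloader.batch_size)   # 1
print(cfg.train_cfg.max_epochs)          # 1
print(cfg.chat_template.stop_words)      # ['<|im_end|>']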