LZHgrla committed
Commit 8a43808
1 Parent(s): dae3e8b

upload adapter

Files changed (4)
  1. README.md +45 -0
  2. adapter_config.json +26 -0
  3. adapter_model.bin +3 -0
  4. xtuner_config.py +182 -0
README.md ADDED
@@ -0,0 +1,45 @@
+ ---
+ library_name: peft
+ datasets:
+ - burkelibbey/colors
+ pipeline_tag: conversational
+ base_model: internlm/internlm-7b
+ ---
+
+ <div align="center">
+ <img src="https://github.com/InternLM/lmdeploy/assets/36994684/0cf8d00f-e86b-40ba-9b54-dc8f1bc6c8d8" width="600"/>
+
+
+ [![Generic badge](https://img.shields.io/badge/GitHub-%20XTuner-black.svg)](https://github.com/InternLM/xtuner)
+
+
+ </div>
+
+ ## Model
+
+ internlm-7b-qlora-colorist is fine-tuned from [InternLM-7B](https://huggingface.co/internlm/internlm-7b) on the [colors](https://huggingface.co/datasets/burkelibbey/colors) dataset by [XTuner](https://github.com/InternLM/xtuner).
+
+
+ ## Quickstart
+
+ ### Usage with XTuner CLI
+
+ #### Installation
+
+ ```shell
+ pip install xtuner
+ ```
+
+ #### Chat
+
+ ```shell
+ xtuner chat internlm/internlm-7b --adapter xtuner/internlm-7b-qlora-colorist --prompt-template internlm_chat --system-prompt colorist
+ ```
+
+ #### Fine-tune
+
+ Use the following command to quickly reproduce the fine-tuning results.
+
+ ```shell
+ xtuner train internlm_7b_qlora_colorist_e5
+ ```
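For reference, the adapter can also be loaded without the XTuner CLI, using `transformers` and `peft` directly. The snippet below is a minimal sketch, not part of the uploaded files: it assumes the Hub ids used above (`internlm/internlm-7b` as the base model and `xtuner/internlm-7b-qlora-colorist` for this adapter), a CUDA GPU with `bitsandbytes` installed, and it sends a raw prompt rather than reproducing the exact `internlm_chat` template and `colorist` system prompt that `xtuner chat` applies.

```python
import torch
from peft import PeftModel
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig

base = 'internlm/internlm-7b'
adapter = 'xtuner/internlm-7b-qlora-colorist'

tokenizer = AutoTokenizer.from_pretrained(base, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    base,
    trust_remote_code=True,
    torch_dtype=torch.float16,
    device_map='auto',
    # 4-bit (NF4) loading mirrors the QLoRA settings used for fine-tuning.
    quantization_config=BitsAndBytesConfig(
        load_in_4bit=True,
        bnb_4bit_compute_dtype=torch.float16,
        bnb_4bit_use_double_quant=True,
        bnb_4bit_quant_type='nf4'))
# Attach the LoRA weights from this repo on top of the frozen base model.
model = PeftModel.from_pretrained(model, adapter)
model.eval()

# Raw prompt only; `xtuner chat` additionally wraps it in the internlm_chat
# template with the colorist system prompt.
inputs = tokenizer('Please give me a clear blue like the sky.',
                   return_tensors='pt').to(model.device)
with torch.no_grad():
    output = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```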
adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+   "auto_mapping": null,
+   "base_model_name_or_path": "internlm/internlm-7b",
+   "bias": "none",
+   "fan_in_fan_out": false,
+   "inference_mode": true,
+   "init_lora_weights": true,
+   "layers_pattern": null,
+   "layers_to_transform": null,
+   "lora_alpha": 16,
+   "lora_dropout": 0.1,
+   "modules_to_save": null,
+   "peft_type": "LORA",
+   "r": 64,
+   "revision": null,
+   "target_modules": [
+     "o_proj",
+     "k_proj",
+     "down_proj",
+     "gate_proj",
+     "up_proj",
+     "v_proj",
+     "q_proj"
+   ],
+   "task_type": "CAUSAL_LM"
+ }
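For readers mapping the JSON to code: when the adapter is loaded through PEFT, this file is reconstructed into a `LoraConfig`. The sketch below shows the rough Python equivalent and is illustrative only; nothing here needs to be run by hand to use the adapter.

```python
from peft import LoraConfig

# Rough Python equivalent of adapter_config.json above (illustrative only):
# rank-64 LoRA with alpha 16 and dropout 0.1, applied to every attention and
# MLP projection of the base model.
lora_config = LoraConfig(
    r=64,
    lora_alpha=16,          # effective scaling = lora_alpha / r = 0.25
    lora_dropout=0.1,
    bias='none',
    task_type='CAUSAL_LM',
    target_modules=[
        'q_proj', 'k_proj', 'v_proj', 'o_proj',   # attention projections
        'gate_proj', 'up_proj', 'down_proj',      # MLP projections
    ],
)
```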
adapter_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:71ab00fda8ecda853eea28673a77cf24520f22fdcb9f0c1b2d537a716e69c9ec
+ size 319977229
xtuner_config.py ADDED
@@ -0,0 +1,182 @@
+ # Copyright (c) OpenMMLab. All rights reserved.
+ import torch
+ from bitsandbytes.optim import PagedAdamW32bit
+ from datasets import load_dataset
+ from mmengine.dataset import DefaultSampler
+ from mmengine.hooks import (CheckpointHook, DistSamplerSeedHook, IterTimerHook,
+                             LoggerHook, ParamSchedulerHook)
+ from mmengine.optim import AmpOptimWrapper, CosineAnnealingLR
+ from peft import LoraConfig
+ from transformers import (AutoModelForCausalLM, AutoTokenizer,
+                           BitsAndBytesConfig)
+
+ from xtuner.dataset import process_hf_dataset
+ from xtuner.dataset.collate_fns import default_collate_fn
+ from xtuner.dataset.map_fns import colors_map_fn, template_map_fn_factory
+ from xtuner.engine import DatasetInfoHook, EvaluateChatHook
+ from xtuner.model import SupervisedFinetune
+ from xtuner.utils import PROMPT_TEMPLATE, SYSTEM_TEMPLATE
+
+ #######################################################################
+ #                          PART 1  Settings                           #
+ #######################################################################
+ # Model
+ pretrained_model_name_or_path = 'internlm/internlm-7b'
+
+ # Data
+ data_path = 'burkelibbey/colors'
+ prompt_template = PROMPT_TEMPLATE.internlm_chat
+ max_length = 2048
+ pack_to_max_length = True
+
+ # Scheduler & Optimizer
+ batch_size = 1  # per_device
+ accumulative_counts = 16
+ dataloader_num_workers = 0
+ max_epochs = 5
+ optim_type = PagedAdamW32bit
+ lr = 2e-4
+ betas = (0.9, 0.999)
+ weight_decay = 0
+ max_norm = 1  # grad clip
+
+ # Evaluate the generation performance during training
+ evaluation_freq = 200
+ SYSTEM = SYSTEM_TEMPLATE.colorist
+ evaluation_inputs = [
+     '请给我一个像天空一样清澈透明的蓝色。', 'Please give me a clear blue like the sky.'
+ ]
+
+ #######################################################################
+ #                      PART 2  Model & Tokenizer                      #
+ #######################################################################
+ tokenizer = dict(
+     type=AutoTokenizer.from_pretrained,
+     pretrained_model_name_or_path=pretrained_model_name_or_path,
+     trust_remote_code=True,
+     padding_side='right')
+
+ model = dict(
+     type=SupervisedFinetune,
+     llm=dict(
+         type=AutoModelForCausalLM.from_pretrained,
+         pretrained_model_name_or_path=pretrained_model_name_or_path,
+         trust_remote_code=True,
+         torch_dtype=torch.float16,
+         quantization_config=dict(
+             type=BitsAndBytesConfig,
+             load_in_4bit=True,
+             load_in_8bit=False,
+             llm_int8_threshold=6.0,
+             llm_int8_has_fp16_weight=False,
+             bnb_4bit_compute_dtype=torch.float16,
+             bnb_4bit_use_double_quant=True,
+             bnb_4bit_quant_type='nf4')),
+     lora=dict(
+         type=LoraConfig,
+         r=64,
+         lora_alpha=16,
+         lora_dropout=0.1,
+         bias='none',
+         task_type='CAUSAL_LM'))
+
+ #######################################################################
+ #                    PART 3  Dataset & Dataloader                     #
+ #######################################################################
+ train_dataset = dict(
+     type=process_hf_dataset,
+     dataset=dict(type=load_dataset, path=data_path),
+     tokenizer=tokenizer,
+     max_length=max_length,
+     dataset_map_fn=colors_map_fn,
+     template_map_fn=dict(
+         type=template_map_fn_factory, template=prompt_template),
+     remove_unused_columns=True,
+     shuffle_before_pack=True,
+     pack_to_max_length=pack_to_max_length)
+
+ train_dataloader = dict(
+     batch_size=batch_size,
+     num_workers=dataloader_num_workers,
+     dataset=train_dataset,
+     sampler=dict(type=DefaultSampler, shuffle=True),
+     collate_fn=dict(type=default_collate_fn))
+
+ #######################################################################
+ #                    PART 4  Scheduler & Optimizer                    #
+ #######################################################################
+ # optimizer
+ optim_wrapper = dict(
+     type=AmpOptimWrapper,
+     optimizer=dict(
+         type=optim_type, lr=lr, betas=betas, weight_decay=weight_decay),
+     clip_grad=dict(max_norm=max_norm, error_if_nonfinite=False),
+     accumulative_counts=accumulative_counts,
+     loss_scale='dynamic',
+     dtype='float16')
+
+ # learning policy
+ # More information: https://github.com/open-mmlab/mmengine/blob/main/docs/en/tutorials/param_scheduler.md  # noqa: E501
+ param_scheduler = dict(
+     type=CosineAnnealingLR,
+     eta_min=lr * 0.1,
+     by_epoch=True,
+     T_max=max_epochs,
+     convert_to_iter_based=True)
+
+ # train, val, test setting
+ train_cfg = dict(by_epoch=True, max_epochs=max_epochs, val_interval=1)
+
+ #######################################################################
+ #                           PART 5  Runtime                           #
+ #######################################################################
+ # Log the dialogue periodically during the training process, optional
+ custom_hooks = [
+     dict(type=DatasetInfoHook, tokenizer=tokenizer),
+     dict(
+         type=EvaluateChatHook,
+         tokenizer=tokenizer,
+         every_n_iters=evaluation_freq,
+         evaluation_inputs=evaluation_inputs,
+         system=SYSTEM,
+         prompt_template=prompt_template)
+ ]
+
+ # configure default hooks
+ default_hooks = dict(
+     # record the time of every iteration.
+     timer=dict(type=IterTimerHook),
+     # print the log every 10 iterations.
+     logger=dict(type=LoggerHook, interval=10),
+     # enable the parameter scheduler.
+     param_scheduler=dict(type=ParamSchedulerHook),
+     # save a checkpoint every epoch.
+     checkpoint=dict(type=CheckpointHook, interval=1),
+     # set the sampler seed in the distributed environment.
+     sampler_seed=dict(type=DistSamplerSeedHook),
+ )
+
+ # configure environment
+ env_cfg = dict(
+     # whether to enable cudnn benchmark
+     cudnn_benchmark=False,
+     # set multi process parameters
+     mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0),
+     # set distributed parameters
+     dist_cfg=dict(backend='nccl'),
+ )
+
+ # set visualizer
+ visualizer = None
+
+ # set log level
+ log_level = 'INFO'
+
+ # load from which checkpoint
+ load_from = None
+
+ # whether to resume training from the loaded checkpoint
+ resume = False
+
+ # Use a random seed by default and disable `deterministic`
+ randomness = dict(seed=None, deterministic=False)
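A note on how `xtuner_config.py` is consumed: as in other MMEngine-style configs, each `dict(type=..., ...)` above is a lazy specification rather than a built object; the runner calls the callable stored under `type` with the remaining keys (recursively for nested dicts) when it constructs the tokenizer, model, dataset, and hooks. The `build` helper below is a hypothetical sketch of that idea for the flat case, not XTuner's or MMEngine's actual builder; in practice the file is simply passed to `xtuner train`, as shown in the README.

```python
# Hypothetical illustration of the lazy dict(type=..., **kwargs) pattern used
# in the config above (flat case only; real builders also recurse into nested
# dict specs such as quantization_config).
def build(spec):
    kwargs = dict(spec)           # copy so the config entry itself is untouched
    factory = kwargs.pop('type')  # e.g. AutoTokenizer.from_pretrained
    return factory(**kwargs)

# build(tokenizer) would then call
# AutoTokenizer.from_pretrained('internlm/internlm-7b',
#                               trust_remote_code=True, padding_side='right')
```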