winglian committed
Commit 7748f3d
1 Parent(s): fe9c29d

fix adam bnb optimizer grouped parameters, fix peft model 8bit conversion logic, black formatting

src/axolotl/utils/models.py CHANGED
@@ -158,8 +158,8 @@ def load_model(
         for k, v in cfg.tokens.items():
             tokenizer.add_special_tokens({k: v})
 
-    if load_in_8bit and cfg.load_4bit:
-        logging.info("converting model w/ prepare_model_for_int8_training")
+    if cfg.adapter and load_in_8bit and not cfg.load_4bit:
+        logging.info("converting PEFT model w/ prepare_model_for_int8_training")
         model = prepare_model_for_int8_training(model)
 
     model, lora_config = load_adapter(model, cfg, adapter)
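
The hunk above tightens the int8-conversion guard in load_model: the previous condition required cfg.load_4bit, so the conversion never ran for the plain 8-bit adapter path and could run for 4-bit loads where it does not apply. A minimal sketch of the corrected logic, using the cfg fields (cfg.adapter, cfg.load_4bit) and the load_in_8bit flag from the diff; the wrapper name maybe_prepare_for_int8 is hypothetical, while prepare_model_for_int8_training is the peft helper already called here:

import logging

from peft import prepare_model_for_int8_training


def maybe_prepare_for_int8(model, cfg, load_in_8bit):
    # Convert only when a PEFT adapter is configured, the base model was
    # loaded in 8-bit, and the GPTQ-style 4-bit path is not in use.
    if cfg.adapter and load_in_8bit and not cfg.load_4bit:
        logging.info("converting PEFT model w/ prepare_model_for_int8_training")
        # peft freezes the base weights and upcasts norm/output layers so that
        # 8-bit LoRA training stays numerically stable.
        model = prepare_model_for_int8_training(model)
    return model

In load_model itself, load_adapter is then called on the (possibly converted) model to attach the LoRA adapter and its config, as shown in the diff.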
src/axolotl/utils/trainer.py CHANGED
@@ -17,9 +17,21 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
     total_num_steps = int(
         math.ceil(len(train_dataset) * cfg.num_epochs / cfg.batch_size)
     )
-    warmup_steps = cfg.warmup_steps if cfg.warmup_steps is not None else min(int(0.03 * total_num_steps), 100)
-    logging_steps = cfg.logging_steps if cfg.logging_steps is not None else max(min(int(0.005 * total_num_steps), 10), 1)
-    save_steps = eval_steps = cfg.save_steps if cfg.save_steps is not None else min(int(0.05 * total_num_steps), 200)
+    warmup_steps = (
+        cfg.warmup_steps
+        if cfg.warmup_steps is not None
+        else min(int(0.03 * total_num_steps), 100)
+    )
+    logging_steps = (
+        cfg.logging_steps
+        if cfg.logging_steps is not None
+        else max(min(int(0.005 * total_num_steps), 10), 1)
+    )
+    save_steps = eval_steps = (
+        cfg.save_steps
+        if cfg.save_steps is not None
+        else min(int(0.05 * total_num_steps), 200)
+    )
 
     training_arguments_kwargs = {}
     if cfg.bf16 == "full":
@@ -31,19 +43,32 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
         training_arguments_kwargs["logging_steps"] = logging_steps
     if cfg.gradient_checkpointing is not None:
         if cfg.load_4bit:
-            from alpaca_lora_4bit.gradient_checkpointing import apply_gradient_checkpointing
-            gradient_checkpointing_ratio = cfg.gradient_checkpointing_ratio if cfg.gradient_checkpointing_ratio else 1.0
-            apply_gradient_checkpointing(model, checkpoint_ratio=gradient_checkpointing_ratio)
+            from alpaca_lora_4bit.gradient_checkpointing import (
+                apply_gradient_checkpointing,
+            )
+
+            gradient_checkpointing_ratio = (
+                cfg.gradient_checkpointing_ratio
+                if cfg.gradient_checkpointing_ratio
+                else 1.0
+            )
+            apply_gradient_checkpointing(
+                model, checkpoint_ratio=gradient_checkpointing_ratio
+            )
         else:
-            training_arguments_kwargs["gradient_checkpointing"] = cfg.gradient_checkpointing
+            training_arguments_kwargs[
+                "gradient_checkpointing"
+            ] = cfg.gradient_checkpointing
     if cfg.fsdp:
         training_arguments_kwargs["fsdp"] = cfg.fsdp
         if cfg.fsdp_config:
             training_arguments_kwargs["fsdp_config"] = dict(cfg.fsdp_config)
 
-
     # deepspeed
-    if os.environ.get("ACCELERATE_USE_DEEPSPEED") == "true" and torch.cuda.device_count() > 1:
+    if (
+        os.environ.get("ACCELERATE_USE_DEEPSPEED") == "true"
+        and torch.cuda.device_count() > 1
+    ):
         if cfg.deepspeed:
             training_arguments_kwargs["deepspeed"] = cfg.deepspeed
         else:
@@ -62,12 +87,14 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
         save_steps=save_steps,
         output_dir=cfg.output_dir,
         save_total_limit=3,
-        load_best_model_at_end=True if cfg.val_set_size > 0 and save_steps % eval_steps == 0 else False,
+        load_best_model_at_end=True
+        if cfg.val_set_size > 0 and save_steps % eval_steps == 0
+        else False,
         ddp_find_unused_parameters=False if cfg.ddp else None,
         group_by_length=cfg.group_by_length,
         report_to="wandb" if cfg.use_wandb else None,
         run_name=cfg.wandb_run_id if cfg.use_wandb else None,
-        optim=cfg.optimizer if cfg.optimizer != "adam8bit" else cfg.optimizer,
+        optim=cfg.optimizer if cfg.optimizer else None,
         lr_scheduler_type=cfg.lr_scheduler if cfg.lr_scheduler else None,
         weight_decay=cfg.weight_decay if cfg.weight_decay else 0.0,
         **training_arguments_kwargs,
@@ -78,22 +105,33 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
     if cfg.optimizer == "adamw_anyprecision":
         if Path(cfg.torchdistx_path).exists():
             sys.path.append(cfg.torchdistx_path)
-            torchdistx = importlib.import_module('torchdistx')
-    if cfg.optimizer == "adam8bit" and not cfg.load_4bit and not "deepspeed" in training_arguments_kwargs:
+            importlib.import_module("torchdistx")
+    if (
+        cfg.optimizer == "adamw_bnb_8bit"
+        and not cfg.load_4bit
+        and not "deepspeed" in training_arguments_kwargs
+    ):
         decay_parameters = get_parameter_names(model, [nn.LayerNorm])
         decay_parameters = [name for name in decay_parameters if "bias" not in name]
         optimizer_grouped_parameters = [
             {
-                "params": [p for n, p in model.named_parameters() if n in decay_parameters],
+                "params": [
+                    p
+                    for n, p in model.named_parameters()
+                    if (n in decay_parameters and p.requires_grad)
+                ],
                 "weight_decay": training_args.weight_decay,
             },
             {
                 "params": [
-                    p for n, p in model.named_parameters() if n not in decay_parameters
+                    p
+                    for n, p in model.named_parameters()
+                    if (n not in decay_parameters and p.requires_grad)
                 ],
                 "weight_decay": 0.0,
             },
         ]
+
         optimizer = bnb.optim.Adam8bit(
             optimizer_grouped_parameters,
             betas=(training_args.adam_beta1, training_args.adam_beta2),
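
The grouped-parameters change is the substance of the adam bnb optimizer fix: frozen parameters (for example, 8-bit base weights under a LoRA adapter) are now filtered out via p.requires_grad before the groups reach bnb.optim.Adam8bit, and the guard now checks the "adamw_bnb_8bit" optimizer name. A standalone sketch of that grouping for a generic nn.Module; the function name build_adam8bit and the lr/weight_decay defaults are placeholders, not values from this repo:

import bitsandbytes as bnb
import torch.nn as nn
from transformers.trainer_pt_utils import get_parameter_names


def build_adam8bit(model: nn.Module, weight_decay: float = 0.0, lr: float = 1e-4):
    # Parameter names eligible for weight decay: everything except LayerNorm
    # weights and biases, mirroring the Trainer's default grouping.
    decay_parameters = get_parameter_names(model, [nn.LayerNorm])
    decay_parameters = [name for name in decay_parameters if "bias" not in name]
    optimizer_grouped_parameters = [
        {
            # Trainable parameters that should receive weight decay.
            "params": [
                p
                for n, p in model.named_parameters()
                if n in decay_parameters and p.requires_grad
            ],
            "weight_decay": weight_decay,
        },
        {
            # Trainable parameters exempt from weight decay (biases, norms).
            "params": [
                p
                for n, p in model.named_parameters()
                if n not in decay_parameters and p.requires_grad
            ],
            "weight_decay": 0.0,
        },
    ]
    return bnb.optim.Adam8bit(optimizer_grouped_parameters, lr=lr)

Filtering on p.requires_grad keeps the frozen quantized base weights out of the optimizer groups, so only the trainable adapter parameters are handed to the 8-bit Adam optimizer.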