winglian committed on
Commit: 99383f1
1 Parent(s): 0f74464

make one cycle lr div factor configurable

Files changed (1):
  1. src/axolotl/utils/trainer.py +2 -2
src/axolotl/utils/trainer.py CHANGED
@@ -157,7 +157,7 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
             cfg.learning_rate,
             total_steps=total_num_steps,
             epochs=cfg.num_epochs,
-            div_factor=10,
+            div_factor=cfg.lr_div_factor if cfg.lr_div_factor else 6,
             **lr_scheduler_kwargs,
         )
     elif cfg.lr_scheduler == "log_sweep":
@@ -182,7 +182,7 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
             cfg.early_stopping_patience,
         )
         callbacks.append(early_stop_cb)
-
+
     if cfg.local_rank == 0 and cfg.adapter == 'lora': # only save in rank 0
         callbacks.append(SavePeftModelCallback)
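The scheduler configured here appears to be PyTorch's torch.optim.lr_scheduler.OneCycleLR, where div_factor determines the starting learning rate as max_lr / div_factor. Below is a minimal sketch (not axolotl code) of what the new fallback of 6 means in practice; the optimizer, learning_rate, and total_num_steps values are made up for illustration:

```python
# Minimal sketch: how OneCycleLR's div_factor sets the initial learning rate.
# initial_lr = max_lr / div_factor, so the fallback of 6 starts the warmup
# closer to the peak LR than the previous hard-coded 10 did.
import torch

model = torch.nn.Linear(4, 2)
optimizer = torch.optim.SGD(model.parameters(), lr=0.1)

learning_rate = 3e-4     # stands in for cfg.learning_rate
total_num_steps = 1000   # stands in for the computed total_num_steps
div_factor = 6           # what the commit falls back to when cfg.lr_div_factor is unset

scheduler = torch.optim.lr_scheduler.OneCycleLR(
    optimizer,
    max_lr=learning_rate,
    total_steps=total_num_steps,
    div_factor=div_factor,
)

print(scheduler.get_last_lr())  # ~= learning_rate / div_factor at step 0
```

With this change, the divisor is presumably settable as lr_div_factor in the axolotl config (it is read from cfg.lr_div_factor); setting it to 10 restores the previous behaviour, while leaving it unset starts the cycle at one sixth of the peak learning rate instead of one tenth.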