winglian committed
Commit 612aabd
Parent: af05883

push intermediate model checkpoints to hub

src/axolotl/prompt_strategies/alpaca_chat.py CHANGED
@@ -6,7 +6,7 @@ from axolotl.prompt_tokenizers import (
     AlpacaPromptTokenizingStrategy,
     InstructionPromptTokenizingStrategy,
 )
-from axolotl.prompters import AlpacaPrompter, PromptStyle
+from axolotl.prompters import AlpacaPrompter, PromptStyle, UnpromptedPrompter
 
 
 def load(tokenizer, cfg):
@@ -103,3 +103,12 @@ def load_camel_ai(tokenizer, cfg):
         cfg.train_on_inputs,
         cfg.sequence_len,
     )
+
+
+def load_no_prompt(tokenizer, cfg):
+    return AlpacaPromptTokenizingStrategy(
+        UnpromptedPrompter(PromptStyle.CHAT.value),
+        tokenizer,
+        cfg.train_on_inputs,
+        cfg.sequence_len,
+    )
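
For context, a minimal usage sketch of the new load_no_prompt strategy outside axolotl's config-driven loading. The tokenizer name is a placeholder, cfg carries only the two fields the loader reads, and the assumption that UnpromptedPrompter drops the standard Alpaca system preamble is inferred from its name rather than confirmed here:

# Usage sketch; assumptions are noted in the text above.
from types import SimpleNamespace

from transformers import AutoTokenizer

from axolotl.prompt_strategies.alpaca_chat import load_no_prompt

tokenizer = AutoTokenizer.from_pretrained("huggyllama/llama-7b")  # placeholder model
cfg = SimpleNamespace(train_on_inputs=False, sequence_len=2048)   # only fields load_no_prompt reads

# Returns an AlpacaPromptTokenizingStrategy wrapping UnpromptedPrompter,
# presumably tokenizing examples without the Alpaca system preamble.
strategy = load_no_prompt(tokenizer, cfg)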
src/axolotl/utils/trainer.py CHANGED
@@ -124,6 +124,10 @@ def setup_trainer(cfg, train_dataset, eval_dataset, model, tokenizer):
     if cfg.max_grad_norm:
         training_arguments_kwargs["max_grad_norm"] = cfg.max_grad_norm
 
+    if cfg.push_to_hub_model_id:
+        training_arguments_kwargs["push_to_hub_model_id"] = cfg.push_to_hub_model_id
+        training_arguments_kwargs["push_to_hub"] = True
+
     training_args = transformers.TrainingArguments(
         per_device_train_batch_size=cfg.micro_batch_size,
         per_device_eval_batch_size=cfg.eval_batch_size
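
Standalone, the new branch amounts to something like the sketch below. Here "my-user/my-model" is a placeholder repo id, output_dir is required by TrainingArguments, and push_to_hub_model_id matches the transformers API targeted by this commit (newer releases rename it to hub_model_id):

import transformers

# Equivalent standalone construction of the new kwargs. With push_to_hub=True,
# the Trainer pushes to the Hub whenever it saves (default hub strategy), which
# is how intermediate checkpoints end up on the Hub during training.
training_args = transformers.TrainingArguments(
    output_dir="./out",                       # local checkpoint directory
    push_to_hub=True,
    push_to_hub_model_id="my-user/my-model",  # placeholder Hub repo id
)

In axolotl this is driven by the push_to_hub_model_id key in the run config, which is what populates cfg.push_to_hub_model_id in the diff above.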