winglian committed on
Commit
8f36f3c
1 Parent(s): 69164da

fix conditional check to prevent always using 4bit

Browse files
Files changed (1) hide show
  1. scripts/finetune.py +1 -1
scripts/finetune.py CHANGED
@@ -85,7 +85,7 @@ def load_model(base_model, base_model_config, model_type, tokenizer_type, cfg, a
85
  raise e
86
 
87
  try:
88
- if cfg.load_4bit and "llama" in base_model or "llama" in cfg.model_type.lower():
89
  from alpaca_lora_4bit.autograd_4bit import load_llama_model_4bit_low_ram
90
  from huggingface_hub import snapshot_download
91
 
 
85
  raise e
86
 
87
  try:
88
+ if cfg.load_4bit and ("llama" in base_model or "llama" in cfg.model_type.lower()):
89
  from alpaca_lora_4bit.autograd_4bit import load_llama_model_4bit_low_ram
90
  from huggingface_hub import snapshot_download
91