base_model: cerebras/Cerebras-GPT-1.3B
model_type: AutoModelForCausalLM
tokenizer_type: AutoTokenizer
load_in_8bit: true
datasets:
  - path: data/alpaca_data_gpt4.jsonl
    type: alpaca
  - path: data/vicuna_cleaned.jsonl
    type: sharegpt
  - path: data/gpt4-instruct-similarity-0.6-dataset.jsonl
    type: gpteacher
  - path: data/roleplay-similarity_0.6-instruct-dataset.jsonl
    type: gpteacher
dataset_prepared_path: last_run_prepared
val_set_size: 0.05
adapter: lora
sequence_len: 2048
lora_r: 8
lora_alpha: 16
lora_dropout: 0.05
lora_target_modules:
  - c_attn
lora_fan_in_fan_out: false
wandb_project: pythia-1.4b-lora
wandb_watch:
wandb_run_id:
wandb_log_model: checkpoint
output_dir: ./lora-alpaca
batch_size: 32
micro_batch_size: 4
num_epochs: 5
learning_rate: 0.0003
train_on_inputs: false
group_by_length: false
bf16: true
tf32: true
gradient_checkpointing:
early_stopping_patience:
resume_from_checkpoint:
local_rank:
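
# Effective batch math, for reference: batch_size 32 with micro_batch_size 4
# implies 32 / 4 = 8 gradient-accumulation micro-steps per optimizer step
# (split further across devices on a multi-GPU run).
#
# These keys follow the axolotl config schema; assuming an axolotl install
# and that this file is saved as config.yml (the filename is hypothetical,
# not part of this config), a typical launch would look like:
#   accelerate launch -m axolotl.cli.train config.yml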