winglian committed
Commit 2284209
1 Parent(s): c6d870b

Phi examples (#569)


* Add phi full fine-tune example

* Add README to point out that DeepSpeed should be used

* ZeRO-1 works better than ZeRO-2 for phi

Files changed (2)
  1. examples/phi/README.md +7 -0
  2. examples/phi/phi-ft.yml +75 -0
examples/phi/README.md ADDED
@@ -0,0 +1,7 @@
+ # Phi
+
+ Due to some nuances in the Phi modeling code, please use DeepSpeed when training Phi.
+
+ ```shell
+ accelerate launch scripts/finetune.py examples/phi/phi-ft.yml --deepspeed deepspeed/zero1.json
+ ```
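The command above points at the repo's `deepspeed/zero1.json`, which is not part of this diff. As a rough sketch only (standard DeepSpeed config keys; the repo's actual file may differ), a minimal ZeRO stage-1 config looks like this, with the `"auto"` values resolved from the training arguments by the Hugging Face Trainer integration:

```json
{
  "zero_optimization": {
    "stage": 1,
    "overlap_comm": true
  },
  "bf16": {
    "enabled": "auto"
  },
  "gradient_accumulation_steps": "auto",
  "train_micro_batch_size_per_gpu": "auto",
  "wall_clock_breakdown": false
}
```

ZeRO stage 1 shards only the optimizer states across ranks, while stage 2 additionally shards gradients; per the commit notes above, stage 1 is the better fit for Phi here.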
examples/phi/phi-ft.yml ADDED
@@ -0,0 +1,75 @@
+ base_model: microsoft/phi-1_5
+ base_model_config: microsoft/phi-1_5
+ model_type: AutoModelForCausalLM
+ tokenizer_type: AutoTokenizer
+ is_llama_derived_model: false
+ trust_remote_code: true
+
+ load_in_8bit: false
+ load_in_4bit: false
+ strict: false
+
+ datasets:
+   - path: garage-bAInd/Open-Platypus
+     type: alpaca
+
+ dataset_prepared_path: last_run_prepared
+ val_set_size: 0.05
+ output_dir: ./phi-sft-out
+
+ sequence_len: 2048
+ sample_packing: false  # does not work with phi
+ pad_to_sequence_len:
+
+ adapter:
+ lora_model_dir:
+ lora_r:
+ lora_alpha:
+ lora_dropout:
+ lora_target_linear:
+ lora_fan_in_fan_out:
+
+ wandb_project:
+ wandb_entity:
+ wandb_watch:
+ wandb_run_id:
+ wandb_log_model:
+
+ gradient_accumulation_steps: 2
+ micro_batch_size: 1
+ num_epochs: 4
+ optimizer: adamw_bnb_8bit
+ adam_beta2: 0.95
+ adam_epsilon: 0.00001
+ max_grad_norm: 1.0
+ lr_scheduler: cosine
+ learning_rate: 0.000003
+
+ train_on_inputs: false
+ group_by_length: true
+ bf16: true
+ fp16: false
+ tf32: true
+
+ gradient_checkpointing:
+ early_stopping_patience:
+ resume_from_checkpoint:
+ local_rank:
+ logging_steps: 1
+ xformers_attention:
+ flash_attention:
+
+ warmup_steps: 100
+ eval_steps: 0.05
+ save_steps:
+ debug:
+ deepspeed:
+ weight_decay: 0.1
+ fsdp:
+ fsdp_config:
+ resize_token_embeddings_to_32x: true
+ special_tokens:
+   bos_token: "<|endoftext|>"
+   eos_token: "<|endoftext|>"
+   unk_token: "<|endoftext|>"
+   pad_token: "<|endoftext|>"
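For reference, `type: alpaca` in the datasets section tells axolotl to read each record with the standard Alpaca schema of `instruction`/`input`/`output` fields. A made-up record (illustrative only, not taken from Open-Platypus) looks like:

```json
{
  "instruction": "Classify the sentiment of the following sentence.",
  "input": "The training run finished without any errors.",
  "output": "Positive"
}
```

When `input` is empty, the prompt template drops the input section; combined with `train_on_inputs: false` above, the loss is computed only on the `output` tokens rather than on the prompt.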