BobaZooba committed
Commit 91256fb
1 Parent(s): a00032b

Training in progress, step 100

adapter_config.json ADDED
@@ -0,0 +1,26 @@
+ {
+ "auto_mapping": null,
+ "base_model_name_or_path": "mistralai/Mistral-7B-v0.1",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "lora_alpha": 32,
+ "lora_dropout": 0.1,
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 64,
+ "revision": null,
+ "target_modules": [
+ "v_proj",
+ "o_proj",
+ "up_proj",
+ "gate_proj",
+ "down_proj",
+ "k_proj",
+ "q_proj"
+ ],
+ "task_type": "CAUSAL_LM"
+ }
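
This adapter_config.json is a standard PEFT LoRA configuration (rank 64, alpha 32, dropout 0.1) targeting the attention and MLP projections of Mistral-7B. A minimal sketch of attaching the adapter to the base model with transformers and peft; the repo id "BobaZooba/WGPT-LoRA" is taken from hub_model_id in training_config.json below, and fp16 inference is an assumption:

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Load the base model named in base_model_name_or_path above.
base_model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1",
    torch_dtype=torch.float16,  # assumption: fp16 for inference
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")

# Attach the LoRA weights from this repo (adapter_config.json + adapter_model.safetensors).
model = PeftModel.from_pretrained(base_model, "BobaZooba/WGPT-LoRA")
model.eval()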
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dda825c13ac7b2f848b970b524f518b6456014cb3e473ece417bae2eb6739174
+ size 335605144
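
The file above is a git-lfs pointer; the actual ~336 MB safetensors file holds the LoRA lora_A / lora_B matrices for each target module. A sketch of resolving the pointer and listing the stored tensors, assuming the same "BobaZooba/WGPT-LoRA" repo id:

from huggingface_hub import hf_hub_download
from safetensors import safe_open

# Download the real file behind the LFS pointer.
path = hf_hub_download(repo_id="BobaZooba/WGPT-LoRA", filename="adapter_model.safetensors")

# Print the LoRA tensor names and shapes.
with safe_open(path, framework="pt") as f:
    for name in f.keys():
        print(name, f.get_tensor(name).shape)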
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1a00d251b9e8778867a0be2ae762151214e219b14ed22e794563e9fffee1931b
+ size 6264
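
training_args.bin is the small pickled arguments object that the Hugging Face Trainer saves alongside checkpoints. A hedged sketch of inspecting it locally; weights_only=False is required on recent PyTorch to unpickle arbitrary objects, so only do this for files you trust:

import torch

# Load the pickled TrainingArguments saved by the Trainer.
training_args = torch.load("training_args.bin", weights_only=False)
print(training_args.learning_rate, training_args.max_steps)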
training_config.json ADDED
@@ -0,0 +1,86 @@
+ {
+ "experiment_key": "base",
+ "save_safetensors": true,
+ "max_shard_size": "10GB",
+ "local_rank": 1,
+ "use_gradient_checkpointing": true,
+ "trainer_key": "lm",
+ "force_fp32": false,
+ "force_fp16": false,
+ "from_gptq": false,
+ "huggingface_hub_token": null,
+ "deepspeed_stage": 2,
+ "deepspeed_config_path": null,
+ "fsdp_strategy": "",
+ "fsdp_offload": true,
+ "seed": 42,
+ "stabilize": true,
+ "path_to_env_file": "./.env",
+ "prepare_dataset": true,
+ "lora_hub_model_id": null,
+ "lora_model_local_path": null,
+ "fused_model_local_path": null,
+ "quantization_dataset_id": null,
+ "quantization_max_samples": 1024,
+ "quantized_model_path": "./quantized_model/",
+ "quantized_hub_model_id": null,
+ "quantized_hub_private_repo": null,
+ "dataset_key": "desc2json",
+ "train_local_path_to_data": "./train.jsonl",
+ "eval_local_path_to_data": null,
+ "shuffle": true,
+ "max_eval_samples": 1000,
+ "add_eval_to_train_if_no_path": false,
+ "tokenizer_name_or_path": null,
+ "tokenizer_use_fast": null,
+ "tokenizer_padding_side": null,
+ "collator_key": "completion",
+ "max_length": 2048,
+ "model_name_or_path": "mistralai/Mistral-7B-v0.1",
+ "push_to_hub_bos_add_bos_token": false,
+ "use_flash_attention_2": false,
+ "trust_remote_code": true,
+ "device_map": null,
+ "prepare_model_for_kbit_training": true,
+ "load_in_8bit": false,
+ "load_in_4bit": true,
+ "llm_int8_threshold": 6.0,
+ "llm_int8_has_fp16_weight": true,
+ "bnb_4bit_use_double_quant": true,
+ "bnb_4bit_quant_type": "nf4",
+ "bnb_quantize_after_model_init": false,
+ "gptq_bits": 4,
+ "gptq_group_size": 128,
+ "gptq_disable_exllama": true,
+ "apply_lora": true,
+ "lora_rank": 64,
+ "lora_alpha": 32,
+ "lora_dropout": 0.1,
+ "raw_lora_target_modules": "all",
+ "output_dir": "./outputs/",
+ "per_device_train_batch_size": 2,
+ "do_eval": false,
+ "per_device_eval_batch_size": null,
+ "gradient_accumulation_steps": 4,
+ "eval_accumulation_steps": null,
+ "eval_delay": 0,
+ "eval_steps": 1000,
+ "warmup_steps": 100,
+ "max_steps": 5000,
+ "num_train_epochs": 3,
+ "learning_rate": 0.0002,
+ "max_grad_norm": 1.0,
+ "weight_decay": 0.001,
+ "label_smoothing_factor": 0.1,
+ "logging_steps": 1,
+ "save_steps": 100,
+ "save_total_limit": 0,
+ "optim": "paged_adamw_8bit",
+ "push_to_hub": true,
+ "hub_model_id": "BobaZooba/WGPT-LoRA",
+ "hub_private_repo": false,
+ "report_to_wandb": true,
+ "wandb_api_key": null,
+ "wandb_project": null,
+ "wandb_entity": null
+ }
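
training_config.json describes a QLoRA-style run: the base Mistral-7B is loaded in 4-bit NF4 with double quantization, LoRA (r=64, alpha=32, dropout 0.1) is applied to all linear projections, and training uses paged_adamw_8bit with gradient accumulation. The keys belong to the training framework used for this run; purely as an illustration, an equivalent model setup with plain transformers + peft + bitsandbytes might look like the sketch below (the compute dtype and any detail not present in the config are assumptions):

import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model, prepare_model_for_kbit_training

# 4-bit NF4 quantization with double quantization, mirroring load_in_4bit,
# bnb_4bit_quant_type and bnb_4bit_use_double_quant above.
bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=True,
    bnb_4bit_compute_dtype=torch.float16,  # assumption: compute dtype is not stated in the config
)

model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-v0.1",
    quantization_config=bnb_config,
    trust_remote_code=True,
)
model = prepare_model_for_kbit_training(model)

# LoRA settings matching lora_rank / lora_alpha / lora_dropout; the "all"
# target setting corresponds to the seven projections listed in adapter_config.json.
lora_config = LoraConfig(
    r=64,
    lora_alpha=32,
    lora_dropout=0.1,
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj", "gate_proj", "up_proj", "down_proj"],
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()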