JiunYi committed 7e15793 (1 parent: b6448b4)

Update README.md

Files changed (1): README.md (+6 −8)
README.md CHANGED
````diff
@@ -2,6 +2,7 @@
 license: apache-2.0
 datasets:
 - shibing624/alpaca-zh
+- yahma/alpaca-cleaned
 language:
 - zh
 tags:
@@ -29,19 +30,16 @@ from peft import PeftModel
 from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer
 
 
-max_memory = {i: "15GIB" for i in range(torch.cuda.device_count())}
-tokenizer = LlamaTokenizer.from_pretrained(base_model)
+tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
 model = LlamaForCausalLM.from_pretrained(
-base_model,
+"decapoda-research/llama-7b-hf",
 load_in_8bit=True,
 torch_dtype=torch.float16,
 device_map="auto"
-max_memory=max_memory
 )
 model = PeftModel.from_pretrained(
 model,
-lora_weights,
-torch_dtype=torch.float16,
-max_memory=max_memory
+"DataAgent/llama-7b-alpaca-zh-120k",
+torch_dtype=torch.float16
 )
-```
+```
````
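For context, the pre-commit snippet was not runnable as written: it referenced undefined `base_model` and `lora_weights` variables and was missing a comma after `device_map="auto"`, which this commit resolves by inlining the Hub model IDs and dropping the `max_memory` arguments. A self-contained sketch of the updated usage follows; note that `import torch` is assumed (the snippet uses `torch.float16`), `load_in_8bit=True` requires the `bitsandbytes` package, and the prompt and generation settings are illustrative additions, not part of the commit.

```python
import torch
from peft import PeftModel
from transformers import GenerationConfig, LlamaForCausalLM, LlamaTokenizer

# Load the base LLaMA-7B tokenizer and the 8-bit quantized base model.
tokenizer = LlamaTokenizer.from_pretrained("decapoda-research/llama-7b-hf")
model = LlamaForCausalLM.from_pretrained(
    "decapoda-research/llama-7b-hf",
    load_in_8bit=True,
    torch_dtype=torch.float16,
    device_map="auto",
)

# Stack the LoRA adapter weights on top of the base model.
model = PeftModel.from_pretrained(
    model,
    "DataAgent/llama-7b-alpaca-zh-120k",
    torch_dtype=torch.float16,
)

# Illustrative generation call (not from the commit); the prompt asks the
# model, in Chinese, to "list three attractions in Taipei".
inputs = tokenizer("列出三個台北的景點。", return_tensors="pt").to(model.device)
generation_config = GenerationConfig(
    do_sample=True, temperature=0.7, top_p=0.9, max_new_tokens=128
)
output = model.generate(**inputs, generation_config=generation_config)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```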