Moses25 committed
Commit e6c45a8
1 Parent(s): 3393def

Update README.md

Files changed (1)
  1. README.md +140 -3

README.md CHANGED
---
license: apache-2.0
---
GitHub [Web-UI](https://github.com/moseshu/llama2-chat/tree/main/webui)

![image/png](https://cdn-uploads.huggingface.co/production/uploads/62f4c7172f63f904a0c61ba3/7Sqj2oRTauOplhXsodMnp.png)
```python
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_id = "Moses25/Meta-LlaMA-3-8B-Instruct-16k"

tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)

mistral_template = "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}"

llama3_template = "{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}"

def chat_format(conversation: list, tokenizer, chat_type="mistral"):
    system_prompt = "You are a helpful, respectful and honest assistant. Help human as much as you can."
    ap = [{"role": "system", "content": system_prompt}] + conversation
    if chat_type == "mistral":
        prompt = tokenizer.apply_chat_template(ap, chat_template=mistral_template, tokenize=False)
    elif chat_type == "llama3":
        prompt = tokenizer.apply_chat_template(ap, chat_template=llama3_template, tokenize=False)
        # Drop the trailing generation prompt that the llama3 template appends.
        prompt = prompt.removesuffix("<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n")
    return prompt

user_chat = [{"role": "user", "content": "In a basket, there are 20 oranges, 60 apples, and 40 bananas. If 15 pears were added, and half of the oranges were removed, what would be the new ratio of oranges to apples, bananas, and pears combined within the basket?"}]
text = chat_format(user_chat, tokenizer, "mistral")

def predict(content_prompt):
    inputs = tokenizer(content_prompt, return_tensors="pt", add_special_tokens=True)
    input_ids = inputs["input_ids"].to("cuda:0")
    # print(f"input length:{len(input_ids[0])}")
    with torch.no_grad():
        generation_output = model.generate(
            input_ids=input_ids,
            # generation_config=generation_config,
            return_dict_in_generate=True,
            output_scores=True,
            max_new_tokens=2048,
            top_p=0.9,
            num_beams=1,
            do_sample=True,
            repetition_penalty=1.0,
            eos_token_id=tokenizer.eos_token_id,
            pad_token_id=tokenizer.pad_token_id,
        )
    s = generation_output.sequences[0]
    output = tokenizer.decode(s, skip_special_tokens=True)
    # Keep only the assistant reply that follows the final [/INST] tag.
    output1 = output.split("[/INST]")[-1].strip()
    return output1

predict(text)

# Output:
"""Let's break down the steps to find the new ratio of oranges to apples, bananas, and pears combined:
Calculate the total number of fruits initially in the basket: Oranges: 20 Apples: 60 Bananas: 40 Total Fruits = 20 + 60 + 40 = 120
Add 15 pears: Total Fruits after adding pears = 120 + 15 = 135
Remove half of the oranges: Oranges remaining = 20 / 2 = 10
Calculate the total number of fruits remaining in the basket after removing half of the oranges: Total Remaining Fruits = 10 (oranges) + 60 (apples) + 40 (bananas) + 15 (pears) = 125
Find the ratio of oranges to apples, bananas, and pears combined: Ratio of Oranges to (Apples, Bananas, Pears) Combined = Oranges / (Apples + Bananas + Pears) = 10 / (60 + 40 + 15) = 10 / 115
So, the new ratio of oranges to apples, bananas, and pears combined within the basket is 10:115.
However, I should note that the actual fruit distribution in your basket may vary depending on how you decide to count and categorize the fruits. The example calculation provides a theoretical ratio based on the initial quantities mentioned."""
```
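
For a multi-turn chat, the helpers above can be reused by appending each assistant reply back into the conversation before formatting the next prompt. A minimal sketch, assuming `chat_format`, `predict`, `tokenizer`, and the loaded model from the example above; it uses the mistral-style template because `predict` extracts the reply after the final `[/INST]` tag, and the example questions are placeholders:

```python
# Multi-turn loop built on the helpers defined above (usage sketch).
conversation = []

def chat(user_message, chat_type="mistral"):
    conversation.append({"role": "user", "content": user_message})
    prompt = chat_format(conversation, tokenizer, chat_type)
    reply = predict(prompt)
    # Keep the assistant reply so the next turn sees the full history.
    conversation.append({"role": "assistant", "content": reply})
    return reply

print(chat("Name three citrus fruits."))
print(chat("Which of those has the most vitamin C?"))
```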

## vLLM server
```shell
# llama2-chat-template.jinja contains the 'mistral_template' chat template shown above
model_path=Meta-LlaMA-3-8B-Instruct-16k
python -m vllm.entrypoints.openai.api_server --model=$model_path \
    --trust-remote-code --host 0.0.0.0 --port 7777 \
    --gpu-memory-utilization 0.8 \
    --max-model-len 8192 --chat-template llama2-chat-template.jinja \
    --tensor-parallel-size 1 --served-model-name chatbot
```
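
The `--chat-template` flag above points at a jinja file on disk. One way to produce it, sketched here under the assumption that you reuse the `mistral_template` string from the Python example and the filename referenced in the command, is to write that string out as-is:

```python
# Write the mistral-style chat template string from the Python example above
# into the jinja file named in the vLLM command line.
with open("llama2-chat-template.jinja", "w") as f:
    f.write(mistral_template)
```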

```python
from openai import OpenAI
# Set OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:7777/v1"

client = OpenAI(
    api_key=openai_api_key,
    base_url=openai_api_base,
)

call_args = {
    'temperature': 0.7,
    'top_p': 0.9,
    'top_k': 40,
    'max_tokens': 2048,  # output length
    'presence_penalty': 1.0,
    'frequency_penalty': 0.0,
    "repetition_penalty": 1.0,
    "stop": ["</s>"],
}

chat_response = client.chat.completions.create(
    model="chatbot",
    messages=[
        {"role": "user", "content": "Hello"},
    ],
    # vLLM-specific sampling parameters (top_k, repetition_penalty, ...) go through extra_body.
    extra_body=call_args,
)
print("Chat response:", chat_response)
```
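
The same endpoint can also stream tokens as they are generated. A short sketch against the server started above, reusing `client` and `call_args` from the previous block and the standard streaming interface of the OpenAI Python client (`stream=True`); the prompt is a placeholder:

```python
# Streamed chat completion against the same vLLM server.
stream = client.chat.completions.create(
    model="chatbot",
    messages=[{"role": "user", "content": "Tell me a short story about a basket of fruit."}],
    stream=True,
    extra_body=call_args,
)
for chunk in stream:
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)
print()
```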

### ollama Modelfile
```shell
TEMPLATE """[INST] <<SYS>>
{{ .System }}
<</SYS>>

{{ .Prompt }} [/INST] """
PARAMETER stop [INST]
PARAMETER stop [/INST]
PARAMETER stop <<SYS>>
PARAMETER stop <</SYS>>
PARAMETER num_ctx 16384
PARAMETER temperature 0.7

SYSTEM """You are a helpful, respectful and honest assistant. Help human as much as you can."""
```
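
To build and run a local model from this Modelfile, the usual ollama workflow applies. A sketch, assuming you add a `FROM` line pointing at your local weights (for example a GGUF export, not shown above) and pick a local model name such as `llama3-8b-instruct-16k`; both the path and the name here are hypothetical:

```shell
# The Modelfile above additionally needs a FROM line, e.g.:
#   FROM ./Meta-LlaMA-3-8B-Instruct-16k.gguf   # hypothetical local GGUF path
ollama create llama3-8b-instruct-16k -f Modelfile
ollama run llama3-8b-instruct-16k
```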