---
license: apache-2.0
---
This model was trained on Mistral-Instruct-v0.2 with Chinese and English data.

GitHub: [Web-UI](https://github.com/moseshu/llama2-chat/tree/main/webui)


![image/png](https://cdn-uploads.huggingface.co/production/uploads/62f4c7172f63f904a0c61ba3/7Sqj2oRTauOplhXsodMnp.png)

```python
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

model_id = "Moses25/Mistral-7B-chat-32k"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    torch_dtype=torch.bfloat16,
    device_map="auto",
)


mistral_template="{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\\n' + system_message + '\\n<</SYS>>\\n\\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content.strip() + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' '  + content.strip() + ' ' + eos_token }}{% endif %}{% endfor %}"

llama3_template="{% set loop_messages = messages %}{% for message in loop_messages %}{% set content = '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n'+ message['content'] | trim + '<|eot_id|>' %}{% if loop.index0 == 0 %}{% set content = bos_token + content %}{% endif %}{{ content }}{% endfor %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}"
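# Roughly, the two templates render as follows:
#   mistral_template: {bos}[INST] <<SYS>>\n{system}\n<</SYS>>\n\n{user} [/INST] {assistant} {eos}
#   llama3_template:  {bos}<|start_header_id|>{role}<|end_header_id|>\n\n{content}<|eot_id|> ... <|start_header_id|>assistant<|end_header_id|>\n\n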

def chat_format(conversation: list, tokenizer, chat_type="mistral"):
    system_prompt = "You are a helpful, respectful and honest assistant. Help human as much as you can."
    ap = [{"role": "system", "content": system_prompt}] + conversation
    if chat_type == "mistral":
        id = tokenizer.apply_chat_template(ap, chat_template=mistral_template, tokenize=False)
    elif chat_type == "llama3":
        id = tokenizer.apply_chat_template(ap, chat_template=llama3_template, tokenize=False)
        # drop the trailing assistant header appended by the template
        # (rstrip would strip a character set, not the literal suffix)
        id = id.removesuffix("<|start_header_id|>assistant<|end_header_id|>\n\n")
    return id

user_chat=[{"role":"user","content":"In a basket, there are 20 oranges, 60 apples, and 40 bananas. If 15 pears were added, and half of the oranges were removed, what would be the new ratio of oranges to apples, bananas, and pears combined within the basket?"}]
text = chat_format(user_chat,tokenizer,'mistral')
def predict(content_prompt):
    inputs = tokenizer(content_prompt,return_tensors="pt",add_special_tokens=True)
    input_ids = inputs["input_ids"].to("cuda:0")
    # print(f"input length:{len(input_ids[0])}")
    with torch.no_grad():
        generation_output = model.generate(
                    input_ids=input_ids,
                    #generation_config=generation_config,
                    return_dict_in_generate=True,
                    output_scores=True,
                    max_new_tokens=2048,
                    top_p=0.9,
                    num_beams=1,
                    do_sample=True,
                    repetition_penalty=1.0,
                    eos_token_id=tokenizer.eos_token_id,
                    pad_token_id=tokenizer.pad_token_id,
                )
        s = generation_output.sequences[0]
        output = tokenizer.decode(s,skip_special_tokens=True)
        output1 = output.split("[/INST]")[-1].strip()
        # print(output1)
    return output1

predict(text)
output:"""Let's break down the steps to find the new ratio of oranges to apples, bananas, and pears combined:
Calculate the total number of fruits initially in the basket: Oranges: 20 Apples: 60 Bananas: 40 Total Fruits = 20 + 60 + 40 = 120
Add 15 pears: Total Fruits after adding pears = 120 + 15 = 135
Remove half of the oranges: Oranges remaining = 20 / 2 = 10
Calculate the total number of fruits remaining in the basket after removing half of the oranges: Total Remaining Fruits = 10 (oranges) + 60 (apples) + 40 (bananas) + 15 (pears) = 125
Find the ratio of oranges to apples, bananas, and pears combined: Ratio of Oranges to (Apples, Bananas, Pears) Combined = Oranges / (Apples + Bananas + Pears) = 10 / (60 + 40 + 15) = 10 / 115
So, the new ratio of oranges to apples, bananas, and pears combined within the basket is 10:115.
However, I should note that the actual fruit distribution in your basket may vary depending on how you decide to count and categorize the fruits. The example calculation provides a theoretical ratio based on the initial quantities mentioned."""
```
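
`chat_format` also accepts multi-turn conversations (alternating user/assistant messages, as the templates require). A small usage sketch, reusing the `chat_format` and `predict` helpers defined above:

```python
# Multi-turn usage: append the previous reply as an assistant turn,
# then format and generate again with the helpers defined above.
history = [{"role": "user", "content": "Hello, who are you?"}]
reply = predict(chat_format(history, tokenizer, "mistral"))

history += [
    {"role": "assistant", "content": reply},
    {"role": "user", "content": "Summarize your last answer in one sentence."},
]
print(predict(chat_format(history, tokenizer, "mistral")))
```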


## vLLM server
```shell
# llama2-chat-template.jinja contains the mistral_template chat template shown above
model_path="Moses25/Mistral-7B-chat-32k"
python  -m vllm.entrypoints.openai.api_server --model=$model_path \
        --trust-remote-code --host 0.0.0.0  --port 7777 \
        --gpu-memory-utilization 0.8 \
        --max-model-len 8192 --chat-template llama2-chat-template.jinja \
        --tensor-parallel-size 1 --served-model-name chatbot
```
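
The `llama2-chat-template.jinja` file passed via `--chat-template` can be produced from the `mistral_template` string defined in the Python example above; a minimal sketch (reuses that variable):

```python
# Assumes `mistral_template` is the template string defined in the Python example above.
with open("llama2-chat-template.jinja", "w", encoding="utf-8") as f:
    f.write(mistral_template)
```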

```python
from openai import OpenAI
# Set OpenAI's API key and API base to use vLLM's API server.
openai_api_key = "EMPTY"
openai_api_base = "http://localhost:7777/v1"

client = OpenAI(
    api_key=openai_api_key,
    base_url=openai_api_base,
)
call_args = {
        'temperature': 0.7,
        'top_p': 0.9,
        'top_k': 40,
        'max_tokens': 2048, # output-len
        'presence_penalty': 1.0,
        'frequency_penalty': 0.0,
        "repetition_penalty":1.0,
        "stop":["</s>"],
    }
chat_response = client.chat.completions.create(
    model="chatbot",
    messages=[
        {"role": "user", "content": "你好"},
    ],
    extra_body=call_args
)
print("Chat response:", chat_response)


```
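
To read just the generated text, or to stream tokens as they are produced, the standard OpenAI client fields can be used against the same vLLM endpoint; a sketch (the streaming form is optional):

```python
# Extract only the assistant message text from the response above.
print(chat_response.choices[0].message.content)

# Optional: stream tokens as they are generated.
stream = client.chat.completions.create(
    model="chatbot",
    messages=[{"role": "user", "content": "你好"}],
    stream=True,
    extra_body=call_args,
)
for chunk in stream:
    delta = chunk.choices[0].delta.content
    if delta:
        print(delta, end="", flush=True)
```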

### Ollama Modelfile
```shell
TEMPLATE """[INST] <<SYS>>
{{ .System }}
<</SYS>>

{{ .Prompt }} [/INST] """
PARAMETER stop [INST]
PARAMETER stop [/INST]
PARAMETER stop <<SYS>>
PARAMETER stop <</SYS>>
PARAMETER num_ctx 16384
PARAMETER temperature 0.7

SYSTEM """You are a helpful, respectful and honest assistant.Help humman as much as you can."""
```
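
After building a model from this Modelfile (for example `ollama create mistral-7b-chat-32k -f Modelfile`), it can be queried over Ollama's local REST API. A minimal sketch using the `requests` library, assuming the default Ollama port and the hypothetical model name from the create command:

```python
import requests

# Query the locally served Ollama model (name assumed from the create command above).
resp = requests.post(
    "http://localhost:11434/api/chat",
    json={
        "model": "mistral-7b-chat-32k",
        "messages": [{"role": "user", "content": "Hello!"}],
        "stream": False,
    },
)
print(resp.json()["message"]["content"])
```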