import os
import requests
import json

import gradio as gr
from transformers import AutoTokenizer


DESCRIPTION = """
# Demo: Breeze-7B-Instruct-v0.1

Breeze-7B is a language model family that builds on top of [Mistral-7B](https://huggingface.co/mistralai/Mistral-7B-v0.1), specifically intended for Traditional Chinese use.

[Breeze-7B-Base](https://huggingface.co/MediaTek-Research/Breeze-7B-Base-v0.1) is the base model for the Breeze-7B series. 
It is suitable for use if you have substantial fine-tuning data to tune it for your specific use case.

[Breeze-7B-Instruct](https://huggingface.co/MediaTek-Research/Breeze-7B-Instruct-v0.1) is fine-tuned from the base model Breeze-7B-Base, so the resulting model can be used as-is for commonly seen tasks.

[Breeze-7B-Instruct-64k](https://huggingface.co/MediaTek-Research/Breeze-7B-Instruct-64k-v0.1) is a slightly modified version of 
Breeze-7B-Instruct to enable a 64k-token context length. Roughly speaking, that is equivalent to 88k Traditional Chinese characters.

The current release version of Breeze-7B is v0.1.

*A project by the members (in alphabetical order): Chan-Jan Hsu 許湛然, Chang-Le Liu 劉昶樂, Feng-Ting Liao 廖峰挺, Po-Chun Hsu 許博竣, Yi-Chang Chen 陳宜昌, and the supervisor Da-Shan Shiu 許大山.*

**Disclaimer: Breeze-7B-Instruct and Breeze-7B-Instruct-64k have not been safety-tuned for question answering, so no response from the language model represents the position of MediaTek Research.**
"""

LICENSE = """

"""

DEFAULT_SYSTEM_PROMPT = "You are a helpful AI assistant built by MediaTek Research. The user you are helping speaks Traditional Chinese and comes from Taiwan."

API_URL = os.environ.get("API_URL")
TOKEN = os.environ.get("TOKEN")

HEADERS = {
    "accept": "application/json",
    "Authorization": f"Bearer {TOKEN}", 
    "Content-Type": "application/json",
}

MODEL_NAME = "breeze-7b-instruct-v01"
PRESENCE_PENALTY = 0
FREQUENCY_PENALTY = 0

model_name = "MediaTek-Research/Breeze-7B-Instruct-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
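# The tokenizer is loaded only to render the chat template into a prompt string;
# generation itself happens on the remote API server.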

def refusal_condition(query):
    # "Stop asking these questions!" -- a coarse keyword guard that refuses
    # queries mentioning both Taiwan- and China-related terms (substring match).

    query_remove_space = query.replace(' ', '').lower()
    is_including_tw = False
    for x in ['台灣', '台湾', 'taiwan', 'tw', '中華民國', '中华民国']:
        if x in query_remove_space:
            is_including_tw = True
    is_including_cn = False
    for x in ['中國', '中国', 'china', 'cn', '大陸', '內地', '大陆', '内地', '中華人民共和國', '中华人民共和国']:
        if x in query_remove_space:
            is_including_cn = True
    if is_including_tw and is_including_cn:
        return True

    return False
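
# For example (illustrative queries):
#   refusal_condition('台灣跟中國有什麼不同?')  # True  -- mentions both sides
#   refusal_condition('台灣的天氣如何?')        # False -- mentions only one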

with gr.Blocks() as demo:
    gr.Markdown(DESCRIPTION)

    chatbot = gr.Chatbot()
    with gr.Row():
        msg = gr.Textbox(
            container=False,
            show_label=False,
            placeholder='Type a message...',
            scale=10,
        )
        submit_button = gr.Button('Submit',
                                  variant='primary',
                                  scale=1,
                                  min_width=0)

    with gr.Row():
        retry_button = gr.Button('🔄  Retry', variant='secondary')
        undo_button = gr.Button('↩️ Undo', variant='secondary')
        clear = gr.Button('🗑️  Clear', variant='secondary')

    saved_input = gr.State()

    with gr.Accordion(label='Advanced options', open=False):
        system_prompt = gr.Textbox(label='System prompt',
                                   value=DEFAULT_SYSTEM_PROMPT,
                                   lines=6)
        max_new_tokens = gr.Slider(
            label='Max new tokens',
            minimum=32,
            maximum=1024,
            step=1,
            value=512,
        )
        temperature = gr.Slider(
            label='Temperature',
            minimum=0.01,
            maximum=0.5,
            step=0.01,
            value=0.01,
        )
        top_p = gr.Slider(
            label='Top-p (nucleus sampling)',
            minimum=0.01,
            maximum=1.0,
            step=0.01,
            value=0.01,
        )


    def user(user_message, history):
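        # Clear the textbox and append the user's message to the history with a
        # None placeholder for the assistant's reply.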
        return "", history + [[user_message, None]]


    def bot(history, max_new_tokens, temperature, top_p, system_prompt):
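        # Rebuild the conversation as chat messages, render it with the model's
        # chat template, and stream the completion into the last history slot.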
        chat_data = []
        system_prompt = system_prompt.strip()
        if system_prompt:
            chat_data.append({"role": "system", "content": system_prompt})
        for user_msg, assistant_msg in history:
            if user_msg is not None:
                chat_data.append({"role": "user", "content": user_msg})
            if assistant_msg is not None:
                chat_data.append({"role": "assistant", "content": assistant_msg})

        message = tokenizer.apply_chat_template(chat_data, tokenize=False)
        message = message[3:]  # strip the leading "<s>" (BOS) token added by the chat template

        if refusal_condition(history[-1][0]):
            # "[Safety refusal triggered]" / "Please clear the chat and start a new conversation"
            history = [['[安全拒答啟動]', '請清除再開啟對話']]
            yield history
        else:
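            # Build the completion request. The payload follows an OpenAI-style
            # /completions schema, which the server behind API_URL is assumed
            # to accept; the +0.01 offset keeps the temperature strictly positive.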
            data = {
                "model": MODEL_NAME,
                "prompt": str(message),
                "temperature": float(temperature) + 0.01,
                "n": 1,
                "max_tokens": int(max_new_tokens),
                "stop": "",
                "top_p": float(top_p),
                "logprobs": 0,
                "echo": False,
                "presence_penalty": PRESENCE_PENALTY,
                "frequency_penalty": FREQUENCY_PENALTY,
                "stream": True,
            }
     
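            # The endpoint streams Server-Sent Events: each non-empty line looks
            # like "data: {json chunk}" and the stream ends with "data: [DONE]".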
            with requests.post(API_URL, headers=HEADERS, data=json.dumps(data), stream=True) as r:
                for response in r.iter_lines():
                    if len(response) > 0:
                        text = response.decode()
                        if text != "data: [DONE]":
                            if text.startswith("data: "):
                                text = text[len("data: "):]  # strip the SSE "data: " prefix
                            delta = json.loads(text)["choices"][0]["text"]
    
                            if history[-1][1] is None:
                                history[-1][1] = delta
                            else:
                                history[-1][1] += delta
                            yield history
            if history[-1][1] is not None and history[-1][1].endswith('</s>'):
                history[-1][1] = history[-1][1][:-4]  # drop the trailing EOS token
                yield history
    
        print('== Record ==\nQuery: {query}\nResponse: {response}'.format(query=repr(message), response=repr(history[-1][1])))

    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        fn=bot,
        inputs=[
            chatbot,
            max_new_tokens,
            temperature,
            top_p,
            system_prompt,
        ],
        outputs=chatbot
    )
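
    # The Submit button runs the same user -> bot pipeline as pressing Enter.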
    submit_button.click(
        user, [msg, chatbot], [msg, chatbot], queue=False
    ).then(
        fn=bot,
        inputs=[
            chatbot,
            max_new_tokens,
            temperature,
            top_p,
            system_prompt,
        ],
        outputs=chatbot
    )


    def delete_prev_fn(
            history: list[tuple[str, str]]) -> tuple[list[tuple[str, str]], str]:
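        # Pop the most recent exchange and return its user message, so Retry can
        # resend it and Undo can restore it to the textbox.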
        try:
            message, _ = history.pop()
        except IndexError:
            message = ''
        return history, message or ''


    def display_input(message: str,
                      history: list[tuple[str, str]]) -> list[tuple[str, str]]:
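        # Re-append the saved user message with an empty assistant slot so the
        # bot can regenerate a response for it (used by Retry).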
        history.append((message, ''))
        return history

    retry_button.click(
        fn=delete_prev_fn,
        inputs=chatbot,
        outputs=[chatbot, saved_input],
        api_name=False,
        queue=False,
    ).then(
        fn=display_input,
        inputs=[saved_input, chatbot],
        outputs=chatbot,
        api_name=False,
        queue=False,
    ).then(
        fn=bot,
        inputs=[
            chatbot,
            max_new_tokens,
            temperature,
            top_p,
            system_prompt,
        ],
        outputs=chatbot,
    )

    undo_button.click(
        fn=delete_prev_fn,
        inputs=chatbot,
        outputs=[chatbot, saved_input],
        api_name=False,
        queue=False,
    ).then(
        fn=lambda x: x,
        inputs=[saved_input],
        outputs=msg,
        api_name=False,
        queue=False,
    )

    clear.click(lambda: None, None, chatbot, queue=False)

    gr.Markdown(LICENSE)

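# Serialize generation (one concurrent request) and queue up to 16 pending requests.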
demo.queue(concurrency_count=1, max_size=16)
demo.launch()