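"""Gradio chat demo for MediaTek-Research/Breeze-7B-Instruct-v0.1, served through a remote completion API."""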
import os
import requests
import json
import gradio as gr
from transformers import AutoTokenizer
DESCRIPTION = """
"""
LICENSE = """
"""
DEFAULT_SYSTEM_PROMPT = ""
API_URL = os.environ.get("API_URL")
TOKEN = os.environ.get("TOKEN")
HEADER = {
    "accept": "application/json",
    "Authorization": f"Bearer {TOKEN}",
    "Content-Type": "application/json",
}
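# Default generation parameters sent with every API request.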
MODEL_NAME = "breeze-7b-instruct-v01"
TEMPERATURE = 1
MAX_TOKENS = 16
TOP_P = 0
PRESENCE_PENALTY = 0
FREQUENCY_PENALTY = 0
eos_token = "</s>"
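# Reserve room for the generated reply within the 8192-token context window.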
MAX_MAX_NEW_TOKENS = 4096
DEFAULT_MAX_NEW_TOKENS = 1536
max_prompt_length = 8192 - MAX_MAX_NEW_TOKENS - 10
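# The tokenizer is loaded only to build prompts with the model's chat template.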
model_name = "MediaTek-Research/Breeze-7B-Instruct-v0.1"
tokenizer = AutoTokenizer.from_pretrained(model_name)
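# Build the chat UI: message box, control buttons, and advanced sampling options.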
with gr.Blocks() as demo:
gr.Markdown(DESCRIPTION)
chatbot = gr.Chatbot()
with gr.Row():
msg = gr.Textbox(
container=False,
show_label=False,
placeholder='Type a message...',
scale=10,
)
submit_button = gr.Button('Submit',
variant='primary',
scale=1,
min_width=0)
with gr.Row():
retry_button = gr.Button('🔄 Retry', variant='secondary')
undo_button = gr.Button('↩️ Undo', variant='secondary')
clear = gr.Button('🗑️ Clear', variant='secondary')
saved_input = gr.State()
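    # Sampling controls exposed under "Advanced options".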
with gr.Accordion(label='Advanced options', open=False):
system_prompt = gr.Textbox(label='System prompt',
value=DEFAULT_SYSTEM_PROMPT,
lines=6)
max_new_tokens = gr.Slider(
label='Max new tokens',
minimum=1,
maximum=MAX_MAX_NEW_TOKENS,
step=1,
value=DEFAULT_MAX_NEW_TOKENS,
)
temperature = gr.Slider(
label='Temperature',
minimum=0.1,
maximum=1.0,
step=0.1,
value=0.3,
)
top_p = gr.Slider(
label='Top-p (nucleus sampling)',
minimum=0.05,
maximum=1.0,
step=0.05,
value=0.95,
)
top_k = gr.Slider(
label='Top-k',
minimum=1,
maximum=1000,
step=1,
value=50,
)
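    # Record the user's message in the history and clear the textbox.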
def user(user_message, history):
return "", history + [[user_message, None]]
def bot(history, max_new_tokens, temperature, top_p, top_k, system_prompt):
        chat_data = []
        for user_msg, assistant_msg in history:
            if user_msg is not None:
                chat_data.append({"role": "user", "content": user_msg})
            if assistant_msg is not None:
                chat_data.append({"role": "assistant", "content": assistant_msg})
        print(chat_data)
        message = tokenizer.apply_chat_template(chat_data, tokenize=False)
        message = message[3:]  # strip the leading "<s>" BOS token added by the template
        print(message)
        # Note: the slider values (max_new_tokens, temperature, top_p, top_k) and the
        # system prompt are not forwarded to the API in this version; the module-level
        # defaults above are used instead.
        data = {
            "model": MODEL_NAME,
            "messages": message,
            "temperature": TEMPERATURE,
            "n": 1,
            "max_tokens": MAX_TOKENS,
            "stop": eos_token,
            "top_p": TOP_P,
            "logprobs": 0,
            "echo": False,
            "presence_penalty": PRESENCE_PENALTY,
            "frequency_penalty": FREQUENCY_PENALTY,
        }
        outputs = requests.post(API_URL, headers=HEADER, data=json.dumps(data)).json()
        # Assumes an OpenAI-completions-style response; adjust the key path if the
        # endpoint returns a different schema.
        history[-1][1] = outputs["choices"][0]["text"]
        return history
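    # Pressing Enter in the textbox records the user turn, then generates the reply.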
msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
fn=bot,
inputs=[
chatbot,
max_new_tokens,
temperature,
top_p,
top_k,
system_prompt,
],
outputs=chatbot
)
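    # The Submit button mirrors pressing Enter in the textbox.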
submit_button.click(
user, [msg, chatbot], [msg, chatbot], queue=False
).then(
fn=bot,
inputs=[
chatbot,
max_new_tokens,
temperature,
top_p,
top_k,
system_prompt,
],
outputs=chatbot
)
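    # Drop the most recent exchange and return the removed user message.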
def delete_prev_fn(
history: list[tuple[str, str]]) -> tuple[list[tuple[str, str]], str]:
try:
message, _ = history.pop()
except IndexError:
message = ''
return history, message or ''
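    # Re-display a user message (used by Retry) with the assistant reply left pending.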
    def display_input(message: str,
                      history: list[tuple[str, str]]) -> list[tuple[str, str]]:
        # Append as a [user, assistant] pair, matching the format produced by user().
        history.append([message, None])
        return history
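    # Retry: drop the last exchange, re-show the user message, and regenerate.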
retry_button.click(
fn=delete_prev_fn,
inputs=chatbot,
outputs=[chatbot, saved_input],
api_name=False,
queue=False,
).then(
fn=display_input,
inputs=[saved_input, chatbot],
outputs=chatbot,
api_name=False,
queue=False,
).then(
fn=bot,
inputs=[
chatbot,
max_new_tokens,
temperature,
top_p,
top_k,
system_prompt,
],
outputs=chatbot,
)
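    # Undo: drop the last exchange and put the user message back in the textbox.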
undo_button.click(
fn=delete_prev_fn,
inputs=chatbot,
outputs=[chatbot, saved_input],
api_name=False,
queue=False,
).then(
fn=lambda x: x,
inputs=[saved_input],
outputs=msg,
api_name=False,
queue=False,
)
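    # Clear wipes the whole conversation.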
clear.click(lambda: None, None, chatbot, queue=False)
gr.Markdown(LICENSE)
# demo.queue(concurrency_count=4, max_size=128)
demo.launch()