Updated space.
app.py CHANGED
@@ -296,25 +296,27 @@ async def streamChat(params):
        if message["role"] == "system":
            message["role"] = "user"
            message["content"] = f"[System: {message['content']}]"
-
-
-            async with session.post(f"{base_url}/chat/completions", headers={"Authorization": f"Bearer {get_api_key(call='api_key')}", "Content-Type": "application/json"}, json=params) as r:
-                r.raise_for_status()
-                async for line in r.content:
-                    if line:
-                        line_str = line.decode('utf-8')
-                        if line_str.startswith("data: "):
-                            line_str = line_str[6:].strip()
-                            if line_str == "[DONE]":
-                                continue
-                            try:
-                                message = json.loads(line_str)
-                                yield message
-                            except json.JSONDecodeError:
-                                continue
-        except aiohttp.ClientError:
+        params["stream"] = False;
+        async with aiohttp.ClientSession() as session:
            try:
-                async with session.post("
+                async with session.post(f"{base_url}/chat/completions", headers={"Authorization": f"Bearer {get_api_key(call='api_key')}", "Content-Type": "application/json"}, json=params) as r:
+                    r.raise_for_status()
+                    response_data = await r.json()
+                    yield {"choices": [{"delta": {"content": response_data["choices"][0]["message"]["content"]}}]}
+                    yield {"choices": [{"delta": {"content": "[DONE]"}}]}
+            except aiohttp.ClientError:
+                try:
+                    async with session.post("https://api.openai.com/v1/chat/completions", headers={"Authorization": f"Bearer {get_api_key(call='oai_api_key')}", "Content-Type": "application/json"}, json=params) as r:
+                        r.raise_for_status()
+                        response_data = await r.json()
+                        yield {"choices": [{"delta": {"content": response_data["choices"][0]["message"]["content"]}}]}
+                        yield {"choices": [{"delta": {"content": "[DONE]"}}]}
+                except aiohttp.ClientError:
+                    return
+    else:
+        async with aiohttp.ClientSession() as session:
+            try:
+                async with session.post(f"{base_url}/chat/completions", headers={"Authorization": f"Bearer {get_api_key(call='api_key')}", "Content-Type": "application/json"}, json=params) as r:
                    r.raise_for_status()
                    async for line in r.content:
                        if line:
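Note on the first hunk: per the commit's own update message, O1-* models do not support streaming, so the new branch forces `stream` off, reads the whole completion in one request, and re-emits it as a single delta-style chunk followed by a `[DONE]` chunk, so existing streaming consumers keep working. A minimal sketch of that pattern, assuming `aiohttp` and an OpenAI-compatible `/chat/completions` endpoint; `fake_stream` and `api_key` are illustrative stand-ins for the app's own names:

```python
import aiohttp

# Sketch only: wrap one non-streaming completion as a single streaming-style chunk.
async def fake_stream(base_url, api_key, params):
    params["stream"] = False  # O1-style models reject streamed requests
    async with aiohttp.ClientSession() as session:
        async with session.post(
            f"{base_url}/chat/completions",
            headers={"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"},
            json=params,
        ) as r:
            r.raise_for_status()
            data = await r.json()
    # Re-emit the full message as one delta so downstream streaming code is unchanged.
    yield {"choices": [{"delta": {"content": data["choices"][0]["message"]["content"]}}]}
    yield {"choices": [{"delta": {"content": "[DONE]"}}]}
```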
@@ -329,7 +331,23 @@ async def streamChat(params):
                                except json.JSONDecodeError:
                                    continue
            except aiohttp.ClientError:
-
+                try:
+                    async with session.post("https://api.openai.com/v1/chat/completions", headers={"Authorization": f"Bearer {get_api_key(call='oai_api_key')}", "Content-Type": "application/json"}, json=params) as r:
+                        r.raise_for_status()
+                        async for line in r.content:
+                            if line:
+                                line_str = line.decode('utf-8')
+                                if line_str.startswith("data: "):
+                                    line_str = line_str[6:].strip()
+                                    if line_str == "[DONE]":
+                                        continue
+                                    try:
+                                        message = json.loads(line_str)
+                                        yield message
+                                    except json.JSONDecodeError:
+                                        continue
+                except aiohttp.ClientError:
+                    return

def imagine(prompt):
    try:
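The second hunk leaves the SSE parsing itself untouched; it only adds a fallback request to api.openai.com when the primary call raises `aiohttp.ClientError`. For readability, the line-by-line parse that both streaming branches share looks roughly like this (a sketch; `iter_chunks` is an illustrative name and `r` is an `aiohttp` response to a streamed `/chat/completions` request):

```python
import json

# Sketch only: parse an SSE "data: ..." stream of chat-completion chunks.
async def iter_chunks(r):
    async for line in r.content:
        if not line:
            continue
        line_str = line.decode('utf-8')
        if not line_str.startswith("data: "):
            continue
        line_str = line_str[6:].strip()
        if line_str == "[DONE]":            # end-of-stream sentinel, nothing to decode
            continue
        try:
            yield json.loads(line_str)      # one delta chunk per "data:" line
        except json.JSONDecodeError:        # skip keep-alives / malformed lines
            continue
```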
@@ -594,15 +612,15 @@ async def respond(


handleApiKeys();loadModels();checkModels();loadENV();
-lastUpdateMessage = "New
+lastUpdateMessage = "New model support O1-* (no streaming/temp/top-p tho, and a bit edited system prompt thingies)"
demo = gr.ChatInterface(
    respond,
-    title="
+    title="O1-preview",
    description=f"A OpenAI API proxy!<br/>View API docs [here](/api/v1/docs) <strong>[Yes you can use this as an API in a simpler manner]</strong>.<br/><strong>[Last update: {lastUpdateMessage}]</strong> Also you can only submit images to vision models; txt/code/etc. to all models.",
    multimodal=True,
    additional_inputs=[
        gr.Textbox(value="You are a helpful assistant. You are an OpenAI GPT model. Please adhere to OpenAI's usage policies and guidelines. Ensure your responses are accurate, respectful, and within the scope of OpenAI's rules.", label="System message"),
-        gr.Dropdown(choices=models, value="
+        gr.Dropdown(choices=models, value="o1-mini", label="Model"),
        gr.Slider(minimum=1, maximum=4096, value=4096, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature"),
        gr.Slider(minimum=0.05, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
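The last hunk only changes the UI text and the default model. Condensed, the `gr.ChatInterface` wiring it touches looks roughly like this (a sketch assuming Gradio; `respond`, `models`, and the full system prompt live elsewhere in app.py, so stand-ins are used here):

```python
import gradio as gr

# Sketch only: stand-ins for the app's real respond() and model list.
models = ["o1-mini", "o1-preview"]

def respond(message, history, system_message, model, max_tokens, temperature, top_p):
    return f"[{model}] echo: {message}"

demo = gr.ChatInterface(
    respond,
    title="O1-preview",
    multimodal=True,
    additional_inputs=[
        gr.Textbox(value="You are a helpful assistant.", label="System message"),
        gr.Dropdown(choices=models, value="o1-mini", label="Model"),
        gr.Slider(minimum=1, maximum=4096, value=4096, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature"),
        gr.Slider(minimum=0.05, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
)
```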