Quardo committed on
Commit
5799683
1 Parent(s): 1e0c08a

Updated space

Browse files
Files changed (2) hide show
  1. app.py +85 -55
  2. index.html +4 -0
app.py CHANGED
@@ -38,6 +38,7 @@ def loadENV():
38
  env_data = response.json()
39
  for key, value in env_data.items():
40
  os.environ[key] = value
 
41
  checkModels()
42
  loadModels()
43
  except Exception as e:
@@ -51,6 +52,7 @@ def loadENV():
51
  env_data = response.json()
52
  for key, value in env_data.items():
53
  os.environ[key] = value
 
54
  checkModels()
55
  loadModels()
56
  except Exception as e:
@@ -62,7 +64,7 @@ def checkModels():
62
  global base_url
63
  if API_BASE == "env":
64
  try:
65
- response = requests.get(f"{base_url}/models", headers={"Authorization": f"Bearer {api_key}"})
66
  response.raise_for_status()
67
  if not ('data' in response.json()):
68
  base_url = "https://api.openai.com/v1"
@@ -85,6 +87,36 @@ def loadModels():
85
  "data": [{"id": v, "object": "model", "created": 0, "owned_by": "system"} for v in models]
86
  }
87
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
88
  def encodeChat(messages):
89
  output = []
90
  for message in messages:
@@ -101,7 +133,7 @@ def moderate(messages):
101
  f"{base_url}/moderations",
102
  headers={
103
  "Content-Type": "application/json",
104
- "Authorization": f"Bearer {api_key}"
105
  },
106
  json={"input": encodeChat(messages)}
107
  )
@@ -114,7 +146,7 @@ def moderate(messages):
114
  "https://api.openai.com/v1/moderations",
115
  headers={
116
  "Content-Type": "application/json",
117
- "Authorization": f"Bearer {api_key}"
118
  },
119
  json={"input": encodeChat(messages)}
120
  )
@@ -136,7 +168,7 @@ def moderate(messages):
136
  async def streamChat(params):
137
  async with aiohttp.ClientSession() as session:
138
  try:
139
- async with session.post(f"{base_url}/chat/completions", headers={"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}, json=params) as r:
140
  r.raise_for_status()
141
  async for line in r.content:
142
  if line:
@@ -152,7 +184,7 @@ async def streamChat(params):
152
  continue
153
  except aiohttp.ClientError:
154
  try:
155
- async with session.post("https://api.openai.com/v1/chat/completions", headers={"Authorization": f"Bearer {api_key}", "Content-Type": "application/json"}, json=params) as r:
156
  r.raise_for_status()
157
  async for line in r.content:
158
  if line:
@@ -169,6 +201,45 @@ async def streamChat(params):
169
  except aiohttp.ClientError:
170
  return
171
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
172
  def rnd(length=8):
173
  letters = string.ascii_letters + string.digits
174
  return ''.join(random.choice(letters) for i in range(length))
@@ -231,45 +302,6 @@ def handleMultimodalData(model, role, data):
231
  return {"role": role, "content": str(data)}
232
  return {"role": role, "content": getattr(data, 'text', str(data))}
233
 
234
- def imagine(prompt):
235
- try:
236
- response = requests.post(
237
- f"{base_url}/images/generations",
238
- headers={
239
- "Content-Type": "application/json",
240
- "Authorization": f"Bearer {api_key}"
241
- },
242
- json={
243
- "model": "dall-e-3",
244
- "prompt": prompt,
245
- "quality": "hd",
246
- }
247
- )
248
- response.raise_for_status()
249
- result = response.json()
250
- except requests.exceptions.RequestException as e:
251
- print(f"Error during moderation request to {base_url}: {e}")
252
- try:
253
- response = requests.post(
254
- "https://api.openai.com/v1/images/generations",
255
- headers={
256
- "Content-Type": "application/json",
257
- "Authorization": f"Bearer {api_key}"
258
- },
259
- json={
260
- "model": "dall-e-3",
261
- "prompt": prompt,
262
- "quality": "hd",
263
- }
264
- )
265
- response.raise_for_status()
266
- result = response.json()
267
- except requests.exceptions.RequestException as e:
268
- print(f"Error during moderation request to fallback URL: {e}")
269
- return False
270
-
271
- return result.get('data', [{}])[0].get('url')
272
-
273
  class FileMessage(GradioModel):
274
  file: FileData
275
  alt_text: Optional[str] = None
@@ -292,6 +324,7 @@ async def respond(
292
  seed,
293
  random_seed,
294
  fakeTool,
 
295
  consent
296
  ):
297
  if not consent:
@@ -355,7 +388,10 @@ Alright! Here's an image of a whimsical scene featuring a cat wearing a wizard h
355
  5. (if in any case the user asks for the prompt)```
356
  Sure here's the prompt i wrote to generate the image below: `A colorful bird soaring through a bustling city skyline. The bird should have vibrant feathers, contrasting against the modern buildings and blue sky. Below, the city is alive with activity, featuring tall skyscrapers, busy streets, and small parks, creating a dynamic urban scene.`
357
  ```]"""})
358
- messages.append({"role": "system", "content": system_message});
 
 
 
359
 
360
  for val in history:
361
  if val[0] is not None:
@@ -411,8 +447,7 @@ Sure here's the prompt i wrote to generate the image below: `A colorful bird soa
411
  except json.JSONDecodeError:
412
  continue
413
 
414
- loadModels();checkModels();loadENV();
415
-
416
  lastUpdateMessage = "Added image generation via DALL-E-3."
417
  demo = gr.ChatInterface(
418
  respond,
@@ -420,20 +455,15 @@ demo = gr.ChatInterface(
420
  description=f"A simple proxy to OpenAI!<br/>You can use this space as a proxy! click [here](/api/v1/docs) to view the documents. <strong>[last update: {lastUpdateMessage}]</strong><br/>Also you can only submit images to vision/4o models but can submit txt/code/etc. files to all models.",
421
  multimodal=True,
422
  additional_inputs=[
423
- gr.Textbox(value="You are a helpful assistant.", label="System message"),
424
  gr.Dropdown(choices=models, value="gpt-4o-mini-2024-07-18", label="Model"),
425
  gr.Slider(minimum=1, maximum=4096, value=4096, step=1, label="Max new tokens"),
426
  gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature"),
427
- gr.Slider(
428
- minimum=0.05,
429
- maximum=1.0,
430
- value=0.95,
431
- step=0.05,
432
- label="Top-p (nucleus sampling)",
433
- ),
434
  gr.Slider(minimum=0, maximum=2**32, value=0, step=1, label="Seed"),
435
  gr.Checkbox(label="Randomize Seed", value=True),
436
  gr.Checkbox(label="FakeTool [Image generation beta]", value=True),
 
437
  gr.Checkbox(label="User Consent [I agree to the terms and conditions. (can't make a button for it)]", value=False)
438
  ],
439
  )
 
38
  env_data = response.json()
39
  for key, value in env_data.items():
40
  os.environ[key] = value
41
+ handleApiKeys()
42
  checkModels()
43
  loadModels()
44
  except Exception as e:
 
52
  env_data = response.json()
53
  for key, value in env_data.items():
54
  os.environ[key] = value
55
+ handleApiKeys()
56
  checkModels()
57
  loadModels()
58
  except Exception as e:
 
64
  global base_url
65
  if API_BASE == "env":
66
  try:
67
+ response = requests.get(f"{base_url}/models", headers={"Authorization": f"Bearer {get_api_key()}"})
68
  response.raise_for_status()
69
  if not ('data' in response.json()):
70
  base_url = "https://api.openai.com/v1"
 
87
  "data": [{"id": v, "object": "model", "created": 0, "owned_by": "system"} for v in models]
88
  }
89
 
90
def handleApiKeys():
    """Validate the configured API key(s) against the ``/models`` endpoint.

    ``api_key`` may hold a single key or a comma-separated list. For a list,
    every key is probed and the non-working ones are dropped; for a single
    key, it is probed directly.

    Raises:
        RuntimeError: if no key (list case) or the single key is valid.
    """
    global api_key
    if ',' in api_key:
        working_keys = []
        for key in api_key.split(','):
            try:
                response = requests.get(f"{base_url}/models", headers={"Authorization": f"Bearer {key}"})
                response.raise_for_status()
                if 'data' in response.json():
                    working_keys.append(key)
            except Exception as e:
                # str(e): concatenating the exception object itself raises TypeError.
                print("API key {" + key + "} is not valid or an actuall error happend {" + str(e) + "}")
        # Bug fix: original tested `len(output) == 1`, which raised even when
        # exactly one key was working. "No key works" means an EMPTY list.
        if len(working_keys) == 0:
            raise RuntimeError("No API key is working")
        api_key = ",".join(working_keys)
    else:
        try:
            # Bug fix: original referenced the undefined loop name `key` here;
            # in the single-key branch the key to probe is `api_key` itself.
            response = requests.get(f"{base_url}/models", headers={"Authorization": f"Bearer {api_key}"})
            response.raise_for_status()
            if 'data' not in response.json():
                raise RuntimeError("Current API key is not valid")
        except Exception as e:
            # str(e) for the same concatenation TypeError as above.
            raise RuntimeError("Current API key is not valid or an actuall error happend {" + str(e) + "}")
113
+
114
+
115
def get_api_key():
    """Return one usable API key.

    When ``api_key`` is a comma-separated list, a key is picked at random
    (cheap load-balancing across keys); otherwise the key is returned as-is.
    """
    keys = api_key.split(',')
    if len(keys) > 1:
        return random.choice(keys)
    return api_key
119
+
120
  def encodeChat(messages):
121
  output = []
122
  for message in messages:
 
133
  f"{base_url}/moderations",
134
  headers={
135
  "Content-Type": "application/json",
136
+ "Authorization": f"Bearer {get_api_key()}"
137
  },
138
  json={"input": encodeChat(messages)}
139
  )
 
146
  "https://api.openai.com/v1/moderations",
147
  headers={
148
  "Content-Type": "application/json",
149
+ "Authorization": f"Bearer {get_api_key()}"
150
  },
151
  json={"input": encodeChat(messages)}
152
  )
 
168
  async def streamChat(params):
169
  async with aiohttp.ClientSession() as session:
170
  try:
171
+ async with session.post(f"{base_url}/chat/completions", headers={"Authorization": f"Bearer {get_api_key()}", "Content-Type": "application/json"}, json=params) as r:
172
  r.raise_for_status()
173
  async for line in r.content:
174
  if line:
 
184
  continue
185
  except aiohttp.ClientError:
186
  try:
187
+ async with session.post("https://api.openai.com/v1/chat/completions", headers={"Authorization": f"Bearer {get_api_key()}", "Content-Type": "application/json"}, json=params) as r:
188
  r.raise_for_status()
189
  async for line in r.content:
190
  if line:
 
201
  except aiohttp.ClientError:
202
  return
203
 
204
def imagine(prompt):
    """Generate an image with DALL-E 3 and return its URL.

    Posts the prompt to ``{base_url}/images/generations`` first and, on any
    request failure, retries against the official OpenAI endpoint.

    Args:
        prompt: the text prompt to render.

    Returns:
        The generated image URL (str, possibly ``None`` if the response has
        no url field), or ``False`` when both attempts fail.
    """
    # Identical payload for primary and fallback requests — build it once.
    payload = {
        "model": "dall-e-3",
        "prompt": prompt,
        "quality": "hd",
    }
    try:
        response = requests.post(
            f"{base_url}/images/generations",
            headers={
                "Content-Type": "application/json",
                "Authorization": f"Bearer {get_api_key()}"
            },
            json=payload
        )
        response.raise_for_status()
        result = response.json()
    except requests.exceptions.RequestException as e:
        # Message fixed: this is the image-generation request, not moderation
        # (the original text was copy-pasted from moderate()).
        print(f"Error during image generation request to {base_url}: {e}")
        try:
            response = requests.post(
                "https://api.openai.com/v1/images/generations",
                headers={
                    "Content-Type": "application/json",
                    "Authorization": f"Bearer {get_api_key()}"
                },
                json=payload
            )
            response.raise_for_status()
            result = response.json()
        except requests.exceptions.RequestException as e:
            print(f"Error during image generation request to fallback URL: {e}")
            return False

    return result.get('data', [{}])[0].get('url')
242
+
243
  def rnd(length=8):
244
  letters = string.ascii_letters + string.digits
245
  return ''.join(random.choice(letters) for i in range(length))
 
302
  return {"role": role, "content": str(data)}
303
  return {"role": role, "content": getattr(data, 'text', str(data))}
304
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
305
  class FileMessage(GradioModel):
306
  file: FileData
307
  alt_text: Optional[str] = None
 
324
  seed,
325
  random_seed,
326
  fakeTool,
327
+ betterSystemPrompt,
328
  consent
329
  ):
330
  if not consent:
 
388
  5. (if in any case the user asks for the prompt)```
389
  Sure here's the prompt i wrote to generate the image below: `A colorful bird soaring through a bustling city skyline. The bird should have vibrant feathers, contrasting against the modern buildings and blue sky. Below, the city is alive with activity, featuring tall skyscrapers, busy streets, and small parks, creating a dynamic urban scene.`
390
  ```]"""})
391
+ if betterSystemPrompt:
392
+ messages.append({"role": "system", "content": f"You are a helpful assistant. You are an OpenAI GPT model named {model_name}. The current time is {time.strftime('%Y-%m-%d %H:%M:%S')}. Please adhere to OpenAI's usage policies and guidelines. Ensure your responses are accurate, respectful, and within the scope of OpenAI's rules."});
393
+ else:
394
+ messages.append({"role": "system", "content": system_message});
395
 
396
  for val in history:
397
  if val[0] is not None:
 
447
  except json.JSONDecodeError:
448
  continue
449
 
450
+ handleApiKeys();loadModels();checkModels();loadENV();
 
451
  lastUpdateMessage = "Added image generation via DALL-E-3."
452
  demo = gr.ChatInterface(
453
  respond,
 
455
  description=f"A simple proxy to OpenAI!<br/>You can use this space as a proxy! click [here](/api/v1/docs) to view the documents. <strong>[last update: {lastUpdateMessage}]</strong><br/>Also you can only submit images to vision/4o models but can submit txt/code/etc. files to all models.",
456
  multimodal=True,
457
  additional_inputs=[
458
+ gr.Textbox(value="You are a helpful assistant. You are an OpenAI GPT model. Please adhere to OpenAI's usage policies and guidelines. Ensure your responses are accurate, respectful, and within the scope of OpenAI's rules.", label="System message"),
459
  gr.Dropdown(choices=models, value="gpt-4o-mini-2024-07-18", label="Model"),
460
  gr.Slider(minimum=1, maximum=4096, value=4096, step=1, label="Max new tokens"),
461
  gr.Slider(minimum=0.1, maximum=2.0, value=0.7, step=0.05, label="Temperature"),
462
+ gr.Slider(minimum=0.05, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
 
 
 
 
 
 
463
  gr.Slider(minimum=0, maximum=2**32, value=0, step=1, label="Seed"),
464
  gr.Checkbox(label="Randomize Seed", value=True),
465
  gr.Checkbox(label="FakeTool [Image generation beta]", value=True),
466
+ gr.Checkbox(label="Better system prompt (ignores the system prompt set by user.)", value=True),
467
  gr.Checkbox(label="User Consent [I agree to the terms and conditions. (can't make a button for it)]", value=False)
468
  ],
469
  )
index.html CHANGED
@@ -37,6 +37,10 @@
37
  <hr/>
38
  <h2>Updates</h2>
39
  <div>
 
 
 
 
40
  <div>
41
  <strong> - 5. Update Rollback (part 2).</strong>
42
  <p> * Switched back to old model, as it at least did some enough check</p>
 
37
  <hr/>
38
  <h2>Updates</h2>
39
  <div>
40
+ <div>
41
+ <strong> - 6. Update.</strong>
42
+ <p> * Added image generation via DALL-E-3.</p>
43
+ </div><hr/>
44
  <div>
45
  <strong> - 5. Update Rollback (part 2).</strong>
46
  <p> * Switched back to old model, as it at least did some enough check</p>