John6666 committed
Commit 97cf4f0 • 1 Parent(s): 8300fda

Upload 2 files

Files changed (2)
  1. app.py +11 -9
  2. externalmod.py +27 -26
app.py CHANGED
@@ -125,9 +125,9 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css) as demo:
     num_imagesone = gr.Slider(1, max_imagesone, value=max_imagesone, step=1, label='Nobody gets to see this label so I can put here whatever I want!', visible=False)
 
     with gr.Row():
-        gen_button = gr.Button('Generate', scale=3)
-        stop_button = gr.Button('Stop', variant='secondary', interactive=False, scale=1)
-        gen_button.click(lambda: gr.update(interactive=True), None, stop_button)
+        gen_button = gr.Button('Generate', variant='primary', scale=3)
+        #stop_button = gr.Button('Stop', variant='secondary', interactive=False, scale=1)
+        #gen_button.click(lambda: gr.update(interactive=True), None, stop_button)
 
     with gr.Row():
         output = [gr.Image(label='', show_download_button=True, elem_classes="outputone",
@@ -140,8 +140,9 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css) as demo:
     gen_event = gr.on(triggers=[gen_button.click, txt_input.submit],
                       fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5: gen_fn(m, t1, t2, n1, n2, n3, n4, n5) if (i < n) else None,
                       inputs=[img_in, num_imagesone, model_choice, txt_input, neg_input,
-                              height, width, steps, cfg, seed], outputs=[o])
-    stop_button.click(lambda: gr.update(interactive = False), None, stop_button, cancels=[gen_event])
+                              height, width, steps, cfg, seed], outputs=[o],
+                      concurrency_limit=None, queue=False) # Be sure to delete ", queue=False" when activating the stop button
+    #stop_button.click(lambda: gr.update(interactive = False), None, stop_button, cancels=[gen_event])
     with gr.Row():
         gr.HTML(
             """
@@ -170,8 +171,8 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css) as demo:
 
     with gr.Row():
         gen_button2 = gr.Button(f'Generate up to {int(max_images)} images in up to 3 minutes total', scale=3)
-        stop_button2 = gr.Button('Stop', variant='secondary', interactive=False, scale=1)
-        gen_button2.click(lambda: gr.update(interactive=True), None, stop_button2)
+        #stop_button2 = gr.Button('Stop', variant='secondary', interactive=False, scale=1)
+        #gen_button2.click(lambda: gr.update(interactive=True), None, stop_button2)
     gr.HTML(
         """
         <div style="text-align: center; max-width: 1200px; margin: 0 auto;">
@@ -195,8 +196,9 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css) as demo:
     gen_event2 = gr.on(triggers=[gen_button2.click, txt_input2.submit],
                        fn=lambda i, n, m, t1, t2, n1, n2, n3, n4, n5: gen_fn(m, t1, t2, n1, n2, n3, n4, n5) if (i < n) else None,
                        inputs=[img_i, num_images, model_choice2, txt_input2, neg_input2,
-                               height2, width2, steps2, cfg2, seed2], outputs=[o])
-    stop_button2.click(lambda: gr.update(interactive=False), None, stop_button2, cancels=[gen_event2])
+                               height2, width2, steps2, cfg2, seed2], outputs=[o],
+                       concurrency_limit=None, queue=False) # Be sure to delete ", queue=False" when activating the stop button
+    #stop_button2.click(lambda: gr.update(interactive=False), None, stop_button2, cancels=[gen_event2])
     with gr.Row():
         gr.HTML(
             """
 
externalmod.py CHANGED
@@ -9,7 +9,7 @@ import re
 import tempfile
 import warnings
 from pathlib import Path
-from typing import TYPE_CHECKING, Callable
+from typing import TYPE_CHECKING, Callable, Literal
 
 import httpx
 import huggingface_hub
@@ -33,6 +33,7 @@ if TYPE_CHECKING:
     from gradio.interface import Interface
 
 
+HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
 server_timeout = 600
 
 
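
The conditional expression reads the variable twice only to collapse an empty string to None; `or None` does the same in one pass, and None is what `HfApi(token=...)` and `AsyncInferenceClient(token=...)` below expect for anonymous access. An equivalent spelling:

import os

HF_TOKEN = os.environ.get("HF_TOKEN") or None  # unset and "" both become None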
 
@@ -40,7 +41,7 @@ server_timeout = 600
 def load(
     name: str,
     src: str | None = None,
-    hf_token: str | None = None,
+    hf_token: str | Literal[False] | None = None,
     alias: str | None = None,
     **kwargs,
 ) -> Blocks:
@@ -51,7 +52,7 @@ def load(
     Parameters:
         name: the name of the model (e.g. "gpt2" or "facebook/bart-base") or space (e.g. "flax-community/spanish-gpt2"), can include the `src` as prefix (e.g. "models/facebook/bart-base")
         src: the source of the model: `models` or `spaces` (or leave empty if source is provided as a prefix in `name`)
-        hf_token: optional access token for loading private Hugging Face Hub models or spaces. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide this if you are loading a trusted private Space as it can be read by the Space you are loading.
+        hf_token: optional access token for loading private Hugging Face Hub models or spaces. Will default to the locally saved token if not provided. Pass `token=False` if you don't want to send your token to the server. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide a token if you are loading a trusted private Space as it can be read by the Space you are loading.
         alias: optional string used as the name of the loaded model instead of the default name (only applies if loading a Space running Gradio 2.x)
     Returns:
         a Gradio Blocks object for the given model
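
The docstring text follows upstream Gradio, which is why it says `token=False` while the parameter here is named `hf_token`. The three modes it describes, as a hypothetical usage sketch (model names illustrative):

from externalmod import load

demo = load("models/gpt2")                  # None: fall back to a locally saved token, if any
demo = load("models/gpt2", hf_token=False)  # False: send no token at all
demo = load("models/you/private-model", hf_token="hf_...")  # explicit token
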
@@ -68,7 +69,7 @@ def load(
 def load_blocks_from_repo(
     name: str,
     src: str | None = None,
-    hf_token: str | None = None,
+    hf_token: str | Literal[False] | None = None,
     alias: str | None = None,
     **kwargs,
 ) -> Blocks:
@@ -92,7 +93,7 @@ def load_blocks_from_repo(
     if src.lower() not in factory_methods:
         raise ValueError(f"parameter: src must be one of {factory_methods.keys()}")
 
-    if hf_token is not None:
+    if hf_token is not None and hf_token is not False:
         if Context.hf_token is not None and Context.hf_token != hf_token:
             warnings.warn(
                 """You are loading a model/Space with a different access token than the one you used to load a previous model/Space. This is not recommended, as it may cause unexpected behavior."""
@@ -103,12 +104,16 @@
     return blocks
 
 
-def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwargs):
+def from_model(
+    model_name: str, hf_token: str | Literal[False] | None, alias: str | None, **kwargs
+):
     model_url = f"https://huggingface.co/{model_name}"
     api_url = f"https://api-inference.huggingface.co/models/{model_name}"
     print(f"Fetching model from: {model_url}")
 
-    headers = {"Authorization": f"Bearer {hf_token}"} if hf_token is not None else {}
+    headers = (
+        {} if hf_token in [False, None] else {"Authorization": f"Bearer {hf_token}"}
+    )
     response = httpx.request("GET", api_url, headers=headers)
     if response.status_code != 200:
         raise ModelNotFoundError(
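
`hf_token in [False, None]` makes the header three-state: a string authenticates, while False and None both stay anonymous at this call site (None may still pick up a saved token elsewhere in the stack). A standalone sketch of the pattern:

from typing import Literal

def auth_headers(hf_token: str | Literal[False] | None) -> dict[str, str]:
    # Only a real token yields an Authorization header.
    return {} if hf_token in [False, None] else {"Authorization": f"Bearer {hf_token}"}

assert auth_headers(None) == {}
assert auth_headers(False) == {}
assert auth_headers("hf_abc") == {"Authorization": "Bearer hf_abc"}
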
@@ -371,7 +376,11 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwarg
     def query_huggingface_inference_endpoints(*data, **kwargs):
         if preprocess is not None:
             data = preprocess(*data)
-        data = fn(*data, **kwargs) # type: ignore
+        try:
+            data = fn(*data, **kwargs) # type: ignore
+        except huggingface_hub.utils.HfHubHTTPError as e:
+            if "429" in str(e):
+                raise TooManyRequestsError() from e
         if postprocess is not None:
             data = postprocess(data) # type: ignore
         return data
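
One caveat: an HfHubHTTPError whose message lacks "429" is caught here and then silently dropped, so `data` continues downstream as the preprocessed inputs. If only rate limits should be translated, a bare `raise` keeps other HTTP errors loud; a sketch as a standalone helper (`TooManyRequestsError` as defined in this module):

import huggingface_hub

def call_with_rate_limit_guard(fn, *data, **kwargs):
    try:
        return fn(*data, **kwargs)
    except huggingface_hub.utils.HfHubHTTPError as e:
        if "429" in str(e):
            raise TooManyRequestsError() from e
        raise  # anything that is not a rate limit propagates unchanged
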
@@ -383,7 +392,7 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwarg
         "inputs": inputs,
         "outputs": outputs,
         "title": model_name,
-        # "examples": examples,
+        #"examples": examples,
     }
 
     kwargs = dict(interface_info, **kwargs)
@@ -394,19 +403,12 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwarg
 def from_spaces(
     space_name: str, hf_token: str | None, alias: str | None, **kwargs
 ) -> Blocks:
-    client = Client(
-        space_name,
-        hf_token=hf_token,
-        download_files=False,
-        _skip_components=False,
-    )
-
     space_url = f"https://huggingface.co/spaces/{space_name}"
 
     print(f"Fetching Space from: {space_url}")
 
     headers = {}
-    if hf_token is not None:
+    if hf_token not in [False, None]:
         headers["Authorization"] = f"Bearer {hf_token}"
 
     iframe_url = (
@@ -443,8 +445,7 @@ def from_spaces(
         "Blocks or Interface locally. You may find this Guide helpful: "
         "https://gradio.app/using_blocks_like_functions/"
     )
-    if client.app_version < version.Version("4.0.0b14"):
-        return from_spaces_blocks(space=space_name, hf_token=hf_token)
+    return from_spaces_blocks(space=space_name, hf_token=hf_token)
 
 
 def from_spaces_blocks(space: str, hf_token: str | None) -> Blocks:
@@ -489,7 +490,7 @@ def from_spaces_interface(
     config = external_utils.streamline_spaces_interface(config)
     api_url = f"{iframe_url}/api/predict/"
     headers = {"Content-Type": "application/json"}
-    if hf_token is not None:
+    if hf_token not in [False, None]:
         headers["Authorization"] = f"Bearer {hf_token}"
 
     # The function should call the API with preprocessed data
@@ -529,7 +530,7 @@ def gr_Interface_load(
     src: str | None = None,
     hf_token: str | None = None,
     alias: str | None = None,
-    **kwargs,
+    **kwargs, # ignore
 ) -> Blocks:
     try:
         return load_blocks_from_repo(name, src, hf_token, alias)
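
The `# ignore` marks that the extra kwargs are accepted but never forwarded; load_blocks_from_repo receives only name, src, hf_token, and alias. Callers can therefore pass stray keyword arguments without crashing (model name illustrative):

demo = gr_Interface_load("models/stabilityai/sdxl-turbo", examples=None)  # examples=... is silently dropped
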
@@ -543,8 +544,8 @@ def list_uniq(l):
 
 
 def get_status(model_name: str):
-    from huggingface_hub import InferenceClient
-    client = InferenceClient(timeout=10)
+    from huggingface_hub import AsyncInferenceClient
+    client = AsyncInferenceClient(token=HF_TOKEN, timeout=10)
     return client.get_model_status(model_name)
 
 
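
AsyncInferenceClient methods are coroutines, so get_status now returns an awaitable rather than a ModelStatus; synchronous callers need an event loop. A usage sketch (model name illustrative):

import asyncio

status = asyncio.run(get_status("stabilityai/sdxl-turbo"))
print(status.loaded, status.state)  # ModelStatus fields from huggingface_hub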
 
@@ -563,20 +564,20 @@ def is_loadable(model_name: str, force_gpu: bool = False):
 
 def find_model_list(author: str="", tags: list[str]=[], not_tag="", sort: str="last_modified", limit: int=30, force_gpu=False, check_status=False):
     from huggingface_hub import HfApi
-    api = HfApi()
+    api = HfApi(token=HF_TOKEN)
     default_tags = ["diffusers"]
     if not sort: sort = "last_modified"
     limit = limit * 20 if check_status and force_gpu else limit * 5
     models = []
     try:
-        model_infos = api.list_models(author=author, task="text-to-image",
+        model_infos = api.list_models(author=author, #task="text-to-image",
                                       tags=list_uniq(default_tags + tags), cardData=True, sort=sort, limit=limit)
     except Exception as e:
         print(f"Error: Failed to list models.")
         print(e)
         return models
     for model in model_infos:
-        if not model.private and not model.gated:
+        if not model.private and not model.gated or HF_TOKEN is not None:
             loadable = is_loadable(model.id, force_gpu) if check_status else True
             if not_tag and not_tag in model.tags or not loadable: continue
             models.append(model.id)
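
Two behaviors in this hunk follow from Python precedence and the commented-out filter: `not model.private and not model.gated or HF_TOKEN is not None` groups as `(not private and not gated) or (token set)`, so setting HF_TOKEN admits private and gated models as well; and with `task=` disabled, candidate selection rests on the `diffusers` tag alone. A usage sketch (author and tag illustrative):

models = find_model_list(author="John6666", tags=["stable-diffusion-xl"], limit=10)
print(models)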
 