John6666 committed on
Commit 006f892 • 1 Parent(s): aad6757

Upload 3 files

Files changed (3)
  1. README.md +4 -2
  2. app.py +18 -12
  3. externalmod.py +26 -25
README.md CHANGED
@@ -1,5 +1,5 @@
 ---
-title: 866 AI Art Models 6 Outputs (Gradio 4.x, Single Gallery)
+title: 800+ AI Art Models 6 Outputs (Gradio 4.x, Single Gallery) (Huggingface Diffusion)
 emoji: 🛕🛕
 colorFrom: green
 colorTo: blue
@@ -7,7 +7,9 @@ sdk: gradio
 sdk_version: 4.42.0
 app_file: app.py
 pinned: false
-duplicated_from: Yntec/Diffusion80XX
+duplicated_from:
+- Yntec/Diffusion80XX
+- Yntec/HuggingfaceDiffusion
 short_description: Compare up to 6 image models!
 ---

app.py CHANGED
@@ -38,13 +38,13 @@ def extend_choices(choices):

 def update_imgbox(choices):
     choices_plus = extend_choices(choices[:num_models])
-    return [gr.Image(None, label = m, visible = (m != 'NA')) for m in choices_plus]
+    return [gr.Image(None, label=m, visible=(m!='NA')) for m in choices_plus]


 def random_choices():
     import random
     random.seed()
-    return random.choices(models, k = num_models)
+    return random.choices(models, k=num_models)


 # https://huggingface.co/docs/api-inference/detailed_parameters
@@ -67,12 +67,18 @@ async def infer(model_str, prompt, nprompt="", height=None, width=None, steps=No
     await asyncio.sleep(0)
     try:
         result = await asyncio.wait_for(task, timeout=timeout)
-    except (Exception, asyncio.TimeoutError) as e:
+    except asyncio.TimeoutError as e:
         print(e)
         print(f"Task timed out: {model_str}")
         if not task.done(): task.cancel()
         result = None
-    if task.done() and result is not None:
+        raise Exception(f"Task timed out: {model_str}")
+    except Exception as e:
+        print(e)
+        if not task.done(): task.cancel()
+        result = None
+        raise Exception(e)
+    if task.done() and result is not None and not isinstance(result, tuple):
         with lock:
             png_path = "image.png"
             result.save(png_path)
@@ -82,8 +88,6 @@ async def infer(model_str, prompt, nprompt="", height=None, width=None, steps=No


 def gen_fn(model_str, prompt, nprompt="", height=None, width=None, steps=None, cfg=None, seed=-1):
-    if model_str == 'NA':
-        return None
     try:
         loop = asyncio.new_event_loop()
         result = loop.run_until_complete(infer(model_str, prompt, nprompt,
@@ -92,6 +96,7 @@ def gen_fn(model_str, prompt, nprompt="", height=None, width=None, steps=None, c
         print(e)
         print(f"Task aborted: {model_str}")
         result = None
+        raise gr.Error(f"Task aborted: {model_str}, Error: {e}")
     finally:
         loop.close()
     return result
@@ -106,11 +111,12 @@ def add_gallery(image, model_str, gallery):

 CSS="""
 .gradio-container { max-width: 1200px; margin: 0 auto; !important; }
-.output { width=112px; height=112px; !important; }
-.gallery { width=100%; min_height=768px; !important; }
+.output { width=112px; height=112px; max_width=112px; max_height=112px; !important; }
+.gallery { min_width=512px; min_height=512px; max_height=1024px; !important; }
 .guide { text-align: center; !important; }
 """

+
 with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=CSS) as demo:
     with gr.Tab('The Dream'):
         with gr.Column(scale=2):
@@ -126,9 +132,9 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=CSS) as demo:
             cfg = gr.Slider(label="Guidance scale", info="If 0, the default value is used.", maximum=30.0, step=0.1, value=0)
             seed = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
             with gr.Row():
-                gen_button = gr.Button(f'Generate up to {int(num_models)} images in up to 3 minutes total', scale=3)
+                gen_button = gr.Button(f'Generate up to {int(num_models)} images in up to 3 minutes total', variant='primary', scale=3)
                 random_button = gr.Button(f'Random {int(num_models)} 🎲', variant='secondary', scale=1)
-                stop_button = gr.Button('Stop', variant='secondary', interactive=False, scale=1)
+                stop_button = gr.Button('Stop', variant='stop', interactive=False, scale=1)
             gen_button.click(lambda: gr.update(interactive=True), None, stop_button)
             gr.Markdown("Scroll down to see more images and select models.", elem_classes="guide")

@@ -174,8 +180,8 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=CSS) as demo:
             seed2 = gr.Slider(label="Seed", info="Randomize Seed if -1.", minimum=-1, maximum=MAX_SEED, step=1, value=-1)
             num_images = gr.Slider(1, max_images, value=max_images, step=1, label='Number of images')
             with gr.Row():
-                gen_button2 = gr.Button('Generate', scale=2)
-                stop_button2 = gr.Button('Stop', variant='secondary', interactive=False, scale=1)
+                gen_button2 = gr.Button('Generate', variant='primary', scale=2)
+                stop_button2 = gr.Button('Stop', variant='stop', interactive=False, scale=1)
             gen_button2.click(lambda: gr.update(interactive=True), None, stop_button2)

         with gr.Column(scale=1):
 
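Note on the `infer` hunks above: the old catch-all `except (Exception, asyncio.TimeoutError)` swallowed failures and quietly returned `None`; the new version keeps the logging but re-raises, so the caller can surface the error. The cancel-then-raise shape looks like this in isolation (a minimal runnable sketch; `run_with_timeout` and `slow_job` are illustrative names, not functions from the app):

```python
import asyncio

async def run_with_timeout(coro, label: str, timeout: float):
    task = asyncio.create_task(coro)
    try:
        return await asyncio.wait_for(task, timeout=timeout)
    except asyncio.TimeoutError:
        # wait_for already cancels the task on timeout; re-checking is harmless
        if not task.done(): task.cancel()
        raise Exception(f"Task timed out: {label}")
    except Exception as e:
        if not task.done(): task.cancel()
        raise Exception(e)

async def slow_job():
    await asyncio.sleep(5)
    return "image"

# Raises "Task timed out: demo" after about one second:
# asyncio.run(run_with_timeout(slow_job(), "demo", timeout=1))
```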
 
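The `gen_fn` hunks pair with that change: the `'NA'` early-return goes away (empty slots are hidden via `update_imgbox` instead), and any exception raised by `infer` is converted into `gr.Error`, which Gradio displays to the user in the browser. A sketch of the sync-to-async bridge, with a stub standing in for the real `infer`:

```python
import asyncio
import gradio as gr

async def infer_stub(model_str: str):
    # Stand-in for the app's async infer(); after the change above,
    # the real one raises on timeout or inference failure.
    await asyncio.sleep(0)
    return f"image for {model_str}"

def gen_fn(model_str: str):
    # Gradio invokes event handlers from worker threads that have no
    # running event loop, so each call builds and tears down its own.
    loop = asyncio.new_event_loop()
    try:
        return loop.run_until_complete(infer_stub(model_str))
    except Exception as e:
        # gr.Error propagates to the frontend as a visible error message.
        raise gr.Error(f"Task aborted: {model_str}, Error: {e}")
    finally:
        loop.close()
```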
 
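One caveat on the CSS hunk: declarations like `width=112px` and underscored names like `max_width` are not valid CSS (browsers expect hyphenated `property: value` pairs, and a bare `!important;` outside a declaration is dropped), so those rules are likely ignored. If the size constraints are meant to take effect, a standards-conform version of the same string would look like this (a hypothetical correction, not what the commit contains):

```python
# Corrected form of the CSS constant; intent unchanged.
CSS = """
.gradio-container { max-width: 1200px !important; margin: 0 auto !important; }
.output { width: 112px !important; height: 112px !important; max-width: 112px !important; max-height: 112px !important; }
.gallery { min-width: 512px !important; min-height: 512px !important; max-height: 1024px !important; }
.guide { text-align: center !important; }
"""
```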
externalmod.py CHANGED
@@ -9,7 +9,7 @@ import re
 import tempfile
 import warnings
 from pathlib import Path
-from typing import TYPE_CHECKING, Callable
+from typing import TYPE_CHECKING, Callable, Literal

 import httpx
 import huggingface_hub
@@ -33,6 +33,7 @@ if TYPE_CHECKING:
     from gradio.interface import Interface


+HF_TOKEN = os.environ.get("HF_TOKEN") if os.environ.get("HF_TOKEN") else None # If private or gated models aren't used, ENV setting is unnecessary.
 server_timeout = 600


@@ -40,7 +41,7 @@ server_timeout = 600
 def load(
     name: str,
     src: str | None = None,
-    hf_token: str | None = None,
+    hf_token: str | Literal[False] | None = None,
     alias: str | None = None,
     **kwargs,
 ) -> Blocks:
@@ -51,7 +52,7 @@ def load(
     Parameters:
         name: the name of the model (e.g. "gpt2" or "facebook/bart-base") or space (e.g. "flax-community/spanish-gpt2"), can include the `src` as prefix (e.g. "models/facebook/bart-base")
         src: the source of the model: `models` or `spaces` (or leave empty if source is provided as a prefix in `name`)
-        hf_token: optional access token for loading private Hugging Face Hub models or spaces. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide this if you are loading a trusted private Space as it can be read by the Space you are loading.
+        hf_token: optional access token for loading private Hugging Face Hub models or spaces. Will default to the locally saved token if not provided. Pass `token=False` if you don't want to send your token to the server. Find your token here: https://huggingface.co/settings/tokens. Warning: only provide a token if you are loading a trusted private Space as it can be read by the Space you are loading.
         alias: optional string used as the name of the loaded model instead of the default name (only applies if loading a Space running Gradio 2.x)
     Returns:
         a Gradio Blocks object for the given model
@@ -68,7 +69,7 @@ def load(
 def load_blocks_from_repo(
     name: str,
     src: str | None = None,
-    hf_token: str | None = None,
+    hf_token: str | Literal[False] | None = None,
     alias: str | None = None,
     **kwargs,
 ) -> Blocks:
@@ -92,7 +93,7 @@ def load_blocks_from_repo(
     if src.lower() not in factory_methods:
         raise ValueError(f"parameter: src must be one of {factory_methods.keys()}")

-    if hf_token is not None:
+    if hf_token is not None and hf_token is not False:
         if Context.hf_token is not None and Context.hf_token != hf_token:
             warnings.warn(
                 """You are loading a model/Space with a different access token than the one you used to load a previous model/Space. This is not recommended, as it may cause unexpected behavior."""
@@ -103,12 +104,16 @@ def load_blocks_from_repo(
     return blocks


-def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwargs):
+def from_model(
+    model_name: str, hf_token: str | Literal[False] | None, alias: str | None, **kwargs
+):
     model_url = f"https://huggingface.co/{model_name}"
     api_url = f"https://api-inference.huggingface.co/models/{model_name}"
     print(f"Fetching model from: {model_url}")

-    headers = {"Authorization": f"Bearer {hf_token}"} if hf_token is not None else {}
+    headers = (
+        {} if hf_token in [False, None] else {"Authorization": f"Bearer {hf_token}"}
+    )
     response = httpx.request("GET", api_url, headers=headers)
     if response.status_code != 200:
         raise ModelNotFoundError(
@@ -371,7 +376,11 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwarg
     def query_huggingface_inference_endpoints(*data, **kwargs):
         if preprocess is not None:
             data = preprocess(*data)
-        data = fn(*data, **kwargs) # type: ignore
+        try:
+            data = fn(*data, **kwargs) # type: ignore
+        except huggingface_hub.utils.HfHubHTTPError as e:
+            if "429" in str(e):
+                raise TooManyRequestsError() from e
         if postprocess is not None:
             data = postprocess(data) # type: ignore
         return data
@@ -383,7 +392,7 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwarg
         "inputs": inputs,
         "outputs": outputs,
         "title": model_name,
-        # "examples": examples,
+        #"examples": examples,
     }

     kwargs = dict(interface_info, **kwargs)
@@ -394,19 +403,12 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwarg
 def from_spaces(
     space_name: str, hf_token: str | None, alias: str | None, **kwargs
 ) -> Blocks:
-    client = Client(
-        space_name,
-        hf_token=hf_token,
-        download_files=False,
-        _skip_components=False,
-    )
-
     space_url = f"https://huggingface.co/spaces/{space_name}"

     print(f"Fetching Space from: {space_url}")

     headers = {}
-    if hf_token is not None:
+    if hf_token not in [False, None]:
         headers["Authorization"] = f"Bearer {hf_token}"

     iframe_url = (
@@ -443,8 +445,7 @@ def from_spaces(
         "Blocks or Interface locally. You may find this Guide helpful: "
         "https://gradio.app/using_blocks_like_functions/"
     )
-    if client.app_version < version.Version("4.0.0b14"):
-        return from_spaces_blocks(space=space_name, hf_token=hf_token)
+    return from_spaces_blocks(space=space_name, hf_token=hf_token)


 def from_spaces_blocks(space: str, hf_token: str | None) -> Blocks:
@@ -489,7 +490,7 @@ def from_spaces_interface(
     config = external_utils.streamline_spaces_interface(config)
     api_url = f"{iframe_url}/api/predict/"
     headers = {"Content-Type": "application/json"}
-    if hf_token is not None:
+    if hf_token not in [False, None]:
         headers["Authorization"] = f"Bearer {hf_token}"

     # The function should call the API with preprocessed data
@@ -529,7 +530,7 @@ def gr_Interface_load(
     src: str | None = None,
     hf_token: str | None = None,
     alias: str | None = None,
-    **kwargs,
+    **kwargs, # ignore
 ) -> Blocks:
     try:
         return load_blocks_from_repo(name, src, hf_token, alias)
@@ -543,8 +544,8 @@ def list_uniq(l):


 def get_status(model_name: str):
-    from huggingface_hub import InferenceClient
-    client = InferenceClient(timeout=10)
+    from huggingface_hub import AsyncInferenceClient
+    client = AsyncInferenceClient(token=HF_TOKEN, timeout=10)
     return client.get_model_status(model_name)


@@ -563,7 +564,7 @@ def is_loadable(model_name: str, force_gpu: bool = False):

 def find_model_list(author: str="", tags: list[str]=[], not_tag="", sort: str="last_modified", limit: int=30, force_gpu=False, check_status=False):
     from huggingface_hub import HfApi
-    api = HfApi()
+    api = HfApi(token=HF_TOKEN)
     default_tags = ["diffusers"]
     if not sort: sort = "last_modified"
     limit = limit * 20 if check_status and force_gpu else limit * 5
@@ -576,7 +577,7 @@ def find_model_list(author: str="", tags: list[str]=[], not_tag="", sort: str="l
         print(e)
         return models
     for model in model_infos:
-        if not model.private and not model.gated:
+        if not model.private and not model.gated or HF_TOKEN is not None:
             loadable = is_loadable(model.id, force_gpu) if check_status else True
             if not_tag and not_tag in model.tags or not loadable: continue
             models.append(model.id)
 
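The `Literal[False]` additions give `hf_token` three states: a token string (send it), `None` (default behavior), and `False` (explicitly send no token), matching the `token=False` convention described in the updated docstring. The header-building idiom reduces to the following self-contained sketch (`build_headers` is an illustrative name, not a function in this module):

```python
from typing import Literal

def build_headers(hf_token: str | Literal[False] | None) -> dict[str, str]:
    # False and None both mean "no Authorization header"; only a real
    # token string is forwarded to the Hub.
    if hf_token in (False, None):
        return {}
    return {"Authorization": f"Bearer {hf_token}"}

assert build_headers(None) == {}
assert build_headers(False) == {}
assert build_headers("hf_xxx") == {"Authorization": "Bearer hf_xxx"}
```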
 
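The `query_huggingface_inference_endpoints` hunk converts HTTP 429 (rate limit) responses from `huggingface_hub` into a dedicated `TooManyRequestsError`. The pattern in isolation (a sketch: the error class stands in for the one defined elsewhere in this module, and unlike the hunk above it re-raises non-429 errors rather than falling through):

```python
from huggingface_hub.utils import HfHubHTTPError

class TooManyRequestsError(Exception):
    """Stand-in for the module's rate-limit error type."""

def call_with_rate_limit_check(fn, *args, **kwargs):
    try:
        return fn(*args, **kwargs)
    except HfHubHTTPError as e:
        # HfHubHTTPError messages include the HTTP status code.
        if "429" in str(e):
            raise TooManyRequestsError() from e
        raise
```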
 
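Two details in the final hunks are easy to misread. First, `AsyncInferenceClient.get_model_status` returns a coroutine, so `get_status` as committed hands its caller an un-awaited coroutine; an async caller would need to await it, as in the sketch below (assuming a `huggingface_hub` version that provides `get_model_status`). Second, in `find_model_list`, operator precedence parses the new condition as `(not model.private and not model.gated) or HF_TOKEN is not None`, meaning a configured `HF_TOKEN` admits private and gated models into the list.

```python
import asyncio
from huggingface_hub import AsyncInferenceClient

HF_TOKEN = None  # populated from the environment in the real module

async def get_status_async(model_name: str):
    # Awaiting is required with the async client; the synchronous
    # InferenceClient.get_model_status call needs no await.
    client = AsyncInferenceClient(token=HF_TOKEN, timeout=10)
    return await client.get_model_status(model_name)

# Example:
# status = asyncio.run(get_status_async("stabilityai/stable-diffusion-xl-base-1.0"))
```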