KingNish committed
Commit d2cbb30
1 Parent(s): d29f160

Update app.py

Files changed (1)
  1. app.py +27 -80
app.py CHANGED
@@ -1,19 +1,14 @@
-#!/usr/bin/env python
 from __future__ import annotations
-import argparse
 import os
-import sys
 import random
+import uuid
 import gradio as gr
 import numpy as np
 import uuid
-import spaces
-from diffusers import ConsistencyDecoderVAE, DPMSolverMultistepScheduler, Transformer2DModel, AutoencoderKL
+from diffusers import PixArtAlphaPipeline, LCMScheduler
 import torch
 from typing import Tuple
 from datetime import datetime
-from peft import PeftModel
-from diffusers_patches import pixart_sigma_init_patched_inputs, PixArtSigmaPipeline


 DESCRIPTION = """ # Instant Image
@@ -91,36 +86,13 @@ style_list = [
 styles = {k["name"]: (k["prompt"], k["negative_prompt"]) for k in style_list}
 STYLE_NAMES = list(styles.keys())
 DEFAULT_STYLE_NAME = "(No style)"
-SCHEDULE_NAME = ["DPM-Solver"]
-DEFAULT_SCHEDULE_NAME = "DPM-Solver"
 NUM_IMAGES_PER_PROMPT = 1

-def apply_style(style_name: str, positive: str, negative: str = "") -> Tuple[str, str]:
-    p, n = styles.get(style_name, styles[DEFAULT_STYLE_NAME])
-    if not negative:
-        negative = ""
-    return p.replace("{prompt}", positive), n + negative
-
-
 if torch.cuda.is_available():
-    weight_dtype = torch.float16
-    T5_token_max_length = 300

-    # tmp patches for diffusers PixArtSigmaPipeline Implementation
-    print(
-        "Changing _init_patched_inputs method of diffusers.models.Transformer2DModel "
-        "using scripts.diffusers_patches.pixart_sigma_init_patched_inputs")
-    setattr(Transformer2DModel, '_init_patched_inputs', pixart_sigma_init_patched_inputs)
-
-    transformer = Transformer2DModel.from_pretrained(
-        "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",
-        subfolder='transformer',
-        torch_dtype=weight_dtype,
-    )
-    pipe = PixArtSigmaPipeline.from_pretrained(
-        "PixArt-alpha/pixart_sigma_sdxlvae_T5_diffusers",
-        transformer=transformer,
-        torch_dtype=weight_dtype,
+    pipe = PixArtAlphaPipeline.from_pretrained(
+        "PixArt-alpha/PixArt-LCM-XL-2-1024-MS",
+        torch_dtype=torch.float16,
         use_safetensors=True,
     )

@@ -141,7 +113,6 @@ if torch.cuda.is_available():
     pipe.transformer = torch.compile(pipe.transformer, mode="reduce-overhead", fullgraph=True)
     print("Model Compiled!")

-
 def save_image(img):
     unique_name = str(uuid.uuid4()) + ".png"
     img.save(unique_name)
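Net effect of the two hunks above: the PixArt-Sigma transformer patching, PEFT, and DPM-Solver plumbing are dropped in favour of a single LCM-distilled PixArt-α checkpoint, which the app then compiles with torch.compile. A minimal load-path sketch outside the app (the standalone script form and the `.to("cuda")` call are assumptions, not the app's exact code):

```python
import torch
from diffusers import PixArtAlphaPipeline

# Load the LCM checkpoint the diff switches to, with the same dtype/safetensors flags.
pipe = PixArtAlphaPipeline.from_pretrained(
    "PixArt-alpha/PixArt-LCM-XL-2-1024-MS",
    torch_dtype=torch.float16,
    use_safetensors=True,
)
pipe.to("cuda")  # assumption: a CUDA device, mirroring the torch.cuda.is_available() guard

# Optional, as in the hunk above: compile the transformer for faster repeated calls.
pipe.transformer = torch.compile(pipe.transformer, mode="reduce-overhead", fullgraph=True)
```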
@@ -152,10 +123,6 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
         seed = random.randint(0, MAX_SEED)
     return seed

-
-@torch.no_grad()
-@torch.inference_mode()
-@spaces.GPU(duration=30)
 def generate(
     prompt: str,
     negative_prompt: str = "",
@@ -163,11 +130,9 @@ def generate(
     use_negative_prompt: bool = False,
     num_imgs: int = 1,
     seed: int = 0,
-    width: int = 2560,
-    height: int = 2560,
-    schedule: str = 'DPM-Solver',
-    dpms_guidance_scale: float = 3.5,
-    dpms_inference_steps: int = 9,
+    width: int = 1024,
+    height: int = 1024,
+    num_inference_steps: int = 4,
     randomize_seed: bool = False,
     use_resolution_binning: bool = True,
     progress=gr.Progress(track_tqdm=True),
@@ -175,15 +140,7 @@ def generate(
     seed = int(randomize_seed_fn(seed, randomize_seed))
     generator = torch.Generator().manual_seed(seed)

-    if schedule == 'DPM-Solver':
-        if not isinstance(pipe.scheduler, DPMSolverMultistepScheduler):
-            pipe.scheduler = DPMSolverMultistepScheduler()
-        num_inference_steps = dpms_inference_steps
-        guidance_scale = dpms_guidance_scale
-    else:
-        raise ValueError(f"Unknown schedule: {schedule}")
-
-    if not use_negative_prompt:
+    if not use_negative_prompt:
         negative_prompt = None  # type: ignore
     prompt, negative_prompt = apply_style(style, prompt, negative_prompt)

@@ -195,10 +152,9 @@ def generate(
         guidance_scale=guidance_scale,
         num_inference_steps=num_inference_steps,
         generator=generator,
-        num_images_per_prompt=num_imgs,
+        num_images_per_prompt=NUM_IMAGES_PER_PROMPT,
         use_resolution_binning=use_resolution_binning,
         output_type="pil",
-        max_sequence_length=T5_token_max_length,
     ).images

     image_paths = [save_image(img) for img in images]
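The generation call itself now just forwards the LCM-friendly settings. An equivalent standalone call, assuming the `pipe` loaded in the earlier sketch; the prompt, seed, and `guidance_scale=0.0` (the value typically used with LCM checkpoints) are illustrative, not the app's exact values:

```python
import torch

# `pipe` is the PixArtAlphaPipeline loaded in the earlier sketch.
generator = torch.Generator().manual_seed(0)

images = pipe(
    prompt="an astronaut riding a horse, highly detailed",
    negative_prompt=None,
    width=1024,                # new defaults in generate()
    height=1024,
    guidance_scale=0.0,        # LCM models are usually run without classifier-free guidance
    num_inference_steps=4,     # matches the new num_inference_steps default
    generator=generator,
    num_images_per_prompt=1,   # NUM_IMAGES_PER_PROMPT in the app
    use_resolution_binning=True,
    output_type="pil",
).images
images[0].save("preview.png")
```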
@@ -242,16 +198,13 @@ with gr.Blocks(css="style.css") as demo:
242
  with gr.Group():
243
  with gr.Row():
244
  use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False, visible=True)
245
- with gr.Row(visible=True):
246
- schedule = gr.Radio(
247
- show_label=True,
248
- container=True,
249
- interactive=True,
250
- choices=SCHEDULE_NAME,
251
- value=DEFAULT_SCHEDULE_NAME,
252
- label="Sampler Schedule",
253
- visible=True,
254
- )
255
  num_imgs = gr.Slider(
256
  label="Num Images",
257
  minimum=1,
@@ -287,29 +240,23 @@
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=2560,
+                    value=1024,
                 )
                 height = gr.Slider(
                     label="Height",
                     minimum=256,
                     maximum=MAX_IMAGE_SIZE,
                     step=32,
-                    value=2560,
-                )
-            with gr.Row():
-                dpms_guidance_scale = gr.Slider(
-                    label="Temprature",
-                    minimum=3,
-                    maximum=4,
-                    step=0.1,
-                    value=3.5,
+                    value=1024,
                 )
-                dpms_inference_steps = gr.Slider(
-                    label="Steps",
-                    minimum=5,
-                    maximum=25,
-                    step=1,
-                    value=9,
+            with gr.Row():
+                inference_steps = gr.Slider(
+                    label="Steps",
+                    minimum=1,
+                    maximum=30,
+                    step=1,
+                    value=6,
+                )
                 )

         gr.Examples(
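On the UI side, the removed sampler radio and DPM-Solver sliders give way to a plain negative-prompt textbox and a single Steps slider. A minimal, self-contained wiring sketch with a stub callback (not the app's generate()); the component arguments mirror the diff, the rest is illustrative:

```python
import gradio as gr

def echo_settings(prompt: str, negative_prompt: str, steps: int) -> str:
    # Stub in place of the app's generate(): just report what would reach the pipeline.
    return f"prompt={prompt!r}, negative_prompt={negative_prompt!r}, steps={steps}"

with gr.Blocks() as demo:
    prompt = gr.Text(label="Prompt", max_lines=1, placeholder="Enter your prompt")
    negative_prompt = gr.Text(
        label="Negative prompt",
        max_lines=1,
        placeholder="Enter a negative prompt",
        visible=True,
    )
    inference_steps = gr.Slider(label="Steps", minimum=1, maximum=30, step=1, value=6)
    result = gr.Textbox(label="Result")
    run = gr.Button("Run")
    run.click(echo_settings, inputs=[prompt, negative_prompt, inference_steps], outputs=result)

if __name__ == "__main__":
    demo.launch()
```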
 
 