harpomaxx committed
Commit caf0600
Parent: 48a9003

fix unknown parameters

Files changed (1)
  1. app.py +7 -5
app.py CHANGED
@@ -1,8 +1,9 @@
 PATH = 'harpomaxx/deeplili' #stable diffusion 1.5
+from PIL import Image
 import torch
-#from transformers import CLIPTextModel, CLIPTokenizer
-#from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler
-#from diffusers import UniPCMultistepScheduler
+from transformers import CLIPTextModel, CLIPTokenizer
+from diffusers import AutoencoderKL, UNet2DConditionModel, PNDMScheduler
+from diffusers import UniPCMultistepScheduler
 from diffusers import StableDiffusionPipeline
 from PIL import Image
 from tqdm.auto import tqdm
@@ -12,7 +13,7 @@ import gradio as gr
 guidance_scale = 8.5 # Scale for classifier-free guidance
 
 
-pipe = StableDiffusionPipeline.from_pretrained(PATH,local_files_only=False ).to("cpu")
+pipe = StableDiffusionPipeline.from_pretrained(PATH,local_files_only=False ).to("cuda")
 guidance_scale = 8.5
 
 def generate_images(prompt, guidance_scale, n_samples, num_inference_steps):
@@ -26,7 +27,7 @@ def generate_images(prompt, guidance_scale, n_samples, num_inference_steps):
 
 def gr_generate_images(prompt: str, num_images: int, num_inference: int):
     prompt = prompt + "sks style"
-    images = generate_images(prompt, tokenizer, text_encoder, unet, vae, scheduler, guidance_scale, num_images, num_inference)
+    images = generate_images(prompt, guidance_scale, num_images, num_inference)
     return images
 
 with gr.Blocks() as demo:
@@ -110,4 +111,5 @@ with gr.Blocks() as demo:
     )
 
 if __name__ == "__main__":
+    #demo.launch(share=True)
     demo.queue().launch(share=True)
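
The diff does not show the body of generate_images, but since this commit drops the tokenizer, text_encoder, unet, vae and scheduler arguments in favour of the preassembled StableDiffusionPipeline, the helper presumably reduces to a single call on pipe. A minimal sketch, assuming the function simply forwards its arguments to the pipeline (the body below is an illustration, not part of the commit):

# Hypothetical body for generate_images; the real implementation is not part of this diff.
def generate_images(prompt, guidance_scale, n_samples, num_inference_steps):
    # StableDiffusionPipeline bundles the tokenizer, text encoder, UNet, VAE and
    # scheduler internally, so none of them need to be passed in explicitly.
    output = pipe(
        prompt,
        guidance_scale=guidance_scale,
        num_images_per_prompt=n_samples,
        num_inference_steps=num_inference_steps,
    )
    return output.images

Under that assumption, the new four-argument call generate_images(prompt, guidance_scale, num_images, num_inference) in gr_generate_images matches the signature already present in the file.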