mochifz committed
Commit ea0bede
Parent: 75aa778

Update app.py

Files changed (1)
  1. app.py +5 -3
app.py CHANGED
@@ -67,12 +67,14 @@ def Generate(image_input, prompt, negative_prompt, strength, guidance_scale, num
     return image, f"{minutes:02d}:{seconds:02d}"
 def Loading(model):
     global text2img, img2img
-    text2img = StableDiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16, use_safetensors=True).to(device)
-    text2img.safety_checker = None
-    text2img.scheduler = EulerDiscreteScheduler.from_config(text2img.scheduler.config)
     if device == "cuda":
+        text2img = StableDiffusionPipeline.from_pretrained(model, torch_dtype=torch.float16, variant="fp16", use_safetensors=True).to(device)
         text2img.enable_xformers_memory_efficient_attention()
         text2img.vae.enable_xformers_memory_efficient_attention()
+    else:
+        text2img = StableDiffusionPipeline.from_pretrained(model, use_safetensors=True).to(device)
+    text2img.safety_checker = None
+    text2img.scheduler = EulerDiscreteScheduler.from_config(text2img.scheduler.config)
     img2img = StableDiffusionImg2ImgPipeline(**text2img.components)
     return model
 with gr.Blocks() as demo:
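
For reference, a sketch of Loading as it reads after this commit, reassembled from the new side of the diff; it assumes the device variable, torch, and the diffusers classes (StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, EulerDiscreteScheduler) imported earlier in app.py.

# Sketch of Loading() after this commit, reassembled from the new side of the diff.
# Assumes `device`, `torch`, and the diffusers imports defined earlier in app.py.
def Loading(model):
    global text2img, img2img
    if device == "cuda":
        # GPU path: fp16 weights plus xformers memory-efficient attention
        text2img = StableDiffusionPipeline.from_pretrained(
            model, torch_dtype=torch.float16, variant="fp16", use_safetensors=True
        ).to(device)
        text2img.enable_xformers_memory_efficient_attention()
        text2img.vae.enable_xformers_memory_efficient_attention()
    else:
        # CPU path: default-precision weights, no xformers
        text2img = StableDiffusionPipeline.from_pretrained(
            model, use_safetensors=True
        ).to(device)
    text2img.safety_checker = None  # disable the safety checker
    text2img.scheduler = EulerDiscreteScheduler.from_config(text2img.scheduler.config)
    # reuse the already-loaded components for the img2img pipeline
    img2img = StableDiffusionImg2ImgPipeline(**text2img.components)
    return model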