akameswa committed on
Commit
1f7fddd
1 Parent(s): 900f786

Update src/pipelines/inpainting.py

Browse files
Files changed (1) hide show
  1. src/pipelines/inpainting.py +34 -35
src/pipelines/inpainting.py CHANGED
@@ -1,42 +1,41 @@
1
- # import torch
2
- # import spaces
3
- # import gradio as gr
4
- # from src.util.base import *
5
- # from src.util.params import *
6
- # from diffusers import AutoPipelineForInpainting
7
 
8
- # inpaint_pipe = AutoPipelineForInpainting.from_pretrained(inpaint_model_path).to(torch_device)
9
- # # inpaint_pipe = AutoPipelineForInpainting.from_pipe(pipe).to(torch_device)
10
 
11
- # @spaces.GPU(enable_queue=True)
12
- # def inpaint(dict, num_inference_steps, seed, prompt="", progress=gr.Progress()):
13
- # progress(0)
14
- # mask = dict["mask"].convert("RGB").resize((imageHeight, imageWidth))
15
- # init_image = dict["image"].convert("RGB").resize((imageHeight, imageWidth))
16
- # output = inpaint_pipe(
17
- # prompt=prompt,
18
- # image=init_image,
19
- # mask_image=mask,
20
- # guidance_scale=guidance_scale,
21
- # num_inference_steps=num_inference_steps,
22
- # generator=torch.Generator().manual_seed(seed),
23
- # )
24
- # progress(1)
25
 
26
- # fname = "inpainting"
27
- # tab_config = {
28
- # "Tab": "Inpainting",
29
- # "Prompt": prompt,
30
- # "Number of Inference Steps per Image": num_inference_steps,
31
- # "Seed": seed,
32
- # }
33
 
34
- # imgs_list = []
35
- # imgs_list.append((output.images[0], "Inpainted Image"))
36
- # imgs_list.append((mask, "Mask"))
37
 
38
- # export_as_zip(imgs_list, fname, tab_config)
39
- # return output.images[0], f"outputs/{fname}.zip"
40
 
41
 
42
- # __all__ = ["inpaint"]
 
1
+ import torch
2
+ import spaces
3
+ import gradio as gr
4
+ from src.util.base import *
5
+ from src.util.params import *
6
+ from diffusers import AutoPipelineForInpainting
7
 
8
+ inpaint_pipe = AutoPipelineForInpainting.from_pretrained(inpaint_model_path).to(torch_device)
 
9
 
10
@spaces.GPU(enable_queue=True)
def inpaint(dict, num_inference_steps, seed, prompt="", progress=gr.Progress()):
    """Inpaint the masked region of an edited image and export the results.

    Args:
        dict: payload with "background" (base image) and "layers" (drawn mask
            layers) keys — presumably a gr.ImageEditor value; verify against
            the caller.
        num_inference_steps: diffusion steps for the pipeline run.
        seed: RNG seed for reproducible generation.
        prompt: text prompt guiding the inpainted content (default empty).
        progress: gradio progress tracker, injected by gradio via the default.

    Returns:
        Tuple of (inpainted PIL image, path to the exported zip archive).
    """
    progress(0)

    # Resize both mask and base image to the configured working resolution.
    # NOTE(review): PIL's resize expects (width, height); confirm the
    # (imageHeight, imageWidth) ordering is intentional.
    target_size = (imageHeight, imageWidth)
    mask = dict["layers"][0].convert("RGB").resize(target_size)
    init_image = dict["background"].convert("RGB").resize(target_size)

    # Seed a dedicated generator so identical seeds reproduce identical output.
    generator = torch.Generator().manual_seed(seed)
    output = inpaint_pipe(
        prompt=prompt,
        image=init_image,
        mask_image=mask,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        generator=generator,
    )
    progress(1)

    # Metadata recorded alongside the exported images.
    fname = "inpainting"
    tab_config = {
        "Tab": "Inpainting",
        "Prompt": prompt,
        "Number of Inference Steps per Image": num_inference_steps,
        "Seed": seed,
    }

    result = output.images[0]
    imgs_list = [
        (result, "Inpainted Image"),
        (mask, "Mask"),
    ]

    export_as_zip(imgs_list, fname, tab_config)
    return result, f"outputs/{fname}.zip"
39
 
40
 
41
# Public API of this module: only the gradio inpainting handler is exported.
__all__ = ["inpaint"]