# NOTE(review): removed non-Python residue here (a "File size" line, a commit-hash
# gutter, and a line-number gutter) left over from scraping a Git blame view; it
# was never part of the script and made the file syntactically invalid.
import gradio as gr
import torch, random, time
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
# UI label strings per language code. Keys must match between languages:
# update_language() relabels components positionally from these entries.
translations = {
    'en': {
        'model_name': 'Model Path',
        'loading': 'Loading',
        'input': 'Input Image',
        'prompt': 'Prompt',
        'negative_prompt': 'Negative Prompt',
        'generate': 'Generate',
        'strength': 'Strength',
        'scale': 'Guidance Scale',
        'steps': 'Number of Inference Steps',
        'width': 'Width',
        'height': 'Height',
        'seed': 'Seed',
    },
    'zh': {
        'model_name': '模型路径',
        'loading': '载入',
        'input': '输入图像',
        'prompt': '提示',
        'negative_prompt': '负面提示',
        'generate': '生成',
        'strength': '强度',
        'scale': '引导系数',
        'steps': '迭代步数',
        'width': '宽度',
        'height': '高度',
        # BUG FIX: was '机数种子' (missing leading 随) — 'random seed' is 随机数种子.
        'seed': '随机数种子',
    },
}
# Default UI language on startup.
language = 'zh'
def generate_new_seed():
    """Return a fresh random seed in the positive signed 32-bit range."""
    return random.randint(1, 2**31 - 1)
def update_language(new_language):
    """Build the list of component updates that relabel the UI in *new_language*.

    Returned order must match the outputs list of the set_language.change()
    binding: model_name, loading, image_input, prompt, negative_prompt,
    generate, strength, scale, steps, width, height, seed.
    """
    labels = translations[new_language]
    # BUG FIX: the per-component classmethods (gr.Textbox.update, gr.Number.update,
    # ...) were removed in Gradio 4; the component-agnostic gr.update() works on
    # both Gradio 3 and 4. This also fixes the original's gr.Number.update being
    # used for `seed`, which is actually a gr.Slider.
    return [
        gr.update(label=labels['model_name']),
        gr.update(value=labels['loading']),
        gr.update(label=labels['input']),
        gr.update(label=labels['prompt']),
        gr.update(label=labels['negative_prompt']),
        gr.update(value=labels['generate']),
        gr.update(label=labels['strength']),
        gr.update(label=labels['scale']),
        gr.update(label=labels['steps']),
        gr.update(label=labels['width']),
        gr.update(label=labels['height']),
        gr.update(label=labels['seed']),
    ]
# Lazily-populated pipeline singletons: set by Loading(), consumed by Generate().
# Both remain None until the user clicks the "Loading" button.
text2img = None
img2img = None
def Generate(image_input, prompt, negative_prompt, strength, scale, steps, width, height, seed):
    """Run txt2img (when no input image is given) or img2img and return the result.

    Requires Loading() to have populated the text2img/img2img globals first.
    Returns (PIL image, info string with the seed used and elapsed mm:ss).
    A seed of -1 means "pick a random seed".
    """
    global text2img, img2img
    if seed == -1:
        seed = generate_new_seed()
    generator = torch.Generator().manual_seed(int(seed))
    start_time = time.time()
    if image_input is None:
        image = text2img(prompt=prompt, negative_prompt=negative_prompt, guidance_scale=scale, num_inference_steps=steps, width=width, height=height, num_images_per_prompt=1, generator=generator).images[0]
    else:
        # BUG FIX: strength was hard-coded to 0.75 here, silently ignoring the
        # UI slider value passed in as the `strength` parameter.
        image = img2img(image=image_input, strength=strength, prompt=prompt, negative_prompt=negative_prompt, guidance_scale=scale, num_inference_steps=steps, width=width, height=height, num_images_per_prompt=1, generator=generator).images[0]
    minutes, seconds = divmod(round(time.time() - start_time), 60)
    return image, f"seed:{seed}\ntime:{minutes:02d}:{seconds:02d}"
def Loading(model_name, is_xl, is_cuda):
    """Load the text2img pipeline for *model_name* and derive img2img from it.

    Populates the text2img/img2img globals. If the SDXL checkbox is unchecked,
    falls back to auto-detecting "xl" in the model path. Returns model_name
    unchanged so the Gradio binding can echo it back into the textbox.
    """
    global text2img, img2img
    # Auto-detect SDXL models by path when the checkbox was left unchecked.
    if not is_xl:
        is_xl = 'xl' in model_name.lower()
    device = "cuda" if is_cuda else "cpu"
    pipeline_class = StableDiffusionXLPipeline if is_xl else StableDiffusionPipeline
    if is_cuda:
        # fp16 weights + xformers attention only make sense on GPU.
        text2img = pipeline_class.from_pretrained(model_name, torch_dtype=torch.float16, variant="fp16", use_safetensors=True).to(device)
        text2img.enable_xformers_memory_efficient_attention()
        text2img.vae.enable_xformers_memory_efficient_attention()
    else:
        text2img = pipeline_class.from_pretrained(model_name, use_safetensors=True).to(device)
    # Disable the NSFW checker (SDXL pipelines simply ignore this attribute).
    text2img.safety_checker = None
    # Reuse the already-loaded weights instead of loading the model twice.
    img2img = (StableDiffusionXLImg2ImgPipeline if is_xl else StableDiffusionImg2ImgPipeline)(**text2img.components)
    return model_name
# --- UI layout and event wiring -------------------------------------------
with gr.Blocks() as demo:
    # Top row: model selection and loading controls.
    with gr.Row():
        model_name = gr.Textbox(value="nota-ai/bk-sdm-tiny-2m", label=translations[language]['model_name'])
        with gr.Column():
            # Leaving SDXL unchecked lets Loading() auto-detect "xl" in the path.
            is_xl = gr.Checkbox(label="SDXL")
            is_cuda = gr.Checkbox(label="cuda", value=torch.cuda.is_available())
        loading = gr.Button(translations[language]['loading'], variant='primary')
        set_language = gr.Dropdown(list(translations.keys()), label="Language", value=language)
    # Main row: generation parameters (left) and output (right).
    with gr.Row():
        with gr.Column(scale=2):
            with gr.Column():
                # Optional input image; when empty, Generate() runs txt2img.
                image_input = gr.Image(label=translations[language]['input'])
                strength = gr.Slider(minimum=0, maximum=1, value=0.8, step=0.01, label=translations[language]['strength'])
            prompt = gr.Textbox("space warrior, beautiful, female, ultrarealistic, soft lighting, 8k", label=translations[language]['prompt'], lines=3)
            negative_prompt = gr.Textbox("deformed, distorted, disfigured, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation,lowres,jpeg artifacts,username,logo,signature,watermark,monochrome,greyscale", label=translations[language]['negative_prompt'], lines=3)
            with gr.Row():
                width = gr.Slider(minimum=512, maximum=2048, value=512, step=8, label=translations[language]['width'])
                height = gr.Slider(minimum=512, maximum=2048, value=512, step=8, label=translations[language]['height'])
            with gr.Row():
                scale = gr.Slider(minimum=1, maximum=15, value=7.5, step=0.5, label=translations[language]['scale'])
                steps = gr.Slider(minimum=1, maximum=100, value=50, step=1, label=translations[language]['steps'])
            with gr.Row():
                # -1 means "randomize"; Generate() replaces it with a fresh seed.
                seed = gr.Slider(minimum=-1, step=1, maximum=2147483647, value=-1, label=translations[language]['seed'])
                set_seed = gr.Button("🎲")
        with gr.Column(scale=3):
            image_output = gr.Image()
            text_output = gr.Textbox()
            generate = gr.Button(translations[language]['generate'], variant='primary')
    # Event wiring. NOTE(review): the outputs order of set_language.change must
    # stay in sync with the list returned by update_language().
    set_seed.click(generate_new_seed, None, seed)
    generate.click(Generate, [image_input, prompt, negative_prompt, strength, scale, steps, width, height, seed], [image_output, text_output])
    loading.click(Loading, [model_name, is_xl, is_cuda], model_name)
    set_language.change(update_language, set_language, [model_name, loading, image_input, prompt, negative_prompt, generate, strength, scale, steps, width, height, seed])
# queue() serializes generation requests so concurrent users don't collide.
demo.queue().launch()