mochifz's picture
Update app.py
82166ab verified
raw
history blame contribute delete
No virus
5.89 kB
import gradio as gr
import torch, random, time
from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline
from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
# UI label strings keyed by language code. Keep the key sets of every
# language identical — update_language() indexes each one with the same keys.
translations = {
    'en': {
        'model_name': 'Model Path',
        'loading': 'Loading',
        'input': 'Input Image',
        'prompt': 'Prompt',
        'negative_prompt': 'Negative Prompt',
        'generate': 'Generate',
        'strength': 'Strength',
        'scale': 'Guidance Scale',
        'steps': 'Number of Inference Steps',
        'width': 'Width',
        'height': 'Height',
        'seed': 'Seed',
    },
    'zh': {
        'model_name': '模型路径',
        'loading': '载入',
        'input': '输入图像',
        'prompt': '提示',
        'negative_prompt': '负面提示',
        'generate': '生成',
        'strength': '强度',
        'scale': '引导系数',
        'steps': '迭代步数',
        'width': '宽度',
        'height': '高度',
        'seed': '随机数种子',  # bug fix: was '机数种子' (missing 随)
    }
}
# Language the UI starts in; switchable at runtime via the Language dropdown.
language = 'zh'
def generate_new_seed():
    """Return a fresh random seed in the positive signed 32-bit range."""
    max_seed = 2147483647  # 2**31 - 1, the largest value torch accepts comfortably
    return random.randint(1, max_seed)
def update_language(new_language):
    """Relabel every widget for *new_language*.

    The returned list order must match the outputs wired to
    ``set_language.change`` in the Blocks layout below.
    """
    labels = translations[new_language]
    relabel_slider = lambda key: gr.Slider.update(label=labels[key])
    return [
        gr.Textbox.update(label=labels['model_name']),
        gr.Button.update(value=labels['loading']),
        gr.Image.update(label=labels['input']),
        gr.Textbox.update(label=labels['prompt']),
        gr.Textbox.update(label=labels['negative_prompt']),
        gr.Button.update(value=labels['generate']),
        relabel_slider('strength'),
        relabel_slider('scale'),
        relabel_slider('steps'),
        relabel_slider('width'),
        relabel_slider('height'),
        gr.Number.update(label=labels['seed']),
    ]
# Lazily-initialised diffusion pipelines; populated by Loading() and read by Generate().
text2img = None  # text-to-image pipeline (SD or SDXL, chosen in Loading)
img2img = None  # image-to-image pipeline built from the same model components
def Generate(image_input, prompt, negative_prompt, strength, scale, steps, width, height, seed):
    """Run one generation and return ``(image, info_string)``.

    Uses the text2img pipeline when *image_input* is None, otherwise img2img.
    A *seed* of -1 requests a fresh random seed. The info string reports the
    seed actually used and the wall-clock time as MM:SS.
    """
    global text2img, img2img
    if seed == -1:
        seed = generate_new_seed()
    generator = torch.Generator().manual_seed(int(seed))
    start_time = time.time()
    # Arguments shared by both pipelines.
    common = dict(prompt=prompt, negative_prompt=negative_prompt,
                  guidance_scale=scale, num_inference_steps=steps,
                  width=width, height=height, num_images_per_prompt=1,
                  generator=generator)
    if image_input is None:
        image = text2img(**common).images[0]
    else:
        # Bug fix: forward the UI strength slider; the original hard-coded 0.75,
        # silently ignoring the user's setting.
        image = img2img(image=image_input, strength=strength, **common).images[0]
    minutes, seconds = divmod(round(time.time() - start_time), 60)
    return image, f"seed:{seed}\ntime:{minutes:02d}:{seconds:02d}"
def Loading(model_name, is_xl, is_cuda):
    """Load the text2img pipeline for *model_name* and derive img2img from it.

    Args:
        model_name: Hugging Face repo id or local path of the checkpoint.
        is_xl: force SDXL pipeline classes; when False, auto-detected from
            the substring 'xl' in the model name.
        is_cuda: load fp16 weights, move to GPU and enable xformers attention.

    Returns the model name unchanged so the UI textbox can echo it.
    """
    global text2img, img2img
    # Auto-detect SDXL checkpoints unless the caller forced it on.
    if not is_xl:
        is_xl = 'xl' in model_name.lower()
    device = "cuda" if is_cuda else "cpu"
    pipeline_class = StableDiffusionXLPipeline if is_xl else StableDiffusionPipeline
    if is_cuda:
        # fp16 variant halves memory use and speeds up GPU inference.
        text2img = pipeline_class.from_pretrained(model_name, torch_dtype=torch.float16, variant="fp16", use_safetensors=True).to(device)
        text2img.enable_xformers_memory_efficient_attention()
        text2img.vae.enable_xformers_memory_efficient_attention()
    else:
        text2img = pipeline_class.from_pretrained(model_name, use_safetensors=True).to(device)
    # Disable the NSFW checker — output is intentionally unfiltered.
    text2img.safety_checker = None
    # Reuse the loaded weights for img2img instead of loading the model twice.
    img2img = (StableDiffusionXLImg2ImgPipeline if is_xl else StableDiffusionImg2ImgPipeline)(**text2img.components)
    return model_name
# --- UI layout and event wiring ---
# NOTE(review): leading indentation was lost in this paste; nesting below is
# reconstructed from the component grouping — confirm against the original file.
with gr.Blocks() as demo:
    # Top row: model selection, pipeline options, load button, language switch.
    with gr.Row():
        model_name = gr.Textbox(value="nota-ai/bk-sdm-tiny-2m", label=translations[language]['model_name'])
        with gr.Column():
            is_xl = gr.Checkbox(label="SDXL")
            is_cuda = gr.Checkbox(label="cuda", value=torch.cuda.is_available())
        loading = gr.Button(translations[language]['loading'], variant='primary')
        set_language = gr.Dropdown(list(translations.keys()), label="Language", value=language)
    # Main row: generation controls (left) and output (right).
    with gr.Row():
        with gr.Column(scale=2):
            with gr.Column():
                # Optional img2img input; strength only applies when an image is set.
                image_input = gr.Image(label=translations[language]['input'])
                strength = gr.Slider(minimum=0, maximum=1, value=0.8, step=0.01, label=translations[language]['strength'])
            prompt = gr.Textbox("space warrior, beautiful, female, ultrarealistic, soft lighting, 8k", label=translations[language]['prompt'], lines=3)
            negative_prompt = gr.Textbox("deformed, distorted, disfigured, poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, mutated hands and fingers, disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation,lowres,jpeg artifacts,username,logo,signature,watermark,monochrome,greyscale", label=translations[language]['negative_prompt'], lines=3)
            with gr.Row():
                width = gr.Slider(minimum=512, maximum=2048, value=512, step=8, label=translations[language]['width'])
                height = gr.Slider(minimum=512, maximum=2048, value=512, step=8, label=translations[language]['height'])
            with gr.Row():
                scale = gr.Slider(minimum=1, maximum=15, value=7.5, step=0.5, label=translations[language]['scale'])
                steps = gr.Slider(minimum=1, maximum=100, value=50, step=1, label=translations[language]['steps'])
            with gr.Row():
                # -1 means "pick a random seed"; the dice button fills in a concrete one.
                seed = gr.Slider(minimum=-1, step=1, maximum=2147483647, value=-1, label=translations[language]['seed'])
                set_seed = gr.Button("🎲")
        with gr.Column(scale=3):
            image_output = gr.Image()
            text_output = gr.Textbox()
            generate = gr.Button(translations[language]['generate'], variant='primary')
    # Event wiring: dice -> new seed, generate -> run pipeline, load -> swap model,
    # language change -> relabel every widget (order must match update_language()).
    set_seed.click(generate_new_seed, None, seed)
    generate.click(Generate, [image_input, prompt, negative_prompt, strength, scale, steps, width, height, seed], [image_output, text_output])
    loading.click(Loading, [model_name, is_xl, is_cuda], model_name)
    set_language.change(update_language, set_language, [model_name, loading, image_input, prompt, negative_prompt, generate, strength, scale, steps, width, height, seed])
# queue() serialises generation requests so concurrent users don't collide.
demo.queue().launch()