import random

import gradio as gr
import numpy as np
import torch
from diffusers import DiffusionPipeline

# Use half precision on GPU, full precision on CPU
torch_dtype, device = (
    (torch.float16, torch.device("cuda"))
    if torch.cuda.is_available()
    else (torch.float32, torch.device("cpu"))
)

model_repo_id = "black-forest-labs/FLUX.1-dev"

pipe = DiffusionPipeline.from_pretrained(model_repo_id, torch_dtype=torch_dtype).to(device)
pipe.load_lora_weights("pepper13/flux-anime")

MAX_SEED = np.iinfo(np.int32).max


def infer(prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps):
    # Pick a fresh seed when requested so repeated runs give different images
    if randomize_seed:
        seed = random.randint(0, MAX_SEED)
    generator = torch.Generator(device=device).manual_seed(seed)

    image = pipe(
        prompt=prompt,
        guidance_scale=guidance_scale,
        num_inference_steps=num_inference_steps,
        width=width,
        height=height,
        generator=generator,
    ).images[0]
    return image


with gr.Blocks() as demo:
    with gr.Column(elem_id="col-container"):
        with gr.Row():
            prompt = gr.Text(label="Prompt", show_label=False, placeholder="Enter your prompt")
            run_button = gr.Button("Generate", scale=0)

        result = gr.Image(label="Result", show_label=False)

        with gr.Accordion("Advanced Settings", open=False):
            seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=0)
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)

            with gr.Row():
                width = gr.Slider(label="Width", minimum=256, maximum=1024, step=32, value=512)
                height = gr.Slider(label="Height", minimum=256, maximum=1024, step=32, value=512)

            with gr.Row():
                guidance_scale = gr.Slider(label="Guidance scale", minimum=0.1, maximum=10.0, step=0.1, value=7.0)
                num_inference_steps = gr.Slider(label="Number of inference steps", minimum=1, maximum=50, step=1, value=20)

    # Run inference on button click or when the prompt is submitted
    gr.on(
        triggers=[run_button.click, prompt.submit],
        fn=infer,
        inputs=[prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps],
        outputs=[result],
    )

demo.launch()