aiqtech committed
Commit a320607 • 1 Parent(s): 39fc2d4

Update app.py

Files changed (1):
  1. app.py +33 -30
app.py CHANGED
@@ -7,7 +7,7 @@ import gradio as gr
 import numpy as np
 import os
 from huggingface_hub import snapshot_download
-from transformers import CLIPVisionModelWithProjection,CLIPImageProcessor
+from transformers import CLIPVisionModelWithProjection, CLIPImageProcessor, pipeline
 from SAK.pipelines.pipeline_stable_diffusion_xl_chatglm_256_ipadapter_FaceID import StableDiffusionXLPipeline
 from SAK.models.modeling_chatglm import ChatGLMModel
 from SAK.models.tokenization_chatglm import ChatGLMTokenizer
@@ -18,8 +18,6 @@ from PIL import Image
 from insightface.app import FaceAnalysis
 from insightface.data import get_image as ins_get_image
 
-
-
 device = "cuda"
 ckpt_dir = snapshot_download(repo_id="SunderAli17/SAK")
 ckpt_dir_faceid = snapshot_download(repo_id="SunderAli17/SAK-IP-Adapter-FaceTransform-Plus")
@@ -44,6 +42,16 @@ pipe = StableDiffusionXLPipeline(
     force_zeros_for_empty_prompt = False,
 )
 
+# 번역 모델 초기화 (initialize the Korean-to-English translation model)
+translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")
+
+# 프롬프트 처리 함수 추가 (prompt pre-processing helper)
+def process_prompt(prompt):
+    if any('\u3131' <= char <= '\u3163' or '\uac00' <= char <= '\ud7a3' for char in prompt):
+        translated = translator(prompt)[0]['translation_text']
+        return prompt, translated
+    return prompt, prompt
+
 class FaceInfoGenerator():
     def __init__(self, root_dir = "./.insightface/"):
         self.app = FaceAnalysis(name = 'antelopev2', root = root_dir, providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
@@ -86,6 +94,7 @@ def infer(prompt,
           guidance_scale = 5.0,
           num_inference_steps = 50
           ):
+    original_prompt, english_prompt = process_prompt(prompt)
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
     generator = torch.Generator().manual_seed(seed)
@@ -104,11 +113,11 @@ def infer(prompt,
     face_embeds = face_embeds.to(device, dtype = torch.float16)
 
     image = pipe(
-        prompt = prompt,
+        prompt = english_prompt,
         negative_prompt = negative_prompt,
         height = 1024,
         width = 1024,
-        num_inference_steps= num_inference_steps,
+        num_inference_steps = num_inference_steps,
         guidance_scale = guidance_scale,
         num_images_per_prompt = 1,
         generator = generator,
@@ -116,25 +125,18 @@
         face_insightface_embeds = face_embeds
     ).images[0]
 
-    return image, seed
-
+    return image, seed, original_prompt, english_prompt
 
 examples = [
     ["wearing a full suit sitting in a restaurant with candle lights", "image/test0.png"],
     ["Wild cowboy hat with western town and horses in the background", "image/test1.png"]
 ]
 
-
-
-
 def load_description(fp):
     with open(fp, 'r', encoding='utf-8') as f:
         content = f.read()
     return content
 
-
-
-
 css = """
 footer {
     visibility: hidden;
@@ -142,66 +144,67 @@ footer {
 """
 
 with gr.Blocks(theme="Nymbo/Nymbo_Theme", css=css) as SAK:
-
     with gr.Row():
         with gr.Column(elem_id="col-left"):
             with gr.Row():
                 prompt = gr.Textbox(
-                    label="Prompt",
-                    placeholder="Enter your prompt",
+                    label="프롬프트",  # "Prompt"
+                    placeholder="프롬프트를 입력하세요 (한글 또는 영어)",  # "Enter your prompt (Korean or English)"
                     lines=2
                 )
             with gr.Row():
-                image = gr.Image(label="Image", type="pil")
-            with gr.Accordion("Advanced Settings", open=False):
+                image = gr.Image(label="이미지", type="pil")  # "Image"
+            with gr.Accordion("고급 설정", open=False):  # "Advanced Settings"
                 negative_prompt = gr.Textbox(
-                    label="Negative prompt",
-                    placeholder="Enter a negative prompt",
+                    label="네거티브 프롬프트",  # "Negative prompt"
+                    placeholder="네거티브 프롬프트를 입력하세요",  # "Enter a negative prompt"
                     visible=True,
+                    value="nsfw, 얼굴 그림자, 저해상도, jpeg 아티팩트, 흐릿함, 열악함, 네온 조명"  # "nsfw, facial shadows, low resolution, jpeg artifacts, blurriness, poor quality, neon lighting"
                 )
                 seed = gr.Slider(
-                    label="Seed",
+                    label="시드",  # "Seed"
                     minimum=0,
                     maximum=MAX_SEED,
                     step=1,
                     value=0,
                 )
-                randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+                randomize_seed = gr.Checkbox(label="시드 무작위화", value=True)  # "Randomize seed"
                 with gr.Row():
                     guidance_scale = gr.Slider(
-                        label="Guidance scale",
+                        label="가이던스 스케일",  # "Guidance scale"
                         minimum=0.0,
                         maximum=10.0,
                         step=0.1,
                         value=5.0,
                     )
                     num_inference_steps = gr.Slider(
-                        label="Number of inference steps",
+                        label="추론 단계 수",  # "Number of inference steps"
                        minimum=10,
                         maximum=50,
                         step=1,
                         value=25,
                     )
            with gr.Row():
-                button = gr.Button("Run", elem_id="button")
+                button = gr.Button("실행", elem_id="button")  # "Run"
 
        with gr.Column(elem_id="col-right"):
-            result = gr.Image(label="Result", show_label=False)
-            seed_used = gr.Number(label="Seed Used")
+            result = gr.Image(label="결과", show_label=False)  # "Result"
+            seed_used = gr.Number(label="사용된 시드")  # "Seed Used"
+            original_prompt_display = gr.Textbox(label="원본 프롬프트")  # "Original Prompt"
+            english_prompt_display = gr.Textbox(label="영어 프롬프트")  # "English Prompt"
 
    with gr.Row():
        gr.Examples(
            fn = infer,
            examples = examples,
            inputs = [prompt, image],
-            outputs = [result, seed_used],
+            outputs = [result, seed_used, original_prompt_display, english_prompt_display],
        )
 
    button.click(
        fn = infer,
        inputs = [prompt, image, negative_prompt, seed, randomize_seed, guidance_scale, num_inference_steps],
-        outputs = [result, seed_used]
+        outputs = [result, seed_used, original_prompt_display, english_prompt_display]
    )
 
-
 SAK.queue().launch(debug=True, share=True)
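
The core of this change is the Korean-to-English prompt bridge: process_prompt() scans the prompt for Hangul characters (U+3131 to U+3163 covers Hangul Compatibility Jamo, U+AC00 to U+D7A3 the precomposed Hangul Syllables block) and, if any are found, runs it through the Helsinki-NLP/opus-mt-ko-en translation pipeline before it reaches the SDXL pipeline; English prompts pass through unchanged. Below is a minimal, self-contained sketch of that path. It mirrors the committed helper, but the sample prompts are purely illustrative and it assumes the opus-mt-ko-en checkpoint can be fetched from the Hub.

# Standalone sketch of the translation path added above (illustrative, not part of the commit).
from transformers import pipeline

translator = pipeline("translation", model="Helsinki-NLP/opus-mt-ko-en")

def process_prompt(prompt):
    # U+3131-U+3163 covers Hangul compatibility jamo; U+AC00-U+D7A3 covers
    # precomposed Hangul syllables, so any Korean text triggers translation.
    if any('\u3131' <= char <= '\u3163' or '\uac00' <= char <= '\ud7a3' for char in prompt):
        translated = translator(prompt)[0]['translation_text']
        return prompt, translated   # (original, English translation)
    return prompt, prompt           # already English: pass through unchanged

# English input is returned as-is; Korean input comes back with an English version.
print(process_prompt("wearing a full suit sitting in a restaurant"))
print(process_prompt("촛불이 켜진 레스토랑에서 정장을 입고 앉아 있는 모습"))  # illustrative Korean prompt

Note that only the positive prompt takes this path; the Korean default negative prompt added in the UI is passed to the pipeline untranslated.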
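
Because infer() now returns four values (image, seed, original prompt, English prompt), both outputs lists in the UI, the gr.Examples block and button.click, were widened to four components so the return values map one-to-one. The sketch below shows just that wiring with a stub in place of the real SDXL call; infer_stub and the placeholder image are assumptions for illustration, not part of the commit.

import gradio as gr
from PIL import Image

def infer_stub(prompt, image):
    # Stand-in for the real infer(): returns the same four values in the same order.
    original_prompt, english_prompt = prompt, prompt   # translation step omitted here
    result = Image.new("RGB", (64, 64))                # placeholder for the generated image
    return result, 0, original_prompt, english_prompt

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    image = gr.Image(label="Image", type="pil")
    result = gr.Image(label="Result")
    seed_used = gr.Number(label="Seed Used")
    original_prompt_display = gr.Textbox(label="Original Prompt")
    english_prompt_display = gr.Textbox(label="English Prompt")
    gr.Button("Run").click(
        fn=infer_stub,
        inputs=[prompt, image],
        # Four outputs, matching infer_stub's four return values in order.
        outputs=[result, seed_used, original_prompt_display, english_prompt_display],
    )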