Upload app.py
app.py
CHANGED
@@ -74,9 +74,6 @@ args = parser.parse_args()
 
 args.device = "cuda"
 
-base_path = 'feishen29/IMAGDressing-v1'
-
-
 vae = AutoencoderKL.from_pretrained('stabilityai/sd-vae-ft-mse').to(dtype=torch.float16, device=args.device)
 tokenizer = CLIPTokenizer.from_pretrained("SG161222/Realistic_Vision_V4.0_noVAE", subfolder="tokenizer")
 text_encoder = CLIPTextModel.from_pretrained("SG161222/Realistic_Vision_V4.0_noVAE", subfolder="text_encoder").to(dtype=torch.float16, device=args.device)
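The tokenizer / text encoder pair loaded in the context above is the standard CLIP text-conditioning stack of Stable Diffusion 1.5-class checkpoints. A minimal, self-contained sketch of how such a pair turns a prompt into the embeddings the diffusion UNet is conditioned on (the prompt string is purely illustrative, and the models are kept on CPU here so the sketch does not depend on CUDA):

import torch
from transformers import CLIPTextModel, CLIPTokenizer

# Same checkpoints as in the hunk above.
tokenizer = CLIPTokenizer.from_pretrained("SG161222/Realistic_Vision_V4.0_noVAE", subfolder="tokenizer")
text_encoder = CLIPTextModel.from_pretrained("SG161222/Realistic_Vision_V4.0_noVAE", subfolder="text_encoder")

tokens = tokenizer(
    "a model wearing a red dress",          # illustrative prompt only
    padding="max_length",
    max_length=tokenizer.model_max_length,  # 77 for CLIP ViT-L/14
    truncation=True,
    return_tensors="pt",
)
with torch.no_grad():
    text_embeds = text_encoder(tokens.input_ids)[0]  # last hidden state
print(text_embeds.shape)  # torch.Size([1, 77, 768])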
@@ -292,24 +289,19 @@ def dress_process(garm_img, face_img, pose_img, prompt, cloth_guidance_scale, ca
     # return result[OutputKeys.OUTPUT_IMG]
     return output[0]
 
-
-
-garm_list = os.listdir(os.path.join(example_path, "cloth", 'cloth'))
-garm_list_path = [os.path.join(example_path, "cloth", 'cloth', garm) for garm in garm_list]
-
-face_list = os.listdir(os.path.join(example_path, "face", 'face'))
-face_list_path = [os.path.join(example_path, "face", 'face', face) for face in face_list]
-
-pose_list = os.listdir(os.path.join(example_path, "pose", 'pose'))
-pose_list_path = [os.path.join(example_path, "pose", 'pose', pose) for pose in pose_list]
+base_path = 'yisol/IDM-VTON'
+example_path = os.path.join(os.path.dirname(__file__), 'example')
 
+garm_list = os.listdir(os.path.join(example_path,"cloth"))
+garm_list_path = [os.path.join(example_path,"cloth",garm) for garm in garm_list]
 
+face_list = os.listdir(os.path.join(example_path,"face"))
+face_list_path = [os.path.join(example_path,"face",face) for face in face_list]
 
-
+pose_list = os.listdir(os.path.join(example_path,"pose"))
+pose_list_path = [os.path.join(example_path,"pose",pose) for pose in pose_list]
 
-def process_image(image):
 
-    return image
 
 image_blocks = gr.Blocks().queue()
 with image_blocks as demo:
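The example galleries are built by scanning folders on disk: after this hunk the app expects flat example/cloth, example/face and example/pose directories next to app.py, instead of the doubled cloth/cloth style paths on the old side. A small sketch of the same idea, using a hypothetical list_examples helper that is not in the original file:

import os

example_path = os.path.join(os.path.dirname(__file__), 'example')

def list_examples(subdir):
    # Hypothetical helper: absolute paths for every file under example/<subdir>.
    folder = os.path.join(example_path, subdir)
    return [os.path.join(folder, name) for name in sorted(os.listdir(folder))]

garm_list_path = list_examples("cloth")
face_list_path = list_examples("face")
pose_list_path = list_examples("pose")

The sorted() call is added here only to make the gallery order deterministic; the app itself relies on whatever order os.listdir returns.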
@@ -335,8 +327,6 @@ with image_blocks as demo:
             example = gr.Examples(
                 inputs=imgs,
                 examples_per_page=10,
-                fn=process_image,
-                outputs=imgs,
                 examples=face_list_path
             )
         with gr.Row():
@@ -350,8 +340,6 @@ with image_blocks as demo:
             example = gr.Examples(
                 inputs=pose_img,
                 examples_per_page=8,
-                fn=process_image,
-                outputs=pose_img,
                 examples=pose_list_path)
 
             # with gr.Column():
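Both gr.Examples blocks now pass only examples, inputs and examples_per_page. In Gradio, when fn and outputs are omitted, selecting an example simply loads its value into the input component, which is why the identity process_image callback could be dropped. A stripped-down sketch of that pattern (the component and the example file path are placeholders, not the app's real ones, and the file is assumed to exist):

import gradio as gr

with gr.Blocks() as demo:
    pose_img = gr.Image(label="Pose image", type="filepath")
    # No fn / outputs: clicking an example just fills pose_img with the file.
    gr.Examples(
        examples=["example/pose/pose_1.jpg"],  # placeholder path
        inputs=pose_img,
        examples_per_page=8,
    )

if __name__ == "__main__":
    demo.launch()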