leonelhs committed on
Commit 2475ba1 · Parent(s): 75b33dc

parse with annotations
Files changed (3)
  1. .gitignore +4 -0
  2. app.py +58 -23
  3. utils.py +0 -50
.gitignore CHANGED
@@ -2,4 +2,8 @@
 __pycache__/
 .gradio
 playground.py
+makeup.py
+test.py
+parsing_map_on_im.jpg
+parsing_map_on_im.png
 resnet18-5c106cde.pth
app.py CHANGED
@@ -22,13 +22,13 @@
 # - [BiSeNet] [https://github.com/CoinCheung/BiSeNet]

 import gradio as gr
-import numpy as np
+import cv2
 import torch
+import numpy as np
 from PIL import Image
 from huggingface_hub import hf_hub_download
-
+import torchvision.transforms as transforms
 from bisnet import BiSeNet
-from utils import vis_parsing_maps, decode_segmentation_masks, image_to_tensor

 REPO_ID = "leonelhs/faceparser"
 MODEL_NAME = "79999_iter.pth"
@@ -40,30 +40,64 @@ model_path = hf_hub_download(repo_id=REPO_ID, filename=MODEL_NAME)
 model.load_state_dict(torch.load(model_path, map_location=device))
 model.eval()

+part_colors = [
+    {"part": "background", "color": [255, 0, 0]},
+    {"part": "face", "color": [219, 79, 66]},
+    {"part": "right_brow", "color": [255, 170, 0]},
+    {"part": "left_brow", "color": [255, 0, 85]},
+    {"part": "right_eye", "color": [255, 0, 170]},
+    {"part": "left_eye", "color": [0, 255, 0]},
+    {"part": "glasses", "color": [85, 255, 0]},
+    {"part": "right_ear", "color": [170, 255, 0]},
+    {"part": "left_ear", "color": [0, 255, 85]},
+    {"part": "earrings", "color": [0, 255, 170]},
+    {"part": "nose", "color": [0, 0, 255]},
+    {"part": "teeth", "color": [85, 0, 255]},
+    {"part": "upper_lip", "color": [170, 0, 255]},
+    {"part": "lower_lip", "color": [0, 85, 255]},
+    {"part": "neck", "color": [0, 170, 255]},
+    {"part": "collar", "color": [255, 255, 0]},
+    {"part": "cloths", "color": [255, 255, 85]},
+    {"part": "hair", "color": [199, 21, 133]},
+    {"part": "crown", "color": [255, 0, 255]},
+    {"part": "extra20", "color": [255, 85, 255]},
+    {"part": "extra21", "color": [255, 170, 255]},
+    {"part": "extra22", "color": [0, 255, 255]},
+    {"part": "extra23", "color": [85, 255, 255]},
+    {"part": "extra24", "color": [170, 255, 255]},
+]
+
+def image_to_tensor(image):
+    return transforms.Compose([
+        transforms.ToTensor(),
+        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
+    ])(image)
+
+def parse_face(mask):
+
+    num_of_class = np.max(mask)
+    face_parts = []
+
+    for index in range(1, num_of_class + 1):
+        face_part = np.where(mask == index)
+        canvas = np.full((512, 512, 3), 255, dtype=np.uint8)
+        canvas[face_part[0], face_part[1], :] = part_colors[index]["color"]
+        canvas = cv2.cvtColor(canvas, cv2.COLOR_BGR2GRAY)
+        face_parts.append((canvas, part_colors[index]["part"]))
+
+    return face_parts

-def makeOverlay(image, mask):
-    prediction_mask = np.asarray(mask)
-    image = image.resize((512, 512), Image.BILINEAR)
-    dark_map, overlay = vis_parsing_maps(image, prediction_mask)
-    colormap = decode_segmentation_masks(dark_map)
-    return overlay, colormap
-
-
-def makeMask(image):
+def predict(image):
     with torch.no_grad():
-        image = image.resize((512, 512), Image.BILINEAR)
+        image = image.resize((512, 512), Image.Resampling.BILINEAR)
         input_tensor = image_to_tensor(image)
         input_tensor = torch.unsqueeze(input_tensor, 0)
         if torch.cuda.is_available():
             input_tensor = input_tensor.cuda()
-        output = model(input_tensor)[0]
-        return output.squeeze(0).cpu().numpy().argmax(0)
-
-
-def predict(image):
-    mask = makeMask(image)
-    overlay, colormap = makeOverlay(image, mask)
-    return overlay
+        mask = model(input_tensor)[0]
+        mask = mask.squeeze(0).cpu().numpy().argmax(0)
+        sections = parse_face(mask)
+        return image, sections


 aboutme = r"""
@@ -93,7 +127,7 @@ with gr.Blocks(title="Face Parser") as app:
         inp = gr.Image(type="pil", label="Upload Image")
         btn_predict = gr.Button("Parse")
     with gr.Column(scale=2):
-        out = gr.Image(type="pil", label="Output image")
+        out = gr.AnnotatedImage(label="Face parsed annotated")

     btn_predict.click(predict, inputs=[inp], outputs=[out])

@@ -101,4 +135,5 @@ with gr.Blocks(title="Face Parser") as app:
 with app.route("About this", "/about"):
     gr.Markdown(aboutme)

-app.launch()
+app.launch(share=False, debug=True, show_error=True, mcp_server=True, pwa=True)
+app.queue()
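
For reference, gr.AnnotatedImage takes a (base_image, annotations) value, where annotations is a list of (mask, label) pairs; the new predict returns exactly that shape, so it can be wired straight to the component. A minimal, self-contained sketch of this contract, using a synthetic two-class mask in place of the BiSeNet output (fake_predict, the mask layout, and the labels are illustrative, not part of the commit):

    import numpy as np
    import gradio as gr

    def fake_predict(image):
        # Match the app: the masks are 512x512, so resize the base image
        # to the same size before pairing it with the annotations.
        image = image.resize((512, 512))
        # Synthetic stand-in for the argmax class map from BiSeNet.
        mask = np.zeros((512, 512), dtype=np.uint8)
        mask[:256, :] = 1
        mask[256:, :] = 2
        # One 0/1 mask per class, each paired with a readable label.
        sections = [((mask == 1).astype(np.uint8), "upper half"),
                    ((mask == 2).astype(np.uint8), "lower half")]
        return image, sections

    with gr.Blocks() as demo:
        inp = gr.Image(type="pil", label="Upload Image")
        out = gr.AnnotatedImage(label="Sections")
        gr.Button("Run").click(fake_predict, inputs=[inp], outputs=[out])

    demo.queue()
    demo.launch()

Note that queue() is chained before launch() in the sketch: launch() blocks in a script, so the committed order (app.launch(...) followed by app.queue()) means the queue call only runs after the server shuts down.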
utils.py DELETED
@@ -1,50 +0,0 @@
-import cv2
-import numpy as np
-import torchvision.transforms as transforms
-
-# Colors for all 20 parts
-part_colors = [[255, 0, 0], [255, 85, 0], [255, 170, 0], [255, 0, 85], [255, 0, 170],
-               [0, 255, 0], [85, 255, 0], [170, 255, 0], [0, 255, 85], [0, 255, 170],
-               [0, 0, 255], [85, 0, 255], [170, 0, 255], [0, 85, 255], [0, 170, 255],
-               [255, 255, 0], [255, 255, 85], [255, 255, 170], [255, 0, 255], [255, 85, 255],
-               [255, 170, 255], [0, 255, 255], [85, 255, 255], [170, 255, 255]]
-
-colormap = np.array(part_colors, dtype=np.uint8)
-
-
-def image_to_tensor(image):
-    return transforms.Compose([
-        transforms.ToTensor(),
-        transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),
-    ])(image)
-
-
-def decode_segmentation_masks(mask, n_classes=20):
-    red = np.zeros_like(mask).astype(np.uint8)
-    green = np.zeros_like(mask).astype(np.uint8)
-    blue = np.zeros_like(mask).astype(np.uint8)
-    for chanel in range(0, n_classes):
-        idx = mask == chanel
-        red[idx] = colormap[chanel, 0]
-        green[idx] = colormap[chanel, 1]
-        blue[idx] = colormap[chanel, 2]
-    return np.stack([red, green, blue], axis=2)
-
-
-def vis_parsing_maps(image: np.array, parsing_anno, stride=1):
-    image = np.array(image)
-    vis_im = image.copy().astype(np.uint8)
-    vis_parsing_anno = parsing_anno.copy().astype(np.uint8)
-    vis_parsing_anno = cv2.resize(vis_parsing_anno, None, fx=stride, fy=stride, interpolation=cv2.INTER_NEAREST)
-    vis_parsing_anno_color = np.zeros((vis_parsing_anno.shape[0], vis_parsing_anno.shape[1], 3)) + 255
-
-    num_of_class = np.max(vis_parsing_anno)
-
-    for pi in range(1, num_of_class + 1):
-        index = np.where(vis_parsing_anno == pi)
-        vis_parsing_anno_color[index[0], index[1], :] = part_colors[pi]
-
-    vis_parsing_anno_color = vis_parsing_anno_color.astype(np.uint8)
-    vis_im = cv2.addWeighted(cv2.cvtColor(vis_im, cv2.COLOR_RGB2BGR), 0.4, vis_parsing_anno_color, 0.6, 0)
-
-    return vis_parsing_anno, vis_im
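
As an aside on the deleted helper: decode_segmentation_masks fills the red, green, and blue channels class by class, but since colormap is already a (24, 3) uint8 array, the same palette lookup can be done in a single vectorized indexing step. A minimal equivalent sketch (the 4-entry palette here is shortened for illustration; the deleted module used 24 colors):

    import numpy as np

    # Illustrative palette: one RGB triple per class index.
    colormap = np.array([[255, 0, 0], [255, 85, 0],
                         [255, 170, 0], [255, 0, 85]], dtype=np.uint8)

    def decode_segmentation_masks(mask):
        # Fancy indexing maps each class index in the (H, W) mask to its
        # RGB triple, producing an (H, W, 3) image in one step.
        return colormap[mask]

    demo_mask = np.array([[0, 1], [2, 3]])
    print(decode_segmentation_masks(demo_mask).shape)  # (2, 2, 3)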