import sys
import argparse

import numpy as np
import cv2 as cv

# Check OpenCV version
opencv_python_version = lambda str_version: tuple(map(int, (str_version.split("."))))
assert opencv_python_version(cv.__version__) >= opencv_python_version("4.10.0"), \
       "Please install latest opencv-python to run this demo: python3 -m pip install --upgrade opencv-python"

from sface import SFace

sys.path.append('../face_detection_yunet')
from yunet import YuNet

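# Valid combinations of backends and targets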
backend_target_pairs = [
    [cv.dnn.DNN_BACKEND_OPENCV, cv.dnn.DNN_TARGET_CPU],
    [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA],
    [cv.dnn.DNN_BACKEND_CUDA, cv.dnn.DNN_TARGET_CUDA_FP16],
    [cv.dnn.DNN_BACKEND_TIMVX, cv.dnn.DNN_TARGET_NPU],
    [cv.dnn.DNN_BACKEND_CANN, cv.dnn.DNN_TARGET_NPU]
]

parser = argparse.ArgumentParser(
    description="SFace: Sigmoid-Constrained Hypersphere Loss for Robust Face Recognition (https://ieeexplore.ieee.org/document/9318547)")
parser.add_argument('--target', '-t', type=str,
                    help='Usage: Set path to the input image 1 (target face).')
parser.add_argument('--query', '-q', type=str,
                    help='Usage: Set path to the input image 2 (query).')
parser.add_argument('--model', '-m', type=str, default='face_recognition_sface_2021dec.onnx',
                    help='Usage: Set model path, defaults to face_recognition_sface_2021dec.onnx.')
parser.add_argument('--backend_target', '-bt', type=int, default=0,
                    help='''Choose one of the backend-target pairs to run this demo:
                        {:d}: (default) OpenCV implementation + CPU,
                        {:d}: CUDA + GPU (CUDA),
                        {:d}: CUDA + GPU (CUDA FP16),
                        {:d}: TIM-VX + NPU,
                        {:d}: CANN + NPU
                    '''.format(*[x for x in range(len(backend_target_pairs))]))
parser.add_argument('--dis_type', type=int, choices=[0, 1], default=0,
                    help='Usage: Distance type. \'0\': cosine, \'1\': norm_l1. Defaults to \'0\'')
parser.add_argument('--save', '-s', action='store_true',
                    help='Usage: Specify to save a file with the results (i.e. bounding boxes and matching scores).')
parser.add_argument('--vis', '-v', action='store_true',
                    help='Usage: Specify to open a new window to show the results.')
args = parser.parse_args()
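
# Example invocation (the script name and image paths below are illustrative):
#   python demo.py --target /path/to/face1.jpg --query /path/to/face2.jpg --vis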

def visualize(img1, faces1, img2, faces2, matches, scores, target_size=[512, 512]):
    out1 = img1.copy()
    out2 = img2.copy()
    matched_box_color = (0, 255, 0)
    mismatched_box_color = (0, 0, 255)

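    # Resize image 1 to fit target_size while keeping its aspect ratio, then pad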
    padded_out1 = np.zeros((target_size[0], target_size[1], 3)).astype(np.uint8)
    h1, w1, _ = out1.shape
    ratio1 = min(target_size[0] / out1.shape[0], target_size[1] / out1.shape[1])
    new_h1 = int(h1 * ratio1)
    new_w1 = int(w1 * ratio1)
    resized_out1 = cv.resize(out1, (new_w1, new_h1), interpolation=cv.INTER_LINEAR).astype(np.float32)
    top = max(0, target_size[0] - new_h1) // 2
    bottom = top + new_h1
    left = max(0, target_size[1] - new_w1) // 2
    right = left + new_w1
    padded_out1[top : bottom, left : right] = resized_out1

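    # Draw the bounding box of the first (target) face in image 1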
    bbox1 = faces1[0][:4] * ratio1
    x, y, w, h = bbox1.astype(np.int32)
    cv.rectangle(padded_out1, (x + left, y + top), (x + left + w, y + top + h), matched_box_color, 2)

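    # Resize image 2 to fit target_size in the same way, then pad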
    padded_out2 = np.zeros((target_size[0], target_size[1], 3)).astype(np.uint8)
    h2, w2, _ = out2.shape
    ratio2 = min(target_size[0] / out2.shape[0], target_size[1] / out2.shape[1])
    new_h2 = int(h2 * ratio2)
    new_w2 = int(w2 * ratio2)
    resized_out2 = cv.resize(out2, (new_w2, new_h2), interpolation=cv.INTER_LINEAR).astype(np.float32)
    top = max(0, target_size[0] - new_h2) // 2
    bottom = top + new_h2
    left = max(0, target_size[1] - new_w2) // 2
    right = left + new_w2
    padded_out2[top : bottom, left : right] = resized_out2

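    # Draw bounding boxes and match scores for the faces in image 2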
    assert faces2.shape[0] == len(matches), "number of faces2 needs to match matches"
    assert len(matches) == len(scores), "number of matches needs to match number of scores"
    for index, match in enumerate(matches):
        bbox2 = faces2[index][:4] * ratio2
        x, y, w, h = bbox2.astype(np.int32)
        box_color = matched_box_color if match else mismatched_box_color
        cv.rectangle(padded_out2, (x + left, y + top), (x + left + w, y + top + h), box_color, 2)

        score = scores[index]
        text_color = matched_box_color if match else mismatched_box_color
        cv.putText(padded_out2, "{:.2f}".format(score), (x + left, y + top - 5), cv.FONT_HERSHEY_DUPLEX, 0.4, text_color)

    return np.concatenate([padded_out1, padded_out2], axis=1)

if __name__ == '__main__':
    backend_id = backend_target_pairs[args.backend_target][0]
    target_id = backend_target_pairs[args.backend_target][1]

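    # Instantiate SFace for face recognition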
    recognizer = SFace(modelPath=args.model,
                       disType=args.dis_type,
                       backendId=backend_id,
                       targetId=target_id)
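    # Instantiate YuNet for face detection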
    detector = YuNet(modelPath='../face_detection_yunet/face_detection_yunet_2023mar.onnx',
                     inputSize=[320, 320],
                     confThreshold=0.9,
                     nmsThreshold=0.3,
                     topK=5000,
                     backendId=backend_id,
                     targetId=target_id)

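    # Read the target and query images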
    img1 = cv.imread(args.target)
    img2 = cv.imread(args.query)

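    # Detect faces in both images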
    detector.setInputSize([img1.shape[1], img1.shape[0]])
    faces1 = detector.infer(img1)
    assert faces1.shape[0] > 0, 'Cannot find a face in {}'.format(args.target)
    detector.setInputSize([img2.shape[1], img2.shape[0]])
    faces2 = detector.infer(img2)
    assert faces2.shape[0] > 0, 'Cannot find a face in {}'.format(args.query)

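    # Match the first face detected in the target image against every face detected in the query image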
    scores = []
    matches = []
    for face in faces2:
        result = recognizer.match(img1, faces1[0][:-1], img2, face[:-1])
        scores.append(result[0])
        matches.append(result[1])

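    # Draw the matching results side by side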
    image = visualize(img1, faces1, img2, faces2, matches, scores)

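    # Save results if save is true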
    if args.save:
        print('Results saved to result.jpg\n')
        cv.imwrite('result.jpg', image)

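    # Visualize results in a new window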
    if args.vis:
        cv.namedWindow("SFace Demo", cv.WINDOW_AUTOSIZE)
        cv.imshow("SFace Demo", image)
        cv.waitKey(0)