From 4a900d408b94cdf858b70a3680f2d1093114489a Mon Sep 17 00:00:00 2001
From: "James P. Ketrenos"
Date: Sat, 14 Jan 2023 16:00:18 -0800
Subject: [PATCH] Remove "retinaface" from being used

Signed-off-by: James P. Ketrenos
---
 Dockerfile          |  3 +-
 ketrface/cluster.py |  2 ++
 ketrface/detect.py  | 81 ++++++++++++++++++++++-----------------------
 3 files changed, 42 insertions(+), 44 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index d007197..269750d 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -26,9 +26,8 @@ RUN DEBIAN_FRONTEND=NONINTERACTIVE apt-get install -y \
 # Install the latest npm and npx
 RUN npm install --global npm@latest
 
-# Install deepface and retina-face
+# Install deepface
 RUN pip install deepface
-RUN pip install retina-face
 RUN pip install piexif
 
 # numpy 1.24 deprecated float; deepface is still using it, so we need to
diff --git a/ketrface/cluster.py b/ketrface/cluster.py
index 95d8c00..8c5f3ea 100644
--- a/ketrface/cluster.py
+++ b/ketrface/cluster.py
@@ -21,6 +21,8 @@ pictures_path = merge_config_path(config['path'], config['picturesPath'])
 faces_path = merge_config_path(config['path'], config['facesPath'])
 db_path = merge_config_path(config['path'], config["db"]["photos"]["host"])
 html_base = config['basePath']
+if html_base == "/":
+    html_base = "."
 
 # TODO
 # Switch to using DBSCAN
diff --git a/ketrface/detect.py b/ketrface/detect.py
index 605be33..fd6b2aa 100644
--- a/ketrface/detect.py
+++ b/ketrface/detect.py
@@ -7,8 +7,6 @@
 import argparse
 from PIL import Image, ImageOps
 from deepface import DeepFace
-from deepface.detectors import FaceDetector
-from retinaface import RetinaFace
 import numpy as np
 import cv2
 
@@ -24,8 +22,8 @@ faces_path = merge_config_path(config['path'], config['facesPath'])
 db_path = merge_config_path(config['path'], config["db"]["photos"]["host"])
 html_base = config['basePath']
 
-model_name = 'VGG-Face' # 'ArcFace'
-detector_backend = 'mtcnn' # 'retinaface'
+model_name = 'VGG-Face'
+detector_backend = 'mtcnn'
 model = DeepFace.build_model(model_name)
 
 # Derived from
@@ -70,44 +68,38 @@ def variance_of_laplacian(image):
 
 def extract_faces(
     img, threshold=0.95, allow_upscaling = True, focus_threshold = 100):
-    if detector_backend == 'retinaface':
-        faces = RetinaFace.detect_faces(
-            img_path = img,
-            threshold = threshold,
-            model = model,
-            allow_upscaling = allow_upscaling)
-    elif detector_backend == 'mtcnn':
-        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # mtcnn expects RGB
+    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # mtcnn expects RGB
 
-        redirect_on()
-        res = face_detector.detect_faces(img_rgb)
-        redirect_off()
+    redirect_on()
+    res = face_detector.detect_faces(img_rgb)
+    redirect_off()
 
-        faces = {}
-        if type(res) == list:
-            for i, face in enumerate(res):
-                if threshold > face['confidence']:
-                    continue
-                x = face['box'][0]
-                y = face['box'][1]
-                w = face['box'][2]
-                h = face['box'][3]
-                # If face is less than 2.5% of the image width and height,
-                # skip it (too small) -- filters out likely blurry faces in
-                # large group photos where the actual face may exceed
-                # min_face_size passed to MTCNN
-                if 0.025 > w / img.shape[0] and 0.025 > h / img.shape[1]:
-                    print(f'Dropping due to small face size: {w / img.shape[0]} x {h / img.shape[1]}')
-                    continue
-                faces[f'face_{i+1}'] = { # standardize properties
-                    'facial_area': [ x, y, x + w, y + h ],
-                    'landmarks': {
-                        'left_eye': list(face['keypoints']['left_eye']),
-                        'right_eye': list(face['keypoints']['right_eye']),
-                    },
-                    'score': face['confidence'],
-
-                }
+    if type(res) != list:
+        return None
+
+    faces = {}
+    for i, face in enumerate(res):
+        if threshold > face['confidence']:
+            continue
+        x = face['box'][0]
+        y = face['box'][1]
+        w = face['box'][2]
+        h = face['box'][3]
+        # If face is less than 2.5% of the image width and height,
+        # skip it (too small) -- filters out likely blurry faces in
+        # large group photos where the actual face may exceed
+        # min_face_size passed to MTCNN
+        if 0.025 > w / img.shape[0] and 0.025 > h / img.shape[1]:
+            print(f'Dropping due to small face size: {w / img.shape[0]} x {h / img.shape[1]}')
+            continue
+        faces[f'face_{i+1}'] = { # standardize properties
+            'facial_area': [ x, y, x + w, y + h ],
+            'landmarks': {
+                'left_eye': list(face['keypoints']['left_eye']),
+                'right_eye': list(face['keypoints']['right_eye']),
+            },
+            'score': face['confidence'],
+        }
 
 # Re-implementation of 'extract_faces' with the addition of keeping a
 # copy of the face image for caching on disk
@@ -190,8 +182,13 @@ def extract_faces(
 
 
 parser = argparse.ArgumentParser(description = 'Detect faces in images.')
-parser.add_argument('photos', metavar='PHOTO', type=int, nargs='*',
-    help='PHOTO ID to scan (default: all unscanned photos)')
+parser.add_argument(
+    'photos',
+    metavar = 'PHOTO',
+    type=int,
+    nargs='*',
+    help = 'PHOTO ID to scan (default: all unscanned photos)'
+)
 
 args = parser.parse_args()
 print(args)
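
For reference, the mtcnn-only path this patch keeps can be exercised outside the repository. The sketch below is not code from ketrface; it is a minimal standalone version assuming the `mtcnn` and `opencv-python` packages that deepface's mtcnn backend relies on. The `photo.jpg` path, the 0.95 confidence threshold, and the 2.5% small-face cutoff are illustrative placeholders mirroring the values in the patch; the size check here normalizes width by img.shape[1] and height by img.shape[0] (OpenCV's height-first shape order).

    # Standalone sketch of the MTCNN detection + small-face filter kept by this patch.
    import cv2
    from mtcnn import MTCNN

    detector = MTCNN()

    def detect_faces_filtered(path, threshold=0.95, min_ratio=0.025):
        img = cv2.imread(path)                          # OpenCV loads images as BGR
        if img is None:
            raise FileNotFoundError(path)
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # mtcnn expects RGB
        results = detector.detect_faces(img_rgb)        # list of dicts: box/confidence/keypoints

        faces = {}
        for i, face in enumerate(results):
            if face['confidence'] < threshold:
                continue
            x, y, w, h = face['box']
            # Drop faces smaller than ~2.5% of the image in both dimensions,
            # mirroring the patch's filter for tiny faces in large group photos.
            if w / img.shape[1] < min_ratio and h / img.shape[0] < min_ratio:
                continue
            faces[f'face_{i+1}'] = {                    # standardized properties, as in the patch
                'facial_area': [x, y, x + w, y + h],
                'landmarks': {
                    'left_eye': list(face['keypoints']['left_eye']),
                    'right_eye': list(face['keypoints']['right_eye']),
                },
                'score': face['confidence'],
            }
        return faces

    print(detect_faces_filtered('photo.jpg'))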