Remove "retinaface" from being used

Signed-off-by: James P. Ketrenos <james.p.ketrenos@intel.com>
Author: James P. Ketrenos
Date: 2023-01-14 16:00:18 -08:00
parent 3c8eeba2d0
commit 4a900d408b
3 changed files with 42 additions and 44 deletions

Changed file 1 of 3:

@@ -26,9 +26,8 @@ RUN DEBIAN_FRONTEND=NONINTERACTIVE apt-get install -y \
 # Install the latest npm and npx
 RUN npm install --global npm@latest
 
-# Install deepface and retina-face
+# Install deepface
 RUN pip install deepface
-RUN pip install retina-face
 RUN pip install piexif
 
 # numpy 1.24 deprecated float; deepface is still using it, so we need to
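
Note: the truncated comment above refers to NumPy 1.24 removing the long-deprecated np.float alias, which older deepface releases still reference. One common workaround, shown here only as a sketch and not necessarily what this image does, is to restore the alias at runtime before deepface is imported:

    # Hypothetical compatibility shim; NumPy 1.24 removed the deprecated np.float alias.
    import numpy as np

    if not hasattr(np, "float"):   # only the case on NumPy >= 1.24
        np.float = float           # restore the alias so deepface keeps working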

Changed file 2 of 3:

@@ -21,6 +21,8 @@ pictures_path = merge_config_path(config['path'], config['picturesPath'])
 faces_path = merge_config_path(config['path'], config['facesPath'])
 db_path = merge_config_path(config['path'], config["db"]["photos"]["host"])
 html_base = config['basePath']
+if html_base == "/":
+    html_base = "."
 
 # TODO
 # Switch to using DBSCAN
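
The new check keeps generated links relative when the configured base path is the site root. A minimal sketch of the intended effect, assuming html_base is later prepended to asset paths (the call sites are outside this hunk and the helper below is hypothetical):

    import os

    def face_url(html_base, face_file):
        # With "/" the join yields an absolute path; with "." it stays relative.
        return os.path.join(html_base, "faces", face_file)

    print(face_url("/", "1.jpg"))  # /faces/1.jpg
    print(face_url(".", "1.jpg"))  # ./faces/1.jpg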

Changed file 3 of 3:

@@ -7,8 +7,6 @@ import argparse
 from PIL import Image, ImageOps
 from deepface import DeepFace
-from deepface.detectors import FaceDetector
-from retinaface import RetinaFace
 import numpy as np
 import cv2
@@ -24,8 +22,8 @@ faces_path = merge_config_path(config['path'], config['facesPath'])
 db_path = merge_config_path(config['path'], config["db"]["photos"]["host"])
 html_base = config['basePath']
 
-model_name = 'VGG-Face' # 'ArcFace'
-detector_backend = 'mtcnn' # 'retinaface'
+model_name = 'VGG-Face'
+detector_backend = 'mtcnn'
 model = DeepFace.build_model(model_name)
 
 # Derived from
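
The face_detector object used in the next hunk is not constructed in this commit. A standalone sketch of an MTCNN detector with the same detect_faces interface (a list of dicts carrying 'box', 'confidence' and 'keypoints'), assuming the mtcnn package and a hypothetical image path:

    import cv2
    from mtcnn import MTCNN

    face_detector = MTCNN(min_face_size=20)          # construction assumed; not shown in the diff

    img = cv2.imread("example.jpg")                  # hypothetical input image
    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)   # MTCNN expects RGB
    for face in face_detector.detect_faces(img_rgb):
        print(face['box'], face['confidence'], face['keypoints']['left_eye'])
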
@@ -70,44 +68,38 @@ def variance_of_laplacian(image):
 def extract_faces(
         img, threshold=0.95, allow_upscaling = True, focus_threshold = 100):
-    if detector_backend == 'retinaface':
-        faces = RetinaFace.detect_faces(
-            img_path = img,
-            threshold = threshold,
-            model = model,
-            allow_upscaling = allow_upscaling)
-    elif detector_backend == 'mtcnn':
-        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # mtcnn expects RGB
-        redirect_on()
-        res = face_detector.detect_faces(img_rgb)
-        redirect_off()
-        faces = {}
-        if type(res) == list:
-            for i, face in enumerate(res):
-                if threshold > face['confidence']:
-                    continue
-                x = face['box'][0]
-                y = face['box'][1]
-                w = face['box'][2]
-                h = face['box'][3]
-                # If face is less than 2.5% of the image width and height,
-                # skip it (too small) -- filters out likely blurry faces in
-                # large group photos where the actual face may exceed
-                # min_face_size passed to MTCNN
-                if 0.025 > w / img.shape[0] and 0.025 > h / img.shape[1]:
-                    print(f'Dropping due to small face size: {w / img.shape[0]} x {h / img.shape[1]}')
-                    continue
-                faces[f'face_{i+1}'] = { # standardize properties
-                    'facial_area': [ x, y, x + w, y + h ],
-                    'landmarks': {
-                        'left_eye': list(face['keypoints']['left_eye']),
-                        'right_eye': list(face['keypoints']['right_eye']),
-                    },
-                    'score': face['confidence'],
-                }
+    img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # mtcnn expects RGB
+    redirect_on()
+    res = face_detector.detect_faces(img_rgb)
+    redirect_off()
+    if type(res) != list:
+        return None
+
+    faces = {}
+    for i, face in enumerate(res):
+        if threshold > face['confidence']:
+            continue
+        x = face['box'][0]
+        y = face['box'][1]
+        w = face['box'][2]
+        h = face['box'][3]
+        # If face is less than 2.5% of the image width and height,
+        # skip it (too small) -- filters out likely blurry faces in
+        # large group photos where the actual face may exceed
+        # min_face_size passed to MTCNN
+        if 0.025 > w / img.shape[0] and 0.025 > h / img.shape[1]:
+            print(f'Dropping due to small face size: {w / img.shape[0]} x {h / img.shape[1]}')
+            continue
+        faces[f'face_{i+1}'] = { # standardize properties
+            'facial_area': [ x, y, x + w, y + h ],
+            'landmarks': {
+                'left_eye': list(face['keypoints']['left_eye']),
+                'right_eye': list(face['keypoints']['right_eye']),
+            },
+            'score': face['confidence'],
+        }
 
 # Re-implementation of 'extract_faces' with the addition of keeping a
 # copy of the face image for caching on disk
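
For reference, a short consumer sketch of the standardized return value, assuming extract_faces returns the faces dict (its return statement lies outside this hunk) and using a hypothetical image path:

    import cv2

    img = cv2.imread("example.jpg")                # hypothetical input
    faces = extract_faces(img)                     # {'face_1': {...}, ...} or None
    if faces:
        for name, face in faces.items():
            x1, y1, x2, y2 = face['facial_area']   # [x, y, x + w, y + h]
            crop = img[y1:y2, x1:x2]               # numpy slicing: rows (y) first, then columns (x)
            cv2.imwrite(f'{name}.jpg', crop)       # e.g. face_1.jpg
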
@@ -190,8 +182,13 @@ def extract_faces(
 parser = argparse.ArgumentParser(description = 'Detect faces in images.')
-parser.add_argument('photos', metavar='PHOTO', type=int, nargs='*',
-    help='PHOTO ID to scan (default: all unscanned photos)')
+parser.add_argument(
+    'photos',
+    metavar = 'PHOTO',
+    type=int,
+    nargs='*',
+    help = 'PHOTO ID to scan (default: all unscanned photos)'
+)
 args = parser.parse_args()
 print(args)
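
With nargs='*' and type=int, args.photos is a list of integers that is empty when no IDs are given. A standalone illustration of the argument definition above, using made-up photo IDs:

    import argparse

    parser = argparse.ArgumentParser(description = 'Detect faces in images.')
    parser.add_argument(
        'photos',
        metavar = 'PHOTO',
        type=int,
        nargs='*',
        help = 'PHOTO ID to scan (default: all unscanned photos)'
    )

    print(parser.parse_args(['17', '42']).photos)  # [17, 42]
    print(parser.parse_args([]).photos)            # [] -> scan all unscanned photos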