Compare commits

..

No commits in common. "8c83eceefa2921e56ab2c27f6c8f75d8b58be1ff" and "3c8eeba2d0a7e75fbf35be3439d49f2fbdf08012" have entirely different histories.

4 changed files with 47 additions and 55 deletions

View File

@@ -26,8 +26,9 @@ RUN DEBIAN_FRONTEND=NONINTERACTIVE apt-get install -y \
# Install the latest npm and npx
RUN npm install --global npm@latest
# Install deepface
# Install deepface and retina-face
RUN pip install deepface
RUN pip install retina-face
RUN pip install piexif
# numpy 1.24 deprecated float; deepface is still using it, so we need to

View File

@@ -21,8 +21,6 @@ pictures_path = merge_config_path(config['path'], config['picturesPath'])
faces_path = merge_config_path(config['path'], config['facesPath'])
db_path = merge_config_path(config['path'], config["db"]["photos"]["host"])
html_base = config['basePath']
if html_base == "/":
html_base = "."
# TODO
# Switch to using DBSCAN

View File

@@ -7,6 +7,8 @@ import argparse
from PIL import Image, ImageOps
from deepface import DeepFace
from deepface.detectors import FaceDetector
from retinaface import RetinaFace
import numpy as np
import cv2
@@ -22,8 +24,8 @@ faces_path = merge_config_path(config['path'], config['facesPath'])
db_path = merge_config_path(config['path'], config["db"]["photos"]["host"])
html_base = config['basePath']
model_name = 'VGG-Face'
detector_backend = 'mtcnn'
model_name = 'VGG-Face' # 'ArcFace'
detector_backend = 'mtcnn' # 'retinaface'
model = DeepFace.build_model(model_name)
# Derived from
@@ -68,38 +70,44 @@ def variance_of_laplacian(image):
def extract_faces(
img, threshold=0.95, allow_upscaling = True, focus_threshold = 100):
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # mtcnn expects RGB
if detector_backend == 'retinaface':
faces = RetinaFace.detect_faces(
img_path = img,
threshold = threshold,
model = model,
allow_upscaling = allow_upscaling)
elif detector_backend == 'mtcnn':
img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # mtcnn expects RGB
redirect_on()
res = face_detector.detect_faces(img_rgb)
redirect_off()
redirect_on()
res = face_detector.detect_faces(img_rgb)
redirect_off()
if type(res) != list:
return None
faces = {}
for i, face in enumerate(res):
if threshold > face['confidence']:
continue
x = face['box'][0]
y = face['box'][1]
w = face['box'][2]
h = face['box'][3]
# If face is less than 2.5% of the image width and height,
# skip it (too small) -- filters out likely blurry faces in
# large group photos where the actual face may exceed
# min_face_size passed to MTCNN
if 0.025 > w / img.shape[0] and 0.025 > h / img.shape[1]:
print(f'Dropping due to small face size: {w / img.shape[0]} x {h / img.shape[1]}')
continue
faces[f'face_{i+1}'] = { # standardize properties
'facial_area': [ x, y, x + w, y + h ],
'landmarks': {
'left_eye': list(face['keypoints']['left_eye']),
'right_eye': list(face['keypoints']['right_eye']),
},
'score': face['confidence'],
}
faces = {}
if type(res) == list:
for i, face in enumerate(res):
if threshold > face['confidence']:
continue
x = face['box'][0]
y = face['box'][1]
w = face['box'][2]
h = face['box'][3]
# If face is less than 2.5% of the image width and height,
# skip it (too small) -- filters out likely blurry faces in
# large group photos where the actual face may exceed
# min_face_size passed to MTCNN
if 0.025 > w / img.shape[0] and 0.025 > h / img.shape[1]:
print(f'Dropping due to small face size: {w / img.shape[0]} x {h / img.shape[1]}')
continue
faces[f'face_{i+1}'] = { # standardize properties
'facial_area': [ x, y, x + w, y + h ],
'landmarks': {
'left_eye': list(face['keypoints']['left_eye']),
'right_eye': list(face['keypoints']['right_eye']),
},
'score': face['confidence'],
}
# Re-implementation of 'extract_faces' with the addition of keeping a
# copy of the face image for caching on disk
@@ -182,13 +190,8 @@ def extract_faces(
parser = argparse.ArgumentParser(description = 'Detect faces in images.')
parser.add_argument(
'photos',
metavar = 'PHOTO',
type=int,
nargs='*',
help = 'PHOTO ID to scan (default: all unscanned photos)'
)
parser.add_argument('photos', metavar='PHOTO', type=int, nargs='*',
help='PHOTO ID to scan (default: all unscanned photos)')
args = parser.parse_args()
print(args)
@@ -206,7 +209,7 @@ with conn:
for i, row in enumerate(rows):
photoId, photoFaces, albumPath, photoFilename = row
img_path = f'{base}{albumPath}{photoFilename}'
print(f'Processing {i+1}/{count}: photoId = {photoId}: {img_path}')
print(f'Processing {i+1}/{count}: {img_path}')
try:
img = Image.open(img_path)
img = ImageOps.exif_transpose(img) # auto-rotate if needed
@@ -244,8 +247,6 @@ with conn:
'descriptorId': faceDescriptorId,
})
print(f'Face added to database with faceId = {faceId}')
path = f'{faces_path}/{"{:02d}".format(faceId % 100)}'
try:
os.makedirs(path)

View File

@@ -7,18 +7,10 @@ import uu
from io import BytesIO
from ketrface.util import *
from ketrface.config import *
config = read_config()
html_path = merge_config_path(config['path'], 'frontend')
pictures_path = merge_config_path(config['path'], config['picturesPath'])
faces_path = merge_config_path(config['path'], config['facesPath'])
db_path = merge_config_path(config['path'], config["db"]["photos"]["host"])
html_base = config['basePath']
face_base = "../"
faceId = int(sys.argv[1])
path = f'{faces_path}/{"{:02d}".format(faceId % 100)}'
path = f'{face_base}faces/{"{:02d}".format(faceId % 10)}'
img = Image.open(f'{path}/{faceId}.jpg')
exif_dict = piexif.load(img.info["exif"])