# Extracts faces from an input photo with RetinaFace, computes ArcFace
# embeddings via DeepFace, and writes one JPG + JSON pair per face with
# the metadata also embedded in the JPG's Exif UserComment.
import json
import sys

import numpy as np
import piexif
from deepface import DeepFace
from PIL import Image
from retinaface import RetinaFace
class NpEncoder(json.JSONEncoder):
    """JSON encoder that converts NumPy scalars and arrays to native types.

    Needed because ``json.dumps`` cannot serialize ``np.integer``,
    ``np.floating`` or ``np.ndarray`` values (e.g. the face embedding
    vectors and facial-area coordinates produced below).
    """

    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # Fall back to the base class, which raises TypeError for
        # unserializable objects.  The original implicitly returned None
        # here, silently encoding unknown types as JSON null.
        return super().default(obj)
# Face-recognition model names supported by DeepFace (kept for reference;
# only index 1 / "Facenet" and "ArcFace" are used in this script).
models = [
    "VGG-Face",
    "Facenet",
    "Facenet512",
    "OpenFace",
    "DeepFace",
    "DeepID",
    "ArcFace",
    "Dlib",
    "SFace",
]
def extract_faces(img_path, threshold=0.9, model=None, align=True, allow_upscaling=True):
    """Detect faces in an image and attach an embedding and crop to each.

    Re-implementation of RetinaFace's ``extract_faces`` that additionally
    keeps a copy of each cropped face image for caching on disk.

    Parameters
    ----------
    img_path : str or ndarray
        Path/URL accepted by ``RetinaFace.get_image``.
    threshold : float
        Detection confidence threshold passed to ``RetinaFace.detect_faces``.
    model :
        Optional pre-built RetinaFace model.
    align : bool
        When True, crop a square around the face center and run eye/nose
        alignment before embedding; otherwise use the raw bounding box.
    allow_upscaling : bool
        Forwarded to ``RetinaFace.detect_faces``.

    Returns
    -------
    dict
        The RetinaFace detection dict; each entry gains ``'vector'`` (ArcFace
        embedding) and ``'image'`` (RGB crop as ndarray).
    """
    img = RetinaFace.get_image(img_path)
    faces = RetinaFace.detect_faces(
        img_path=img, threshold=threshold, model=model, allow_upscaling=allow_upscaling
    )
    # faces = DeepFace.detectFace(img_path=img, target_size=(224, 224), detector_backend='retinaface')

    # detect_faces returns a dict on success; anything else means no faces.
    if isinstance(faces, dict):
        for key in faces:
            print(key)
            identity = faces[key]
            facial_area = identity["facial_area"]  # [x1, y1, x2, y2]

            if align:
                width = facial_area[2] - facial_area[0]
                height = facial_area[3] - facial_area[1]
                # Face-box center.
                x = facial_area[0] + width * 0.5
                y = facial_area[1] + height * 0.5

                # Make thumbnail a square crop.
                if width > height:
                    height = width
                else:
                    width = height

                landmarks = identity["landmarks"]
                left_eye = landmarks["left_eye"]
                right_eye = landmarks["right_eye"]
                nose = landmarks["nose"]

                # Translate the landmarks to be centered on the crop.
                # NOTE(review): these become center-relative (can be
                # negative) while the crop origin is (top, left) —
                # confirm this is what alignment_procedure expects.
                left_eye[0] -= x
                left_eye[1] -= y
                right_eye[0] -= x
                right_eye[1] -= y
                nose[0] -= x
                nose[1] -= y

                # Shrink the square to a quarter of the box size.
                # NOTE(review): 0.25 looks unusually tight for a face
                # thumbnail — possibly intended as 1.25; kept as-is.
                width *= 0.25
                height *= 0.25

                left = round(x - width * 0.5)
                right = round(left + width)
                top = round(y - height * 0.5)
                bottom = round(top + height)

                # FIX: numpy images index as [row(y), col(x)]; the original
                # sliced [left:right, top:bottom], swapping the axes
                # (the non-aligned branch below uses the correct order).
                facial_img = img[top:bottom, left:right]

                print(left, right, top, bottom, left_eye, right_eye, nose)
                aligned = RetinaFace.postprocess.alignment_procedure(
                    facial_img, left_eye, right_eye, nose
                )

                image = Image.fromarray(aligned)
                # FIX: PIL's resize returns a new image — the original
                # discarded the result.  ANTIALIAS was removed in Pillow 10;
                # LANCZOS is the same filter.
                image = image.resize(size=(224, 224), resample=Image.LANCZOS)
                resized = np.asarray(image)
            else:
                # Plain bounding-box crop, no alignment or resizing.
                facial_img = img[facial_area[1]:facial_area[3], facial_area[0]:facial_area[2]]
                resized = facial_img

            identity['vector'] = DeepFace.represent(
                img_path=resized,
                model_name='ArcFace',
                detector_backend='opencv',
                enforce_detection=False,
            )

            # RetinaFace works in BGR; flip channels so 'image' is RGB.
            identity['image'] = resized[:, :, ::-1]

    return faces
# Face extraction driver: crop every detected face from the image given on
# the command line, then persist each as <key>.json (metadata) and
# <key>.jpg (thumbnail with the same JSON embedded as Exif).
img1_path = sys.argv[1]

faces = extract_faces(img1_path)
for key in faces:
    face = faces[key]
    print(f'Face {key}...')
    image = Image.fromarray(face['image'])

    # TODO: Add additional meta-data allowing back referencing to original
    # photo
    face['version'] = 1  # version 1 doesn't add much...

    # Serialize everything except the raw pixel data.
    # (Simplified from set(list(face.keys())) - set(['image']).)
    data = {k: v for k, v in face.items() if k != 'image'}
    json_str = json.dumps(data, ensure_ascii=False, indent=2, cls=NpEncoder)

    with open(f'{key}.json', 'w', encoding='utf-8') as f:
        f.write(json_str)

    # Encode this data into the JPG as Exif
    exif_ifd = {piexif.ExifIFD.UserComment: json_str.encode()}
    exif_dict = {"0th": {}, "Exif": exif_ifd, "1st": {},
                 "thumbnail": None, "GPS": {}}
    image.save(f'{key}.jpg', exif=piexif.dump(exif_dict))
#df = DeepFace.find(img, db_path = '/db')
#print(df.head())

#img2_path = sys.argv[2]
#print("image 1: ", img1_path);
#print("image 2: ", img2_path);
#result = DeepFace.verify(img1_path = img1_path, img2_path = img2_path, #model_name = models[1])
#print("result: ", result)

#face recognition
#df = DeepFace.find(img_path = img1_path, db_path = "./db/deepface", model_name = models[1])

#print("df: ", df)