VGG-Face and MTCNN are working MUCH better. EXIF compress/encode/decode/decompress is working.
Signed-off-by: James Ketrenos <james_git@ketrenos.com>
parent 6847a35ca5
commit 798712aa7f
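The EXIF compress/encode/decode/decompress path called out in the commit message reduces to the zlib_uuencode / zlib_uudecode helpers visible in the diff below. A minimal, self-contained round-trip sketch (the payload is made up; note the standard-library uu module is deprecated in recent Python releases):

import zlib
import uu
from io import BytesIO

def zlib_uuencode(databytes, name='<data>'):
    ''' Compress databytes with zlib & uuencode the result '''
    inbuff = BytesIO(zlib.compress(databytes, 9))
    outbuff = BytesIO()
    uu.encode(inbuff, outbuff, name=name)
    return outbuff.getvalue()

def zlib_uudecode(databytes):
    ''' uudecode databytes and decompress the result with zlib '''
    inbuff = BytesIO(databytes)
    outbuff = BytesIO()
    uu.decode(inbuff, outbuff)
    return zlib.decompress(outbuff.getvalue())

# Hypothetical payload: a JSON face descriptor headed for an EXIF field.
payload = b'{"face_1": {"score": 0.99}}'
encoded = zlib_uuencode(payload)            # compress with zlib, then uuencode to ASCII-safe bytes
assert zlib_uudecode(encoded) == payload    # uudecode, then decompress: the round trip is lossless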
@@ -15,19 +15,34 @@ import cv2
 import uu
 from io import BytesIO
 
+original = None
+
+def redirect_on():
+    global original
+    if original == None:
+        original = sys.stdout
+        sys.stdout = open(os.devnull, 'w')
+
+def redirect_off():
+    global original
+    if original != None:
+        sys.stdout.close()
+        sys.stdout = original
+        original = None
+
 def zlib_uuencode(databytes, name='<data>'):
     ''' Compress databytes with zlib & uuencode the result '''
     inbuff = BytesIO(zlib.compress(databytes, 9))
     outbuff = BytesIO()
     uu.encode(inbuff, outbuff, name=name)
     return outbuff.getvalue()
 
 def zlib_uudecode(databytes):
     ''' uudecode databytes and decompress the result with zlib '''
     inbuff = BytesIO(databytes)
     outbuff = BytesIO()
     uu.decode(inbuff, outbuff)
     return zlib.decompress(outbuff.getvalue())
 
 class NpEncoder(json.JSONEncoder):
     def default(self, obj):
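The redirect_on()/redirect_off() pair added above silences library chatter by pointing sys.stdout at os.devnull and restoring it afterwards. For comparison only (not what this commit does), roughly the same effect can be had with the standard library's contextlib.redirect_stdout; detect_quietly is a hypothetical helper name:

import contextlib
import os

# Hypothetical wrapper: suppress print() output from a noisy detector call by
# sending sys.stdout to os.devnull for the duration of the call only.
def detect_quietly(detector, img_rgb):
    with open(os.devnull, 'w') as devnull, contextlib.redirect_stdout(devnull):
        return detector.detect_faces(img_rgb)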
@@ -64,7 +79,7 @@ def alignment_procedure(img, left_eye, right_eye):
     dY = right_eye[1] - left_eye[1]
     dX = right_eye[0] - left_eye[0]
     radians = np.arctan2(dY, dX)
-    rotation = 180 * radians / np.pi
+    rotation = 180 + 180 * radians / np.pi
 
     if True:
         img = img.rotate(
@@ -83,22 +98,26 @@ def extract_faces(img, threshold=0.75, allow_upscaling = True):
             allow_upscaling = allow_upscaling)
     elif detector_backend == 'mtcnn':
         img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # mtcnn expects RGB
+
+        redirect_on()
         res = face_detector.detect_faces(img_rgb)
+        redirect_off()
+
         faces = {}
         if type(res) == list:
             for i, face in enumerate(res):
+                x = face['box'][0]
+                y = face['box'][1]
+                w = face['box'][2]
+                h = face['box'][3]
                 faces[f'face_{i+1}'] = { # standardize properties
-                    'facial_area': [
-                        face['box'][1], face['box'][1] + face['box'][3],
-                        face['box'][0], face['box'][0] + face['box'][2],
-                    ],
+                    'facial_area': [ x, y, x + w, y + h ],
                     'landmarks': {
                         'left_eye': list(face['keypoints']['left_eye']),
                         'right_eye': list(face['keypoints']['right_eye']),
                     },
                     'score': face['confidence'],
                 }
-                print(face, faces[f'face_{i+1}'])
 
 # Re-implementation of 'extract_faces' with the addition of keeping a
 # copy of the face image for caching on disk
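The hunk above standardizes MTCNN's (x, y, width, height) box into the [x1, y1, x2, y2] facial_area layout that later hunks read back as left/top/right/bottom. A small illustration with a made-up detection result:

# Hypothetical MTCNN-style result: 'box' is [x, y, w, h].
face = {'box': [120, 80, 60, 60], 'confidence': 0.98,
        'keypoints': {'left_eye': (140, 100), 'right_eye': (160, 100)}}

x, y, w, h = face['box']
facial_area = [x, y, x + w, y + h]   # corners: [left, top, right, bottom]
assert facial_area == [120, 80, 180, 140]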
@@ -111,8 +130,8 @@ def extract_faces(img, threshold=0.75, allow_upscaling = True):
         left_eye = landmarks["left_eye"]
         right_eye = landmarks["right_eye"]
 
-        markup = True
-        # markup = False
+        # markup = True
+        markup = False
         if markup == True: # Draw the face rectangle and eyes
             cv2.rectangle(img,
                 (int(facial_area[0]), int(facial_area[1])),
@@ -155,27 +174,28 @@ def extract_faces(img, threshold=0.75, allow_upscaling = True):
         image = image.resize(size = input_shape, resample = Image.LANCZOS)
         resized = np.asarray(image)
 
+        redirect_on()
         identity['vector'] = DeepFace.represent(
             img_path = resized,
             model_name = model_name,
             model = model, # pre-built
             detector_backend = detector_backend,
             enforce_detection = False)
+        redirect_off()
 
+        redirect_on()
         identity["face"] = {
             'top': facial_area[1] / img.shape[0],
             'left': facial_area[0] / img.shape[1],
             'bottom': facial_area[3] / img.shape[0],
             'right': facial_area[2] / img.shape[1]
         }
+        redirect_off()
 
         identity['image'] = Image.fromarray(resized)
 
     return faces
 
-#face verification
-#img_path = sys.argv[1]
-
 def create_connection(db_file):
     """ create a database connection to the SQLite database
         specified by db_file
@@ -315,5 +335,4 @@ with conn:
                 "thumbnail": None, "GPS": {}}
             image.save(f'{path}/{faceId}.jpg', exif = piexif.dump(exif_dict))
 
-            exit(1)
             update_face_count(conn, photoId, len(faces))