VGG-Face and MTCNN are working MUCH better. EXIF compress/encode/decode/decompress is working

Signed-off-by: James Ketrenos <james_git@ketrenos.com>
James Ketr 2023-01-06 13:29:26 -08:00
parent 6847a35ca5
commit 798712aa7f


@@ -15,19 +15,34 @@ import cv2
 import uu
 from io import BytesIO

+original = None
+
+def redirect_on():
+    global original
+    if original == None:
+        original = sys.stdout
+        sys.stdout = open(os.devnull, 'w')
+
+def redirect_off():
+    global original
+    if original != None:
+        sys.stdout.close()
+        sys.stdout = original
+        original = None
+
 def zlib_uuencode(databytes, name='<data>'):
     ''' Compress databytes with zlib & uuencode the result '''
     inbuff = BytesIO(zlib.compress(databytes, 9))
     outbuff = BytesIO()
     uu.encode(inbuff, outbuff, name=name)
     return outbuff.getvalue()

 def zlib_uudecode(databytes):
     ''' uudecode databytes and decompress the result with zlib '''
     inbuff = BytesIO(databytes)
     outbuff = BytesIO()
     uu.decode(inbuff, outbuff)
     return zlib.decompress(outbuff.getvalue())

 class NpEncoder(json.JSONEncoder):
     def default(self, obj):
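A quick round-trip check of the two helpers above (a minimal sketch; the payload bytes are made up, and note that the stdlib uu module is deprecated as of Python 3.11):

    payload = b'{"face_1": {"score": 0.99}}' * 50   # arbitrary repetitive test bytes
    encoded = zlib_uuencode(payload)                # zlib-compressed, then uuencoded (ASCII-safe)
    assert zlib_uudecode(encoded) == payload        # lossless round trip
    print(len(payload), '->', len(encoded))         # repetitive data compresses well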
@@ -64,7 +79,7 @@ def alignment_procedure(img, left_eye, right_eye):
     dY = right_eye[1] - left_eye[1]
     dX = right_eye[0] - left_eye[0]
     radians = np.arctan2(dY, dX)
-    rotation = 180 * radians / np.pi
+    rotation = 180 + 180 * radians / np.pi

     if True:
         img = img.rotate(
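The changed line adds a half turn to the eye-alignment angle, presumably because the previous formula left the aligned faces rotated 180 degrees off. A worked example with illustrative keypoints:

    import numpy as np

    left_eye, right_eye = (100, 120), (160, 100)   # example (x, y) keypoints
    dY = right_eye[1] - left_eye[1]                # -20
    dX = right_eye[0] - left_eye[0]                # 60
    radians = np.arctan2(dY, dX)                   # about -0.3217 rad (-18.43 degrees)
    rotation = 180 + 180 * radians / np.pi         # about 161.57 degrees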
@@ -83,22 +98,26 @@ def extract_faces(img, threshold=0.75, allow_upscaling = True):
             allow_upscaling = allow_upscaling)
     elif detector_backend == 'mtcnn':
         img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # mtcnn expects RGB
+        redirect_on()
         res = face_detector.detect_faces(img_rgb)
+        redirect_off()

         faces = {}
         if type(res) == list:
             for i, face in enumerate(res):
+                x = face['box'][0]
+                y = face['box'][1]
+                w = face['box'][2]
+                h = face['box'][3]
                 faces[f'face_{i+1}'] = { # standardize properties
-                    'facial_area': [
-                        face['box'][1], face['box'][1] + face['box'][3],
-                        face['box'][0], face['box'][0] + face['box'][2],
-                    ],
+                    'facial_area': [ x, y, x + w, y + h ],
                     'landmarks': {
                         'left_eye': list(face['keypoints']['left_eye']),
                         'right_eye': list(face['keypoints']['right_eye']),
                     },
                     'score': face['confidence'],
                 }
+                print(face, faces[f'face_{i+1}'])

 # Re-implementation of 'extract_faces' with the addition of keeping a
 # copy of the face image for caching on disk
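After this change the mtcnn branch emits facial_area as [x1, y1, x2, y2] in pixels, apparently to match the other detector backend, rather than the earlier [y1, y2, x1, x2] ordering. A representative entry (values illustrative):

    faces = {
        'face_1': {
            'facial_area': [74, 62, 180, 200],   # [x1, y1, x2, y2] in pixels
            'landmarks': {
                'left_eye': [101, 108],
                'right_eye': [152, 106],
            },
            'score': 0.9987,                     # mtcnn detection confidence
        },
    }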
@@ -111,8 +130,8 @@ def extract_faces(img, threshold=0.75, allow_upscaling = True):
         left_eye = landmarks["left_eye"]
         right_eye = landmarks["right_eye"]

-        markup = True
-        # markup = False
+        # markup = True
+        markup = False
         if markup == True: # Draw the face rectangle and eyes
             cv2.rectangle(img,
                 (int(facial_area[0]), int(facial_area[1])),
@@ -155,27 +174,28 @@ def extract_faces(img, threshold=0.75, allow_upscaling = True):
         image = image.resize(size = input_shape, resample = Image.LANCZOS)
         resized = np.asarray(image)

+        redirect_on()
         identity['vector'] = DeepFace.represent(
             img_path = resized,
             model_name = model_name,
             model = model, # pre-built
             detector_backend = detector_backend,
             enforce_detection = False)
+        redirect_off()

-        redirect_on()
         identity["face"] = {
             'top': facial_area[1] / img.shape[0],
             'left': facial_area[0] / img.shape[1],
             'bottom': facial_area[3] / img.shape[0],
             'right': facial_area[2] / img.shape[1]
         }
-        redirect_off()

         identity['image'] = Image.fromarray(resized)

     return faces

 #face verification
 #img_path = sys.argv[1]

 def create_connection(db_file):
     """ create a database connection to the SQLite database
         specified by db_file
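Since identity["face"] stores the box as fractions of the source image (top/left/bottom/right in 0..1), it stays valid at any display size. Converting back to pixels, a minimal sketch with made-up values:

    face = {'top': 0.12, 'left': 0.30, 'bottom': 0.45, 'right': 0.52}
    width, height = 1920, 1080                     # target display size
    box_px = (int(face['left'] * width),  int(face['top'] * height),
              int(face['right'] * width), int(face['bottom'] * height))
    # box_px == (576, 129, 998, 486)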
@@ -315,5 +335,4 @@ with conn:
                 "thumbnail": None, "GPS": {}}
             image.save(f'{path}/{faceId}.jpg', exif = piexif.dump(exif_dict))
-            exit(1)

     update_face_count(conn, photoId, len(faces))
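The "compress/encode/decode/decompress" path in the commit message presumably pairs the zlib/uu helpers with piexif when saving the face crops. A hedged sketch of the round trip; using UserComment as the carrier tag is an assumption, not taken from this diff:

    import json
    import piexif

    # write: compress + uuencode metadata into the EXIF block of a PIL image
    # (UserComment as the carrier field is an assumption for this sketch)
    data = zlib_uuencode(json.dumps({'faceId': 42, 'score': 0.99}).encode())
    exif_dict = {"0th": {}, "Exif": {piexif.ExifIFD.UserComment: data},
                 "1st": {}, "thumbnail": None, "GPS": {}}
    image.save('face.jpg', exif=piexif.dump(exif_dict))   # image: PIL.Image, as above

    # read: load the EXIF back, then uudecode + decompress
    loaded = piexif.load('face.jpg')
    restored = json.loads(zlib_uudecode(loaded['Exif'][piexif.ExifIFD.UserComment]))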