Face scanning working
Signed-off-by: James Ketrenos <james_git@ketrenos.com>
This commit is contained in:
parent
733b8feeba
commit
836b27ac54
@ -13,4 +13,6 @@
|
||||
!README.md
|
||||
!reset-db.sh
|
||||
!server
|
||||
!scanner
|
||||
!src
|
||||
!util
|
||||
|
20
Dockerfile
20
Dockerfile
@ -18,6 +18,8 @@ RUN DEBIAN_FRONTEND=NONINTERACTIVE apt-get install -y \
|
||||
RUN wget -qO- https://deb.nodesource.com/setup_18.x | bash -
|
||||
|
||||
RUN DEBIAN_FRONTEND=NONINTERACTIVE apt-get install -y \
|
||||
python2 \
|
||||
jhead \
|
||||
nodejs
|
||||
|
||||
# Install the latest npm and npx
|
||||
@ -26,22 +28,26 @@ RUN npm install --global npm@latest
|
||||
# Install deepface and retina-face
|
||||
RUN pip install deepface
|
||||
RUN pip install retina-face
|
||||
RUN pip install piexif
|
||||
|
||||
# numpy 1.24 deprecated float; deepface is still using it, so we need to
|
||||
# install <1.24
|
||||
RUN pip install "numpy<1.24"
|
||||
|
||||
RUN apt-get install -y python2
|
||||
|
||||
COPY /entrypoint.sh /
|
||||
|
||||
COPY . /website
|
||||
|
||||
COPY /package*json /website/
|
||||
|
||||
WORKDIR /website
|
||||
RUN npm upgrade && npm install
|
||||
|
||||
RUN DEBIAN_FRONTEND=NONINTERACTIVE apt-get install -y \
|
||||
jhead
|
||||
RUN pip install piexif
|
||||
COPY /*js /website/
|
||||
COPY /src /website/src
|
||||
COPY /scanner /website/scanner
|
||||
COPY /server /website/server
|
||||
COPY /frontend /website/frontend
|
||||
COPY /db /website/db
|
||||
COPY /config /website/config
|
||||
|
||||
|
||||
CMD [ "/entrypoint.sh" ]
|
||||
|
17
README.md
17
README.md
@ -1,3 +1,13 @@
|
||||
# Overview
|
||||
|
||||
This photo manager performs the following:
|
||||
|
||||
1. Scan a target directory looking for any photo that has been updated,
|
||||
removed, or added
|
||||
2. Process that image, extracting EXIF data, and adding to the DB
|
||||
3. Schedule backend processing of all photos that have not been face
|
||||
scanned with the latest FACE_SCANNER version
|
||||
|
||||
# To use the Docker
|
||||
|
||||
Create a symbolic link from 'photos' to where your photos
|
||||
@ -28,13 +38,6 @@ sudo apt-get install --yes nodejs
|
||||
sudo npm install --global npm@latest
|
||||
```
|
||||
|
||||
# Install BLAS to improve performance, and dev tools so
|
||||
# face-recognition can build.
|
||||
|
||||
```bash
|
||||
sudo apt install -y libopenblas-dev cmake
|
||||
```
|
||||
|
||||
### NEF processing uses darktable
|
||||
```
|
||||
|
@ -1,19 +1,6 @@
|
||||
version: '3.1'
|
||||
|
||||
services:
|
||||
|
||||
# db:
|
||||
# image: mariadb
|
||||
# restart: always
|
||||
# environment:
|
||||
# MYSQL_ROOT_PASSWORD: photos
|
||||
# PHOTOS_DB_USER: photos
|
||||
# PHOTOS_DB_PASSWD: ph0t0z
|
||||
# PHOTOS_DB: photos
|
||||
# volumes:
|
||||
# - ${PWD}/db:/var/lib/mysql
|
||||
# - ./init.sql:/data/application/init.sql
|
||||
|
||||
photos:
|
||||
build: .
|
||||
image: photos:latest
|
||||
@ -24,7 +11,7 @@ services:
|
||||
ports:
|
||||
- 8134:8123
|
||||
volumes:
|
||||
- ${PWD}/pictures:/photos
|
||||
- /multimedia/Dad:/pictures
|
||||
- ${PWD}/db:/db
|
||||
- ${PWD}:/website
|
||||
- ${PWD}/models:/root/.deepface
|
||||
|
@ -31,12 +31,12 @@
|
||||
"bootstrap": "^4.4.1",
|
||||
"concurrently": "^5.1.0",
|
||||
"config": "^3.3.8",
|
||||
"connect-sqlite3": "^0.9.13",
|
||||
"cookie-parser": "^1.4.4",
|
||||
"core-js": "^3.2.1",
|
||||
"exif-reader": "github:paras20xx/exif-reader",
|
||||
"express": "^4.18.2",
|
||||
"express-session": "^1.17.0",
|
||||
"face-api.js": "^0.22.0",
|
||||
"googleapis": "^110.0.0",
|
||||
"handlebars": "^4.7.7",
|
||||
"ldapauth-fork": "=4.2.0",
|
||||
@ -52,7 +52,8 @@
|
||||
"react-bootstrap": "^1.0.0-beta.16",
|
||||
"react-date-range": "^1.0.0-beta",
|
||||
"react-router-dom": "^5.0.1",
|
||||
"sequelize": "^6.0"
|
||||
"sequelize": "^6.0",
|
||||
"sharp": "^0.31.3"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@babel/cli": "^7.20.7",
|
||||
|
@ -66,6 +66,10 @@ function init() {
|
||||
type: Sequelize.INTEGER,
|
||||
defaultValue: -1 /* not scanned */
|
||||
},
|
||||
faceScannedWith: {
|
||||
type: Sequelize.INTEGER,
|
||||
defaultValue: 0
|
||||
},
|
||||
duplicate: {
|
||||
type: Sequelize.BOOLEAN,
|
||||
defaultValue: 0
|
||||
@ -125,6 +129,14 @@ function init() {
|
||||
key: 'id',
|
||||
}
|
||||
},
|
||||
scanVersion: {
|
||||
type: Sequelize.INTEGER,
|
||||
/*
|
||||
* 0 - original scan type
|
||||
* 1 - Retinaface w/ 0.25% face increase
|
||||
*/
|
||||
defaultValue: 0
|
||||
},
|
||||
identityDistance: { /* How far are markers from identity match? */
|
||||
type: Sequelize.DOUBLE,
|
||||
defaultValue: -1.0
|
||||
@ -159,6 +171,10 @@ function init() {
|
||||
key: 'id',
|
||||
}
|
||||
},
|
||||
model: {
|
||||
type: Sequelize.STRING,
|
||||
defaultValue: ""
|
||||
},
|
||||
descriptors: Sequelize.BLOB
|
||||
}, {
|
||||
timestamps: false
|
||||
|
287
server/detect.py
Normal file
287
server/detect.py
Normal file
@ -0,0 +1,287 @@
|
||||
import sys
|
||||
import json
|
||||
import os
|
||||
import piexif
|
||||
import sqlite3
|
||||
from sqlite3 import Error
|
||||
from PIL import Image
|
||||
from deepface import DeepFace
|
||||
from retinaface import RetinaFace
|
||||
import numpy as np
|
||||
|
||||
class NpEncoder(json.JSONEncoder):
    """JSON encoder that converts NumPy scalars/arrays to native Python types.

    ``json.dumps(..., cls=NpEncoder)`` can then serialize the dicts produced
    by RetinaFace/DeepFace, which contain np.int*/np.float*/np.ndarray values.
    """
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        if isinstance(obj, np.floating):
            return float(obj)
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        # The original fell off the end and implicitly returned None, which
        # silently serialized *any* unknown type as JSON null.  Defer to the
        # base class instead so unsupported types raise TypeError loudly.
        return super().default(obj)
|
||||
|
||||
# Candidate deepface model names; only 'ArcFace' is used below, the rest are
# kept for reference/experimentation.
models = ["VGG-Face", "Facenet", "Facenet512", "OpenFace", "DeepFace", "DeepID", "ArcFace", "Dlib", "SFace"]

# Build the embedding model once at import time so every face shares it.
model = DeepFace.build_model('ArcFace')
# Target (width, height) that face crops are resized to before embedding.
# NOTE(review): DeepFace.functions.find_input_shape() was removed in newer
# deepface releases -- confirm the installed version still provides it.
input_shape = DeepFace.functions.find_input_shape(model)
|
||||
|
||||
|
||||
# Adapted from DeepFace
|
||||
# https://github.com/serengil/deepface/blob/master/deepface/commons/functions.py
|
||||
#
|
||||
# Modified to use bicubic resampling and clip expansion, as well as to
|
||||
# take a PIL Image instead of numpy array
|
||||
def alignment_procedure(img, left_eye, right_eye):
    """
    Given left and right eye coordinates in image, rotate around point
    between eyes such that eyes are horizontal

    :param img: PIL Image (not np.array)
    :param left_eye: (x, y) of the eye appearing on the left (right eye of person)
    :param right_eye: (x, y) of the eye appearing on the right (left eye of person)
    :return: rotated PIL Image
    """
    dY = right_eye[1] - left_eye[1]
    dX = right_eye[0] - left_eye[0]
    # BUGFIX: the correct NumPy spelling is np.arctan2 -- np.atan2 does not
    # exist before NumPy 2.0, and this project pins numpy<1.24, so the
    # original raised AttributeError here.  Negate to undo the eye-line tilt.
    rotation = -np.arctan2(dY, dX)

    # BUGFIX: PIL's Image.rotate() takes the angle in *degrees* (counter-
    # clockwise).  The original multiplied the radian value by pi, which is
    # neither radians nor degrees; convert properly instead.
    img = img.rotate(
        angle = np.degrees(rotation),
        resample = Image.BICUBIC,
        expand = True)

    return img
|
||||
|
||||
def extract_faces(img, threshold=0.9, model = None, allow_upscaling = True):
    """Detect faces in *img*, align/crop each one, and attach an embedding.

    :param img: decoded image as a numpy array (height x width x channels)
    :param threshold: minimum RetinaFace detection confidence to keep a face
    :param model: optional pre-built RetinaFace detection model.
        NOTE(review): this parameter shadows the module-level ArcFace
        ``model``; the ``model = model`` passed to DeepFace.represent() below
        is therefore None for default calls, so the "pre-built" model is not
        actually reused -- confirm intent.
    :param allow_upscaling: let RetinaFace upscale small images
    :return: the RetinaFace dict keyed by face id, with 'vector' (embedding),
        'face' (bounding box normalized to 0..1) and 'image' (aligned,
        resized crop) added to each entry; whatever detect_faces returned
        (non-dict) when nothing was found
    """
    faces = RetinaFace.detect_faces(img_path = img, threshold = threshold, model = model, allow_upscaling = allow_upscaling)
    #faces = DeepFace.detectFace(img_path = img, target_size = (224, 224), detector_backend = 'retinaface')

    # Re-implementation of 'extract_faces' with the addition of keeping a
    # copy of the face image for caching on disk
    if type(faces) == dict:
        for key in faces:
            identity = faces[key]
            facial_area = identity["facial_area"]  # [x1, y1, x2, y2] in pixels

            width = facial_area[2] - facial_area[0]
            height = facial_area[3] - facial_area[1]
            # (x, y) is the center of the detected box
            x = facial_area[0] + width * 0.5
            y = facial_area[1] + height * 0.5

            # Make thumbnail a square crop
            if width > height:
                height = width
            else:
                width = height

            landmarks = identity["landmarks"]
            left_eye = landmarks["left_eye"]
            right_eye = landmarks["right_eye"]
            nose = landmarks["nose"]

            # translate the landmarks to be centered on array
            left_eye[0] -= x
            left_eye[1] -= y
            right_eye[0] -= x
            right_eye[1] -= y
            nose[0] -= x
            nose[1] -= y

            # Grow the crop 25% beyond the detection box (matches the
            # "face increase" described for scanVersion 1 in the DB schema)
            width *= 1.25
            height *= 1.25

            # NOTE(review): clamping the expanded square back to facial_area
            # (rather than the image bounds) cancels most of the 1.25x
            # growth -- confirm intent.
            left = max(round(x - width * 0.5), facial_area[0])
            right = min(round(left + width), facial_area[2])
            top = max(round(y - height * 0.5), facial_area[1])
            bottom = min(round(top + height), facial_area[3])

            facial_img = img[top: bottom, left: right]

            # Eye order is reversed as the routine does them backwards
            aligned = RetinaFace.postprocess.alignment_procedure(facial_img, right_eye, left_eye, nose)

            # Resize the aligned crop to the embedding model's input size
            image = Image.fromarray(aligned)
            image = image.resize(size = input_shape, resample = Image.LANCZOS)
            resized = np.asarray(image)

            identity['vector'] = DeepFace.represent(
                img_path = resized,
                model_name = 'ArcFace',
                model = model, # NOTE(review): the (None) parameter, not the module-level pre-built model
                detector_backend = 'retinaface',
                enforce_detection = False)

            # Bounding box normalized to 0..1 of the full image
            identity["face"] = {
                'top': facial_area[1] / img.shape[0],
                'left': facial_area[0] / img.shape[1],
                'bottom': facial_area[3] / img.shape[0],
                'right': facial_area[2] / img.shape[1]
            }

            identity['image'] = resized #[:, :, ::-1]

    return faces
|
||||
|
||||
#face verification
|
||||
#img_path = sys.argv[1]
|
||||
|
||||
def create_connection(db_file):
    """Open a connection to the SQLite database at *db_file*.

    :param db_file: path to the database file
    :return: sqlite3.Connection on success, otherwise None (error printed)
    """
    try:
        return sqlite3.connect(db_file)
    except Error as exc:
        print(exc)
        return None
|
||||
|
||||
def create_face(conn, face):
    """Insert one detected face row into the ``faces`` table.

    :param conn: open sqlite3 connection
    :param face: dict carrying photoId, scanVersion, faceConfidence and the
                 normalized bounding box (top/left/bottom/right)
    :return: rowid of the newly inserted face
    """
    sql = '''
        INSERT INTO faces(photoId,scanVersion,faceConfidence,top,left,bottom,right)
        VALUES(?,?,?,?,?,?,?)
    '''
    # Bind values in the exact column order of the INSERT statement.
    columns = ('photoId', 'scanVersion', 'faceConfidence',
               'top', 'left', 'bottom', 'right')
    cursor = conn.cursor()
    cursor.execute(sql, tuple(face[name] for name in columns))
    conn.commit()
    return cursor.lastrowid
|
||||
|
||||
def create_face_descriptor(conn, faceId, descriptor):
    """Insert a face-descriptor row into the ``facedescriptors`` table.

    :param conn: open sqlite3 connection
    :param faceId: id of the face row this descriptor belongs to
    :param descriptor: dict with 'model' (str) and 'descriptors' (sequence of
                       floats, e.g. the embedding vector)
    :return: rowid of the newly inserted descriptor
    """
    sql = '''
        INSERT INTO facedescriptors(faceId,model,descriptors)
        VALUES(?,?,?)
    '''
    cur = conn.cursor()
    # BUGFIX: sqlite3 cannot bind a numpy.ndarray directly (only None/int/
    # float/str/bytes are supported), so the original np.array(...) raised
    # sqlite3.InterfaceError.  Serialize the vector to raw float64 bytes for
    # the BLOB column instead (recoverable via np.frombuffer).
    blob = np.asarray(descriptor['descriptors'], dtype=np.float64).tobytes()
    cur.execute(sql, (
        faceId,
        descriptor['model'],
        blob
    ))
    conn.commit()
    return cur.lastrowid
|
||||
|
||||
def update_face_count(conn, photoId, faces):
    """Record how many faces were matched on a photo.

    :param conn: open sqlite3 connection
    :param photoId: id of the photo row to update
    :param faces: number of faces detected (0 when none were found)
    :return: None
    """
    sql = '''
        UPDATE photos SET faces=? WHERE id=?
    '''
    conn.cursor().execute(sql, (faces, photoId))
    conn.commit()
    return None
|
||||
|
||||
|
||||
# Main scan loop: walk every photo the scanner has not yet face-scanned
# (photos.faces == -1), detect/align faces, persist them to the DB and write
# per-face JSON + EXIF-annotated JPEG thumbnails under faces/<id % 100>/.
# NOTE(review): paths are relative ('../db/photos.db', 'faces/...'), so this
# script assumes a specific working directory -- confirm against entrypoint.
base = '/pictures/'  # container mount point that album paths are relative to
conn = create_connection('../db/photos.db')
with conn:
    cur = conn.cursor()
    for row in cur.execute('''
        SELECT photos.id,photos.faces,albums.path,photos.filename FROM photos
        LEFT JOIN albums ON (albums.id=photos.albumId)
        WHERE photos.faces=-1
    '''):
        photoId, photoFaces, albumPath, photoFilename = row
        img_path = f'{base}{albumPath}{photoFilename}'
        print(f'Processing {img_path}')
        # Decode with PIL, normalize mode, then hand numpy pixels to the detector
        img = Image.open(img_path)
        img = img.convert()
        img = np.asarray(img)
        print(img.shape)
        faces = extract_faces(img)
        if faces is None:
            # Nothing detected: record 0 so the photo is not rescanned
            update_face_count(conn, photoId, 0)
            continue
        print(f'Handling {len(faces)} faces')
        for key in faces:
            face = faces[key]
            image = Image.fromarray(face['image'])

            #face['analysis'] = DeepFace.analyze(img_path = img, actions = ['age', 'gender', 'race', 'emotion'], enforce_detection = False)
            #face['analysis'] = DeepFace.analyze(img, actions = ['emotion'])

            # TODO: Add additional meta-data allowing back referencing to original
            # photo
            face['version'] = 1 # version 1 doesn't add much...

            # Serialize everything except the pixel data and raw detector output
            data = {k: face[k] for k in set(list(face.keys())) - set(['image', 'facial_area', 'landmarks'])}
            json_str = json.dumps(data, ensure_ascii=False, indent=2, cls=NpEncoder)

            faceId = create_face(conn, {
                'photoId': photoId,
                'scanVersion': face['version'],
                'faceConfidence': face['score'],
                'top': face['face']['top'],
                'left': face['face']['left'],
                'bottom': face['face']['bottom'],
                'right': face['face']['right'],
            })

            faceDescriptorId = create_face_descriptor(conn, faceId, {
                'model': 'RetinaFace',
                'descriptors': face['vector']
            })

            # Shard face artifacts across 100 directories by id
            path = f'faces/{faceId % 100}'
            try:
                os.mkdir(path)
            except FileExistsError:
                pass

            with open(f'{path}/{faceId}.json', 'w', encoding = 'utf-8') as f:
                f.write(json_str)

            # Encode this data into the JPG as Exif
            exif_ifd = {piexif.ExifIFD.UserComment: json_str.encode()}
            exif_dict = {"0th": {}, "Exif": exif_ifd, "1st": {},
                         "thumbnail": None, "GPS": {}}
            image.save(f'{path}/{faceId}.jpg', exif = piexif.dump(exif_dict))

            #df = DeepFace.find(img, db_path = '/db')
            #print(df.head())

        # Mark the photo scanned with the number of faces found
        update_face_count(conn, photoId, len(faces))

#img2_path = sys.argv[2]
#print("image 1: ", img1_path);
#print("image 2: ", img2_path);
#result = DeepFace.verify(img1_path = img1_path, img2_path = img2_path, #model_name = models[1])
#print("result: ", result)

#face recognition
#df = DeepFace.find(img_path = img1_path, db_path = "./db/deepface", model_name = models[1])

#print("df: ", df)
|
@ -232,7 +232,7 @@ function processBlock(items) {
|
||||
var src = picturesPath + path + file,
|
||||
image = sharp(src);
|
||||
|
||||
return image.limitInputPixels(1073741824).metadata().then(function(metadata) {
|
||||
return image/*.limitInputPixels(1073741824)*/.metadata().then(function(metadata) {
|
||||
if (metadata.exif) {
|
||||
metadata.exif = exif(metadata.exif);
|
||||
delete metadata.exif.thumbnail;
|
||||
@ -494,8 +494,8 @@ function findOrCreateDBAlbum(transaction, album) {
|
||||
return photoDB.sequelize.query("INSERT INTO albums (path,parentId,name) VALUES(:path,:parentId,:name)", {
|
||||
replacements: album,
|
||||
transaction: transaction
|
||||
}).spread(function(results, metadata) {
|
||||
return metadata.lastID;
|
||||
}).then(array => {
|
||||
return array[1].lastID;
|
||||
});
|
||||
} else {
|
||||
return results[0].id;
|
||||
@ -506,7 +506,7 @@ function findOrCreateDBAlbum(transaction, album) {
|
||||
});
|
||||
}
|
||||
|
||||
function findOrUpdateDBAsset(transaction, asset) {
|
||||
const findOrUpdateDBAsset = async (transaction, asset) => {
|
||||
if (!asset.album || !asset.album.id) {
|
||||
let error = "Asset being processed without an album";
|
||||
setStatus(error, "warn");
|
||||
@ -515,37 +515,38 @@ function findOrUpdateDBAsset(transaction, asset) {
|
||||
|
||||
asset.albumId = asset.album.id;
|
||||
|
||||
return photoDB.sequelize.query(
|
||||
const results = await photoDB.sequelize.query(
|
||||
"SELECT id,DATETIME(scanned) AS scanned,size,DATETIME(modified) AS modified " +
|
||||
"FROM photos " +
|
||||
"WHERE albumId=:albumId AND filename=:filename", {
|
||||
replacements: asset,
|
||||
type: photoDB.sequelize.QueryTypes.SELECT
|
||||
}).then(function(results) {
|
||||
if (results.length == 0) {
|
||||
return photoDB.sequelize.query("INSERT INTO photos " +
|
||||
"(albumId,filename,name,size) VALUES(:albumId,:filename,:name,:size)", {
|
||||
replacements: asset,
|
||||
transaction: transaction
|
||||
}).spread(function(results, metadata) {
|
||||
asset.id = metadata.lastID;
|
||||
});
|
||||
}
|
||||
|
||||
asset.id = results[0].id;
|
||||
asset.scanned = new Date(results[0].scanned);
|
||||
asset.modified = new Date(results[0].modified);
|
||||
|
||||
/* If the size on disk changed, update the size entry in the DB. This shouldn't happen in
|
||||
* production unless someone modifies the file, then re-stamps the modified time */
|
||||
if (asset.size != results[0].size) {
|
||||
setStatus("File was modified with time-restamp (HASH regeneration will be queued): " + asset.filename);
|
||||
delete asset.scanned;
|
||||
delete asset.modified;
|
||||
}
|
||||
}).then(function() {
|
||||
return asset;
|
||||
});
|
||||
|
||||
if (results.length == 0) {
|
||||
return await photoDB.sequelize.query("INSERT INTO photos " +
|
||||
"(albumId,filename,name,size) VALUES(:albumId,:filename,:name,:size)", {
|
||||
replacements: asset,
|
||||
transaction: transaction
|
||||
}).then(array => {
|
||||
asset.id = array[1].lastID;
|
||||
return asset;
|
||||
});
|
||||
}
|
||||
|
||||
asset.id = results[0].id;
|
||||
asset.scanned = new Date(results[0].scanned);
|
||||
asset.modified = new Date(results[0].modified);
|
||||
|
||||
/* If the size on disk changed, update the size entry in the DB. This shouldn't happen in
|
||||
* production unless someone modifies the file, then re-stamps the modified time */
|
||||
if (asset.size != results[0].size) {
|
||||
setStatus("File was modified with time-restamp (HASH regeneration will be queued): " + asset.filename);
|
||||
delete asset.scanned;
|
||||
delete asset.modified;
|
||||
}
|
||||
|
||||
return asset;
|
||||
}
|
||||
|
||||
function computeHash(filepath) {
|
||||
|
127
test/detect.py
127
test/detect.py
@ -1,127 +0,0 @@
|
||||
import sys
|
||||
import json
|
||||
import piexif
|
||||
from PIL import Image
|
||||
from deepface import DeepFace
|
||||
from retinaface import RetinaFace
|
||||
import numpy as np
|
||||
|
||||
class NpEncoder(json.JSONEncoder):
    """JSON encoder mapping NumPy scalars and arrays to native Python types."""

    def default(self, obj):
        # Convert each NumPy family to its JSON-serializable equivalent.
        conversions = (
            (np.integer, int),
            (np.floating, float),
            (np.ndarray, lambda arr: arr.tolist()),
        )
        for np_kind, convert in conversions:
            if isinstance(obj, np_kind):
                return convert(obj)
        # Intentionally falls through (implicit None) for unhandled types,
        # matching the original behavior: json then serializes them as null.
|
||||
|
||||
models = ["VGG-Face", "Facenet", "Facenet512", "OpenFace", "DeepFace", "DeepID", "ArcFace", "Dlib", "SFace"]
|
||||
|
||||
def extract_faces(img_path, threshold=0.9, model = None, align = True, allow_upscaling = True):
    """Detect faces in the image at *img_path*; optionally align each crop.

    :param img_path: image path (anything RetinaFace.get_image accepts)
    :param threshold: minimum detection confidence to keep a face
    :param model: optional pre-built RetinaFace detection model
    :param align: when True, square-crop around the face and rotate so the
        eyes are horizontal; when False, use the raw detection box
    :param allow_upscaling: let RetinaFace upscale small images
    :return: the RetinaFace dict keyed by face id, with 'vector' (ArcFace
        embedding) and 'image' (channel-reversed crop) added per entry
    """
    img = RetinaFace.get_image(img_path)
    faces = RetinaFace.detect_faces(img_path = img, threshold = threshold, model = model, allow_upscaling = allow_upscaling)
    #faces = DeepFace.detectFace(img_path = img, target_size = (224, 224), detector_backend = 'retinaface')

    # Re-implementation of 'extract_faces' with the addition of keeping a
    # copy of the face image for caching on disk
    if type(faces) == dict:
        for key in faces:
            print(key)
            identity = faces[key]
            facial_area = identity["facial_area"]  # [x1, y1, x2, y2] in pixels

            if align == True:
                width = facial_area[2] - facial_area[0]
                height = facial_area[3] - facial_area[1]
                # (x, y) is the center of the detected box
                x = facial_area[0] + width * 0.5
                y = facial_area[1] + height * 0.5

                # Make thumbnail a square crop
                if width > height:
                    height = width
                else:
                    width = height

                landmarks = identity["landmarks"]
                left_eye = landmarks["left_eye"]
                right_eye = landmarks["right_eye"]
                nose = landmarks["nose"]

                # translate the landmarks to be centered on array
                left_eye[0] -= x
                left_eye[1] -= y
                right_eye[0] -= x
                right_eye[1] -= y
                nose[0] -= x
                nose[1] -= y

                # Grow the crop 25% beyond the detection box
                width *= 1.25
                height *= 1.25

                # NOTE(review): these bounds are not clamped to the image,
                # so the numpy slice may be silently truncated at the edges.
                left = round(x - width * 0.5)
                right = round(left + width)
                top = round(y - height * 0.5)
                bottom = round(top + height)

                facial_img = img[top: bottom, left: right]

                # Eye order is reversed as the routine does them backwards
                aligned = RetinaFace.postprocess.alignment_procedure(facial_img, right_eye, left_eye, nose)

                image = Image.fromarray(aligned)
                image = image.resize(size = (224,224), resample = Image.LANCZOS)
                resized = np.asarray(image)
            else:
                # Unaligned path: raw bounding-box crop, no resize
                facial_img = img[
                    facial_area[1]: facial_area[3],
                    facial_area[0]: facial_area[2]
                ]
                resized = facial_img

            identity['vector'] = DeepFace.represent(img_path = resized, model_name = 'ArcFace', detector_backend = 'opencv', enforce_detection = False)

            # Reverse the channel order (BGR <-> RGB) for the cached crop
            identity['image'] = resized[:, :, ::-1]

    return faces
|
||||
|
||||
#face verification
# Standalone test driver: extract faces from the image named on the command
# line and write <key>.json plus an EXIF-annotated <key>.jpg per face.
img1_path = sys.argv[1]

faces = extract_faces(img1_path)
for key in faces:
    face = faces[key]
    print(f'Face {key}...')
    image = Image.fromarray(face['image'])

    #face['analysis'] = DeepFace.analyze(img_path = img, actions = ['age', 'gender', 'race', 'emotion'], enforce_detection = False)
    #face['analysis'] = DeepFace.analyze(img, actions = ['emotion'])

    # TODO: Add additional meta-data allowing back referencing to original
    # photo
    face['version'] = 1 # version 1 doesn't add much...

    # Serialize everything except the pixel data
    data = {k: face[k] for k in set(list(face.keys())) - set(['image'])}
    json_str = json.dumps(data, ensure_ascii=False, indent=2, cls=NpEncoder)

    with open(f'{key}.json', 'w', encoding = 'utf-8') as f:
        f.write(json_str)

    # Encode this data into the JPG as Exif
    exif_ifd = {piexif.ExifIFD.UserComment: json_str.encode()}
    exif_dict = {"0th": {}, "Exif": exif_ifd, "1st": {},
                 "thumbnail": None, "GPS": {}}
    image.save(f'{key}.jpg', exif = piexif.dump(exif_dict))

    #df = DeepFace.find(img, db_path = '/db')
    #print(df.head())

#img2_path = sys.argv[2]
#print("image 1: ", img1_path);
#print("image 2: ", img2_path);
#result = DeepFace.verify(img1_path = img1_path, img2_path = img2_path, #model_name = models[1])
#print("result: ", result)

#face recognition
#df = DeepFace.find(img_path = img1_path, db_path = "./db/deepface", model_name = models[1])

#print("df: ", df)
|
Loading…
x
Reference in New Issue
Block a user