Align photos before descriptor extraction
Signed-off-by: James Ketrenos <james_gitlab@ketrenos.com>
This commit is contained in:
parent 4f7b677185
commit 0799e214f5
@@ -43,6 +43,53 @@ let photoDB = null;

console.log("Loading pictures out of: " + picturesPath);

function alignFromLandmarks(image, landmarks) {
    const faceMargin = 0.3,
        width = 256, height = 256,
        dY = landmarks._positions[45]._y - landmarks._positions[36]._y,
        dX = landmarks._positions[45]._x - landmarks._positions[36]._x,
        mid = {
            x: landmarks._positions[36]._x + 0.5 * dX,
            y: landmarks._positions[36]._y + 0.5 * dY
        },
        rotation = -Math.atan2(dY, dX),
        cosRotation = Math.cos(rotation),
        sinRotation = Math.sin(rotation),
        eyeDistance = Math.sqrt(dY * dY + dX * dX),
        scale = width * (1.0 - 2. * faceMargin) / eyeDistance,
        canvas = createCanvas(width, height),
        ctx = canvas.getContext("2d");

    const prime = {
        x: mid.x * cosRotation - mid.y * sinRotation,
        y: mid.y * cosRotation + mid.x * sinRotation
    };

    mid.x = prime.x;
    mid.y = prime.y;

    ctx.translate(
        0.5 * width - mid.x * scale,
        0.5 * height - (height * (0.5 - faceMargin)) - mid.y * scale);
    ctx.rotate(rotation);
    ctx.scale(scale, scale);
    ctx.drawImage(image, 0, 0);
    /*
    ctx.strokeStyle = "red";
    ctx.strokeWidth = "1";
    ctx.beginPath();
    landmarks._positions.forEach((point, index) => {
        if (index == 0) {
            ctx.moveTo(point._x, point._y);
        } else {
            ctx.lineTo(point._x, point._y);
        }
    });
    ctx.stroke();
    */
    return canvas;
}

process.stdout.write("Loading DB.");
require("./db/photos").then(function(db) {
    process.stdout.write("done\n");
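For readers skimming the hunk above: alignFromLandmarks builds a 256x256 canvas whose transform rotates, scales, and translates the source photo so that the outer eye corners (indices 36 and 45 of the 68-point landmark set face-api.js returns) end up level, with their midpoint at (width / 2, height * faceMargin). The following is a minimal sketch of that same mapping, useful for sanity-checking the math; the helper name mapThroughAlignment and the sample coordinates are hypothetical, not part of the commit.

/* Sketch only (not in the commit): apply the same affine transform alignFromLandmarks
 * sets up on its canvas to an arbitrary source point. By construction the eye midpoint
 * maps to (width / 2, height * faceMargin) and the eye line becomes horizontal with a
 * length of width * (1 - 2 * faceMargin) pixels (102.4px on the 256x256 crop). */
function mapThroughAlignment(point, leftEye, rightEye, width = 256, height = 256, faceMargin = 0.3) {
    const dX = rightEye.x - leftEye.x,
        dY = rightEye.y - leftEye.y,
        rotation = -Math.atan2(dY, dX),
        cosR = Math.cos(rotation), sinR = Math.sin(rotation),
        scale = width * (1.0 - 2 * faceMargin) / Math.sqrt(dX * dX + dY * dY),
        mid = { x: leftEye.x + 0.5 * dX, y: leftEye.y + 0.5 * dY },
        /* Rotate the eye midpoint exactly as ctx.rotate() will rotate drawn pixels. */
        prime = { x: mid.x * cosR - mid.y * sinR, y: mid.y * cosR + mid.x * sinR };

    /* Composition of the ctx.translate / ctx.rotate / ctx.scale calls applied to a source pixel. */
    return {
        x: scale * (point.x * cosR - point.y * sinR) + 0.5 * width - prime.x * scale,
        y: scale * (point.y * cosR + point.x * sinR) + 0.5 * height - height * (0.5 - faceMargin) - prime.y * scale
    };
}

/* Example: any face's eye midpoint lands at (128, 76.8) on the aligned canvas, up to rounding. */
console.log(mapThroughAlignment({ x: 120, y: 95 }, { x: 100, y: 90 }, { x: 140, y: 100 }));

With faceMargin = 0.3, 30% of the crop width is left on either side of the eye span and the eyes sit 30% of the way down from the top of the crop.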
@@ -82,13 +129,15 @@ require("./db/photos").then(function(db) {
/* Remove any existing face data for this photo */
return photoDB.sequelize.query("SELECT id FROM faces WHERE photoId=:id", {
    replacements: photo,
    type: photoDB.sequelize.QueryTypes.SELECT,
    raw: true
}).then((faces) => {
    /* For each face-id, remove any face-data files, and then remove all the entries
     * from the DB */
    return Promise.map(faces, (face) => {
        return Promise.map([ "-data.json", "-original.png" ], (suffix) => {
            const id = face.id,
                dataPath = faceData + "/" + (id % 100) + "/" + id + suffix;
                dataPath = faceData + (id % 100) + "/" + id + suffix;
            return exists(dataPath).then((result) => {
                if (result) {
                    console.log(`...removing ${dataPath}`);
@@ -108,14 +157,17 @@ require("./db/photos").then(function(db) {
        new faceapi.SsdMobilenetv1Options({
            minConfidence: 0.8
        })
    ).withFaceLandmarks().withFaceDescriptors();
    ).withFaceLandmarks();

    if (detections.length > 0) {
        console.log(`...${detections.length} faces identified in ${photoPath}.`);
    }

    return Promise.map(detections, (face) => {
        const detection = face.detection;
    return Promise.map(detections, async (face) => {
        const detection = face.detection,
            canvas = alignFromLandmarks(image, face.landmarks);
        face.descriptor = await faceapi.computeFaceDescriptor(canvas);

        const width = detection._box._width,
            height = detection._box._height,
            replacements = {
@@ -137,24 +189,14 @@ require("./db/photos").then(function(db) {
return mkdir(path).then(() => {
    const dataPath = `${path}/${id}-data.json`, data = [];
    console.log(`...writing descriptor data to ${dataPath}...`);
    /* Convert from sparse object to dense array */
    for (let i = 0; i < 128; i++) {
        data.push(face.descriptor[i]);
    }
    fs.writeFileSync(dataPath, JSON.stringify(data));
}).then(() => {
    const canvas = createCanvas(200, 200),
        target = `${path}/${id}-original.png`,
        ctx = canvas.getContext('2d'),
        box = face.detection._box,
        aspect = box._width / box._height,
        dx = (aspect > 1.0) ? 200 : (200 * aspect),
        dy = (aspect < 1.0) ? 200 : (200 / aspect);
    ctx.fillStyle = "rgba(0, 0, 0, 0)";
    ctx.fillRect(0, 0, 200, 200);
    ctx.drawImage(image, box._x, box._y, box._width, box._height,
        Math.floor((200 - dx) * 0.5),
        Math.floor((200 - dy) * 0.5), dx, dy);
    console.log(`...writing face crop to ${target}.`);
    const target = `${path}/${id}-original.png`;
    console.log(`...writing aligned face crop to ${target}.`);
    fs.writeFileSync(target, canvas.toBuffer("image/png", {
        quality: 0.95,
        chromaSubsampling: false
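An aside on the "Convert from sparse object to dense array" loops that appear in both files: the descriptor face-api.js produces is a 128-element Float32Array, and JSON.stringify() serializes a typed array as an index-keyed object rather than a list, hence the explicit copy. A shorter equivalent is sketched below; it is not what the commit uses, and face.descriptor and dataPath refer to the surrounding code above.

/* Drop-in equivalent of the 128-iteration copy loop, assuming face.descriptor
 * is a Float32Array (or any array-like of numbers). */
const data = Array.from(face.descriptor);
fs.writeFileSync(dataPath, JSON.stringify(data));   /* "[0.123, -0.04, ...]" */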
@@ -24,6 +24,53 @@ require("./console-line.js"); /* Monkey-patch console.log with line numbers */
const picturesPath = config.get("picturesPath").replace(/\/$/, "") + "/",
    faceData = picturesPath + "face-data/";

function alignFromLandmarks(image, landmarks) {
    const faceMargin = 0.3,
        width = 256, height = 256,
        dY = landmarks._positions[45]._y - landmarks._positions[36]._y,
        dX = landmarks._positions[45]._x - landmarks._positions[36]._x,
        mid = {
            x: landmarks._positions[36]._x + 0.5 * dX,
            y: landmarks._positions[36]._y + 0.5 * dY
        },
        rotation = -Math.atan2(dY, dX),
        cosRotation = Math.cos(rotation),
        sinRotation = Math.sin(rotation),
        eyeDistance = Math.sqrt(dY * dY + dX * dX),
        scale = width * (1.0 - 2. * faceMargin) / eyeDistance,
        canvas = createCanvas(width, height),
        ctx = canvas.getContext("2d");

    const prime = {
        x: mid.x * cosRotation - mid.y * sinRotation,
        y: mid.y * cosRotation + mid.x * sinRotation
    };

    mid.x = prime.x;
    mid.y = prime.y;

    ctx.translate(
        0.5 * width - mid.x * scale,
        0.5 * height - (height * (0.5 - faceMargin)) - mid.y * scale);
    ctx.rotate(rotation);
    ctx.scale(scale, scale);
    ctx.drawImage(image, 0, 0);
    /*
    ctx.strokeStyle = "red";
    ctx.strokeWidth = "1";
    ctx.beginPath();
    landmarks._positions.forEach((point, index) => {
        if (index == 0) {
            ctx.moveTo(point._x, point._y);
        } else {
            ctx.lineTo(point._x, point._y);
        }
    });
    ctx.stroke();
    */
    return canvas;
}

process.stdout.write("Loading DB.");
require("./db/photos").then(function(db) {
    process.stdout.write("done\n");
@@ -65,7 +112,6 @@ require("./db/photos").then(function(db) {
console.log(`Scanning ${args.length} faces.`);
return Promise.map(args, (arg) => {
    const file = arg;

    let id = parseInt(arg);

    let loader;
@@ -94,6 +140,20 @@ require("./db/photos").then(function(db) {

const file = photo.path + photo.filename;
return canvas.loadImage(picturesPath + file).then(async (image) => {
    const detectors = await faceapi.detectAllFaces(image,
        new faceapi.SsdMobilenetv1Options({
            minConfidence: 0.8
        })
    ).withFaceLandmarks();

    detectors.forEach(async (detector) => {
        const canvas = alignFromLandmarks(image, detector.landmarks);
        const descriptor = await faceapi.computeFaceDescriptor(canvas);
        const data = [];
        /* Convert from sparse object to dense array */
        for (let i = 0; i < 128; i++) {
            data.push(descriptor[i]);
        }
const detectors = [ {
    detection: {
        _box: {
@@ -112,17 +172,21 @@ require("./db/photos").then(function(db) {
/* This is a file */
console.log(`Loading ${file}...`);
id = undefined;

loader = canvas.loadImage(picturesPath + file).then(async (image) => {
    const detectors = await faceapi.detectAllFaces(image,
        new faceapi.SsdMobilenetv1Options({
            minConfidence: 0.8
        })
    ).withFaceLandmarks().withFaceDescriptors();
    detectors.forEach((detector) => {
    ).withFaceLandmarks();

    detectors.forEach(async (detector) => {
        const canvas = alignFromLandmarks(image, detector.landmarks);
        const descriptor = await faceapi.computeFaceDescriptor(canvas);
        const data = [];
        /* Convert from sparse object to dense array */
        for (let i = 0; i < 128; i++) {
            data.push(detector.descriptor[i]);
            data.push(descriptor[i]);
        }
        detector.descriptor = data;
    });
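Taking both files together, the shape of the change is: detection no longer chains .withFaceDescriptors(); each detection's landmarks are passed to alignFromLandmarks(), and the descriptor is computed from the aligned 256x256 canvas instead of the raw photo. Below is a condensed sketch of that flow, not taken verbatim from either script; the helper name descriptorsForPhoto is hypothetical, and it assumes the face-api.js models and the node-canvas monkey-patching are already set up as in the existing scripts.

const faceapi = require("face-api.js");
const { loadImage } = require("canvas");

/* Hypothetical helper: detect, align, then extract a dense 128-element descriptor
 * per face, mirroring what the diff above does inside its Promise.map callbacks.
 * alignFromLandmarks is the helper added by this commit. */
async function descriptorsForPhoto(photoPath) {
    const image = await loadImage(photoPath);
    const detections = await faceapi.detectAllFaces(image,
        new faceapi.SsdMobilenetv1Options({ minConfidence: 0.8 })
    ).withFaceLandmarks();

    return Promise.all(detections.map(async (face) => {
        const aligned = alignFromLandmarks(image, face.landmarks);       /* 256x256, eyes level */
        const descriptor = await faceapi.computeFaceDescriptor(aligned); /* Float32Array(128) */
        return Array.from(descriptor);
    }));
}

One caveat, hedged since the surrounding code is not visible in this diff: Array.prototype.forEach does not wait for async callbacks, so in the second script anything that reads detector.descriptor immediately after the detectors.forEach(async ...) block may run before computeFaceDescriptor() resolves; the return Promise.map(detections, async ...) form used in the first script does propagate completion.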