// facecam2d/src/cv.cpp

#include <opencv2/opencv.hpp>
#include <opencv2/face.hpp>
#include <iostream>
#include <graphics.hpp>
#include <paths.hpp>
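
//shared OpenCV state: face detector, landmark model, camera capture, and working images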
cv::Ptr<cv::face::Facemark> facemark;
cv::CascadeClassifier faceDetector;
cv::VideoCapture vid;
cv::Mat frame, gray, small;
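//load the face detector cascade and LBF landmark model, then open the default camera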
void initCV() {
    //TODO: switch to DNN face detection
    faceDetector = cv::CascadeClassifier (resolvePath("cvdata/haarcascade_frontalface_alt2.xml"));

    facemark = cv::face::FacemarkLBF::create();
    facemark->loadModel (resolvePath("cvdata/lbfmodel.yaml"));

    vid = cv::VideoCapture (0);
}
//process image and send controls to graphics
void cvFrame() {
    vid.read(frame);
    cv::cvtColor (frame, gray, cv::COLOR_BGR2GRAY);
    //downsample the image for face detection; it runs too slowly at full resolution
    cv::pyrDown (gray, small);
    cv::pyrDown (small, small);
    std::vector<cv::Rect> faces;
    faceDetector.detectMultiScale(small, faces);
    //find the biggest detected face
    int biggestFace = 0;
    int biggestArea = 0;
    for (int i = 0; i < (int)faces.size(); i++) {
        //scale the face rect back to full resolution, since facemark runs on the full-res frame
        faces[i] = cv::Rect (faces[i].x * 4, faces[i].y * 4, faces[i].width * 4, faces[i].height * 4);
        int iArea = faces[i].area();
        if (iArea > biggestArea) {
            biggestFace = i;
            biggestArea = iArea;
        }
        cv::rectangle (frame, faces[i], cv::Scalar (255, 255, 0));
    }
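    //landmark indices below assume the 68-point annotation scheme used by lbfmodel.yaml:
    //2 and 14 are the left/right jaw points, 30 is the nose tip, 62/66 are the inner-lip top/bottom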
    std::vector<std::vector<cv::Point2f>> landmarks;
    if (facemark->fit (frame, faces, landmarks)) {
        //for (int i = 0; i < landmarks[biggestFace].size(); i++) {
        //    cv::circle (frame, landmarks[biggestFace][i], 2, cv::Scalar (255, 255, 255));
        //}
        cv::circle(frame, cv::Point2f(
            (landmarks[biggestFace][2].x + landmarks[biggestFace][14].x) / 2,
            (landmarks[biggestFace][2].y + landmarks[biggestFace][14].y) / 2
        ), 6, cv::Scalar(0, 0, 255));
        cv::circle (frame, landmarks[biggestFace][30], 6, cv::Scalar (0, 255, 255));
        cv::circle (frame, landmarks[biggestFace][66], 3, cv::Scalar (0, 255, 0));
        cv::circle (frame, landmarks[biggestFace][62], 3, cv::Scalar (0, 255, 0));
        //send control information to graphics
        float faceSize = landmarks[biggestFace][14].x - landmarks[biggestFace][2].x;
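        //positions below are normalized from pixel coordinates to [-1, 1] for the renderer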
        updateModel(
            //head position (midpoint between the jaw points)
            glm::vec2(
                (landmarks[biggestFace][2].x + landmarks[biggestFace][14].x) / 2
                    * 2 / (float)frame.cols - 1,
                (landmarks[biggestFace][2].y + landmarks[biggestFace][14].y) / 2
                    * 2 / (float)frame.rows - 1
            ),
            //face position (nose tip)
            glm::vec2(
                landmarks[biggestFace][30].x * 2 / (float)frame.cols - 1,
                landmarks[biggestFace][30].y * 2 / (float)frame.rows - 1
            ),
            //rotation
            atanf((float)(landmarks[biggestFace][14].y - landmarks[biggestFace][2].y) /
                  (float)(landmarks[biggestFace][2].x - landmarks[biggestFace][14].x)),
            //scale
            faceSize * 6 / (float)frame.cols,
            //mouth open/closed state
            (landmarks[biggestFace][66].y - landmarks[biggestFace][62].y) / faceSize > 0.04f);
    }
}
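//show the annotated camera frame; waitKey also services the HighGUI event loop (~30 fps cap)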
void cvShowFrame() {
    cv::imshow("Video Input", frame);
    cv::waitKey(32);
}