clean up main.cpp

parent 43de93eb3a
commit 7d0760cc9f
@@ -9,6 +9,6 @@ find_package( glm REQUIRED )
 find_package( FreeGLUT REQUIRED )
 include_directories( ${OpenCV_INCLUDE_DIRS} )
 include_directories( ${PROJECT_SOURCE_DIR}/src )
-add_executable( fc2d src/main.cpp src/graphics.cpp src/modelpart.cpp )
+add_executable( fc2d src/main.cpp src/graphics.cpp src/modelpart.cpp src/cv.cpp )
 target_link_libraries( fc2d ${OpenCV_LIBS} -lOpenGL -lglut -lGLEW )
 set( CMAKE_BUILD_TYPE Debug )
src/cv.cpp (new file, 83 lines)
@@ -0,0 +1,83 @@
#include <opencv2/opencv.hpp>
#include <opencv2/face.hpp>

#include <graphics.hpp>

cv::Ptr<cv::face::Facemark> facemark;
cv::CascadeClassifier faceDetector;
cv::VideoCapture vid;
cv::Mat frame, gray, small;

void initCV() {
	//TODO: switch to DNN face detection
	faceDetector = cv::CascadeClassifier ("haarcascade_frontalface_alt2.xml");

	facemark = cv::face::FacemarkLBF::create();
	facemark->loadModel ("lbfmodel.yaml");

	vid = cv::VideoCapture (0);
}

//process image and send controls to graphics
void cvFrame() {
	vid.read(frame);

	cv::cvtColor (frame, gray, cv::COLOR_BGR2GRAY);
	//downsample image for face detection, works too slow on full res
	cv::pyrDown (gray, small);
	cv::pyrDown (small, small);

	std::vector<cv::Rect> faces;
	faceDetector.detectMultiScale(small, faces);

	//get biggest face
	int biggestFace = 0;
	int biggestArea = 0;
	for (int i = 0; i < faces.size(); i++) {
		//convert face region to full res, because we perform facemark on full res
		faces[i] = cv::Rect (faces[i].x * 4, faces[i].y * 4, faces[i].width * 4, faces[i].height * 4);

		int iArea = faces[i].area();
		if (iArea > biggestArea) {
			biggestFace = i;
			biggestArea = iArea;
		}

		cv::rectangle (frame, faces[i], cv::Scalar (255, 255, 0));
	}

	std::vector<std::vector<cv::Point2f>> landmarks;

	if (facemark->fit (frame, faces, landmarks)) {
		//for (int i = 0; i < landmarks[biggestFace].size(); i++) {
		//	cv::circle (frame, landmarks[biggestFace][i], 2, cv::Scalar (255, 255, 255));
		//}
		cv::circle(frame, cv::Point2f(
			(landmarks[biggestFace][2].x + landmarks[biggestFace][14].x) / 2,
			(landmarks[biggestFace][2].y + landmarks[biggestFace][14].y) / 2
					), 6, cv::Scalar(0, 0, 255));
		cv::circle (frame, landmarks[biggestFace][30], 6, cv::Scalar (0, 255, 255));
		cv::circle (frame, landmarks[biggestFace][66], 3, cv::Scalar (0, 255, 0));
		cv::circle (frame, landmarks[biggestFace][62], 3, cv::Scalar (0, 255, 0));

		//send control information to graphics
		float faceSize = landmarks[biggestFace][14].x - landmarks[biggestFace][2].x;
		updateModel(glm::vec2(
			(landmarks[biggestFace][2].x + landmarks[biggestFace][14].x) / 2
				* 2 / (float)frame.cols - 1,
			(landmarks[biggestFace][2].y + landmarks[biggestFace][14].y) / 2
				* 2 / (float)frame.rows - 1
			),
			glm::vec2(
			landmarks[biggestFace][30].x * 2 / (float)frame.cols - 1,
			landmarks[biggestFace][30].y * 2 / (float)frame.rows - 1
			),
			faceSize * 6 / (float)frame.cols,
			(landmarks[biggestFace][66].y - landmarks[biggestFace][62].y) / faceSize > 0.04f);
	}
}

void cvShowFrame() {
	cv::imshow("Video Input", frame);
	cv::waitKey(32);
}
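
Note on the TODO above: a minimal sketch of what DNN-based face detection could look like with OpenCV's dnn module follows. It is not part of this commit; the res10 SSD model file names, the confidence threshold, and the function names are assumptions.

#include <opencv2/opencv.hpp>
#include <opencv2/dnn.hpp>

// Hypothetical replacement for the Haar cascade; not part of this commit.
static cv::dnn::Net faceNet;

void initDnnFaceDetector() {
	// res10 SSD face model distributed with the OpenCV samples (assumed file names)
	faceNet = cv::dnn::readNetFromCaffe("deploy.prototxt",
	                                    "res10_300x300_ssd_iter_140000.caffemodel");
}

std::vector<cv::Rect> detectFacesDNN(const cv::Mat &frame, float confThreshold = 0.5f) {
	// the network expects a 300x300 BGR blob with these mean values subtracted
	cv::Mat blob = cv::dnn::blobFromImage(frame, 1.0, cv::Size(300, 300),
	                                      cv::Scalar(104.0, 177.0, 123.0));
	faceNet.setInput(blob);
	cv::Mat out = faceNet.forward();	// output shape: 1 x 1 x N x 7
	cv::Mat det(out.size[2], out.size[3], CV_32F, out.ptr<float>());

	std::vector<cv::Rect> faces;
	for (int i = 0; i < det.rows; i++) {
		if (det.at<float>(i, 2) < confThreshold) continue;
		// boxes come back normalized to [0, 1] on the full-resolution frame,
		// so the pyrDown / x4 rescaling needed for the cascade would go away
		cv::Point p1((int)(det.at<float>(i, 3) * frame.cols), (int)(det.at<float>(i, 4) * frame.rows));
		cv::Point p2((int)(det.at<float>(i, 5) * frame.cols), (int)(det.at<float>(i, 6) * frame.rows));
		faces.push_back(cv::Rect(p1, p2));
	}
	return faces;
}

cvFrame() could then call detectFacesDNN(frame) in place of faceDetector.detectMultiScale(small, faces) and drop the two pyrDown calls and the x4 rescaling.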
src/cv.hpp (new file, 10 lines)
@@ -0,0 +1,10 @@
#ifndef CV_HPP
#define CV_HPP

void initCV();

void cvFrame();

void cvShowFrame();

#endif
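
For context, the graphics-side interface that cv.cpp calls into lives in src/graphics.hpp, which this commit does not show. The sketch below is inferred purely from the calls in cvFrame(); the parameter names and exact types are guesses, not the actual header.

#include <glm/glm.hpp>

void initGraphics();
void graphicsFrame();

// Called once per captured frame from cvFrame() (inferred signature):
//   facePos   - midpoint of landmarks 2 and 14, mapped to [-1, 1] screen coordinates
//   nosePos   - landmark 30 (nose tip), same coordinate system
//   faceScale - face width relative to the frame width, scaled by 6
//   mouthOpen - true when the gap between landmarks 62 and 66 exceeds 4% of the face width
void updateModel(glm::vec2 facePos, glm::vec2 nosePos, float faceScale, bool mouthOpen);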
src/main.cpp (76 changed lines)
@@ -1,81 +1,17 @@
-#include <opencv2/opencv.hpp>
-#include <opencv2/face.hpp>
-
 #include <graphics.hpp>
+#include <cv.hpp>
 
 #include <iostream>
 
 int main () {
 	initGraphics();
+	initCV();
 
-	//TODO: switch to DNN face detection
-	cv::CascadeClassifier faceDetector ("haarcascade_frontalface_alt2.xml");
+	while (true) {
+		cvFrame();
 
-	cv::Ptr<cv::face::Facemark> facemark = cv::face::FacemarkLBF::create();
-	facemark->loadModel ("lbfmodel.yaml");
+		graphicsFrame();
 
-	cv::VideoCapture vid (0);
-	
-	cv::Mat frame, gray, small;
-
-	while (vid.read(frame)) {
-		cv::cvtColor (frame, gray, cv::COLOR_BGR2GRAY);
-		//downsample image for face detection, works too slow on full res
-		cv::pyrDown (gray, small);
-		cv::pyrDown (small, small);
-
-		std::vector<cv::Rect> faces;
-		faceDetector.detectMultiScale(small, faces);
-
-		//get biggest face
-		int biggestFace = 0;
-		int biggestArea = 0;
-		for (int i = 0; i < faces.size(); i++) {
-			//convert face region to full res, because we perform facemark on full res
-			faces[i] = cv::Rect (faces[i].x * 4, faces[i].y * 4, faces[i].width * 4, faces[i].height * 4);
-
-			int iArea = faces[i].area();
-			if (iArea > biggestArea) {
-				biggestFace = i;
-				biggestArea = iArea;
-			}
-
-			cv::rectangle (frame, faces[i], cv::Scalar (255, 255, 0));
-		}
-
-		std::vector<std::vector<cv::Point2f>> landmarks;
-
-		if (facemark->fit (frame, faces, landmarks)) {
-			//for (int i = 0; i < landmarks[biggestFace].size(); i++) {
-			//	cv::circle (frame, landmarks[biggestFace][i], 2, cv::Scalar (255, 255, 255));
-			//}
-			cv::circle(frame, cv::Point2f(
-				(landmarks[biggestFace][2].x + landmarks[biggestFace][14].x) / 2,
-				(landmarks[biggestFace][2].y + landmarks[biggestFace][14].y) / 2
-						), 6, cv::Scalar(0, 0, 255));
-			cv::circle (frame, landmarks[biggestFace][30], 6, cv::Scalar (0, 255, 255));
-			cv::circle (frame, landmarks[biggestFace][66], 3, cv::Scalar (0, 255, 0));
-			cv::circle (frame, landmarks[biggestFace][62], 3, cv::Scalar (0, 255, 0));
-
-			//send control information to graphics
-			float faceSize = landmarks[biggestFace][14].x - landmarks[biggestFace][2].x;
-			updateModel(glm::vec2(
-				(landmarks[biggestFace][2].x + landmarks[biggestFace][14].x) / 2
-					* 2 / (float)frame.cols - 1,
-				(landmarks[biggestFace][2].y + landmarks[biggestFace][14].y) / 2
-					* 2 / (float)frame.rows - 1
-				),
-				glm::vec2(
-				landmarks[biggestFace][30].x * 2 / (float)frame.cols - 1,
-				landmarks[biggestFace][30].y * 2 / (float)frame.rows - 1
-				),
-				faceSize * 6 / (float)frame.cols,
-				(landmarks[biggestFace][66].y - landmarks[biggestFace][62].y) / faceSize > 0.04f);
-		}
-
-		graphicsFrame ();
-
-		cv::imshow ("Video Input", frame);
-		cv::waitKey (33);
+		cvShowFrame();
 	}
 }
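
Assembling the added and context lines of the hunk above gives the post-commit main.cpp, reconstructed here for readability (17 lines, matching the hunk header):

#include <graphics.hpp>
#include <cv.hpp>

#include <iostream>

int main () {
	initGraphics();
	initCV();

	while (true) {
		cvFrame();

		graphicsFrame();

		cvShowFrame();
	}
}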