An MFC Program for Single-Person Face Verification from a Camera Feed with OpenCV

Like the previous post, this one presents an MFC program built with OpenCV that verifies a single person's face and shows the recognition result both on the image and in the dialog. The result looks like this:

The confidence field takes the decision threshold and defaults to 70. The camera has to be opened before you can verify or take a photo; before taking photos you can clear the previously captured training pictures, and you can capture several of them for recognition. Displaying images inside the MFC dialog relies on the two files CvvImage.cpp and CvvImage.h, which have been removed from recent OpenCV releases, so you can simply take them from my code.
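
For reference, here is a minimal sketch of how an IplImage can be drawn into the dialog's picture control through CvvImage, the same DrawToHDC call the program below uses; the helper name ShowFrameInPictureControl is only for illustration, while IDC_ShowImage is the picture-control ID from VideoMFCDlg.cpp:

// minimal sketch: draw one camera frame into the IDC_ShowImage picture control
// (assumes CvvImage.h/CvvImage.cpp from the accompanying code are in the project)
#include "CvvImage.h"

void ShowFrameInPictureControl(CWnd* pDlg, IplImage* frame)
{
  CWnd* pPic = pDlg->GetDlgItem(IDC_ShowImage); // the picture control on the dialog
  CDC*  pCtlDC = pPic->GetDC();
  CRect rc;
  pPic->GetClientRect(&rc);

  CvvImage img;
  img.CopyOf(frame, 1);                         // 1 = copy as a color image
  img.DrawToHDC(pCtlDC->GetSafeHdc(), &rc);     // stretch-draw into the control's client area

  pPic->ReleaseDC(pCtlDC);                      // release the DC obtained with GetDC()
}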

Some readers have said that the detection rate of this code is not high, which really comes down to two causes. First, the face detector itself misses faces; this can be reduced by nested detection of the mouth corners, eyes and so on, or, when the background and lighting are fixed, handled with frame differencing. Second, the recognition method itself is limited; to raise the recognition rate you can add face samples taken under different poses and lighting as training data, and if you have time you can draw a guide box during capture so that the user aligns the face before it is recorded, which is exactly what the Samsung face-unlock screen does.
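
As a rough illustration of the frame-differencing idea (useful only when the camera, background and lighting stay fixed), the sketch below builds a binary mask of changed pixels that could be used to gate the Haar detector; the function name DiffMask and the threshold value 30 are illustrative and not part of the program listed below:

// minimal sketch: background subtraction by absolute frame difference,
// written with the same old C API as the code below
IplImage* DiffMask(IplImage* background, IplImage* frame, int thresh)
{
  CvSize sz = cvGetSize(frame);
  IplImage* grayBg  = cvCreateImage(sz, IPL_DEPTH_8U, 1);
  IplImage* grayCur = cvCreateImage(sz, IPL_DEPTH_8U, 1);
  IplImage* mask    = cvCreateImage(sz, IPL_DEPTH_8U, 1);

  cvCvtColor(background, grayBg, CV_BGR2GRAY);             // work in grayscale
  cvCvtColor(frame, grayCur, CV_BGR2GRAY);
  cvAbsDiff(grayBg, grayCur, mask);                        // |background - current frame|
  cvThreshold(mask, mask, thresh, 255, CV_THRESH_BINARY);  // e.g. thresh = 30: keep strong changes only
  cvDilate(mask, mask, 0, 2);                              // close small holes in the mask

  cvReleaseImage(&grayBg);
  cvReleaseImage(&grayCur);
  return mask;                                             // caller releases the mask
}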

The main code is listed below:

VideoMFCDlg.cpp

// VideoMFCDlg.cpp : implementation file
//

#include "stdafx.h"
#include "VideoMFC.h"
#include "VideoMFCDlg.h"
#include "afxdialogex.h"

#ifdef _DEBUG
#define new DEBUG_NEW
#endif

CvCapture* capture;
CRect rect;
CDC *pDC;
HDC hDC;
CWnd *pwnd;
CvVideoWriter* writer = 0;
IplImage *resizeRes;//holds the detected face
IplImage* faceGray;//grayscale copy of the detected face
bool bool_cameOpen = false;//global flag: is the camera open
bool bool_picNum = false;//global flag: are there any training pictures
bool bool_detec_reco = false;//global flag: false = detect only, true = detect and recognize
double dConfidence = 0;//confidence threshold entered in the dialog
int predictedLabel = 100000;

CvMemStorage* storage = 0;
CvHaarClassifierCascade* cascade = 0;
CvHaarClassifierCascade* nested_cascade = 0;
int use_nested_cascade = 0;
const char* cascade_name =
  "../data/haarcascades/haarcascade_frontalface_alt.xml";
const char* nested_cascade_name =
  "../data/haarcascade_eye_tree_eyeglasses.xml";
double scale = 1;
int num_components = 9;
double facethreshold = 9.0;
//cv::Ptr<cv::FaceRecognizer> model = cv::createFisherFaceRecognizer();
cv::Ptr<cv::FaceRecognizer> model = cv::createLBPHFaceRecognizer();//the LBPH method works best for single-face verification
//cv::Ptr<cv::FaceRecognizer> model = cv::createEigenFaceRecognizer();
vector<Mat> images;
vector<int> labels;

IplImage *frame, *frame_copy = 0;
IplImage *image = 0;
const char* scale_opt = "--scale="; // marker for the classifier scale option
int scale_opt_len = (int)strlen(scale_opt);
const char* cascade_opt = "--cascade=";
int cascade_opt_len = (int)strlen(cascade_opt);
const char* nested_cascade_opt = "--nested-cascade";
int nested_cascade_opt_len = (int)strlen(nested_cascade_opt);
int i;
const char* input_name = 0;

// CAboutDlg dialog used for App About
CString strConfidence = "70";
CEdit* pEdtConfidence;
CString strTip = "";
CEdit* pTip;

class CAboutDlg : public CDialogEx
{
public:
 CAboutDlg();

// Dialog Data
 enum { IDD = IDD_ABOUTBOX };

 protected:
 virtual void DoDataExchange(CDataExchange* pDX);  // DDX/DDV support

// Implementation
protected:
 DECLARE_MESSAGE_MAP()
};

CAboutDlg::CAboutDlg() : CDialogEx(CAboutDlg::IDD)
{
}

void CAboutDlg::DoDataExchange(CDataExchange* pDX)
{
 CDialogEx::DoDataExchange(pDX);
}

BEGIN_MESSAGE_MAP(CAboutDlg, CDialogEx)
END_MESSAGE_MAP()

// CVideoMFCDlg dialog

CVideoMFCDlg::CVideoMFCDlg(CWnd* pParent /*=NULL*/)
 : CDialogEx(CVideoMFCDlg::IDD, pParent)
{
 m_hIcon = AfxGetApp()->LoadIcon(IDR_MAINFRAME);
}

void CVideoMFCDlg::DoDataExchange(CDataExchange* pDX)
{
 CDialogEx::DoDataExchange(pDX);
}

BEGIN_MESSAGE_MAP(CVideoMFCDlg, CDialogEx)
 ON_WM_SYSCOMMAND()
 ON_WM_PAINT()
 ON_WM_QUERYDRAGICON()
 ON_BN_CLICKED(IDC_BUTTON1, &CVideoMFCDlg::OnBnClickedButton1)
 ON_WM_TIMER()
 ON_BN_CLICKED(IDC_BUTTON2, &CVideoMFCDlg::OnBnClickedButton2)
 ON_WM_CLOSE()
 ON_EN_CHANGE(IDC_EdtConfidence, &CVideoMFCDlg::OnEnChangeEdtconfidence)
 ON_BN_CLICKED(IDC_Photograph, &CVideoMFCDlg::OnBnClickedPhotograph)
 ON_BN_CLICKED(IDC_Recognize, &CVideoMFCDlg::OnBnClickedRecognize)
 ON_BN_CLICKED(IDC_ClearPictures, &CVideoMFCDlg::OnBnClickedClearpictures)
END_MESSAGE_MAP()

// CVideoMFCDlg message handlers
BOOL CVideoMFCDlg::OnDestroy()
{
 cvReleaseImage( &resizeRes );
 cvReleaseImage( &faceGray );
 return TRUE;
}
BOOL CVideoMFCDlg::OnInitDialog()
{
 CDialogEx::OnInitDialog();

 // Add "About..." menu item to system menu.

 // IDM_ABOUTBOX must be in the system command range.
 ASSERT((IDM_ABOUTBOX & 0xFFF0) == IDM_ABOUTBOX);
 ASSERT(IDM_ABOUTBOX < 0xF000);

 CMenu* pSysMenu = GetSystemMenu(FALSE);
 if (pSysMenu != NULL)
 {
 BOOL bNameValid;
 CString strAboutMenu;
 bNameValid = strAboutMenu.LoadString(IDS_ABOUTBOX);
 ASSERT(bNameValid);
 if (!strAboutMenu.IsEmpty())
 {
  pSysMenu->AppendMenu(MF_SEPARATOR);
  pSysMenu->AppendMenu(MF_STRING, IDM_ABOUTBOX, strAboutMenu);
 }
 }

 // Set the icon for this dialog. The framework does this automatically
 // when the application's main window is not a dialog
 SetIcon(m_hIcon, TRUE);  // Set big icon
 SetIcon(m_hIcon, FALSE); // Set small icon

 // TODO: Add extra initialization here
 pwnd = GetDlgItem(IDC_ShowImage);
 //pwnd->MoveWindow(35,30,352,288);
  pDC =pwnd->GetDC();
 //pDC =GetDC();
  hDC= pDC->GetSafeHdc();
 pwnd->GetClientRect(&rect);

 GetDlgItem(IDC_BUTTON2)->EnableWindow(false);
 GetDlgItem(IDC_Photograph)->EnableWindow(false);
 GetDlgItem(IDC_Recognize)->EnableWindow(false);
 pEdtConfidence = (CEdit*) GetDlgItem(IDC_EdtConfidence);
 pTip = (CEdit*) GetDlgItem(IDC_Tip);
 pEdtConfidence->SetWindowText("70");
 pEdtConfidence->GetWindowText(strConfidence);
 pTip->SetWindowText( strTip );
 if(read_img_number()>0)
 bool_picNum = true;
 else
 bool_picNum = false;
 return TRUE; // return TRUE unless you set the focus to a control
}

void CVideoMFCDlg::OnSysCommand(UINT nID, LPARAM lParam)
{
 if ((nID & 0xFFF0) == IDM_ABOUTBOX)
 {
 CAboutDlg dlgAbout;
 dlgAbout.DoModal();
 }
 else
 {
 CDialogEx::OnSysCommand(nID, lParam);
 }
}

// If you add a minimize button to your dialog, you will need the code below
// to draw the icon. For MFC applications using the document/view model,
// this is automatically done for you by the framework.

void CVideoMFCDlg::OnPaint()
{
 if (IsIconic())
 {
 CPaintDC dc(this); // device context for painting

 SendMessage(WM_ICONERASEBKGND, reinterpret_cast<WPARAM>(dc.GetSafeHdc()), 0);

 // Center icon in client rectangle
 int cxIcon = GetSystemMetrics(SM_CXICON);
 int cyIcon = GetSystemMetrics(SM_CYICON);
 CRect rect;
 GetClientRect(&rect);
 int x = (rect.Width() - cxIcon + 1) / 2;
 int y = (rect.Height() - cyIcon + 1) / 2;

 // Draw the icon
 dc.DrawIcon(x, y, m_hIcon);
 }
 else
 {
 CDialogEx::OnPaint();
 }
}

// The system calls this function to obtain the cursor to display while the user drags
// the minimized window.
HCURSOR CVideoMFCDlg::OnQueryDragIcon()
{
 return static_cast<HCURSOR>(m_hIcon);
}

/*****************************************Open the camera*******************************************/
void CVideoMFCDlg::OnBnClickedButton1()
{
 // TODO: Add your control notification handler code here
 //AfxMessageBox("OK");
 if(!capture)
 {
 capture = cvCaptureFromCAM(0);
 //AfxMessageBox("OK");
 }

 if (!capture)
 {
 AfxMessageBox("Failed to open the camera");
 return;
 }
 //writer=cvCreateVideoWriter("MyVideo.avi",CV_FOURCC('x','v','I','D'),25,cvSize(640,480));
 // Test: grab one frame and draw it right away
 IplImage* m_Frame;
 m_Frame=cvQueryFrame(capture);
 CvvImage m_CvvImage;
 m_CvvImage.CopyOf(m_Frame,1);
 if (true)
 {
 m_CvvImage.DrawToHDC(hDC, &rect);
 //cvWaitKey(10);
 }

 // Start a timer that fires every 10 ms
 SetTimer(1,10,NULL);

 cascade = (CvHaarClassifierCascade*)cvLoad( cascade_name, 0, 0, 0 ); // load the face classifier
  if( !cascade )
  {
    MessageBox("Failed to load the classifier file, please check the path!");
  }
  storage = cvCreateMemStorage(0); // create the memory storage

  //if( !input_name || (isdigit(input_name[0]) && input_name[1] == '\0') ) // decide whether the input is a camera index or a file name
  // capture is already open (cvCaptureFromCAM(0) above); opening it again here would leak that handle
  //capture = cvCaptureFromCAM( !input_name ? 0 : input_name[0] - '0' );
 /*
  else if( input_name )
  {
    image = cvLoadImage( input_name, 1 ); // if it is an image, load it
    if( !image )
 {
      capture = cvCaptureFromAVI( input_name ); // otherwise try to open it as a video
  cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, 640);
  cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, 480);
 }
  }*/
  //else
  //  image = cvLoadImage( "lena.bmp", 1 ); // fall back to lena.bmp in the program directory
  //cvNamedWindow( "result", 1 );

 GetDlgItem(IDC_BUTTON1)->EnableWindow(false);
 GetDlgItem(IDC_BUTTON2)->EnableWindow(true);
 GetDlgItem(IDC_Photograph)->EnableWindow(true);
 GetDlgItem(IDC_Recognize)->EnableWindow(true);
 bool_detec_reco = false;
 bool_cameOpen = true;
}

/********************************************Timer handler*********************************************/
void CVideoMFCDlg::OnTimer(UINT_PTR nIDEvent)
{
 //grab and show the current camera frame
 IplImage* m_Frame;
 m_Frame=cvQueryFrame(capture);
 //AllocConsole();
 //decide whether to only detect the face or to detect and recognize it
 if(bool_cameOpen)
 {
 if(!bool_detec_reco)//false means detection only
 {
  detect_and_draw(m_Frame);//detect the face
  //_cprintf("%s\n", "jiance");
 }
 else if(bool_picNum)//bool_picNum == false means there are no training pictures
  recog_and_draw(m_Frame);//detect and recognize the face
 }
 }
 CvvImage m_CvvImage;
 m_CvvImage.CopyOf(m_Frame,1);
 if (true)
 {
 m_CvvImage.DrawToHDC(hDC, &rect);
 //cvWriteFrame(writer,m_Frame); //write the frame to the video file through writer
 //cvWaitKey(10);
 }
 if(bool_detec_reco)
 {
 // predict() leaves the label at -1 when the best distance exceeds the threshold,
 // and every training image is labeled 0, so a successful match is exactly label 0
 if(predictedLabel == 0)
 {
  CString tipPhoto = strTip + "\r\nVerification succeeded!!";
  pTip->SetWindowText( tipPhoto );
 }
 else
 {
  CString tipPhoto = strTip + "\r\nVerification failed!!";
  pTip->SetWindowText( tipPhoto );
 }
 }

 CDialogEx::OnTimer(nIDEvent);
}

//"Close camera" button
void CVideoMFCDlg::OnBnClickedButton2()
{
 // TODO: Add your control notification handler code here
 cvReleaseVideoWriter(&writer);
 cvReleaseCapture(&capture);
 CDC MemDC;
 CBitmap m_Bitmap1;
 m_Bitmap1.LoadBitmap(IDB_BITMAP1);
 MemDC.CreateCompatibleDC(NULL);
 MemDC.SelectObject(&m_Bitmap1);
 pDC->StretchBlt(rect.left,rect.top,rect.Width(),rect.Height(),&MemDC,0,0,48,48,SRCCOPY);
 GetDlgItem(IDC_BUTTON1)->EnableWindow(true);
 GetDlgItem(IDC_BUTTON2)->EnableWindow(false);
 GetDlgItem(IDC_Photograph)->EnableWindow(false);
 GetDlgItem(IDC_Recognize)->EnableWindow(false);
 bool_cameOpen = false;
}

//Close the window
void CVideoMFCDlg::OnClose()
{
 // TODO: Add your message handler code here and/or call default
 cvReleaseCapture(&capture);
 CDialogEx::OnClose();
}

void CVideoMFCDlg::OnEnChangeEdtconfidence()
{
}

//"Take photo" button
void CVideoMFCDlg::OnBnClickedPhotograph()
{
 // TODO: Add your control notification handler code here
 if (!faceGray)
 {
 pTip->GetWindowText(strTip);
 CString tipPhoto = strTip + "\r\nCapture failed, please point the camera at a face";
 pTip->SetWindowText( tipPhoto );
 return;
 }
 Mat img(faceGray,0);
 stringstream ss;
 ss << (read_img_number()+1);
 string faceImgName = "..//einfacedata//trainingdata//"+ss.str()+".jpg";
 imwrite(faceImgName,img);

 //pTip->GetWindowText(strTip);
 CString tipPhoto = strTip + "\r\nPhoto taken! Saved as " + ("/einfacedata/trainingdata/"+ss.str()+".jpg").c_str();
 pTip->SetWindowText( tipPhoto );
 //MessageBox("OK");
}

//"Start verification" button
void CVideoMFCDlg::OnBnClickedRecognize()
{
 // TODO: Add your control notification handler code here
 images.clear();
 labels.clear();
 pEdtConfidence->GetWindowText(strConfidence);

 // atoi() never throws, so validate the converted value instead of relying on exceptions
 dConfidence = atoi((const char *)strConfidence);
 if(dConfidence <= 0)
 {
 MessageBox("Please enter a positive integer for the confidence threshold!");
 return;
 }

 model->set("threshold", dConfidence);
 //string output_folder;
 //output_folder = string("../einfacedata");

 //path of your CSV file
 //string fn_csv = string("../einfacedata/at.txt");

 //two containers for the image data and the corresponding labels
 /*
 try
 {
 read_csv(fn_csv, images, labels);
 }
 catch(cv::Exception &e)
 {
 cerr<<"Error opening file "<<fn_csv<<". Reason: "<<e.msg<<endl;
 exit(1);
 }
 */
 if(!read_img(images, labels))
 {
 AfxMessageBox("Error in reading images!");
 //MessageBox("Error in reading images!");
 images.clear();
 labels.clear();
 return;
 }

 //bail out if no training image could be read
 if(images.size() < 1)
 {
 MessageBox("This demo needs at least 1 image to work!");
 return;
 }
 //training
 model->train(images, labels);

 bool_detec_reco = true;
 bool_picNum = true;

}

//Clear the training pictures
void CVideoMFCDlg::OnBnClickedClearpictures()
{
 // TODO: Add your control notification handler code here
 if(delete_img())
 {
 //pTip->GetWindowText(strTip);
 CString tipPhoto = strTip + "\r\nTraining pictures deleted!";
 pTip->SetWindowText( tipPhoto );
 bool_detec_reco = false;
 bool_picNum = false;
 }
 else
 {
 //pTip->GetWindowText(strTip);
 CString tipPhoto = strTip + "\r\nFailed to delete the training pictures!";
 pTip->SetWindowText( tipPhoto );
 }
}

detect_recog.cpp is similar to the one in the previous post:

#include "stdafx.h"
#include "cv.h"
#include "highgui.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <math.h>
#include <float.h>
#include <limits.h>
#include <time.h>
#include <ctype.h>
#include "detect_recog.h"
#include <opencv2\contrib\contrib.hpp>
#include <opencv2\core\core.hpp>
#include <opencv2\highgui\highgui.hpp>
#include <iostream>
#include <fstream>
#include <sstream>
#include <stdio.h>
#include <io.h>
#include <direct.h>
#include <sys/types.h>
#include <conio.h>
using namespace std;
using namespace cv;
void detect_and_draw( IplImage* img ) // detection only: find the face and draw a box around it
{
  static CvScalar colors[] =
  {
    {{0,0,255}},
    {{0,128,255}},
    {{0,255,255}},
    {{0,255,0}},
    {{255,128,0}},
    {{255,255,0}},
    {{255,0,0}},
    {{255,0,255}}
  };
  IplImage *gray, *small_img;
  int i, j;
  gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
  small_img = cvCreateImage( cvSize( cvRound (img->width/scale),
             cvRound (img->height/scale)), 8, 1 );
  cvCvtColor( img, gray, CV_BGR2GRAY ); // convert the color frame to grayscale
  cvResize( gray, small_img, CV_INTER_LINEAR );
  cvEqualizeHist( small_img, small_img ); // histogram equalization
  cvClearMemStorage( storage );
  if( cascade )
  {
    double t = (double)cvGetTickCount();
    CvSeq* faces = cvHaarDetectObjects( small_img, cascade, storage,
                      1.1, 2, 0
                      //|CV_HAAR_FIND_BIGGEST_OBJECT
                      |CV_HAAR_DO_ROUGH_SEARCH
                      //|CV_HAAR_DO_CANNY_PRUNING
                      //|CV_HAAR_SCALE_IMAGE
                      ,
                      cvSize(30, 30) );
    t = (double)cvGetTickCount() - t; // measure the detection time
    //printf( "detection time = %gms\n", t/((double)cvGetTickFrequency()*1000.) );
    for( i = 0; i < (faces ? faces->total : 0); i++ )
    {
      CvRect* r = (CvRect*)cvGetSeqElem( faces, i ); // fetch face i from the CvSeq as a CvRect
      CvMat small_img_roi;
      CvSeq* nested_objects;
      CvPoint center,recPt1,recPt2;
      CvScalar color = colors[i%8]; // draw each face in a different color, eight colors in total
      int radius;
      center.x = cvRound((r->x + r->width*0.5)*scale); // face center
      center.y = cvRound((r->y + r->height*0.5)*scale);
  recPt1.x = cvRound((r->x)*scale);
  recPt1.y = cvRound((r->y)*scale);
  recPt2.x = cvRound((r->x + r->width)*scale);
  recPt2.y = cvRound((r->y + r->height)*scale);
      radius = cvRound((r->width + r->height)*0.25*scale); 

  cvGetSubRect( small_img, &small_img_roi, *r );

  IplImage *result;
  CvRect roi;
  roi = *r;
  result = cvCreateImage( cvSize(r->width, r->height), img->depth, img->nChannels );
  cvSetImageROI(img,roi);
  // copy the face region into a sub-image
  cvCopy(img,result);
  cvResetImageROI(img);

  // resize the face to 100x100; release the buffers from the previous frame to avoid leaks
  CvSize dst_cvsize;
  dst_cvsize.width=100;
  dst_cvsize.height=100;
  cvReleaseImage( &resizeRes );
  resizeRes=cvCreateImage(dst_cvsize,result->depth,result->nChannels);
  cvResize(result,resizeRes,CV_INTER_NN);
  cvReleaseImage( &faceGray );
  faceGray = cvCreateImage(cvGetSize(resizeRes), IPL_DEPTH_8U, 1);//grayscale face used for capture and recognition
  cvCvtColor(resizeRes,faceGray,CV_BGR2GRAY);//cvCvtColor(src,dst,CV_BGR2GRAY)
  cvReleaseImage( &result );
      cvShowImage( "resize", resizeRes );
  cvRectangle(img,recPt1,recPt2,color,1, 8,0);
  //rectangle(img,recPt1,recPt2,color,1,8,0);
  //cvCircle( img, center, radius, color, 3, 8, 0 ); // alternatively, circle the face around its center
      if( !nested_cascade )
        continue;
      nested_objects = cvHaarDetectObjects( &small_img_roi, nested_cascade, storage,
                    1.1, 2, 0
                    //|CV_HAAR_FIND_BIGGEST_OBJECT
                    //|CV_HAAR_DO_ROUGH_SEARCH
                    //|CV_HAAR_DO_CANNY_PRUNING
                    //|CV_HAAR_SCALE_IMAGE
                    ,
                    cvSize(0, 0) );
      for( j = 0; j < (nested_objects ? nested_objects->total : 0); j++ )
      {
        CvRect* nr = (CvRect*)cvGetSeqElem( nested_objects, j );
        center.x = cvRound((r->x + nr->x + nr->width*0.5)*scale);
        center.y = cvRound((r->y + nr->y + nr->height*0.5)*scale);
        radius = cvRound((nr->width + nr->height)*0.25*scale);
        cvCircle( img, center, radius, color, 3, 8, 0 );
      }
    }
  }
  //cvShowImage( "result", img );
  cvReleaseImage( &gray );
  cvReleaseImage( &small_img );
}
//detect and recognize the face, and write the result onto each frame
void recog_and_draw( IplImage* img )
{
  static CvScalar colors[] =
  {
    {{0,0,255}},
    {{0,128,255}},
    {{0,255,255}},
    {{0,255,0}},
    {{255,128,0}},
    {{255,255,0}},
    {{255,0,0}},
    {{255,0,255}}
  };
  IplImage *gray, *small_img;
  int i, j;
  gray = cvCreateImage( cvSize(img->width,img->height), 8, 1 );
  small_img = cvCreateImage( cvSize( cvRound (img->width/scale),
             cvRound (img->height/scale)), 8, 1 );
  cvCvtColor( img, gray, CV_BGR2GRAY ); // convert the color frame to grayscale
  cvResize( gray, small_img, CV_INTER_LINEAR );
  cvEqualizeHist( small_img, small_img ); // histogram equalization
  cvClearMemStorage( storage );
  if( cascade )
  {
    double t = (double)cvGetTickCount();
    CvSeq* faces = cvHaarDetectObjects( small_img, cascade, storage,
                      1.1, 2, 0
                      //|CV_HAAR_FIND_BIGGEST_OBJECT
                      //|CV_HAAR_DO_ROUGH_SEARCH
                      |CV_HAAR_DO_CANNY_PRUNING
                      //|CV_HAAR_SCALE_IMAGE
                      ,
                      cvSize(30, 30) );
    t = (double)cvGetTickCount() - t; // measure the detection time
    //printf( "detection time = %gms\n", t/((double)cvGetTickFrequency()*1000.) );
    for( i = 0; i < (faces ? faces->total : 0); i++ )
    {
      CvRect* r = (CvRect*)cvGetSeqElem( faces, i ); // fetch face i from the CvSeq as a CvRect
      CvMat small_img_roi;
      CvSeq* nested_objects;
      CvPoint center,recPt1,recPt2;
      CvScalar color = colors[i%8]; // draw each face in a different color, eight colors in total
      int radius;
      center.x = cvRound((r->x + r->width*0.5)*scale); // face center
      center.y = cvRound((r->y + r->height*0.5)*scale);
  recPt1.x = cvRound((r->x)*scale);
  recPt1.y = cvRound((r->y)*scale);
  recPt2.x = cvRound((r->x + r->width)*scale);
  recPt2.y = cvRound((r->y + r->height)*scale);
      radius = cvRound((r->width + r->height)*0.25*scale); 

  cvGetSubRect( small_img, &small_img_roi, *r );

  IplImage *result;
  CvRect roi;
  roi = *r;
  result = cvCreateImage( cvSize(r->width, r->height), img->depth, img->nChannels );
  cvSetImageROI(img,roi);
  // copy the face region into a sub-image
  cvCopy(img,result);
  cvResetImageROI(img);

  // resize the face to 100x100; release the buffers from the previous frame to avoid leaks
  CvSize dst_cvsize;
  dst_cvsize.width=100;
  dst_cvsize.height=100;
  cvReleaseImage( &resizeRes );
  resizeRes=cvCreateImage(dst_cvsize,result->depth,result->nChannels);
  cvResize(result,resizeRes,CV_INTER_NN);

  cvReleaseImage( &faceGray );
  faceGray = cvCreateImage(cvGetSize(resizeRes), IPL_DEPTH_8U, 1);//grayscale face used for recognition
  cvCvtColor(resizeRes,faceGray,CV_BGR2GRAY);//cvCvtColor(src,dst,CV_BGR2GRAY)
  cvReleaseImage( &result );
      cvShowImage( "resize", resizeRes );
  cvRectangle(img,recPt1,recPt2,color,3, 8,0);
  //cvCircle( img, center, radius, color, 3, 8, 0 ); // alternatively, circle the face around its center

  Mat test = faceGray;
  //images[images.size() - 1] = test;
  // the model is already trained in OnBnClickedRecognize, so retraining it on
  // every detected face would only slow the timer callback down
  //model->train(images, labels);
  //predictedLabel = model->predict(test);
  double predicted_confidence = 0.0;
  model->predict(test,predictedLabel,predicted_confidence);//label -1 means the distance exceeded the threshold
  stringstream strStream;
  strStream<<predicted_confidence;
  string ss = strStream.str();
  cvText(img, ss.c_str(), r->x+r->width*0.5, r->y);//print the distance next to the face
  if(predicted_confidence <= dConfidence)
  cvText(img, "Result:YES", 0, 30);
  else
  cvText(img, "Result:NO", 0, 30);
  //cout << "predict:"<<model->predict(test) << endl;
  //cout << "predict:"<< predictedLabel << "\nconfidence:" << predicted_confidence << endl;

      if( !nested_cascade )
        continue;

      nested_objects = cvHaarDetectObjects( &small_img_roi, nested_cascade, storage,
                    1.1, 2, 0
                    //|CV_HAAR_FIND_BIGGEST_OBJECT
                    //|CV_HAAR_DO_ROUGH_SEARCH
                    //|CV_HAAR_DO_CANNY_PRUNING
                    //|CV_HAAR_SCALE_IMAGE
                    ,
                    cvSize(0, 0) );
      for( j = 0; j < (nested_objects ? nested_objects->total : 0); j++ )
      {
        CvRect* nr = (CvRect*)cvGetSeqElem( nested_objects, j );
        center.x = cvRound((r->x + nr->x + nr->width*0.5)*scale);
        center.y = cvRound((r->y + nr->y + nr->height*0.5)*scale);
        radius = cvRound((nr->width + nr->height)*0.25*scale);
        cvCircle( img, center, radius, color, 3, 8, 0 );
      }
    }
  }
  //cvShowImage( "result", img );
  cvReleaseImage( &gray );
  cvReleaseImage( &small_img );
}
void cvText(IplImage* img, const char* text, int x, int y)
{
  CvFont font;
  double hscale = 1.0;
  double vscale = 1.0;
  int linewidth = 2;
  cvInitFont(&font,CV_FONT_HERSHEY_SIMPLEX | CV_FONT_ITALIC,hscale,vscale,0,linewidth);
  CvScalar textColor =cvScalar(0,255,255);
  CvPoint textPos =cvPoint(x, y);
  cvPutText(img, text, textPos, &font,textColor);
}

Mat norm_0_255(cv::InputArray _src)
{
 Mat src = _src.getMat();
 Mat dst;

 switch(src.channels())
 {
 case 1:
 cv::normalize(_src, dst, 0, 255, cv::NORM_MINMAX, CV_8UC1);
 break;
 case 3:
 cv::normalize(_src, dst, 0, 255, cv::NORM_MINMAX, CV_8UC3);
 break;
 default:
 src.copyTo(dst);
 break;
 }

 return dst;
}

void read_csv(const string &filename, vector<Mat> &images, vector<int> &labels, char separator)
{
 std::ifstream file(filename.c_str(), ifstream::in);
 if(!file)
 {
 string error_message = "No valid input file was given.";
 CV_Error(CV_StsBadArg, error_message);
 }

 string line, path, classlabel;
 while(getline(file, line))
 {
 stringstream liness(line);
 getline(liness, path, separator); //read up to the separator character
 getline(liness, classlabel);   //then read the label up to the end of the line
 if(!path.empty() && !classlabel.empty())
 {
  images.push_back(imread(path, 0));
  labels.push_back(atoi(classlabel.c_str()));
 }
 }
}
//reads the jpg files in the trainingdata directory and uses them as the training set
bool read_img(vector<Mat> &images, vector<int> &labels)
{
 long file;
  struct _finddata_t find;
 //AllocConsole();
 string path = "..//einfacedata//trainingdata/";
 char filepath[60];
  //_chdir("..//einfacedata//trainingdata");
  if((file=_findfirst("..//einfacedata//trainingdata/*.jpg", &find))==-1L)
 {
 AfxMessageBox("Cannot find the dir");
    return false;
  }
 int i = 0;
  images.push_back(imread(path+find.name, 0));
 labels.push_back(0);
  while(_findnext(file, &find)==0)
  {
 //_cprintf("%s\n", path+find.name);
 //_cprintf("%d\n", i++);
 images.push_back(imread(path+find.name, 0));
 labels.push_back(0);
  }
  _findclose(file);
 return true;
}
//counts the jpg files in the trainingdata directory
int read_img_number()
{
 long file;
 int i = 0;
  struct _finddata_t find;
 //AllocConsole();
 string path = "..//einfacedata//trainingdata/";
 char filepath[60];
  if((file=_findfirst("..//einfacedata//trainingdata/*.jpg", &find))==-1L)
    return i;
 i++;
  while(_findnext(file, &find)==0)
  {
 i++;
  }
  _findclose(file);
 return i;
}
bool delete_img()
{
 // /Q deletes the captured training images without a confirmation prompt
 system( "del /Q ..\\einfacedata\\trainingdata\\*.jpg" );
 return true;
}
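
To make the role of the confidence field clearer, here is a small standalone sketch of the same LBPH verification mechanism the program relies on (OpenCV 2.4 contrib API); the image file names are placeholders and the sketch is not part of the MFC project:

// minimal sketch: verify a probe image against a single enrolled person (label 0)
#include <opencv2/contrib/contrib.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>
using namespace cv;

int main()
{
  std::vector<Mat> images;                  // enrolled grayscale face crops, e.g. 100x100
  std::vector<int> labels;
  images.push_back(imread("1.jpg", 0));     // placeholder file name
  labels.push_back(0);

  Ptr<FaceRecognizer> model = createLBPHFaceRecognizer();
  model->set("threshold", 70.0);            // same role as the confidence edit box
  model->train(images, labels);

  Mat probe = imread("probe.jpg", 0);       // placeholder file name
  int label = -1; double dist = 0.0;
  model->predict(probe, label, dist);       // label stays -1 if dist exceeds the threshold

  return (label == 0) ? 0 : 1;              // 0 = verified as the enrolled person
}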

That is all for this article. I hope it helps with your learning, and I hope you will continue to support us.
