// Interactive free-running focus loop: continuously grabs frames from the
// frame grabber, shows them in an OpenCV window, and lets the operator zoom
// ('1' = zoom in, '2' = zoom out) and pan via a percentage trackbar until
// '3' is pressed.
// @param mdi  grabber/display context (frame grabber handle, buffer, geometry).
// @return 0 on normal operator exit, -1 if acquisition could not be started
//         or the grabber reported an error while fetching a frame.
int MicroDisplayControler::FreeRunningFocusing(MicroDisplayInit& mdi) {
    std::cout << "开始对焦,按1放大,按2缩小,按3退出。\r\n"; // "Start focusing: 1 = zoom in, 2 = zoom out, 3 = exit."

    // Start acquiring mdi.MaxPics frames into the grabber buffer.
    if (MicroDisplayControler::StartGrabbing(mdi) < 0) {
        MD_ErrorMessageWait(mdi.fg);
        return -1;
    }

    cv::Mat OriginalImage;
    int displayWidth = mdi.width;   // current zoomed display width in pixels
    frameindex_t lastPicNr = 0;
    cv::namedWindow("SHOW", CV_WINDOW_AUTOSIZE);
    int pos = 0;                    // trackbar value: horizontal offset in percent
    int realPos = 0;                // horizontal offset in pixels
    const int maxpos = 80;          // cap at 80% so the ROI is never empty
    cv::createTrackbar("OFF SET / %", "SHOW", &pos, maxpos);

    do {
        lastPicNr = Fg_getLastPicNumberBlockingEx(mdi.fg, lastPicNr + 1, mdi.nCamPort, 100, mdi.pMem0);
        if (lastPicNr < 0) {
            // Grabber error: report it, release acquisition resources, bail out.
            MD_ErrorMessageWait(mdi.fg);
            Fg_stopAcquireEx(mdi.fg, mdi.nCamPort, mdi.pMem0, 0);
            Fg_FreeMemEx(mdi.fg, mdi.pMem0);
            Fg_FreeGrabber(mdi.fg);
            CloseDisplay(mdi.nId);
            return -1;  // was 0: report the failure consistently with StartGrabbing
        }

        unsigned char* bytePtr = (unsigned char*)Fg_getImagePtrEx(mdi.fg, lastPicNr, 0, mdi.pMem0);
        // Wrap the grabber buffer in a Mat without copying.
        if (mdi.colorType == mdi.GRAY)
            OriginalImage = cv::Mat(mdi.height, mdi.width, CV_8U, bytePtr);
        else
            OriginalImage = cv::Mat(mdi.height, mdi.width, CV_8UC3, bytePtr);

        if (displayWidth != mdi.width)
            cv::resize(OriginalImage, OriginalImage, cv::Size(displayWidth, mdi.height));

        // Multiply before dividing: the original `displayWidth / 100 * pos`
        // truncated first and could be short by up to (displayWidth % 100) * pos / 100 px.
        realPos = displayWidth * pos / 100;
        cv::Mat ROI = OriginalImage(cv::Rect(realPos, 0, displayWidth - realPos, mdi.height));
        cv::imshow("SHOW", ROI);

        char t = cv::waitKey(20);
        if (t == '3')
            break;
        if (t == '2' && displayWidth > 800)
            displayWidth /= 2;
        if (t == '1' && displayWidth < mdi.width * 3)
            displayWidth *= 2;
    } while (true);

    MicroDisplayControler::EndGrabbing(mdi);
    cv::destroyAllWindows();
    // End of main feature loop.
    return 0;
}
// Grab a single frame from the camera, push it through the SDK's ISP pipeline
// (Bayer demosaic, denoise, edge enhancement, colour correction), and hand
// back a gray/colour cv::Mat with the 2-pixel white border cropped away.
// On any SDK failure `img` is left untouched.
void MVCAM::GetFrame(cv::Mat& img) {
    tSdkFrameHead sFrameInfo;
    CameraHandle hCamera = (CameraHandle)m_hCamera;
    BYTE* pbyBuffer;
    CameraSdkStatus status;

    if (CameraGetImageBuffer(hCamera, &sFrameInfo, &pbyBuffer, 1000) == CAMERA_STATUS_SUCCESS) {
        // Convert the raw sensor data (Bayer on most camera models) into
        // displayable pixels in m_pFrameBuffer.
        status = CameraImageProcess(hCamera, pbyBuffer, m_pFrameBuffer, &sFrameInfo); // continuous mode
        if (status == CAMERA_STATUS_SUCCESS) {
            // Wrap the processed buffer directly — no pixel copy here.
            const bool isMono = (ColorType == CV_8U);
            cv::Mat frame(sFrameInfo.iHeight, sFrameInfo.iWidth,
                          isMono ? CV_8U : CV_8UC3, m_pFrameBuffer);
            // Crop 2 px on every side to drop the white edge; clone() so the
            // caller's Mat owns its pixels after m_pFrameBuffer is reused.
            const cv::Rect inner(2, 2, sFrameInfo.iWidth - 4, sFrameInfo.iHeight - 4);
            img = frame(inner).clone();
        }
        // Every successful CameraGetImageBuffer must be paired with
        // CameraReleaseImageBuffer, otherwise the next grab blocks forever.
        CameraReleaseImageBuffer(hCamera, pbyBuffer);
    }
}
void MainWindow::processVideoAndUpdateQUI() { ProcUpdateElapsedTime.start(); CaptureCamera.read(OriginalImageMat); if (OriginalImageMat.empty()) return; cv::resize(OriginalImageMat, ResizedImageMat, cv::Size(352,220), 0, 0, cv::INTER_CUBIC); //resizing the image to fit in the UI cv::flip(ResizedImageMat, ResizedImageMat, 1); //eliminating the mirror effect //Add the picture to the processer int move = Processer->getMove(ResizedImageMat); //std::cout << "move: " << move <<std::endl; //Move the car if(prevmove == 0) { myRoad->moveCar(move); } prevmove = move; cv::cvtColor(ResizedImageMat, ResizedImageMat, CV_BGR2RGB); //converting the image to RGB cv::rectangle(ResizedImageMat,cv::Rect(0, 0, 100, 130),cv::Scalar(200)); cv::rectangle(ResizedImageMat,cv::Rect(250,0,102,130),cv::Scalar(200)); QImage OriginalImage((uchar*)ResizedImageMat.data, ResizedImageMat.cols, ResizedImageMat.rows, ResizedImageMat.step, QImage::Format_RGB888); //Creating the QImage for the label NetworkSendImage = OriginalImage; NetworkSendImage = NetworkSendImage.scaledToHeight(165); ui->MyVideoLabel->setPixmap(QPixmap::fromImage(OriginalImage)); //Sending number of lives and distance embedded in the image NetworkSendImage.setText("lives", QString::number(lives)); NetworkSendImage.setText("distance", QString::number(distance)); n->sendData(NetworkSendImage); //sending the image over the network if(lives < 0 && PartnerIpAddr.toString()!=MyIpAddr.toString()) { lose(); } // std::cerr << "processVideoAndUpdateQUI() elapsed time: " << ProcUpdateElapsedTime.elapsed() << " msec" << std::endl << std::endl; }
//returns a pointer to a vector of int's, where, for every detected costumer, inserts his place in the costumers vector, to the returned vector std::vector<int>* FaceDetection::Detect(const std::string& ImageSent, const Customers& customers) { std::vector<int> *custsDetected= new std::vector<int>; cv::CascadeClassifier face_cascade("/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml"); std::string image= "customers/" + ImageSent; cv::Mat OriginalImage = cv::imread(image, CV_LOAD_IMAGE_GRAYSCALE); std::vector<cv::Rect> faces; face_cascade.detectMultiScale(OriginalImage, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE); for (int i= 0; i < customers.GetSize(); i++){ std::string custName= (customers.GetCustomerAt(i))->getName(); std::string picPath= "faces/" + custName + "/" + custName + ".tiff"; cv::Mat custFace= cv::imread(picPath, CV_LOAD_IMAGE_GRAYSCALE); int size= faces.size(); for (int j= 0; j < size; j++){ cv::Mat faceFound= OriginalImage(faces[j]); if (custFace.size() == faceFound.size()) { if (cv::countNonZero(custFace != faceFound) == 0) custsDetected->push_back(i); } } } return custsDetected; }