void CMainFrame::on_dev_close_ccd()
{
    if (!theApp.m_CCD.m_isinit)
        return;

    DS_RUNMODE ccd_status = theApp.m_CCD.m_runstatus;
    //if (ccd_status == RUNMODE_PAUSE)
    CameraStop();
    CameraUnInit();

    theApp.m_CCD.m_isinit = FALSE;
    theApp.m_CCD.m_runstatus = RUNMODE_STOP;
}
int main(int argc, char* argv[])
{
    // initialise the Prosilica API
    if (!PvInitialize())
    {
        tCamera Camera;

        memset(&Camera, 0, sizeof(tCamera));

        // wait for a camera to be plugged in
        WaitForCamera();

        // get a camera from the list
        if (CameraGet(&Camera))
        {
            // setup the camera
            if (CameraSetup(&Camera))
            {
                // start streaming from the camera
                if (CameraStart(&Camera))
                {
                    printf("camera is ready now. Press q to quit or s to take a picture\n");

                    // wait for the user to quit or snap
                    if (WaitForUserToQuitOrSnap())
                    {
                        // snap now
                        CameraSnap(&Camera);
                    }

                    // stop the streaming
                    CameraStop(&Camera);
                }
                else
                    printf("failed to start streaming\n");

                // unsetup the camera
                CameraUnsetup(&Camera);
            }
            else
                printf("failed to setup the camera\n");
        }
        else
            printf("failed to find a camera\n");

        // uninitialise the API
        PvUnInitialize();
    }
    else
        printf("failed to initialise the API\n");

    return 0;
}
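// WaitForUserToQuitOrSnap() is called above but its body is not part of this snippet.
// A minimal sketch of what it could look like, assuming it simply reads characters from
// stdin and returns true for 's' (snap) and false for 'q' (quit):
bool WaitForUserToQuitOrSnap()
{
    int c;

    do
    {
        c = getc(stdin);
    } while (c != 'q' && c != 's');

    return c == 's';
}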
int main(int argc, char* argv[])
{
    tPvErr errCode;

    // initialize the PvAPI
    if ((errCode = PvInitialize()) != ePvErrSuccess)
    {
        printf("PvInitialize err: %u\n", errCode);
    }
    else
    {
        // IMPORTANT: Initialize camera structure. See tPvFrame in PvApi.h for more info.
        memset(&GCamera, 0, sizeof(tCamera));

        // set the function that handles CTRL-C
        SetConsoleCtrlHandler(CtrlCHandler, TRUE);
        printf("Press CTRL-C to terminate\n");

        // wait for a camera to be plugged in
        WaitForCamera();

        // get first camera found
        if (CameraGet())
        {
            // open camera
            if (CameraSetup())
            {
                // start camera streaming
                if (CameraStart())
                {
                    while (GCamera.Abort == false)
                        CameraSnap();

                    // stop camera streaming
                    CameraStop();
                }

                // close camera
                CameraUnsetup();
            }
        }

        // uninitialize PvAPI
        PvUnInitialize();
    }

    return 0;
}
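// CtrlCHandler is installed above via SetConsoleCtrlHandler() but not shown in this
// snippet. A minimal Win32 sketch, assuming tCamera carries the bool Abort flag that
// the snap loop polls:
BOOL WINAPI CtrlCHandler(DWORD dwCtrlType)
{
    // ask the main loop to stop on CTRL-C, CTRL-BREAK, or console close
    GCamera.Abort = true;
    return TRUE;   // report that the event was handled
}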
void CDialog3::OnBnClickedExit()
{
    // TODO: Add your control notification handler code here
    CameraStop();      // fp in dll
    CameraUnInit();

    this->SetDlgItemTextA(IDC_PLAY, "&Play");
    this->GetDlgItem(IDC_PLAY)->SetFocus();
    this->GetDlgItem(IDC_SAVEFILE)->EnableWindow(FALSE);
    this->GetDlgItem(IDC_SETUP)->EnableWindow(FALSE);
    this->GetDlgItem(IDC_RESOLUTION)->EnableWindow(FALSE);
    this->GetDlgItem(IDC_SAVEIMG)->EnableWindow(FALSE);

    m_RunMode = RUNMODE_STOP;
    return;
}
int main(int argc, char* argv[])
{
    // initialise the Prosilica API
    if (!PvInitialize())
    {
        memset(&GCamera, 0, sizeof(tCamera));

        SetConsoleCtrlHandler(&CtrlCHandler, TRUE);

        // wait for a camera to be plugged in
        WaitForCamera();

        // get a camera from the list
        if (CameraGet())
        {
            // setup the camera
            if (CameraSetup())
            {
                // start streaming from the camera
                if (CameraStart())
                {
                    printf("The camera is ready now.\n");

                    // snap until aborted
                    while (!GCamera.Abort)
                        CameraSnap();

                    // stop the streaming
                    CameraStop();
                }
                else
                    printf("failed to start streaming\n");

                // unsetup the camera
                CameraUnsetup();
            }
            else
                printf("failed to setup the camera\n");
        }
        else
            printf("failed to find a camera\n");

        // uninitialise the API
        PvUnInitialize();
    }
    else
        printf("failed to initialise the API\n");

    return 0;
}
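// WaitForCamera() is used above but not listed here. A minimal sketch, assuming
// PvCameraCount() from PvApi.h reports the number of cameras currently detected and
// that the CTRL-C handler sets GCamera.Abort:
void WaitForCamera()
{
    printf("waiting for a camera ...\n");
    while (PvCameraCount() == 0 && !GCamera.Abort)
        Sleep(250);   // poll every 250 ms (Windows; use usleep() on Linux)
}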
int main(int argc, char* argv[])
{
    tPvErr errCode;

    // initialize the PvAPI
    if ((errCode = PvInitialize()) != ePvErrSuccess)
        printf("PvInitialize err: %u\n", errCode);
    else
    {
        // IMPORTANT: Initialize camera structure. See tPvFrame in PvApi.h for more info.
        memset(&GCamera, 0, sizeof(tCamera));

        // set the CTRL-C handler
        SetConsoleCtrlHandler(&CtrlCHandler, TRUE);

        printf("Waiting for camera discovery...\n");

        // register camera plugged-in callback
        if ((errCode = PvLinkCallbackRegister(CameraEventCB, ePvLinkAdd, NULL)) != ePvErrSuccess)
            printf("PvLinkCallbackRegister err: %u\n", errCode);

        // register camera unplugged callback
        if ((errCode = PvLinkCallbackRegister(CameraEventCB, ePvLinkRemove, NULL)) != ePvErrSuccess)
            printf("PvLinkCallbackRegister err: %u\n", errCode);

        // wait until Ctrl+C break
        printf("Ctrl+C to break.\n");
        WaitForEver();

        CameraStop();
        CameraUnsetup();

        // if a thread was spawned (see HandleCameraPlugged), wait for it to finish
        if (GCamera.ThHandle)
            WaitThread();

        if ((errCode = PvLinkCallbackUnRegister(CameraEventCB, ePvLinkAdd)) != ePvErrSuccess)
            printf("PvLinkCallbackUnRegister err: %u\n", errCode);
        if ((errCode = PvLinkCallbackUnRegister(CameraEventCB, ePvLinkRemove)) != ePvErrSuccess)
            printf("PvLinkCallbackUnRegister err: %u\n", errCode);

        // uninitialize the API
        PvUnInitialize();
    }

    return 0;
}
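// CameraEventCB is registered above but its body is not part of this snippet. A rough
// skeleton, under the assumption that it matches the link-event callback type declared
// in PvApi.h (any calling-convention macro omitted here). HandleCameraPlugged is
// referenced in the code above; HandleCameraUnplugged is a hypothetical counterpart
// standing in for the real cleanup path.
void CameraEventCB(void* Context, tPvInterface Interface, tPvLinkEvent Event,
                   unsigned long UniqueId)
{
    switch (Event)
    {
        case ePvLinkAdd:
            // camera plugged in: open it, set it up, and start streaming
            HandleCameraPlugged(UniqueId);
            break;
        case ePvLinkRemove:
            // camera unplugged: signal the streaming/processing code to stop
            HandleCameraUnplugged(UniqueId);
            break;
        default:
            break;
    }
}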
int main(int argc, char* argv[])
{
    tPvErr errCode;

    // initialize the PvAPI
    if ((errCode = PvInitialize()) != ePvErrSuccess)
        printf("PvInitialize err: %u\n", errCode);
    else
    {
        // IMPORTANT: Initialize camera structure. See tPvFrame in PvApi.h for more info.
        memset(&GCamera, 0, sizeof(tCamera));

        // set the CTRL-C handler
        SetConsoleCtrlHandler(&CtrlCHandler, TRUE);

        printf("Waiting for camera discovery...\n");

        // register camera plugged-in callback
        if ((errCode = PvLinkCallbackRegister(CameraLinkCallback, ePvLinkAdd, NULL)) != ePvErrSuccess)
            printf("PvLinkCallbackRegister err: %u\n", errCode);

        // register camera unplugged callback
        if ((errCode = PvLinkCallbackRegister(CameraLinkCallback, ePvLinkRemove, NULL)) != ePvErrSuccess)
            printf("PvLinkCallbackRegister err: %u\n", errCode);

        // All camera setup, event setup, and streaming are handled in the ePvLinkAdd callback.

        // wait until Ctrl+C break or failure
        printf("***Ctrl+C to break***\n");
        WaitForEver();

        CameraStop();
        EventUnsetup();
        CameraUnsetup();

        if ((errCode = PvLinkCallbackUnRegister(CameraLinkCallback, ePvLinkAdd)) != ePvErrSuccess)
            printf("PvLinkCallbackUnRegister err: %u\n", errCode);
        if ((errCode = PvLinkCallbackUnRegister(CameraLinkCallback, ePvLinkRemove)) != ePvErrSuccess)
            printf("PvLinkCallbackUnRegister err: %u\n", errCode);

        // uninitialize the API
        PvUnInitialize();
    }

    return 0;
}
void CDialog3::OnBnClickedPlay()
{
    // TODO: Add your control notification handler code here
    if (m_RunMode == RUNMODE_PLAY)
    {
        CameraStop();      // fp in dll
        CameraUnInit();

        this->SetDlgItemTextA(IDC_PLAY, "&Play");
        this->GetDlgItem(IDC_PLAY)->SetFocus();
        this->GetDlgItem(IDC_SAVEFILE)->EnableWindow(FALSE);
        this->GetDlgItem(IDC_SETUP)->EnableWindow(FALSE);
        this->GetDlgItem(IDC_RESOLUTION)->EnableWindow(FALSE);
        this->GetDlgItem(IDC_SAVEIMG)->EnableWindow(FALSE);

        m_RunMode = RUNMODE_STOP;
        return;
    }
    else
    {
        // index of the currently selected item in the multi-camera combo box
        BYTE CamNum = this->m_MultiCamList.GetCurSel();
        if (this->m_MultiCamList.GetCount() == 0)
        {
            CamNum = 0;
        }

        if (R_ROI == m_Resolution)
        {
            CameraSetROI(m_HOff, m_VOff, m_Width, m_Height);   // fp in dll, sets the resolution
        }

        if (STATUS_OK != CameraInit(SnapThreadCallback,
                                    m_Resolution,
                                    m_pDlg->GetDlgItem(IDC_PICTURE)->m_hWnd,   // NULL
                                    CamNum + 1,
                                    m_pDlg))
        {
            MessageBox("Init Camera failed", "Error", MB_OK | MB_ICONSTOP);
            return;
        }

#ifdef ENABLE_READ_SN_DEBUG
        char SN[32];
        CameraReadSN(SN, 32);
        CString sSN = SN;
        CString sUserSN = "WWW.D-IMAGE.NET 0755-61860636";
        if (sSN == sUserSN)
        {
            // MessageBox(x, "SN is OK!", MB_OK | MB_ICONSTOP);
        }
        else   // SN check failed: rewrite it once; make sure the EEPROM is writable before writing
        {
            char* pAddr = sUserSN.GetBuffer(32);
            CameraWriteSN(pAddr, 32);

            char cNewSn[32];
            CameraReadSN(cNewSn, 32);
            CString sNewSN = cNewSn;
            if (sNewSN == sUserSN)
            {
                MessageBox("SN Rewrite Ok!", "SN Error", MB_OK | MB_ICONSTOP);
            }
            else
            {
                MessageBox("SN Rewrite Error!", "SN Error", MB_OK | MB_ICONSTOP);
            }
            CameraUnInit();
            return;
        }
#endif

        // Set the auto white balance (AWB) window to the central quarter of the image
        {
            INT width  = 0;
            INT height = 0;
            CameraGetImageSize(&width, &height);
            CameraSetWBWindow(width >> 2, height >> 2, width >> 1, height >> 1);
        }

        this->SetDlgItemText(IDC_PLAY, "Sto&p");
        this->GetDlgItem(IDC_SAVEFILE)->EnableWindow(TRUE);
        this->GetDlgItem(IDC_SETUP)->EnableWindow(TRUE);
        this->GetDlgItem(IDC_RESOLUTION)->EnableWindow(TRUE);
        this->GetDlgItem(IDC_SAVEIMG)->EnableWindow(TRUE);

        /* WM_USERDEFMSG is a user-defined message. CameraSetMessage() registers the camera
           message ID; that message carries WPARAM and LPARAM values which are passed to the
           message handler OnVideoMsg() for inspection and processing. */
        CameraSetMessage(this->m_hWnd, WM_USERDEFMSG);
        CameraPlay();

        m_iVideoCnt = 0;
        m_iErrorFrames = 0;
        m_RunMode = RUNMODE_PLAY;
        return;
    }
}
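// OnVideoMsg() is named in the comment above but its body is not part of this snippet.
// A rough sketch of how WM_USERDEFMSG could be routed and handled in MFC; the meaning of
// wParam/lParam is defined by the camera DLL, so the frame/error split below is only a
// placeholder built on the counters the dialog already resets (m_iVideoCnt, m_iErrorFrames).
//
// message map entry (in the .cpp):
//   ON_MESSAGE(WM_USERDEFMSG, &CDialog3::OnVideoMsg)
// declaration (in the .h):
//   afx_msg LRESULT OnVideoMsg(WPARAM wParam, LPARAM lParam);
LRESULT CDialog3::OnVideoMsg(WPARAM wParam, LPARAM lParam)
{
    if (wParam == 0)        // hypothetical "frame OK" code from the DLL
        m_iVideoCnt++;
    else                    // any other code counted as an error frame
        m_iErrorFrames++;
    return 0;
}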
int Process(int argc, const char* argv[])
{
    std::cout << "/*************************************/" << std::endl;
    std::cout << "Input LANE_IMG_RECORD" << std::endl;
    std::cout << "Input LANE_DETECTOR" << std::endl;
    std::cout << "Input DATA_RECORD" << std::endl;
    std::cout << "Input SEND_DATA" << std::endl;
    std::cout << "Input SAMPLING_FREQ" << std::endl;
    std::cout << "Input COMPRESSION_RATE" << std::endl;
    std::cout << "Input YAW_ANGLE" << std::endl;
    std::cout << "Input PITCH_ANGLE" << std::endl;
    std::cout << "Input TIME_BASELINE" << std::endl;
    std::cout << "Input LANE_ANALYSER" << std::endl;
    std::cout << "/*************************************/" << std::endl;

    if (argc < 11) {
        std::cout << "Not enough parameters" << std::endl;
        return -1;
    }

    /// Debug markers
    int LANE_IMG_RECORD  = atoi(argv[1]);   // record the lane image
    int LANE_DETECTOR    = atoi(argv[2]);   // whether to detect lanes
    int DATA_RECORD      = atoi(argv[3]);   // record the lane features
    int SEND_DATA        = atoi(argv[4]);   // whether to send data
    int SAMPLING_FREQ    = atoi(argv[5]);   // sampling frequency
    int COMPRESSION_RATE = atoi(argv[6]);   // compression rate for recorded images
    double YAW_ANGLE     = atof(argv[7]);   // yaw   - X
    double PITCH_ANGLE   = atof(argv[8]);   // pitch - Y
    double TIME_BASELINE = atof(argv[9]);   // sec
    int LANE_ANALYSER    = atoi(argv[10]);

    // std::cout << "LANE_IMG_RECORD: " << LANE_IMG_RECORD << std::endl;
    // std::cout << "DATA_RECORD: " << DATA_RECORD << std::endl;
    // std::cout << "SAMPLING_FREQ: " << SAMPLING_FREQ << std::endl;
    // std::cout << "COMPRESSION_RATE: " << COMPRESSION_RATE << std::endl;
    // std::cout << "LANE_DETECTOR: " << LANE_DETECTOR << std::endl;
    // std::cout << "NUM_WINDOW_SAMPLE: " << NUM_WINDOW_SAMPLE << std::endl;

    std::cout << "/*****************************************/" << std::endl;
    std::cout << "Press q / Q / ESC to quit " << std::endl;
    std::cout << "Press s / S to stop " << std::endl;
    std::cout << "/*****************************************/" << std::endl;

    // Init parameters
    int idx = FRAME_START;   // index for the image sequence
    int sampleIdx = 1;

    //! Get the current time
    time_t t;
    time(&t);
    tm *tmTime = localtime(&t);
    char currentTime[50];
    sprintf(currentTime, "%02d-%02d-%d_%02dh%02dm%02ds",
            tmTime->tm_mday, tmTime->tm_mon + 1, tmTime->tm_year + 1900,
            tmTime->tm_hour, tmTime->tm_min, tmTime->tm_sec);
    std::cout << "Current Time: " << currentTime << std::endl;

    //! Init the record file
    std::ofstream laneFeatureFile;
    char *laneFeaturePath = new char[100];
    if (DATA_RECORD) {
        sprintf(laneFeaturePath, FILE_LANE_FEATURES,
                tmTime->tm_mday, tmTime->tm_mon + 1, tmTime->tm_year + 1900,
                tmTime->tm_hour, tmTime->tm_min, tmTime->tm_sec);
        InitRecordData(laneFeatureFile, laneFeaturePath, laneFeatureName, NUM_LANE_FEATURES);
    }

    //! Create a new dir for recording the images
    char *laneRawImgPath = new char[100];
    DIR *pDir = NULL;
    if (LANE_IMG_RECORD) {
        do {
            sprintf(laneRawImgPath, LANE_PATH_NAME,
                    tmTime->tm_mday, tmTime->tm_mon + 1, tmTime->tm_year + 1900,
                    tmTime->tm_hour, tmTime->tm_min, tmTime->tm_sec);
            pDir = opendir(laneRawImgPath);
        } while (pDir != NULL);
        mkdir(laneRawImgPath, S_IRWXU);
    }

    /* Parameters for the lane detector */
    cv::Mat laneMat;
    LaneDetector::LaneDetectorConf laneDetectorConf;
    std::vector<cv::Vec2f> hfLanes;
    std::vector<cv::Vec2f> lastHfLanes;
    std::vector<cv::Vec2f> preHfLanes;

    // lane features
    std::vector<double> LATSDBaselineVec;
    std::deque<LaneDetector::InfoCar> lateralOffsetDeque;
    std::deque<LaneDetector::InfoCar> LANEXDeque;
    std::deque<LaneDetector::InfoTLC> TLCDeque;
    LaneDetector::LaneFeature laneFeatures;

    double lastLateralOffset = 0;
    double lateralOffset = 0;    // lateral offset
    int detectLaneFlag = -1;     // init state -> normal state 0
    int isChangeLane = 0;        // whether a lane change happens
    int changeDone = 0;          // lane change finished
    int muWindowSize = 5;        // initial window size: 5 (samples)
    int sigmaWindowSize = 5;     // initial window size: 5 (samples)

    // Initialize the lane Kalman filter
    cv::KalmanFilter laneKalmanFilter(8, 8, 0);                       // (rho, theta, delta_rho, delta_theta) x 2
    cv::Mat laneKalmanMeasureMat(8, 1, CV_32F, cv::Scalar::all(0));   // (rho, theta, delta_rho, delta_theta)
    int laneKalmanIdx = 0;                                            // marker of Kalman start

    LaneDetector::InitlaneFeatures(laneFeatures);
    if (LANE_DETECTOR) {
        LaneDetector::InitlaneDetectorConf(laneMat, laneDetectorConf, 1);   // KIT 1, ESIEE 2
        LaneDetector::InitLaneKalmanFilter(laneKalmanFilter, laneKalmanMeasureMat, laneKalmanIdx);
    }

    /* Inter-process communication */
    key_t ipckey;
    int mq_id;
    struct {
        long type;
        char text[1024];
    } laneMsg;

    if (SEND_DATA) {
        /* Generate the IPC key */
        ipckey = ftok(KEY_PATH, 'a');
        if (ipckey == -1) {
            printf("Key Error: %s\n", strerror(errno));
            exit(1);
        }

        mq_id = msgget(ipckey, 0);
        if (mq_id == -1) {
            // MQ doesn't exist yet
            mq_id = msgget(ipckey, IPC_CREAT | IPC_EXCL | 0666);
            printf("LaneDetector creates a new MQ %d\n", mq_id);
        } else {
            // MQ already exists
            mq_id = msgget(ipckey, IPC_EXCL | 0666);
            printf("LaneDetector uses an existing MQ %d\n", mq_id);
        }
        //printf("Lane identifier is %d\n", mq_id);
        if (mq_id == -1) {
            throw std::logic_error("Can't build pipeline");
        }
        //printf("This is the LaneDetectSim process, %d\n", getpid());
    }

    /* Entrance of the process */
    double initTime = (double)cv::getTickCount();
    double intervalTime = 0;
    double execTime = 0;   // execution time for each frame
    double pastTime = 0;
    double lastStartTime = (double)cv::getTickCount();
    char key;
    double delay = 1;

    //! Camera parameters
    tCamera Camera;
    tPvErr errCode;

    // initialize the PvAPI
    if ((errCode = PvInitialize()) != ePvErrSuccess) {
        printf("PvInitialize err: %u\n", errCode);
    }
    else {
        // IMPORTANT: Initialize camera structure. See tPvFrame in PvApi.h for more info.
        memset(&Camera, 0, sizeof(tCamera));

        // wait for a camera to be plugged in
        WaitForCamera();

        // get first camera found
        if (CameraGet(&Camera)) {
            // open camera
            if (CameraSetup(&Camera)) {
                // start camera streaming
                if (CameraStart(&Camera)) {
                    while (idx <= FRAME_END) {
                        // start time of this frame in the process
                        double startTime = (double)cv::getTickCount();

                        /* Lane detection starts */
                        if (WaitForTrigger() && CameraSnap(&Camera)) {
                            /* Read the saved image back from the camera */
                            laneMat = cv::imread(Camera.Filename);

                            // Reduce the size of the raw image
                            cv::resize(laneMat, laneMat, cv::Size(WIDTH*COEF, HEIGHT*COEF), 0, 0, CV_INTER_AREA);

                            /* Record the resized raw image */
                            if (LANE_IMG_RECORD) {
                                std::vector<int> compression_params;
                                compression_params.push_back(CV_IMWRITE_JPEG_QUALITY);
                                compression_params.push_back(COMPRESSION_RATE);

                                char *rawLaneName = new char[50];
                                char *tempName = new char[100];
                                sprintf(rawLaneName, LANE_IMG_NAME, idx);
                                strcpy(tempName, laneRawImgPath);
                                strcat(tempName, rawLaneName);
                                cv::imwrite(tempName, laneMat, compression_params);
                                delete [] tempName;
                                delete [] rawLaneName;
                            }

                            if (LANE_DETECTOR) {
                                ProcessLaneImage(laneMat, laneDetectorConf, laneKalmanFilter,
                                                 laneKalmanMeasureMat, laneKalmanIdx,
                                                 hfLanes, lastHfLanes, lastLateralOffset,
                                                 lateralOffset, isChangeLane, detectLaneFlag,
                                                 idx, execTime, preHfLanes, changeDone,
                                                 YAW_ANGLE, PITCH_ANGLE);
                            }
                        }

                        /* Calculate the running time for every sample */
                        pastTime = ((double)cv::getTickCount() - initTime) / cv::getTickFrequency();
                        // printf("@Lane Sampling passes %f sec\n", pastTime);
                        char *text_pastTime = new char[50];
                        sprintf(text_pastTime, "LaneDetector Time: %.2f sec", pastTime);
                        cv::putText(laneMat, text_pastTime, cv::Point(0, laneMat.rows-5), cv::FONT_HERSHEY_SIMPLEX, 0.3, CV_RGB(0,255,0));
                        delete [] text_pastTime;

                        // time between two consecutive frames
                        intervalTime = (startTime - lastStartTime) / cv::getTickFrequency();
                        lastStartTime = startTime;

                        /* Generate lane indicators */
                        if (LANE_DETECTOR && LANE_ANALYSER) {
                            /// First init the baseline, then get the lane measures
                            if (pastTime < TIME_BASELINE) {
                                /// Get lane baseline
                                LaneDetector::GetLaneBaseline(sampleIdx, SAMPLING_TIME,
                                                              muWindowSize, sigmaWindowSize,
                                                              lateralOffset, LATSDBaselineVec,
                                                              lateralOffsetDeque, LANEXDeque,
                                                              TLCDeque, laneFeatures, intervalTime);
                                //idx_baseline = sampleIdx;
                            }
                            else {
                                //sampleIdx -= idx_baseline;
                                LaneDetector::GenerateLaneIndicators(sampleIdx, SAMPLING_TIME,
                                                                     muWindowSize, sigmaWindowSize,
                                                                     lateralOffset,
                                                                     lateralOffsetDeque, LANEXDeque,
                                                                     TLCDeque, laneFeatures, intervalTime);
                                //! LATSD
                                char *text_LATSD = new char[30];
                                sprintf(text_LATSD, "L1. LATSD: %.4f", laneFeatures.LATSD);
                                cv::putText(laneMat, text_LATSD, cv::Point(0, 70), cv::FONT_HERSHEY_SIMPLEX, 0.3, CV_RGB(0,255,0));
                                delete [] text_LATSD;
                                //! LATMEAN
                                char *text_LATMEAN = new char[30];
                                sprintf(text_LATMEAN, "L2. LATMEAN: %.4f", laneFeatures.LATMEAN);
                                cv::putText(laneMat, text_LATMEAN, cv::Point(0, 80), cv::FONT_HERSHEY_SIMPLEX, 0.3, CV_RGB(0,255,0));
                                delete [] text_LATMEAN;
                                //! LANEDEV
                                char *text_LANEDEV = new char[30];
                                sprintf(text_LANEDEV, "L3. LANEDEV: %.4f", laneFeatures.LANEDEV);
                                cv::putText(laneMat, text_LANEDEV, cv::Point(0, 90), cv::FONT_HERSHEY_SIMPLEX, 0.3, CV_RGB(0,255,0));
                                delete [] text_LANEDEV;
                                //! LANEX
                                char *text_LANEX = new char[30];
                                sprintf(text_LANEX, "L4. LANEX: %.4f", laneFeatures.LANEX);
                                cv::putText(laneMat, text_LANEX, cv::Point(0, 100), cv::FONT_HERSHEY_SIMPLEX, 0.3, CV_RGB(0,255,0));
                                delete [] text_LANEX;
                                //! TLC
                                char *text_TLC = new char[30];
                                sprintf(text_TLC, "L5. TLC: %.4f", laneFeatures.TLC);
                                cv::putText(laneMat, text_TLC, cv::Point(0, 110), cv::FONT_HERSHEY_SIMPLEX, 0.3, CV_RGB(0,255,0));
                                delete [] text_TLC;
                                //! TLC_2s
                                char *text_TLC_2s = new char[30];
                                sprintf(text_TLC_2s, "L6. TLC_2s: %d", laneFeatures.TLC_2s);
                                cv::putText(laneMat, text_TLC_2s, cv::Point(0, 120), cv::FONT_HERSHEY_SIMPLEX, 0.3, CV_RGB(0,255,0));
                                delete [] text_TLC_2s;
                                char *text_TLCF_2s = new char[50];
                                sprintf(text_TLCF_2s, "L7. Fraction_TLC_2s: %f", laneFeatures.TLCF_2s);
                                cv::putText(laneMat, text_TLCF_2s, cv::Point(0, 130), cv::FONT_HERSHEY_SIMPLEX, 0.3, CV_RGB(0,255,0));
                                delete [] text_TLCF_2s;
                                //! TLC_halfs
                                char *text_TLC_halfs = new char[30];
                                sprintf(text_TLC_halfs, "L8. TLC_halfs: %d", laneFeatures.TLC_halfs);
                                cv::putText(laneMat, text_TLC_halfs, cv::Point(0, 140), cv::FONT_HERSHEY_SIMPLEX, 0.3, CV_RGB(0,255,0));
                                delete [] text_TLC_halfs;
                                char *text_TLCF_halfs = new char[50];
                                sprintf(text_TLCF_halfs, "L9. Fraction_TLC_halfs: %f", laneFeatures.TLCF_halfs);
                                cv::putText(laneMat, text_TLCF_halfs, cv::Point(0, 150), cv::FONT_HERSHEY_SIMPLEX, 0.3, CV_RGB(0,255,0));
                                delete [] text_TLCF_halfs;
                                //! TLC_min
                                char *text_TLC_min = new char[30];
                                sprintf(text_TLC_min, "L10. TLC_min: %.4f", laneFeatures.TLC_min);
                                cv::putText(laneMat, text_TLC_min, cv::Point(0, 160), cv::FONT_HERSHEY_SIMPLEX, 0.3, CV_RGB(0,255,0));
                                delete [] text_TLC_min;
                            } //end if
                        } //end generate lane indicators

                        /* Record lane features */
                        if (DATA_RECORD) {
                            laneFeatures.frame = idx;
                            RecordLaneFeatures(laneFeatureFile, laneFeatures, execTime, pastTime);
                        } //end if

                        /* Send the data as a string to the fusion center */
                        if (LANE_DETECTOR && SEND_DATA) {
                            char *str = new char[1024];
                            memset(str, 0, 1024);
                            CodeMsg(laneFeatures, str);

                            strcpy(laneMsg.text, str);   //!!! potential overflow if str is not null-terminated within 1024 bytes
                            laneMsg.type = 1;
                            //printf("Data sends: %s\n", laneMsg.text);

                            //! A flag of 0 blocks; IPC_NOWAIT would make the call fail on a full queue.
                            if (msgsnd(mq_id, &laneMsg, sizeof(laneMsg.text), 0) == -1) {
                                throw std::runtime_error("LaneDetectSim: msgsnd failed!");
                            }
                            delete [] str;
                        }

                        /* Adjust the interval time to hold the fixed sampling frequency */
                        double execFreq;
                        do {
                            //cv::waitKey(1);
                            execFreq = 1.0 / (((double)cv::getTickCount() - startTime) / cv::getTickFrequency());
                        } while (execFreq >= SAMPLING_FREQ);

                        char *text = new char[30];
                        sprintf(text, "Process: %.2f Hz", execFreq);
                        cv::putText(laneMat, text, cv::Point(0, 60), cv::FONT_HERSHEY_SIMPLEX, 0.8, CV_RGB(0, 255, 0));
                        delete [] text;

                        cv::imshow("Lane", laneMat);
                        key = cv::waitKey(delay);
                        if (key == 'q' || key == 'Q' || 27 == (int)key)   // q, Q or ESC to quit
                            break;
                        else if (key == 's' || key == 'S')
                            delay = 0;
                        else
                            delay = 1;

                        sampleIdx++;   // update the sampling index
                        idx++;
                    } //end while loop

                    laneFeatureFile.close();
                    cv::destroyAllWindows();
                } //if CameraStart

                CameraStop(&Camera);
            } //if CameraSetup

            CameraUnsetup(&Camera);
        } //if CameraGet
    }

    PvUnInitialize();
    return 0;
}