void CApplication::displayWindow() { sf::VideoMode mode; mode.width = m_settings.getValue("Window", "width", "800").toUnsignedInt32(); mode.height = m_settings.getValue("Window", "height", "600").toUnsignedInt32(); mode.bitsPerPixel = 32; CString title = m_settings.getValue("TEngine", "title", "TEngine Application"); bool fullScreen = m_settings.getBooleanValue("Window", "fullscreen" , false); displayWindow(mode, title, fullScreen); }
//--------------------------------------------------------------------------- void File_Exr::Data_Parse() { if (CC4(Buffer+Buffer_Offset)==0x762F3101) //"v/1"+1 //Header Header(); else if (name=="comments" && type=="string") comments(); else if (name=="compression" && type=="compression" && Element_Size==1) compression(); else if (name=="dataWindow" && type=="box2i" && Element_Size==16) dataWindow(); else if (name=="displayWindow" && type=="box2i" && Element_Size==16) displayWindow(); else if (name=="pixelAspectRatio" && type=="float" && Element_Size==4) pixelAspectRatio(); else Skip_XX(Element_Size, "value"); }
// Detect keypoints and find cv::Mat AntiShake::antiShake(Mat &img_1, Mat &img_2, int matches_type, int featurePoints, int corePx, double absoluteRelation) { Mat workImage1, workImage2; reduceDifferences(img_1, img_2, workImage1, workImage2, 7, 7); // STEPS 1 to 4 here // STEP 5: KeyPoint Detection: cv::FeatureDetector *detector = new cv::FastFeatureDetector(corePx, true); std::vector<KeyPoint> keypoints_1, keypoints_2; detector->detect(workImage1, keypoints_1); detector->detect(workImage2, keypoints_2); cout << "==== STEP 5 complete: keypoints detected, (keypoints1.size(), keypoints2.size()) = (" << keypoints_1.size() << ", " << keypoints_2.size() << ")" << endl; delete (detector); // STEP 6: Calculate descriptors (feature vectors) cv::DescriptorExtractor *extractor = new cv::BriefDescriptorExtractor(); Mat descriptors_1, descriptors_2; extractor->compute(workImage1, keypoints_1, descriptors_1); extractor->compute(workImage2, keypoints_2, descriptors_2); cout << "==== STEP 6 complete: extract descriptors" << endl; delete (extractor); // STEP 7: Get Matches vector<DMatch> good_matches; std::vector<Point2f> pts1, pts2; this->getBestMatches(matches_type, featurePoints, good_matches, pts1, pts2, descriptors_1, descriptors_2, keypoints_1, keypoints_2, workImage1.rows, workImage1.cols, absoluteRelation); Mat img_matches; drawMatches(workImage1, keypoints_1, workImage2, keypoints_2, good_matches, img_matches, Scalar::all(-1), Scalar::all(-1), vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS); if (shouldPrint) displayWindow(img_matches, "MATCHES"); cout << "==== STEP 7 complete: finished matching descriptors: " << featurePoints << endl; // STEP 8: Find Homography: vector<uchar> inliers(pts1.size(), 0); Mat homography = validateHomography(pts1, pts2, inliers, true); cout << "==== STEP 8 complete: finished calculating right homographY." << endl; return homography; }
//--------------------------------------------------------------------------- void File_Exr::Data_Parse() { if (name_End==0) ImageData(); else if (name=="channels" && type=="chlist") channels(); else if (name=="comments" && type=="string") comments(); else if (name=="compression" && type=="compression" && Element_Size==1) compression(); else if (name=="dataWindow" && type=="box2i" && Element_Size==16) dataWindow(); else if (name=="displayWindow" && type=="box2i" && Element_Size==16) displayWindow(); else if (name=="pixelAspectRatio" && type=="float" && Element_Size==4) pixelAspectRatio(); else Skip_XX(Element_Size, "value"); }
/* arrange windows. arg:
 *   PJSUA_INVALID_ID (-1): arrange all windows
 *   != -1: arrange only this window id
 */
void arrange_window(pjsua_vid_win_id wid)
{
#if PJSUA_HAS_VIDEO
    pjmedia_coord pos;
    int i, last;

    /* Windows are stacked vertically from a fixed top-left origin. */
    pos.x = 0;
    pos.y = 10;
    /* When a specific window is requested, walk only the windows before it
     * so pos ends up just below them; otherwise walk (and move) every slot. */
    last = (wid == PJSUA_INVALID_ID) ? PJSUA_MAX_VID_WINS : wid;
    for (i=0; i<last; ++i) {
	pjsua_vid_win_info wi;
	pj_status_t status;

	status = pjsua_vid_win_get_info(i, &wi);
	if (status != PJ_SUCCESS)
	    continue;	/* slot not in use; skip it */

	if (wid == PJSUA_INVALID_ID)
	    pjsua_vid_win_set_pos(i, &pos);
	/* Only visible windows consume vertical space. */
	if (wi.show)
	    pos.y += wi.size.h;
    }

    /* Single-window mode: place the requested window below the ones above. */
    if (wid != PJSUA_INVALID_ID)
	pjsua_vid_win_set_pos(wid, &pos);

#ifdef USE_GUI
    displayWindow(wid);
#endif
#else
    PJ_UNUSED_ARG(wid);
#endif
}
// Shows the image in a window void AntiShake::displayWindow(Mat image, string filename) { displayWindow(image, filename, false); }
int main(int argc,char* argv[] ) { //!< In this variable the current frame is stored cv::Mat frame; cv::Mat sobel; cv::Mat canny; cv::Mat gray; cv::Mat scharr; cv::Mat laplacian; std::string displayWindow("Display"); int cam; bool exitVal = false ; if ( argc <= 1) { std::cout << "Usage : ./camera CAM_NUM" << std::endl; std::cout << "CAM_NUM is the camera we wish to open!" << std::endl; std::cout << "Default Laptop Camera will be opened!" << std::endl; cam = 0; } else cam = atoi(argv[1]); cv::VideoCapture camera(cam); if (!camera.isOpened()) { std::cout << "Cannot open the video file" << std::endl; return -1; } int KeyPressed=255; int i=0; std::cout << "Press esc if you want to stop the process" << std::endl; char state; while (true) { camera.grab(); camera.retrieve(frame); cv::imshow("BGR colorspace", frame); //!< Apply Sobel edge detections algorithm applySobel (frame, &sobel); cv::imshow("Sobel algorithm", sobel); //!< Apply Canny edge detection algorithm cvtColor( frame, gray, CV_BGR2GRAY ); applyCanny( gray, &canny); cv::imshow("Canny algorithm", canny); //!< Apply Scharr edge detection algorithm applyScharr (frame, &scharr); cv::imshow("Scharr algorithm", scharr); //!< Apply laplacian edge detection algorithm applyLaplacian (frame, &laplacian); cv::imshow("Laplacian algorithm", laplacian); KeyPressed=cvWaitKey(10) & 255; //!< wait for 'esc' key press for 10 ms switch (KeyPressed) { case (char)27: exitVal = true; break; default: break; } if (exitVal) //!< KeyPressed==esc break; } }