/* We tell the image to resize themself to our current bounds. Our bounds could have changed because Resize may have been called by some layout-objects between Button.CalcSize and Button.Draw. */ void Button::Layout() { int x,y; size_t width,height; x=this->x; y=this->y; width=this->width; height=this->height; if (ShowImage()) { imageX=x; imageY=y+(height-image->GetHeight())/2; x+=image->GetWidth(); width-=image->GetWidth(); } if (ShowObject()) { if (ShowImage()) { x+=OS::display->GetSpaceHorizontal(OS::Display::spaceIntraObject); width-=OS::display->GetSpaceHorizontal(OS::Display::spaceIntraObject); } object->Resize(width,height); object->Move(x+(width-object->GetOWidth()) / 2, y+(height-object->GetOHeight()) / 2); object->Layout(); } Control::Layout(); }
/* Program entry point: opens the IUP toolkit, shows the viewer dialog and
   loads an initial image — from the command line when given, otherwise via
   a file-selection dialog. */
int main(int argc, char* argv[])
{
  IupOpen(&argc, &argv);

  Ihandle* dlg = CreateDialog();
  IupShow(dlg);

  /* Try to get a file name from the command line. */
  if (argc > 1) {
    ShowImage(argv[1], dlg);
  } else {
    char file_name[1024] = "*.*";
    if (IupGetFile(file_name) == 0)
      ShowImage(file_name, dlg);
  }

  IupMainLoop();

  IupDestroy(dlg);
  IupClose();
  return 0;
}
// Handles search-bar commands: switches between the VeryCD and the
// eD2K-global search modes, then moves focus into the edit field with all
// existing text selected so the user can type over it.
BOOL CSearchBarCtrl::OnCommand(WPARAM wParam, LPARAM lParam)
{
	if (wParam == MP_SEARCHVERYCD)
	{
		ShowImage(0);
		SetSearchType(SearchTypeVeryCD);
		SetType(MP_SEARCHEMULE);
		m_isFocus = TRUE;
	}
	else if (wParam == MP_SEARCHEMULE)
	{
		ShowImage(1);
		SetSearchType(SearchTypeEd2kGlobal);
		SetType(MP_SEARCHVERYCD);
		m_isFocus = TRUE;
	}

	if (m_isFocus)
	{
		m_SearchEdit.SetFocus();
		m_SearchEdit.SetSel(0, -1);   // select all text in the edit box
		m_isFocus = FALSE;
	}

	return CWnd::OnCommand(wParam, lParam);
}
void CMFC_systemServerDlg::OnBnClickedButton1() { IplImage* pImg_live = cvLoadImage("objectHis/liveTest0.bmp", 1); IplImage* pImg_depth = cvLoadImage("objectHis/depthTest0.bmp", 1); m_TabPage1.sImage_live = cvCloneImage(pImg_live); m_TabPage1.sImage_depth = cvCloneImage(pImg_depth); ShowImage(m_TabPage1.sImage_live, GetDlgItem(IDC_IMAGE_binPickLive)); ShowImage(m_TabPage1.sImage_depth, GetDlgItem(IDC_IMAGE_binPickLiveDepth)); cvReleaseImage(&pImg_live); cvReleaseImage(&pImg_depth); }
// Renders one text-screen intermission page: background image, then the
// configured text lines, an optional fade-in, and finally a wait for the
// page's display time. Returns true when the wait was aborted (used to
// skip the rest of the intermission in demo mode).
static bool ShowTextScreen(TextScreenIntermissionAction *textscreen, bool demoMode)
{
	if(textscreen->TextSpeed)
		Printf("Warning: Text screen has a non-zero textspeed which isn't supported at this time.\n");

	// Draw the page's background image.
	ShowImage(textscreen, true);

	// Optional pause before the text appears; in demo mode an interrupted
	// wait ends the whole page early.
	if(textscreen->TextDelay)
	{
		if(WaitIntermission(textscreen->TextDelay) && demoMode)
			return true;
	}

	// px/py are the global print-cursor coordinates used by the text
	// drawing routines.
	py = textscreen->PrintY;
	px = textscreen->PrintX;
	for(unsigned int i = 0;i < textscreen->Text.Size();++i)
	{
		FString str = textscreen->Text[i];
		// Strings starting with '$' are looked up in the language table.
		if(str[0] == '$')
			str = language[str.Mid(1)];

		DrawMultiLineText(str, textscreen->TextFont, textscreen->TextColor, textscreen->Alignment, textscreen->Anchor);
	}

	// This really only makes sense to use if trying to display text immediately.
	if(textscreen->FadeTime)
	{
		VL_FadeIn(0, 255, textscreen->FadeTime);
	}

	VW_UpdateScreen();
	return WaitIntermission(textscreen->Time);
}
//////////////////////////////////////////////////////////////////// // Panel::CascadeClassify() // Description: This function has 2 inputs: a path to an image // and a path to a classifier. We are not using Haar Training // in our current application, but we will leave this function // in case someone decides to use it later. If someone does decide // to use it reference this website: // http://www.memememememememe.me/training-haar-cascades/ // We already have the directory structure and scripts set up // in the repository so it will not be hard, there is just // quite a bit of overhead with Haar training when compared to // our current method. //////////////////////////////////////////////////////////////////// void Panel::CascadeClassify(string sImgPath, string sClassPath) { if (!ShowImage(sImgPath, "Original")) return; detectAndDisplay(m_pPanel->m_Image, sClassPath); }
// Draws the form view: loads the welcome bitmap from disk, scales it into
// the pre-allocated welcomeImage buffer and blits it into the
// IDC_WELCOME_PIC picture control.
// NOTE(review): this runs on every draw — it reloads the JPEG and pops a
// message box each time; confirm whether that is intended.
void CFaceEncryptView::OnDraw(CDC *pDc){
	/* Load the initial picture that should be displayed. */
	IplImage *pic;
	pic=cvLoadImage("./res/WelcomeBkPic.jpg",CV_LOAD_IMAGE_COLOR);
	if(pic==NULL) { AfxMessageBox("获取标志图片失败,谢谢!"); return ;}
	cvResize(pic,welcomeImage);   // scale into the welcomeImage buffer
	//***** Method 1 ********
	AfxMessageBox("欢迎使用");
	ShowImage(welcomeImage,IDC_WELCOME_PIC);  // display in the picture control identified by its ID
	if(pic) cvReleaseImage(&pic);
	// Call the base-class implementation.
	CFormView::OnDraw(pDc);
	// /****** Method 2 (kept for reference) *********/
	// CDC* pDC = GetDlgItem( IDC_WELCOME_PIC) ->GetDC();
	// HDC hDC = pDC ->GetSafeHdc();
	// CvvImage m_CvvImage;
	// CRect rect;
	// GetDlgItem(IDC_WELCOME_PIC)->GetClientRect( &rect );
	// m_CvvImage.CopyOf(welcomeImage,1);
	// AfxMessageBox("欢迎使用");
	// m_CvvImage.DrawToHDC(hDC,&rect);
	//ReleaseDC( pDC );
	// Method 3 (kept for reference)
	// CString path="./res/bitmap1.bmp";
	// HWND hwnd=GetDlgItem(IDC_WELCOME_PIC)->m_hWnd;
	//HBITMAP hBitmap=(HBITMAP)LoadImage(AfxGetInstanceHandle(),path,IMAGE_BITMAP,0,0,LR_LOADFROMFILE | LR_CREATEDIBSECTION); // path is the bmp file path
	//::SendMessage(hwnd,STM_SETIMAGE,IMAGE_BITMAP,&hBitmap);
	// ::SendMessage(hwnd,STM_SETIMAGE,IMAGE_BITMAP,(LPARAM)hBitmap);
	// CFormView::OnDraw(pDc); // important: should be called last
	// HBITMAP hBitmap=(HBITMAP)LoadImage(AfxGetApp()->m_hInstance,path,IMAGE_BITMAP, 0, 0,LR_CREATEDIBSECTION | LR_DEFAULTSIZE | LR_LOADFROMFILE);
	// CStatic*pEdit=(CStatic*)GetDlgItem(IDC_STATIC); // IDC_PIC is the picture control's ID
	// pEdit->SetBitmap(hBitmap);
}
int main(int argc,char * argv[]) { int iterTime = 100; IplImage *imgSrc = ImageInit(iterTime); IplImage *imgDes = cvCreateImage(cvSize(imgSrc->width,imgSrc->height),IPL_DEPTH_8U, 1); imgDes = cvCloneImage(imgSrc); clock_t sclock,eclock; time_t stime,etime; sclock = clock(); stime=time(NULL); // SRAD(imgSrc,imgDes,iterTime); SRAD_GPU( imgSrc, imgDes, iterTime); etime=time(NULL); printf("time=%ld\n",etime-stime); eclock = clock(); printf("Total compute time is %fs\n",(eclock-sclock)/(double)(CLOCKS_PER_SEC)); ShowImage(imgSrc,imgDes); cvSaveImage(RESULT_DIR,imgDes); printf("result image has been saved in %s. \n",RESULT_DIR); cvWaitKey(0); cvReleaseImage(&imgSrc); cvReleaseImage(&imgDes); return 0; }
// Demo entry point: loads the target picture, applies the flood fill,
// shows the result and releases the image.
int main()
{
	IplImage *img = cvLoadImage("target.jpg");

	SetImageFloodFill(img);
	ShowImage(img);

	ReleaseImage(img);
	return 0;
}
// Reads the image-display parameters (pixel mode and dimensions) from the
// viewer's widgets and renders the memory at the current address as an image.
void MemoryViewerPanel::OnShowImage(wxCommandEvent& WXUNUSED(event))
{
	// Fix: removed the local copy of m_addr that was assigned but never
	// used (the call below always passed the member directly).
	int mode  = cbox_img_mode->GetSelection();
	int sizex = sc_img_size_x->GetValue();
	int sizey = sc_img_size_y->GetValue();
	ShowImage(this, m_addr, mode, sizex, sizey, false);
}
void ChoroidTab::handleFileChanged (const QModelIndex& index) { if (!index.isValid ()) return; const QString& path = index.sibling (index.row (), 0) .data (CRFilePath).toString (); ShowImage (path); }
// Navigates one level up: when an image is shown, leave it (back to the
// directory view); otherwise move the tree selection to the parent dir.
void ChoroidTab::goUp ()
{
	if (CurrentImage_.isEmpty ())
	{
		const auto& parentIdx = Ui_.DirTree_->currentIndex ().parent ();
		Ui_.DirTree_->setCurrentIndex (parentIdx);
	}
	else
		ShowImage (QUrl ());
}
// Advances to the next image in the QML files model, wrapping around to
// the first entry after the last one.
void ChoroidTab::showNextImage ()
{
	const auto& curName = QFileInfo (CurrentImage_.path ()).fileName ();
	auto item = FindFileItem (curName);
	if (!item)
		return;

	const auto count = QMLFilesModel_->rowCount ();
	const auto nextRow = (item->row () + 1) % count;
	ShowImage (QMLFilesModel_->item (nextRow)->data (ILRImage).value<QUrl> ());
}
// "Show pictures" button handler: takes the directory from the edit box,
// validates it, rescans the directory contents and displays the images.
void BrowsePicture::ShowPicButtonClicked()
{
	pic_path = ui.dir_edit->text();

	if (!ReadFile(pic_path))
		return;

	clearDir(pic_path);
	ShowImage();
}
// Loads a FreeImage bitmap from the given file path, displays it and
// unloads it again.
void ShowImageFromFile(char *filepath)
{
	FIBITMAP* dib = FreeImageAlgorithms_LoadFIBFromFile(filepath);
	assert(dib != NULL);

	// Fix: assert() compiles away in release builds (NDEBUG), which would
	// let a failed load fall through to a NULL dereference — guard explicitly.
	if (dib == NULL)
		return;

	ShowImage(dib);
	FreeImage_Unload(dib);
}
// Steps back to the previous image in the QML files model, wrapping to
// the last entry when already at the first one.
void ChoroidTab::showPrevImage ()
{
	const auto& curName = QFileInfo (CurrentImage_.path ()).fileName ();
	auto item = FindFileItem (curName);
	if (!item)
		return;

	auto row = item->row ();
	row = row > 0 ? row - 1 : QMLFilesModel_->rowCount () - 1;

	ShowImage (QMLFilesModel_->item (row)->data (ILRImage).value<QUrl> ());
}
// Executes a fader intermission action. A fade-in draws the page first and
// then fades up from black; a fade-out holds whatever the previous page
// left on screen, so nothing needs to be drawn before fading down.
static void ShowFader(FaderIntermissionAction *fader)
{
	if(fader->Fade == FaderIntermissionAction::FADEIN)
	{
		ShowImage(fader, true);
		VL_FadeIn(0, 255, fader->Time);
	}
	else if(fader->Fade == FaderIntermissionAction::FADEOUT)
	{
		VL_FadeOut(0, 255, 0, 0, 0, fader->Time);
	}
}
// Second frame-render pass: restores the default render target (so the
// downscale RTV can be unbound), runs the repeated-box-filter Gaussian,
// and draws the filtered result.
void GaussianMain::OnD3D11FrameRender2(ID3D11DeviceContext* pd3dImmediateContext)
{
	ID3D11RenderTargetView* rtv_array[1];

	// Restore the default render target so that g_pRTV_Downscale can be unbound.
	rtv_array[0] = g_pRTV_Default;
	pd3dImmediateContext->OMSetRenderTargets(1, rtv_array, g_pDSV_Default);

	// Perform Gaussian filtering with repeated box filters
	ApplyGaussianFilter(pd3dImmediateContext);

	// Display the filtering result
	ShowImage(pd3dImmediateContext);

	// The D3D states must be restored at the end of frame. Otherwise the runtime
	// will complain unreleased resource due to D3DX11Effect.
	//RestoreDefaultStates(pd3dImmediateContext);
}
// Mouse handler for the server dialog. Two invisible hot zones act as
// buttons: clicking inside the live-view area starts the capture worker
// thread; clicking the area to its right grabs one depth frame from the
// Kinect, perspective-warps it into the configured ROI and stores/shows
// it as the depth "ground" reference image.
void CMFC_systemServerDlg::OnLButtonDown(UINT nFlags, CPoint point)
{
	CRect winRect;
	if (point.x > 30 && point.x < (30 + 320) && point.y > 30 && point.y < (30 + 240))
	{
		// Open live view: start the worker thread in mode 0.
		UpdateData(TRUE);
		m_threadPara.m_case = 0;
		m_threadPara.hWnd = m_hWnd;
		m_lpThread = AfxBeginThread(&CMFC_systemServerDlg::MythreadFun, (LPVOID)&m_threadPara);
	}
	if (point.x > (30 + 320) && point.x < (30 + 320 + 320) && point.y > 30 && point.y < (30 + 240))
	{
		// Capture the depth "ground" reference frame.
		Kinect2Capture kinect;
		kinect.Open(1, 1, 0);
		kinect.uDepthMax = 2000;
		IplImage* img_get = nullptr;
		// Poll until the sensor delivers a depth frame.
		// NOTE(review): this loops forever if no frame ever arrives, and the
		// successfully captured img_get is never released after the break —
		// confirm who owns the buffer returned by DepthImage().
		while (1)
		{
			img_get = kinect.DepthImage();
			if (img_get != NULL)
			{
				cv::Mat src_img = img_get;
				// Source and destination quads for the warp
				// (top-left, bottom-left, bottom-right, top-right).
				cv::Point2f pts1[4] = { roi.pts_depth[0], roi.pts_depth[1], roi.pts_depth[2], roi.pts_depth[3] };
				cv::Point2f pts2[4] = { roi.pts_to[0], roi.pts_to[1], roi.pts_to[2], roi.pts_to[3] };
				// Compute the perspective transform matrix.
				cv::Mat perspective_matrix = cv::getPerspectiveTransform(pts1, pts2);
				cv::Mat dst_img;
				// Warp into a fixed 320x240 image.
				cv::warpPerspective(src_img, dst_img, perspective_matrix, cvSize(320, 240), cv::INTER_LINEAR);
				m_TabPage1.sImage_depthGround = cvCloneImage(&(IplImage)dst_img);
				ShowImage(m_TabPage1.sImage_depthGround, GetDlgItem(IDC_IMAGE_binPickLiveDepth), 1);
				break;
			}
			cvReleaseImage(&img_get);
		}
	}
	CDialogEx::OnLButtonDown(nFlags, point);
}
// Constructor: sets up the UI, opens camera 0, prepares the color-histogram
// back-projection parameters, grabs an initial frame and starts a 24 fps
// timer whose timeout slot (ShowImage) drives the processing loop.
FirstOpenCVwithQt::FirstOpenCVwithQt(QWidget *parent)
	: QMainWindow(parent),cv_img(cv::imread("D:/62a7d933c895d143cfbebbd773f082025baf077d.jpg"))
{
	ui.setupUi(this);
	const int FPS = 24;
	const int FREQ = 1000/FPS;   // timer period in milliseconds

	cv_cam = new VideoCapture(0);
	cv_cam->open(0);

	// Color-histogram back-projection parameters: full 0..255 range on all
	// three channels, 256 bins each.
	hranges[0] = 0.0f;
	hranges[1] = 255.0f;
	channels[0] = 0;
	channels[1] = 1;
	channels[2] = 2;
	float threshold = 0.05f;
	histSize[0] = 256;
	histSize[1] = 256;
	histSize[2] = 256;
	ranges[0] = hranges;
	ranges[1] = hranges;
	ranges[2] = hranges;

	// Sample patch used as the back-projection model.
	ROI_img = (cv::imread("./hand.jpg"))(cv::Rect(40, 40, 100, 100));

	Sleep(2000);            // give the camera time to start delivering frames
	*cv_cam >> cv_img;
	cv_frame = cv_img.clone();
	cv_img_pre = cv_img.clone();

	// Start the capture/processing timer.
	timer.start(FREQ);
	connect(&timer,SIGNAL(timeout()),this,SLOT(ShowImage()));

	label = new QLabel(this);
	label->move(100, 50);   // position of the image inside the window
	// NOTE(review): cv_img_processed is wrapped before any processing has
	// run, and the hard-coded 1280x720/RGB32 layout must match its actual
	// buffer — confirm against the processing code.
	img = QImage((const unsigned char*)(cv_img_processed.data), 1280, 720, QImage::Format_RGB32);
	label->setPixmap(QPixmap::fromImage(img));
	label->resize(QSize(640, 480));
	label->show();
}
// Canvas button callback: on a left-button press, asks the user for a file
// and shows the chosen image. Repainting is suppressed while the file
// dialog is open so the canvas does not redraw underneath it.
static int cbCanvasButton(Ihandle* iup_canvas, int but, int pressed)
{
	char file_name[200] = "*.*";

	if (but != IUP_BUTTON1 || !pressed)
		return IUP_DEFAULT;

	disable_repaint = 1;
	int cancelled = IupGetFile(file_name);
	disable_repaint = 0;

	if (cancelled)
		return IUP_DEFAULT;

	ShowImage(file_name, IupGetDialog(iup_canvas));
	return IUP_DEFAULT;
}
/////////////////////////////////////////////////////////////////// // Panel::DetectEdges() // Description: This function is the entry point for Canny // Edge Detection. It is called by the CLR project which // is called by the C# dialog. Image path is the image which // Canny Edge Detection will be applied. If debug is true // we will call CannyDetectionDebug() which is very similar to // CannyDetection except with debugging statements. This is // not good programming but it is the way we chose to implement // it with short time remaining. /////////////////////////////////////////////////////////////////// void Panel::DetectEdges(string sImgPath, bool debug) { if (!ShowImage(sImgPath, "Original")) return; // Set the image boundary if we have one Mat image; if (m_roi.width && m_roi.width <= m_Image.cols && m_roi.height && m_roi.height <= m_Image.rows) image = m_pPanel->m_Image(m_roi); else image = m_pPanel->m_Image; // Canny Edge and Hough Line Detection Mat edges; if (debug) edges = CannyDetectionDebug(image, true); else edges = CannyDetection(image, true); }
// Draws two red lines from `start` onto a color copy of the grayscale
// input: one slanted by +angle and one by -angle around the nominal end
// x-coordinate. The annotated image is written to "Line<image_name>" and
// then displayed alongside the original.
void MyLine(Mat img, Point start, Point end, string image_name, float angle)
{
	const int thickness = 2;
	const int lineType = 8;

	// Create an 8-bit color image. IMPORTANT: initialize the image,
	// otherwise the conversion result would end up as 32F.
	cv::Mat img_rgb(img.size(), CV_8UC3);
	cv::cvtColor(img, img_rgb, CV_GRAY2RGB);

	const int lineEnd = end.x;

	end.x = lineEnd + lineEnd * angle;
	line(img_rgb, start, end, Scalar(0, 0, 255), thickness, lineType);

	end.x = lineEnd - lineEnd * angle;
	line(img_rgb, start, end, Scalar(0, 0, 255), thickness, lineType);

	String name = "Line" + image_name;
	imwrite(name, img_rgb);
	ShowImage(img_rgb, img);
}
int main(int argc, char *argv[]) { QApplication App(argc, argv); QImage MemoryImage("D:/Vyzkumak/Workspace/Qt/Qt/Debug/Picture.png"); QImage ShowImage("D:/Vyzkumak/Workspace/Qt/Qt/Debug/Picture.png"); QLabel label; label.setPixmap(QPixmap::fromImage(ShowImage)); label.show(); int w = MemoryImage.width(); int h = MemoryImage.height(); start = time (NULL); for (int j=0; j<1; j++){ qreal H, S, V; QColor Color; qreal cont=0; for (int i=0; i<100; i++){ cont=cont+0.01; for ( int x = 0; x < w; x++ ){ for ( int y = 0; y < h; y++ ){ QRgb Rgb = MemoryImage.pixel( x, y); Color.setRgb(Rgb); Color.getHsvF(&H, &S, &V); qreal v=V*cont; Color.setHsvF(H, S, v); Rgb = Color.rgb(); ShowImage.setPixel(x, y, Rgb); } } label.setPixmap(QPixmap::fromImage(ShowImage)); label.repaint(); } } end = time (NULL); int length = (int) (end - start); std::cout << "Length of rendering 100 frames was: " << length << " seconds.\n"; std::cout << "Average fps is: " << 100. / (float)length << " frames per seconds.\n"; App.exec(); }
// Runs the cast-member zoomer animation for one cast entry. Each loop
// iteration advances the zoomer by the elapsed tics, redraws either the 3D
// view (when an intermission map is loaded) or the cast background plus the
// member's name, and processes input. Returns true when Escape/Enter was
// pressed (leave the whole cast sequence), false when the zoomer finished
// on its own or Space skipped to the next entry.
static bool R_CastZoomer(const Frame *frame, CastIntermissionAction *cast)
{
	// This may appear to animate faster than vanilla, but I'm fairly sure
	// that's because while the time on screen is adaptive, the frame durations
	// were decremented by one each frame.
	TObjPtr<SpriteZoomer> zoomer = new SpriteZoomer(frame, 224);
	do
	{
		// Tick once per elapsed tic; the zoomer may destroy itself, which
		// nulls the TObjPtr and ends the animation.
		for(unsigned int t = tics;zoomer && t-- > 0;)
			zoomer->Tick();
		if(!zoomer)
			break;

		if(intermissionMapLoaded)
			ThreeDRefresh();
		else
		{
			// Unlike a 3D view, we will overwrite the whole screen here
			ShowImage(cast, true);
			DrawCastName(cast);
		}
		zoomer->Draw();
		VH_UpdateScreen();
		IN_ProcessEvents();
		if(Keyboard[sc_Space] || Keyboard[sc_Escape] || Keyboard[sc_Enter])
		{
			// Escape/Enter exit the sequence; Space only skips this entry.
			bool done = Keyboard[sc_Escape] || Keyboard[sc_Enter];
			Keyboard[sc_Space] = Keyboard[sc_Escape] = Keyboard[sc_Enter] = false;
			zoomer->Destroy();
			if(done)
				return true;
			break;
		}
		CalcTics();
	}
	while(true);
	return false;
}
////////////////////////////////////////////////////////////////////////////// // Panel::DetectFeatures() // Description: This is the function which finds boundaries that contain // the part we are looking for using BRISK feature detector. If one or // two features are found the return value is true. If no features are // found, the return value is false. More information on feature // detection can be found in FeatureDetector.h ///////////////////////////////////////////////////////////////////////////// void Panel::DetectFeatures(string scenePath, string objPath, bool exceedsBorder, bool featureRotated) { if (!ShowImage(scenePath, "Scene", false)) return; MyFeatureDetector detector; detector.Detect(m_Image, objPath, m_roi, m_feature_height, m_feature_width, m_conversionRate, exceedsBorder, featureRotated, false); Mat boundImg; if (m_roi.width) { boundImg = m_Image(m_roi); // Store the image boundary in the settings file FileStorage fs("../../Config/image_boundary.xml", FileStorage::WRITE); fs << "image_boundary" << m_roi; fs << "conversion_rate" << m_conversionRate; fs.release(); } else boundImg = m_Image; // namedWindow("bound image", CV_WINDOW_NORMAL); // imshow("bound image", boundImg); }
// Starts playback by rendering the current frame.
void VideoFromFile::Play()
{
	ShowImage();
}
void ChoroidTab::handleQMLImageSelected (const QString& url) { ShowImage (QUrl (url)); }
// Convenience overload: converts a local file path into a file:// URL and
// forwards to the QUrl-based overload.
void ChoroidTab::ShowImage (const QString& path)
{
	const auto& url = QUrl::fromLocalFile (path);
	ShowImage (url);
}
/********************** File browser for a specific file type ***************************/
// Draws a touch-driven file list on the LCD for the given file type and
// lets the user page through it with the up/down arrows. num is the number
// of entries available; returns the index of the selected file name
// (0 when the user backs out without selecting).
u16 FileExplorer(u16 num,FileType ft)
{
	u16 cnt = 0;
	u16 index=0;          // index of the selected file name to return
	u16 curruntIndex=0;   // offset of the page being shown; 8 rows per page, stepped by +/-8
	EXPLORER_BUTTON eb;
	u8 fresh=1;           // nonzero when the list area must be redrawn

	LCD_Clear(White);                         // white background
	LCD_FillRect(0, 0, 20, 400,BackColor);    // blue title bar
	LCD_FillRect(0, 220, 20, 400,BackColor);  // blue menu bar
	LCD_FillRect(360, 20, 200, 40,BackColor); // blue scroll bar
	LCD_SetTextColor(Black);
	LCD_SetBackColor(BackColor);
	LCD_DisplayString(10,2,"文件浏览器");
	LCD_DisplayString(5,222,"选择");
	LCD_DisplayString(360,222,"返回");
	LCD_SetBackColor(ButtonColor);
	LCD_DisplayString(360+12,20+12,"∧");
	LCD_DisplayString(360+12,220-12-16,"∨");
	LCD_DrawLine(0,20,400,Horizontal);        // black divider lines
	LCD_DrawLine(0,220,400,Horizontal);
	for(cnt=1;cnt<8;cnt++)LCD_DrawLine(0,cnt*25+20,360,Horizontal);

	while(1)
	{
		if(fresh)
		{
			// Draw the file rows for the current page (icon + name per row).
			switch(ft)
			{
			case MP3:
				for (cnt = 1;cnt<9&&cnt+curruntIndex<=num;cnt++)
				{
					ShowImage("/icon/mp3.bmp",0,cnt*25-5);
					LCD_FillRect(30,cnt*25,16,330,White);
					LCD_DisplayString(30,cnt*25,musicFile[cnt+curruntIndex]);
				}
				break;
			case BMP:
				for (cnt = 1;cnt<9&&cnt+curruntIndex<=num;cnt++)
				{
					ShowImage("/icon/bmp.bmp",0,cnt*25-5);
					LCD_FillRect(30,cnt*25,16,330,White);
					LCD_DisplayString(30,cnt*25,photoFile[cnt+curruntIndex]);
				}
				break;
			case TXT:
				for (cnt = 1;cnt<9&&cnt+curruntIndex<=num;cnt++)
				{
					ShowImage("/icon/txt.bmp",0,cnt*25-5);
					LCD_FillRect(30,cnt*25,16,330,White);
					LCD_DisplayString(30,cnt*25,bookFile[cnt+curruntIndex]);
				}
				break;
			case UNKNOWN:
				break;
			}
			for(;cnt<9;cnt++)LCD_FillRect(0,cnt*25-3,23,360,White); // page not full: clear leftovers from the previous page
			fresh=0;
		}
		if(penPoint.keyState==Down)
		{
			eb=GetExplorerIndex();
			switch(eb)
			{
			case BACK:
				goto RT;   // leave without selecting; index stays 0
			case TURNUP:
				if(curruntIndex>0)curruntIndex-=8;
				fresh=1;
				break;
			case TURNDOWN:
				// NOTE(review): this can step past the last page when
				// curruntIndex+8 >= num — confirm whether that is intended.
				if(curruntIndex<num)curruntIndex+=8;
				fresh=1;
				break;
			default:
				// Any row button selects the entry under it.
				index=eb+curruntIndex;
				goto RT;
			}
		}
	}
RT:
	return index;
}