Code example #1
void testNode_MainWindow::timerEvent(QTimerEvent *)
{
    if(ui->tabWidget->currentIndex()==0){
        IplImage* showCamera = cvQueryFrame(m_camera);
        QImage *frame = IplImageToQImage(showCamera);

        ui->threshold_num->setNum(ui->threshold_bar->value());

        *frame = func_Gray(*frame);

        if(ui->check_Threshold->isChecked()){
            threshold = ui->threshold_bar->value();
        }else{
            threshold = Average_Threshold(*frame);
        }

        for(int h=0;h<frame->height();h++){
            for(int w=0;w<frame->width();w++){
                if(qRed(frame->pixel(w,h))<=threshold){
                    frame->setPixel(w,h,QColor(0,0,0).rgb());
                }else{
                    frame->setPixel(w,h,QColor(255,255,255).rgb());
                }
            }
        }
        ui->show_camera->setPixmap(QPixmap::fromImage(*frame));
        delete frame; // IplImageToQImage() returns a heap-allocated QImage; free it each tick to avoid a leak
    }else if(ui->tabWidget->currentIndex()==1){
        ros::NodeHandle n;
        image_transport::ImageTransport it(n);
        image_transport::Subscriber sub = it.subscribe("camera/image", 1, imageCallback);
//        sub1 = it1->subscribe("/camera/image",1,imageCallback);

//        sub_image = MatToQImage(cvframe);
//        ui->sub_label->setPixmap(QPixmap::fromImage(sub_image));
        ros::spin(); // NOTE: ros::spin() blocks until ROS shutdown, so killTimer() below only runs afterwards
        killTimer(m_nTimerId);
    }else if(ui->tabWidget->currentIndex()==2){
        // NOTE: opening a capture on every timer tick is expensive; in the old
        // OpenCV capture API, 201/202 select devices 1 and 2 of the V4L2 backend (base id 200)
        cv::VideoCapture cap(201);
        cap>>cv00;
        cv::VideoCapture cap1(202);
        cap1>>cv01;

//        IplImage* showCamera = cvQueryFrame(m_camera);
//        QImage *camera2 = IplImageToQImage(showCamera);

        camera0 = MatToQImage(cv00);
        camera1 = MatToQImage(cv01);

        ui->camera01->setPixmap(QPixmap::fromImage(camera0));
        ui->camera02->setPixmap(QPixmap::fromImage(camera1));
    }
}
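Every example on this page assumes a MatToQImage() helper (and example #1 additionally an IplImageToQImage() for the legacy C API). Each project ships its own version, which is not shown here; as a point of reference, a minimal sketch covering the two common pixel layouts, 8-bit grayscale and 8-bit BGR, could look like the following. The deep copies are deliberate, so the QImage outlives the cv::Mat.

#include <opencv2/opencv.hpp>
#include <QImage>
#include <QVector>

// A sketch, not any one project's exact implementation.
QImage MatToQImage(const cv::Mat& mat)
{
    if (mat.type() == CV_8UC1)
    {
        // 8-bit, 1 channel: wrap as indexed and attach a 256-entry gray palette
        QImage image(mat.data, mat.cols, mat.rows,
                     static_cast<int>(mat.step), QImage::Format_Indexed8);
        QVector<QRgb> grayTable(256);
        for (int i = 0; i < 256; ++i)
            grayTable[i] = qRgb(i, i, i);
        QImage copy = image.copy(); // deep copy so the QImage owns its pixels
        copy.setColorTable(grayTable);
        return copy;
    }
    if (mat.type() == CV_8UC3)
    {
        // 8-bit, 3 channels: OpenCV stores BGR, QImage::Format_RGB888 expects RGB
        QImage image(mat.data, mat.cols, mat.rows,
                     static_cast<int>(mat.step), QImage::Format_RGB888);
        return image.rgbSwapped(); // swaps R/B and deep-copies in one step
    }
    return QImage(); // unsupported format: caller sees isNull()
}

Examples #5 and #12 rely on exactly that last path: their viewImage.isNull() checks catch the unsupported-format case.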
Code example #2
// the function the thread will run when it is called
void ProcessingThread::run()
{
    while(1)
    {
        /////////////////////////////////
        // Stop thread if stopped=TRUE //
        /////////////////////////////////
        stoppedMutex.lock();
        if (stopped)
        {
            stopped=false;
            stoppedMutex.unlock();
            break;
        }
        stoppedMutex.unlock();
        /////////////////////////////////
        /////////////////////////////////

        // Save processing time
        processingTime=t.elapsed();
        // Start timer (used to calculate processing rate)
        t.start();
        // Get frame from queue
        Mat currentFrame=imageBuffer->getFrame();
        // Make copy of current frame (processing will be performed on this copy)
        currentFrame.copyTo(currentFrameCopy);
        // Set ROI of currentFrameCopy
        currentFrameCopy.locateROI(frameSize,framePoint);
        currentFrameCopy.adjustROI(-currentROI.y,-(frameSize.height-currentROI.height-currentROI.y),
                                   -currentROI.x,-(frameSize.width-currentROI.width-currentROI.x));

        updateMembersMutex.lock();

        updateMembersMutex.unlock();

        // Update statistics
        updateFPS(processingTime);
        currentSizeOfBuffer=imageBuffer->getSizeOfImageBuffer();

        if (decideToProcess()){
            writeHistory();

            // set the black level
            if (SetBlackFlag == 1){
                blackVal = setBlackCalib(currentFrame);
                SetBlackFlag = 0;
            }

            process(&currentFrame);
        }
        draw_on_image(GameState.table, &currentFrameCopy);
        frame=MatToQImage(currentFrameCopy);

        emit newFrame(frame);
        //printTest();

        //@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
    }
    qDebug() << "Stopping processing thread...";
}
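This run() loop is a common Qt/OpenCV stop-flag pattern: the GUI thread flips the flag under the mutex and the loop exits at the top of its next iteration. The matching setter is not shown; a minimal sketch, assuming the stoppedMutex and stopped members used above:

void ProcessingThread::stop()
{
    stoppedMutex.lock();
    stopped = true; // picked up at the top of the next run() iteration
    stoppedMutex.unlock();
}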
Code example #3
void MyProcessing::run() {
    long long prevFrameNum, curFrameNum;
    prevFrameNum = -1;
    curFrameNum = 0;

    while(1)
    {
        ////////////////////////////////
        // Stop thread if doStop=TRUE //
        ////////////////////////////////
        doStopMutex.lock();
        if(doStop)
        {
            doStop=false;
            doStopMutex.unlock();
            break;
        }
        doStopMutex.unlock();
        /////////////////////////////////
        /////////////////////////////////

        int idx = rb->curRead();
        FrameData *d = rb->getPointer(idx);
        curFrameNum = d->frameNumber;
        if (prevFrameNum < curFrameNum) {
            // Save processing time
            processingTime=t.elapsed();
            // Start timer (used to calculate processing rate)
            t.start();

            processingMutex.lock();
            //rb->lockRead();
            pluginStack->process(d);

            Mat currentFrame = d->frame.clone();
            //cout << "frame number : " << d->frameNumber << endl;

            //rb->unlockRead();
            // Convert Mat to QImage
            frame=MatToQImage(currentFrame);
            processingMutex.unlock();

            // Inform GUI thread of new frame (QImage)
            emit newFrame(frame);

            // Update statistics
            updateFPS(processingTime);
            statsData.nFramesProcessed++;
            // Inform GUI of updated statistics
            emit updateStatisticsInGUI(statsData);
        }//if

        // With "true" it jumps directly ahead to the most recent data available. This means that we can possibly skip bins.
        rb->nextRead(true);
        prevFrameNum=curFrameNum;
    }
    //qDebug() << "Stopping processing thread...";

}
Code example #4
QImage Erosion::Draw(const QImage &image, const QHash<QString, QString>& args)
{
	int shape = cv::MORPH_ELLIPSE;
	int ksize = 2;

	Arguments(args, shape, ksize);

	cv::Mat mat = QImageToMat(image);
	cv::Mat kernel = cv::getStructuringElement(shape, cv::Size(ksize, ksize));
	cv::erode(mat, mat, kernel);
	return MatToQImage(mat);
}
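Erosion::Draw (and Sobel::Draw in example #10) also depends on the inverse helper, QImageToMat(). Again the project's own version is not shown; a minimal sketch that yields the BGR byte order those filters expect (both call cv::cvtColor with CV_BGR2GRAY):

#include <opencv2/opencv.hpp>
#include <QImage>

// A sketch under the assumption that a 3-channel BGR Mat is wanted.
cv::Mat QImageToMat(const QImage& image)
{
    // Normalize to 24-bit RGB, then swap to OpenCV's BGR channel order
    QImage converted = image.convertToFormat(QImage::Format_RGB888).rgbSwapped();
    cv::Mat view(converted.height(), converted.width(), CV_8UC3,
                 const_cast<uchar*>(converted.constBits()),
                 static_cast<size_t>(converted.bytesPerLine()));
    return view.clone(); // deep copy: 'converted' goes out of scope on return
}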
Code example #5
File: waldogui.cpp Project: cbadillac/Where-s-Waldo
void WaldoGUI::on_pushButton_2_clicked()
{
    CvCapture* cap = cvCaptureFromCAM(0);
    waldoImage = cvQueryFrame(cap);
    viewImage = MatToQImage(waldoImage);
    cvReleaseCapture(&cap); // release before the early return below, so the capture cannot leak
    if(viewImage.isNull()) {
        QMessageBox::information(this, tr("Error de carga!!"), tr("No se puede cargar webcam"));
        return;
    }
    emit imagen_lista();
}
Code example #6
void MainWindow::processFrameAndUpdateGUI() {
    //capWebCam.read(cvmCurrentFrame);

    switch (this->cameraType)
    {
        case CAMERA_WEBCAM:
            this->webcam.processFrame(cvmCurrentFrame);
        break;

        case CAMERA_DSLR:
            // capture DSLR live view if no current still exists, else use current still to display and process
            if (this->dslr.isStillCaptured() == false) {
                this->dslr.processFrame(cvmCurrentFrame);
            }else{
                // if a still is captured, always use the unmodified image;
                // otherwise the various image manipulations could not undo earlier processing
                this->dslr.getCurrentStill().copyTo(cvmCurrentFrame);
            }
        break;
    }

    // return if there is a capture problem
    if (cvmCurrentFrame.empty() == true) return;

    cvmCurrentFrame = videoProcessing.process(cvmCurrentFrame);

    // fit image to window or show 1:1
    // convert from opencv to QT
    QImage qimgDisplay =  MatToQImage(cvmCurrentFrame);
    QPixmap pm = QPixmap::fromImage(qimgDisplay);
    if (resizeFrame == true) {
        //QPixmap qimgDisplay = QPixmap::fromImage(currentCapture);
        int w = ui->scrollArea->width() - 30;
        int h = ui->scrollArea->height() - 30;
        pm = pm.scaled(QSize(w, h), Qt::KeepAspectRatio, Qt::SmoothTransformation);
    }

    // and then display it on the QT widget
    ui->frame->setPixmap(pm);
    ui->frame->setFixedWidth(pm.width());
    ui->frame->setFixedHeight(pm.height());

    // if live view area is too small increase it
    if (size().width() < pm.width() && this->cameraType == CAMERA_DSLR) {
        resize(pm.width()+200, pm.height()+50);
    }

}
Code example #7
File: mainwindow.cpp Project: CHTim/Barcode-Decoder
void MainWindow::updateBarcodeImg(const cv::Mat& bar){
    //cv::imshow("gray",bar);

    QImage a = MatToQImage(bar);

    if(a.width()>300 || a.height()>200){
        a = a.scaled(300,200,Qt::KeepAspectRatio);
    }

    // display on label
    ui->BarcodeImg->setPixmap(QPixmap::fromImage(a,Qt::AutoColor));
    // resize the label to fit the image
    ui->BarcodeImg->resize(ui->BarcodeImg->pixmap()->size());
}
Code example #8
void StereoCalibrationDialog::updateFrame()
{
    captureLeft.grab();
    captureRight.grab();

    captureLeft.retrieve(currentFrameLeft);
    captureRight.retrieve(currentFrameRight);

    if(!currentFrameLeft.empty() && !currentFrameRight.empty())
    {
        cv::Mat frameLeftCopy = currentFrameLeft.clone();
        cv::Mat frameRightCopy = currentFrameRight.clone();

        if(ui->checkBox->isChecked())
        {
            detectChessboard(frameLeftCopy, patternSize);
            detectChessboard(frameRightCopy, patternSize);
        }

        QImage qframeL, qframeR;
        qframeL = MatToQImage(frameLeftCopy);
        qframeR = MatToQImage(frameRightCopy);

        frameLeft = MatToQImage(currentFrameLeft);
        frameRight = MatToQImage(currentFrameRight);

        pixmapLeft = QPixmap::fromImage(qframeL).scaled(currentFrameLeft.cols,
                                                  currentFrameLeft.rows, Qt::KeepAspectRatio);

        pixmapRight = QPixmap::fromImage(qframeR).scaled(currentFrameRight.cols,
                                                  currentFrameRight.rows, Qt::KeepAspectRatio);
        ui->leftCameraLabel->setPixmap(pixmapLeft);
        ui->rightCameraLabel->setPixmap(pixmapRight);

    }

}
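detectChessboard() is project code that is not shown here. Judging from the usage, it draws the detected calibration pattern into the preview copies; a plausible minimal version, with CALIB_CB_FAST_CHECK assumed to keep the live preview responsive when no board is visible:

#include <opencv2/opencv.hpp>
#include <vector>

// Hypothetical sketch of the helper assumed by the snippet above.
static void detectChessboard(cv::Mat& frame, const cv::Size& patternSize)
{
    std::vector<cv::Point2f> corners;
    bool found = cv::findChessboardCorners(frame, patternSize, corners,
                                           cv::CALIB_CB_FAST_CHECK);
    // Overlays colored corners when found, red markers otherwise
    cv::drawChessboardCorners(frame, patternSize, corners, found);
}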
Code example #9
void MainWindow::on_btnSave_clicked()
{

    //QImage qCaptureImg((uchar*)cvmCurrentFrame.data, cvmCurrentFrame.cols, cvmCurrentFrame.rows, cvmCurrentFrame.step, QImage::Format_Indexed8);

    QImage qCaptureImg = MatToQImage(cvmCurrentFrame);

    stopCapture();

    if (filePath.isNull()) {
        filePath = QDir::currentPath();
    }

    QString filename = QFileDialog::getSaveFileName(
        this,
        tr("Save Capture"),
        filePath,
        tr("PNG (*.png);;TIF (*.tif);;JPG (*.jpg)"),
        &fileExtension);

    if( !filename.isNull() )
    {
        // save current path to set selected path in next save action
        QFileInfo fi = QFileInfo(filename);
        filePath = fi.absoluteDir().absolutePath();

        // Generate file path + file name without extension
        // this is done because the extension is set below
        // if not it could save file.png.png if a previous
        // file should be overwritten
        filename = filePath + QDir::separator() + fi.baseName();

        if (fileExtension == "PNG (*.png)") {
            filename += ".png";
        }else if(fileExtension == "TIF (*.tif)") {
            filename += ".tif";
        }else{
            filename += ".jpg";
        }
        qCaptureImg.save(filename);
    }
    startCapture();
}
Code example #10
QImage Sobel::Draw(const QImage &image, const QHash<QString, QString>& args)
{
	int ksize = 3;

	cv::Mat src, grad, result;
	cv::Mat gradX, gradY;
	cv::Mat absGradX, absGradY;

	Arguments(args, ksize);

	src = QImageToMat(image);
	cv::cvtColor(src, src, CV_BGR2GRAY);
	cv::Sobel(src, gradX, -1, 1, 0, ksize);
	cv::convertScaleAbs(gradX, absGradX);
	cv::Sobel(src, gradY, -1, 0, 1, ksize);
	cv::convertScaleAbs(gradY, absGradY);
	cv::addWeighted(absGradX, 0.5, absGradY, 0.5, 0, grad);
	grad.convertTo(result, CV_8U);
	return MatToQImage(result);
}
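One caveat in this example: ddepth is -1, so the gradients are computed at the source depth (CV_8U) and negative responses are clipped to zero before cv::convertScaleAbs ever runs; the final convertTo is then a no-op, since grad is already CV_8U. The Sobel branch in example #13 uses the usual remedy, a signed intermediate depth (CV_16S). Applied to this filter, a hypothetical variant (DrawSigned is a name introduced here, not part of the project) would read:

QImage Sobel::DrawSigned(const QImage &image, const QHash<QString, QString>& args)
{
	int ksize = 3;
	cv::Mat gradX, gradY, absGradX, absGradY, grad;

	Arguments(args, ksize);

	cv::Mat src = QImageToMat(image);
	cv::cvtColor(src, src, CV_BGR2GRAY);
	cv::Sobel(src, gradX, CV_16S, 1, 0, ksize); // signed depth keeps negative responses
	cv::convertScaleAbs(gradX, absGradX);       // |gradient|, rescaled back to 8-bit
	cv::Sobel(src, gradY, CV_16S, 0, 1, ksize);
	cv::convertScaleAbs(gradY, absGradY);
	cv::addWeighted(absGradX, 0.5, absGradY, 0.5, 0, grad);
	return MatToQImage(grad);
}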
Code example #11
File: dialogct.cpp Project: zhangjaycee/arm_a8_gui
void DialogCt::grabAndShow(){
    yuv2Mat(buffers[0].start,imgWidth,imgHeight);
    // new frame grabbed, processing starts
    if(mode == 2){ // ct mode
        while(1){ // one-shot loop: every path breaks after a single pass
            if(first_flag){
                cvtColor(frame, first_frame, CV_RGB2GRAY);
                first_flag=0;
            }
            cvtColor(frame, current_gray, CV_RGB2GRAY);
            absdiff(first_frame,current_gray,fore_frame);
            if(!gotHand){
                getHand();
                if(gotHand){
                    ctInitFlag=1;
                }
                break;
            }
            // CT initialization
            if(ctInitFlag){
                for(int i=0;i<HANDNUM;i++)
                    ct[i].init(fore_frame, box[i]);
                ctInitFlag=0;
            }
            for(int i=0;i<HANDNUM;i++){
                ct[i].processFrame(fore_frame, box[i]);
                rectangle(frame, box[i], Scalar(rgb_b[i],rgb_g[i],rgb_r[i]));
            }
            flip(frame, frame, 1);
            break;
        }
    } else if (mode == 1){ // display-only mode
        flip(frame, frame, 1);
    }

    qImg=MatToQImage(frame);
    ui->label->setPixmap(QPixmap::fromImage(qImg));
    ui->label->resize(ui->label->pixmap()->size());
}
Code example #12
File: waldogui.cpp Project: cbadillac/Where-s-Waldo
void WaldoGUI::on_pushButton_clicked()
{
    QString fileName = QFileDialog::getOpenFileName(this, tr("Abrir Imagen"), "./",
                                                    tr("Imagen (*.png);; All files (*.*)"));
    if(fileName == "") return;
    emit texto_listo(fileName);

    Mat waldoImage = imread(fileName.toStdString());

    viewImage = MatToQImage(waldoImage);

    if(viewImage.isNull()) {
        QMessageBox::information(this, tr("Error de carga!!"), tr("No se puede cargar %1. ").arg(fileName));

        return;
    }
    emit imagen_lista();

    //AGV Forgive us, for we know not what we do
    //The main part of the code is here

    Franjas franj;
    Mat franjResult;
    Mat templResult;

    franjResult = franj.run(waldoImage);
    emit franjasListas(franjResult);

    emit RunHistogram(franjResult);


    TemplateMatching templ;
    templResult = templ.run(franjResult);
    emit templateMatchListo(templResult);


    //HistogramModule<1>(&franjResult);
}
Code example #13
void ProcessingThread::run()
{
    while(1)
    {
        // Check if paused
        pauseMutex.lock();
        if (paused)
        {
            pauseMutex.unlock();
            sleep(3);
            continue;
        }
        pauseMutex.unlock();

        /////////////////////////////////
        // Stop thread if stopped=TRUE //
        /////////////////////////////////
        stoppedMutex.lock();
        if (stopped)
        {
            stopped = false;
            stoppedMutex.unlock();
            break;
        }
        stoppedMutex.unlock();
        /////////////////////////////////
        /////////////////////////////////

        inputMutex.lock();
        if (inputMode != INPUT_IMAGE)
        {
            inputMutex.unlock();
            currentFrame = outputBuffer->getFrame();
        }
        else
        {
            inputMutex.unlock();
            if (outputBuffer->getSizeOfImageBuffer() > 0)
            {
                currentFrame = outputBuffer->getFrame();
            }
            msleep(50);
        }
        // (both branches already unlocked inputMutex; a second unlock here would be undefined behavior)

        updM.lock();
        ////////////////////////////////////
        // PERFORM IMAGE PROCESSING BELOW //
        ////////////////////////////////////

        cv::Mat outputIm = currentFrame.clone();

        if (filters.flags[ImageProcessingFlags::ConvertColorspace])
        {
            switch (settings.colorSpace)
            {
            case 0:
            { // Gray
                cv::cvtColor(currentFrame,outputIm, CV_RGB2GRAY);
            } break;
            case 1:
            { // HSV
                cv::cvtColor(currentFrame,outputIm, CV_RGB2HSV);
            } break;
            case 3:
            { // Lab
                cv::cvtColor(currentFrame,outputIm, CV_RGB2Lab);
            } break;
            }
        }

        if (filters.flags[ImageProcessingFlags::SaltPepperNoise])
        {
            for (int i=0; i<settings.saltPepperNoiseDensity; i+=1)
            { // adding noise
                // generate randomly the col and row
                int m = qrand() % outputIm.rows;
                int n = qrand() % outputIm.cols;

                // generate randomly the value {black, white}
                int color_ = ((qrand() % 100) > 50) ? 255 : 0;

                if (outputIm.channels() == 1)
                { // gray-level image
                    outputIm.at<uchar>(m, n)= color_;
                }
                else if (outputIm.channels() == 3)
                { // color image
                    outputIm.at<cv::Vec3b>(m, n)[0]= color_;
                    outputIm.at<cv::Vec3b>(m, n)[1]= color_;
                    outputIm.at<cv::Vec3b>(m, n)[2]= color_;
                }
            }
        }

        if (filters.flags[ImageProcessingFlags::Dilate])
        {
            cv::dilate(outputIm,
                       outputIm,
                       cv::Mat(),
                       cv::Point(-1, -1),
                       settings.dilateIterations);
        }

        if (filters.flags[ImageProcessingFlags::Erode])
        {
            cv::erode(outputIm,
                      outputIm,
                      cv::Mat(),
                      cv::Point(-1, -1),
                      settings.erodeIterations);
        }

        if (filters.flags[ImageProcessingFlags::Open])
        {
            cv::morphologyEx(outputIm,
                             outputIm,
                             cv::MORPH_OPEN,
                             cv::Mat(),
                             cv::Point(-1, -1),
                             settings.openIterations);
        }

        if (filters.flags[ImageProcessingFlags::Close])
        {
            cv::morphologyEx(outputIm,
                             outputIm,
                             cv::MORPH_CLOSE,
                             cv::Mat(),
                             cv::Point(-1, -1),
                             settings.openIterations);
        }

        if (filters.flags[ImageProcessingFlags::Blur])
        {
            cv::GaussianBlur(outputIm,
                             outputIm,
                             cv::Size(settings.blurSize, settings.blurSize),
                             settings.blurSigma);
        }

        if (filters.flags[ImageProcessingFlags::Sobel])
        {
            int scale = 1;
            int delta = 0;
            int ddepth = CV_16S;

            // check the direction
            switch (settings.sobelDirection)
            {
            case 0:
            { // horizontal
                cv::Mat grad_x;
                cv::Sobel( outputIm, grad_x, ddepth, 1, 0, settings.sobelKernelSize, scale, delta, BORDER_DEFAULT );
                cv::convertScaleAbs( grad_x, outputIm );
            } break;
            case 1:
            { // vertical
                cv::Mat grad_y;
                cv::Sobel( outputIm, grad_y, ddepth, 0, 1, settings.sobelKernelSize, scale, delta, BORDER_DEFAULT );
                cv::convertScaleAbs( grad_y, outputIm );
            } break;
            case 2:
            { // both directions
                cv::Mat grad_x;
                cv::Mat grad_y;
                cv::Mat abs_grad_x;
                cv::Mat abs_grad_y;
                cv::Sobel( outputIm, grad_x, ddepth, 1, 0, settings.sobelKernelSize, scale, delta, BORDER_DEFAULT );
                cv::Sobel( outputIm, grad_y, ddepth, 0, 1, settings.sobelKernelSize, scale, delta, BORDER_DEFAULT );
                cv::convertScaleAbs( grad_x, abs_grad_x );
                cv::convertScaleAbs( grad_y, abs_grad_y );

                cv::addWeighted( abs_grad_x, 0.5, abs_grad_y, 0.5, 0, outputIm );
            } break;
            }
        }

        if (filters.flags[ImageProcessingFlags::Laplacian])
        {
            int scale = 1;
            int delta = 0;
            int ddepth = CV_16S;

            cv::Laplacian( outputIm, outputIm, ddepth, settings.laplacianKernelSize, scale, delta, BORDER_DEFAULT );
            cv::convertScaleAbs( outputIm, outputIm );
        }

        if (filters.flags[ImageProcessingFlags::SharpByKernel])
        {
            cv::Mat kernel(3,3,CV_32F,cv::Scalar(0));// init the kernel with zeros
            // assigns kernel values
            kernel.at<float>(1,1)= settings.sharpKernelCenter;
            kernel.at<float>(0,1)= -1.0;
            kernel.at<float>(2,1)= -1.0;
            kernel.at<float>(1,0)= -1.0;
            kernel.at<float>(1,2)= -1.0;
            //filter the image
            cv::filter2D(outputIm,outputIm,outputIm.depth(),kernel);
        }

        if (filters.flags[ImageProcessingFlags::EdgeDetection])
        { // with canny
            cv::Canny(outputIm, outputIm, settings.cannyLowThres, settings.cannyHighThres);
        }

        if (filters.flags[ImageProcessingFlags::LinesHough])
        {
            // Apply Canny algorithm
            cv::Mat contours;
            cv::Canny(outputIm,contours,125,350);

            // Hough transform for line detection
            vector<cv::Vec2f> lines;
            cv::HoughLines(contours,lines, 1, PI/180, settings.linesHoughVotes);

            vector<cv::Vec2f>::const_iterator it= lines.begin();

            while (it!=lines.end())
            {
                float rho = (*it)[0]; // first element is distance rho
                float theta = (*it)[1]; // second element is angle theta
                if (theta < PI/4. || theta > 3.*PI/4.)
                {// ~vertical line
                    // point of intersection of the line with first row
                    cv::Point pt1(rho/cos(theta),0);
                    // point of intersection of the line with last row
                    cv::Point pt2((rho-contours.rows*sin(theta))/cos(theta),contours.rows);
                    // draw a white line
                    cv::line( outputIm, pt1, pt2, cv::Scalar(255), 1);
                }
                else
                { // ~horizontal line
                    // point of intersection of the line with first column
                    cv::Point pt1(0,rho/sin(theta));
                    // point of intersection of the line with last column
                    cv::Point pt2(contours.cols, (rho-contours.cols*cos(theta))/sin(theta));
                    // draw a white line
                    cv::line(outputIm, pt1, pt2, cv::Scalar(255), 1);
                }
                ++it;
            }
        }

        if (filters.flags[ImageProcessingFlags::CirclesHough])
        {
            cv::Mat temp;
            if (outputIm.channels() > 1)
            {
                cv::cvtColor(outputIm, temp, CV_RGB2GRAY);
            }
            else
            {
                temp = outputIm;
            }

            cv::GaussianBlur(temp, temp, cv::Size(5,5), 1.5);
            vector<cv::Vec3f> circles;

            cv::HoughCircles(temp, circles, CV_HOUGH_GRADIENT,
                            2,    // accumulator resolution (size of the image / 2)
                            50,   // minimum distance between two circles
                            200,  // Canny high threshold
                            60,   // minimum number of votes
                            settings.circlesHoughMin,
                            settings.circlesHoughMax);

            std::vector<cv::Vec3f>::const_iterator itc= circles.begin();
            while (itc!=circles.end())
            {
                cv::circle(outputIm,
                        cv::Point((*itc)[0], (*itc)[1]), // circle centre
                                (*itc)[2],               // circle radius
                                cv::Scalar(255),         // color
                                2);                      // thickness
                ++itc;
            }
        }

        if (filters.flags[ImageProcessingFlags::Countours])
        {
            cv::Mat temp;
            if (outputIm.channels() > 1)
            {
                cv::cvtColor(outputIm, temp, CV_RGB2GRAY);
            }
            else
            {
                temp = outputIm;
            }

            cv::blur(temp, temp, Size(3,3));
            cv::Canny(temp, temp, settings.contoursThres, settings.contoursThres+30);

            vector< vector<cv::Point> > contours;
            cv::findContours(temp,
                            contours,              // a vector of contours
                            CV_RETR_TREE,          // retrieve all contours, reconstructs a full hierarchy
                            CV_CHAIN_APPROX_NONE); // all pixels of each contours

            cv::drawContours(outputIm,contours,
                            -1,                        // draw all contours
                            cv::Scalar(255, 255, 255), // in white
                            1);                        // with a thickness of 1
        }

        if (filters.flags[ImageProcessingFlags::BoundingBox])
        {
            cv::Mat temp;
            if (outputIm.channels() > 1)
            {
                cv::cvtColor(outputIm, temp, CV_RGB2GRAY);
            }
            else
            {
                temp = outputIm;
            }

            cv::blur(temp, temp, Size(3,3));
            cv::Canny(temp, temp, settings.boundingBoxThres, settings.boundingBoxThres*2);

            vector< vector<cv::Point> > contours;
            cv::findContours(temp,
                            contours,              // a vector of contours
                            CV_RETR_TREE,          // retrieve all contours, reconstructs a full hierarchy
                            CV_CHAIN_APPROX_NONE); // all pixels of each contours

            vector< vector<cv::Point> >::iterator itc = contours.begin();
            while (itc != contours.end())
            {
                cv::Rect r0 = cv::boundingRect(cv::Mat(*itc));
                cv::rectangle(outputIm,r0,cv::Scalar(255, 0, 0), 2);
                ++itc;
            }
        }

        if (filters.flags[ImageProcessingFlags::enclosingCircle])
        {
            cv::Mat temp;
            if (outputIm.channels() > 1)
            {
                cv::cvtColor(outputIm, temp, CV_RGB2GRAY);
            }
            else
            {
                temp = outputIm;
            }

            cv::blur(temp, temp, Size(3,3));
            cv::Canny(temp, temp, settings.enclosingCircleThres, settings.enclosingCircleThres*2);

            vector< vector<cv::Point> > contours;
            cv::findContours(temp,
                            contours,              // a vector of contours
                            CV_RETR_TREE,          // retrieve all contours, reconstructs a full hierarchy
                            CV_CHAIN_APPROX_NONE); // all pixels of each contours

            vector< vector<cv::Point> >::iterator itc = contours.begin();
            while (itc != contours.end())
            {
                float radius;
                cv::Point2f center;
                cv::minEnclosingCircle(cv::Mat(*itc),center,radius);
                cv::circle(outputIm, center,
                        static_cast<int>(radius),
                        cv::Scalar(0, 255, 0),
                        2);
                ++itc;
            }
        }

        if (filters.flags[ImageProcessingFlags::harris])
        {
            cv::Mat temp;
            if (outputIm.channels() > 1)
            {
                cv::cvtColor(outputIm, temp, CV_RGB2GRAY);
            }
            else
            {
                temp = outputIm;
            }

            // Detector parameters
            int blockSize = 2;
            int apertureSize = 3;
            double k = 0.04;

            // Detecting corners
            cv::cornerHarris(temp, temp, blockSize, apertureSize, k, BORDER_DEFAULT);

            // Normalizing
            normalize(temp,temp, 0, 255, NORM_MINMAX, CV_32FC1, Mat());

            // Drawing a circle around corners
            for( int j = 0; j < temp.rows ; j++ )
            {
                for( int i = 0; i < temp.cols; i++ )
                {
                    if( (int) temp.at<float>(j,i) > settings.harrisCornerThres)
                    {
                        circle(outputIm, Point( i, j ), 5,  Scalar(0, 0 , 255), 2, 8, 0);
                    }
                }
            }
        }

        if (filters.flags[ImageProcessingFlags::FAST])
        {
            // vector of keypoints
            vector<cv::KeyPoint> keypoints;
            // Construction of the Fast feature detector object
            cv::FastFeatureDetector fast(settings.fastThreshold); // threshold for detection
            // feature point detection
            fast.detect(outputIm,keypoints);

            cv::drawKeypoints(outputIm, keypoints, outputIm, cv::Scalar(255,255,255), cv::DrawMatchesFlags::DRAW_OVER_OUTIMG);
        }

        if (filters.flags[ImageProcessingFlags::SURF])
        {
            // vector of keypoints
            vector<cv::KeyPoint> keypoints;
            // Construct the SURF feature detector object
            cv::SurfFeatureDetector surf((double) settings.surfThreshold); // threshold
            // Detect the SURF features
            surf.detect(outputIm,keypoints);

            // Draw the keypoints with scale and orientation information
            cv::drawKeypoints(outputIm, keypoints, outputIm, cv::Scalar(255,255,255),cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
        }

        if (filters.flags[ImageProcessingFlags::SIFT])
        {

            vector<cv::KeyPoint> keypoints;
            // Construct the SIFT feature detector object
            cv::SiftFeatureDetector sift( settings.siftContrastThres,        // feature threshold
                                          (double) settings.siftEdgeThres); // threshold to reduce sens. to lines

            sift.detect(outputIm,keypoints);
            // Draw the keypoints with scale and orientation information
            cv::drawKeypoints(outputIm, keypoints, outputIm, cv::Scalar(255,255,255),cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);
        }

        if (filters.flags[ImageProcessingFlags::EqualizeHistogram])
        {
            // equalize each channel separately for color images
            if (outputIm.channels() == 3)
            {
                vector<Mat> bgr_planes;
                split( outputIm, bgr_planes );
                equalizeHist( bgr_planes[0], bgr_planes[0] );
                equalizeHist( bgr_planes[1], bgr_planes[1] );
                equalizeHist( bgr_planes[2], bgr_planes[2] );
                merge( bgr_planes, outputIm );
            }
            else
            {
                equalizeHist( outputIm, outputIm );
            }
        }

        // Computing histogram
        if (filters.flags[ImageProcessingFlags::ComputeHistogram])
        {
            cv::Mat grayIm;
            cv::Mat hist;
            // converting the image to gray
            if (outputIm.channels() == 3)
            {
                 cv::cvtColor(outputIm,grayIm, CV_RGB2GRAY);
            }
            else
            {
                grayIm = outputIm;
            }

            int histSize = 256;           // number of bins
            float range [] = {0, 256};    // ranges
            const float* histRange = { range };
            bool uniform = true, accumulate = false;

            // compute histogram
            cv::calcHist(&grayIm,
                         1,  // using just one image
                         0,  // using just one layer
                         cv::Mat(),
                         hist,
                         1,
                         &histSize,
                         &histRange,
                         uniform,
                         accumulate);


            int hist_w = 691; int hist_h =161;
            Mat result( hist_h, hist_w, CV_8UC3, Scalar( 255,255,255) );
            int bin_w = cvRound( (double) hist_w/histSize );
            normalize(hist, hist, 0, result.rows, NORM_MINMAX, -1, Mat());

            /// Draw for each channel
            for( int i = 1; i < histSize; i++ )
            {
                line(result,
                     Point( bin_w*(i-1), hist_h - cvRound(hist.at<float>(i-1)) ),
                     Point( bin_w*(i), hist_h - cvRound(hist.at<float>(i)) ),
                     Scalar( 0, 0, 0), 2, 8, 0  );
            }
            // emit signal
            emit newProcessedHistogram(MatToQImage(result));
        }

        updM.unlock();

        processedFrame =  outputIm;
        // Inform GUI thread of new frame (QImage)
        emit newProcessedFrame(MatToQImage(outputIm));
    }
}
Code example #14
void ProcessingThread::run()
{
    qDebug() << "Starting processing thread...";
    while(1)
    {
        /////////////////////////////////
        // Stop thread if doStop=TRUE  //
        /////////////////////////////////
        doStopMutex.lock();
        if(doStop)
        {
            doStop=false;
            doStopMutex.unlock();
            break;
        }
        doStopMutex.unlock();
        /////////////////////////////////
        /////////////////////////////////

        // Save processing time
        processingTime=t.elapsed();
        // Start timer (used to calculate processing rate)
        t.start();

        processingMutex.lock();
        // Get frame from queue, store in currentFrame, set ROI
        currentFrame=Mat(sharedImageBuffer->getByDeviceNumber(deviceNumber)->get().clone(), currentROI);

        ////////////////////////////////////
        // PERFORM IMAGE PROCESSING BELOW //
        ////////////////////////////////////

        // Grayscale conversion (in-place operation)
        if(imgProcFlags.grayscaleOn && (currentFrame.channels() == 3 || currentFrame.channels() == 4)) {
            cvtColor(currentFrame, currentFrame, CV_BGR2GRAY, 1);
        }

        // Save the original frame after grayscale conversion, so the VideoWriter works correctly
        if(emitOriginal || captureOriginal)
            originalFrame = currentFrame.clone();

        // Fill the buffer that is processed by the Magnificator
        fillProcessingBuffer();

        if (processingBufferFilled()) {
            if(imgProcFlags.colorMagnifyOn)
            {
                magnificator.colorMagnify();
                currentFrame = magnificator.getFrameLast();
            }
            else if(imgProcFlags.laplaceMagnifyOn)
            {
                magnificator.laplaceMagnify();
                currentFrame = magnificator.getFrameLast();
            }
            else if(imgProcFlags.waveletMagnifyOn)
            {
                magnificator.waveletMagnify();
                currentFrame = magnificator.getFrameLast();
            }
            else
                processingBuffer.erase(processingBuffer.begin());
        }

        ////////////////////////////////////
        // PERFORM IMAGE PROCESSING ABOVE //
        ////////////////////////////////////

        // Convert Mat to QImage
        frame=MatToQImage(currentFrame);

        processingMutex.unlock();

        // Save the Stream
        if(doRecord) { 
            if(output.isOpened()) { 
                if(captureOriginal) {

                    processingMutex.lock();
                    // Combine original and processed frame
                    combinedFrame = combineFrames(currentFrame,originalFrame);
                    processingMutex.unlock();

                    output.write(combinedFrame);
                }
                else {
                    output.write(currentFrame);
                }

                framesWritten++;
                emit frameWritten(framesWritten);
            }
        }

        // Emit the original frame (saved before magnification)
        if(emitOriginal)
            emit origFrame(MatToQImage(originalFrame));
        // Inform GUI thread of new frame (QImage)
        // emit newFrame(frame);
        emit newFrame(MatToQImage(currentFrame));

        // Update statistics
        updateFPS(processingTime);
        statsData.nFramesProcessed++;
        // Inform GUI of updated statistics
        emit updateStatisticsInGUI(statsData);
    }
    qDebug() << "Stopping processing thread...";
}
Code example #15
/**
 * Display function for the five sets of original images
 */
bool ImageViewer::updateImages()
{
    if (!hasinitmodel)
        return false;

    QString tmpcamid;
    bool ret = true;
    QString tmpcamimgdir;

    QLabel * tmpcamimg1 = NULL;
    QLabel * tmpcamimg2 = NULL;
    QLabel * tmpcamimg3 = NULL;
    QLabel * tmpcamimg4 = NULL;
    QLabel * tmpcamimg5 = NULL;
    QLabel * tmpcamimg = NULL;
    tmphasfc1 = false;
    tmphasfc2 = false;
    tmphasfc3 = false;
    tmphasfc4 = false;
    tmphasfc5 = false;

    for (int i = 0; i < 5; i++)
    {
        switch (i)
        {
            case 0: tmpcamid = ui->cam1->currentText(); 
                tmpcamimg1 = ui->cam1img1;
                tmpcamimg2 = ui->cam1img2;
                tmpcamimg3 = ui->cam1img3;
                tmpcamimg4 = ui->cam1img4;
                tmpcamimg5 = ui->cam1img5;
                break;
            case 1: tmpcamid = ui->cam2->currentText(); 
                tmpcamimg1 = ui->cam2img1;
                tmpcamimg2 = ui->cam2img2;
                tmpcamimg3 = ui->cam2img3;
                tmpcamimg4 = ui->cam2img4;
                tmpcamimg5 = ui->cam2img5;
                break;
            case 2: tmpcamid = ui->cam3->currentText(); 
                tmpcamimg1 = ui->cam3img1;
                tmpcamimg2 = ui->cam3img2;
                tmpcamimg3 = ui->cam3img3;
                tmpcamimg4 = ui->cam3img4;
                tmpcamimg5 = ui->cam3img5;
                break;
            case 3: tmpcamid = ui->cam4->currentText(); 
                tmpcamimg1 = ui->cam4img1;
                tmpcamimg2 = ui->cam4img2;
                tmpcamimg3 = ui->cam4img3;
                tmpcamimg4 = ui->cam4img4;
                tmpcamimg5 = ui->cam4img5;
                break;
            case 4: tmpcamid = ui->cam5->currentText(); 
                tmpcamimg1 = ui->cam5img1;
                tmpcamimg2 = ui->cam5img2;
                tmpcamimg3 = ui->cam5img3;
                tmpcamimg4 = ui->cam5img4;
                tmpcamimg5 = ui->cam5img5;
                break;
            default:break;
        }

        __int64 & tmpcurrentfc = getTmpCurrentfc(i);

        if (tmpcamid.trimmed().compare("") == 0)
        {
            tmpcamimg1->clear();
            tmpcamimg2->clear();
            tmpcamimg3->clear();
            tmpcamimg4->clear();
            tmpcamimg5->clear();
            continue;
        }
        tmpcamimgdir = currenttmpimgpath + "_" + tmpcamid + "/";

        // the 5 images for each camera
        for (int j = -2; j < 3; j++)
        {
            switch (j)
            {
                case -2: tmpcamimg = tmpcamimg1; break;
                case -1: tmpcamimg = tmpcamimg2; break;
                case 0: tmpcamimg = tmpcamimg3; break;
                case 1: tmpcamimg = tmpcamimg4; break;
                case 2: tmpcamimg = tmpcamimg5; break;
                default:break;
            }

            QString filename3 = (QString("%1/%2.jpg").arg(tmpcamimgdir).arg(tmpcurrentfc + j));
            if (!QFile(filename3).exists())
            {
                // clear the display
                tmpcamimg->clear();
                qDebug() << QObject::tr("图片目录:") << filename3 << QObject::tr("不存在!");

                ret = false;
                continue;
            }
            qDebug() << QObject::tr("图片目录:") << filename3;

            bool & tmphasfc = getTmphasfc(j + 2);
            tmphasfc = true;

            // apply image enhancement
            QByteArray tmpba = filename3.toLocal8Bit();
            cv::Mat img = cv::imread(tmpba.constData(), CV_LOAD_IMAGE_GRAYSCALE);
            float scale = 0.3f; // scaling factor
            cv::Size size = cv::Size(img.size().width * scale, img.size().height * scale);
            cv::resize(img, img, size);
            //cv::imshow("win1",img);
            //cv::waitKey(0);
            if (ifimgenhance == 2)
            {
                
                double alpha = 2;
                double beta = 50;

                // Contrast adjustment per RGB channel -- slow
                /*
                result = Mat::zeros(img.size(),img.type());
                for (int i = 0;i<img.rows;++i)
                    for(int j= 0;j<img.cols;++j)
                        for (int k = 0; k < 3; k++)
                            result.at<cv::Vec3b>(i,j)[k] = cv::saturate_cast<uchar>(img.at<cv::Vec3b>(i,j)[k]*alpha+beta);
                cv::cvtColor(result,result, CV_BGRA2GRAY);
                */

                // Contrast adjustment on the grayscale image -- fast
                cv::Mat result = cv::Mat(size, CV_8U);
                for( int i=0;i<img.rows;i++)  
                {  
                    uchar* dataimg = img.ptr<uchar>(i);
                    uchar* dataresult = result.ptr<uchar>(i);  
                    for(int j=0;j<img.cols;j++)  
                    {  
                        dataresult[j] = cv::saturate_cast<uchar>(dataimg[j]*alpha+beta);   
                    }  
                } 
                bigpm = MatToQImage(result);
            
                // Histogram equalization -- gives poorer results
                //cv::equalizeHist(img, result);
                //cv::imwrite("D:\\2.png", result);
                //cv::imshow("win1",result);
                //cv::waitKey(0);
            }
            else if (ifimgenhance == 1)
            {
                // Histogram specification -- by Ding
                cv::Mat result;
                LzCalculator::argument(img, result); 
                //cv::GaussianBlur(result, result, Size(5,5), 1.5, 1.5);
                bigpm = MatToQImage(result);
            }
            else
            {
                bigpm = MatToQImage(img);
            }
            
            smallpm = bigpm.scaled(TN_WIDTH, TN_HEIGHT, Qt::KeepAspectRatio);
            tmpcamimg->setPixmap(QPixmap::fromImage(smallpm));
            tmpcamimg->show();
        }
    }

    qDebug() << "has fc?: " << tmphasfc1 << tmphasfc2 << tmphasfc3 << tmphasfc4 << tmphasfc5;

    QLabel * tmpfclabel = NULL;
    QLabel * tmpmilelabel = NULL;

    // master fc number for each camera
    for (int j = -2; j < 3; j++)
    {
        switch (j)
        {
            case -2: tmpfclabel = ui->fc1; tmpmilelabel = ui->mile1; break;
            case -1: tmpfclabel = ui->fc2; tmpmilelabel = ui->mile2; break;
            case 0: tmpfclabel = ui->fc3; tmpmilelabel = ui->mile3; break;
            case 1: tmpfclabel = ui->fc4; tmpmilelabel = ui->mile4; break;
            case 2: tmpfclabel = ui->fc5; tmpmilelabel = ui->mile5; break;
            default:break;
        }
        bool & tmphasfc = getTmphasfc(j + 2);
        if (tmphasfc)
        {
            tmpfclabel->setText(QString("%1").arg(current_fc_master + j));
            tmpmilelabel->setText(QString("%1").arg(current_fc_master + j));
        }
        else
        {
            tmpfclabel->setText(QString("%1无数据").arg(current_fc_master + j));
            tmpmilelabel->setText(QString("%1无数据").arg(current_fc_master + j));
        }
    }

    // force the display to update
    this->repaint();
    
    return ret;
}
Code example #16
void ProcessingThread::run()
{
    while(1)
    {
        /////////////////////////////////
        // Stop thread if stopped=TRUE //
        /////////////////////////////////
        stoppedMutex.lock();
        if (stopped)
        {
            stopped=false;
            stoppedMutex.unlock();
            break;
        }
        stoppedMutex.unlock();
        /////////////////////////////////
        /////////////////////////////////

        // Save processing time
        processingTime=t.elapsed();
        // Start timer (used to calculate processing rate)
        t.start();

        // Get frame from queue, store in currentFrame, set ROI
        currentFrame=Mat(imageBuffer->getFrame(),currentROI);

        updateMembersMutex.lock();
        ///////////////////
        // PERFORM TASKS //
        ///////////////////
        // Note: ROI changes will take effect on next frame
        if(resetROIFlag)
            resetROI();
        else if(setROIFlag)
            setROI();
        ////////////////////////////////////
        // PERFORM IMAGE PROCESSING BELOW //
        ////////////////////////////////////
        // Grayscale conversion
        if(grayscaleOn)
            cvtColor(currentFrame,currentFrameGrayscale,CV_BGR2GRAY);
        // Smooth (in-place operations)
        if(smoothOn)
        {
            if(grayscaleOn)
            {
                switch(smoothType)
                {
                    // BLUR
                    case 0:
                        blur(currentFrameGrayscale,currentFrameGrayscale,Size(smoothParam1,smoothParam2));
                        break;
                    // GAUSSIAN
                    case 1:
                        GaussianBlur(currentFrameGrayscale,currentFrameGrayscale,Size(smoothParam1,smoothParam2),smoothParam3,smoothParam4);
                        break;
                    // MEDIAN
                    case 2:
                        medianBlur(currentFrameGrayscale,currentFrameGrayscale,smoothParam1);
                        break;
                }
            }
            else
            {
                switch(smoothType)
                {
                    // BLUR
                    case 0:
                        blur(currentFrame,currentFrame,Size(smoothParam1,smoothParam2));
                        break;
                    // GAUSSIAN
                    case 1:
                        GaussianBlur(currentFrame,currentFrame,Size(smoothParam1,smoothParam2),smoothParam3,smoothParam4);
                        break;
                    // MEDIAN
                    case 2:
                        medianBlur(currentFrame,currentFrame,smoothParam1);
                        break;
                }
            }
        }
        // Dilate
        if(dilateOn)
        {
            if(grayscaleOn)
                dilate(currentFrameGrayscale,currentFrameGrayscale,Mat(),Point(-1,-1),dilateNumberOfIterations);
            else
                dilate(currentFrame,currentFrame,Mat(),Point(-1,-1),dilateNumberOfIterations);
        }
        // Erode
        if(erodeOn)
        {
            if(grayscaleOn)
                erode(currentFrameGrayscale,currentFrameGrayscale,Mat(),Point(-1,-1),erodeNumberOfIterations);
            else
                erode(currentFrame,currentFrame,Mat(),Point(-1,-1),erodeNumberOfIterations);
        }
        // Flip
        if(flipOn)
        {
            if(grayscaleOn)
                flip(currentFrameGrayscale,currentFrameGrayscale,flipCode);
            else
                flip(currentFrame,currentFrame,flipCode);
        }
        // Canny edge detection
        if(cannyOn)
        {
            // Frame must be converted to grayscale first if grayscale conversion is OFF
            if(!grayscaleOn)
                cvtColor(currentFrame,currentFrameGrayscale,CV_BGR2GRAY);

            Canny(currentFrameGrayscale,currentFrameGrayscale,
                  cannyThreshold1,cannyThreshold2,
                  cannyApertureSize,cannyL2gradient);
        }
        ////////////////////////////////////
        // PERFORM IMAGE PROCESSING ABOVE //
        ////////////////////////////////////

        // Convert Mat to QImage: Show grayscale frame [if either Grayscale or Canny processing modes are ON]
        if(grayscaleOn||cannyOn)
            frame=MatToQImage(currentFrameGrayscale);
        // Convert Mat to QImage: Show BGR frame
        else
            frame=MatToQImage(currentFrame);
        updateMembersMutex.unlock();

        // Update statistics
        updateFPS(processingTime);
        currentSizeOfBuffer=imageBuffer->getSizeOfImageBuffer();
        // Inform GUI thread of new frame (QImage)
        emit newFrame(frame);
    }
    qDebug() << "Stopping processing thread...";
}