Example #1
void drawLine(ImgRGB& outImg, double l[], uchar r, uchar g, uchar b) {
	double x1, y1, x2, y2;
	// compute the line's end points within the image bounds
	getEndPoints(l, outImg.h - 1, outImg.w - 1, x1, y1, x2, y2);
	// wrap the caller's RGB buffer in a cv::Mat header (no pixel copy)
	cv::Mat tmpImg(outImg.h, outImg.w, CV_8UC3, outImg.data);
	cv::line(tmpImg, cv::Point2f(x1, y1), cv::Point2f(x2, y2),
			cv::Scalar(r, g, b));
}
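
The function above relies on cv::Mat's ability to wrap an external buffer without copying. A minimal standalone sketch of that zero-copy pattern (buffer size and dimensions are illustrative):

#include <opencv2/opencv.hpp>
#include <vector>

int main() {
    const int w = 640, h = 480;
    std::vector<unsigned char> buf(3 * w * h, 0); // external RGB buffer
    cv::Mat view(h, w, CV_8UC3, buf.data());      // header only, no pixel copy
    cv::line(view, cv::Point(0, 0), cv::Point(w - 1, h - 1),
             cv::Scalar(255, 0, 0));
    return 0; // buf now holds the rendered line
}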
Example #2
void MainWindow::loadImage(QString filepath){
    QFile file(filepath);
    if(file.exists()){
        QString filename = filepath.split("/").back();
        QString filetype = filename.split(".").back().toUpper();
        int bw_ratio = ui->ratio_slider->value();
        QImage tmpImg(filepath, qPrintable(filetype));
        origin = new QImage(tmpImg.scaled(QSize(NT, NT)));
        // convert it to grayscale first
        gray2bin(bw_ratio);
    } else {
        //qDebug() << "Error: file does not exist!";
    }
}
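
QImage's two-argument constructor treats its second parameter as a format hint; passing nullptr lets Qt detect the format from the file header, which is usually more robust than deriving it from the file suffix. A minimal sketch under that assumption (loadScaled and the square target size are hypothetical):

#include <QImage>

QImage loadScaled(const QString &path, int side) {
    QImage img(path, nullptr);            // nullptr = auto-detect the format
    if (img.isNull())
        return QImage();                  // load failed
    return img.scaled(QSize(side, side));
}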
Example #3
void KoReportItemMaps::requestRedraw()
{
    myDebug() << sender();
    QImage tmpImg(*m_mapImage);
    Marble::MarbleModel* marbleModel = dynamic_cast<Marble::MarbleModel*>(sender());
    if (!marbleModel)
        return; // the signal did not come from a MarbleModel
    OroIds *oroIds = &m_marbleImgs[marbleModel];
    oroIds->marbleWidget->render(&tmpImg);
    if(oroIds->pageId)
        oroIds->pageId->setImage(tmpImg);
    if(oroIds->sectionId)
        oroIds->sectionId->setImage(tmpImg);
    myDebug() << "pageId sectionId marbleWidget";
    myDebug() << oroIds->pageId << oroIds->sectionId << oroIds->marbleWidget;
}
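
Because requestRedraw() is a slot, qobject_cast is the conventional alternative to dynamic_cast on sender(): it needs no RTTI and likewise returns nullptr on a type mismatch. A minimal sketch with a hypothetical receiver class:

void MyReceiver::onRedraw() {
    auto *model = qobject_cast<Marble::MarbleModel *>(sender());
    if (!model)
        return; // the signal came from an unexpected sender
    // ... model is now safe to dereference ...
}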
Example #4
//==================================================================
void SaveJPEG( const Image &img, const char *pFName )
{
	// make sure it's 8-bit RGB; otherwise convert before writing
	if ( !img.IsSamplesNames( "rgb" ) || !img.IsSamplesType( Image::ST_U8 ) )
	{
		Image	tmpImg( img.mWd, img.mHe, 3, Image::ST_U8, -1, "rgb" );
		DIMG::ConvertImages( tmpImg, img );

		write_JPEG_file( pFName, 100, (const JSAMPLE *)tmpImg.GetPixelPtrR(0,0), tmpImg.mWd, tmpImg.mHe );
	}
	else
	{
		write_JPEG_file( pFName, 100, (const JSAMPLE *)img.GetPixelPtrR(0,0), img.mWd, img.mHe );
	}
}
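
A quick truth-table check of the corrected predicate: the temporary conversion is needed whenever the image is not "rgb" or not 8-bit. A minimal sketch, with needsConversion as a hypothetical helper mirroring the condition above:

#include <cassert>

static bool needsConversion(bool isRGB, bool isU8) {
    return !isRGB || !isU8;
}

int main() {
    assert( needsConversion(false, true ));  // wrong channel names
    assert( needsConversion(true,  false));  // wrong sample type
    assert(!needsConversion(true,  true ));  // already 8-bit RGB: write directly
    return 0;
}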
Example #5
void BlitzMainWindow::openFile(const QString &filename)
{
    QImage tmpImg(filename);
    if(tmpImg.isNull()){
        QMessageBox::warning(this, tr("File Open Error"),
                             tr("Unable to open image."));
        return;
    }
    // only retitle the window once the image has actually loaded
    setWindowTitle(filename);
    img = tmpImg;
    qWarning("Loaded image: %s, (%dx%d), format: %d", qPrintable(filename),
             img.width(), img.height(), (int)img.format());
    fn = filename;
    lbl->setPixmap(QPixmap::fromImage(img));
    if(lbl->sizeHint() != lbl->size())
        lbl->resize(lbl->sizeHint());
    else
        lbl->update();
}
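
qPrintable(filename) above is shorthand for filename.toLocal8Bit().constData(); the temporary byte array stays alive until the end of the full expression, so the pointer remains valid for the duration of the qWarning() call. A minimal sketch of the idiom:

#include <QString>
#include <QtGlobal>

void logName(const QString &name) {
    qWarning("name: %s", qPrintable(name)); // safe within this statement
}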
Example #6
void drawKeyPoints(const ImgG& img, const Mat_d& corners, ImgRGB& rgb, uchar r,
		uchar g, uchar b) {
	int nPts = corners.rows;
	if (nPts == 0)
		return;

	// wrap the grayscale buffer (no copy) before converting it to RGB
	cv::Mat cvImg(img.rows, img.cols, CV_8UC1, img.data);
	cv::Mat tmpImg(img.rows, img.cols, CV_8UC3);

	cv::cvtColor(cvImg, tmpImg, CV_GRAY2RGB);

	for (int i = 0; i < nPts; i++) {
		double x = corners.data[2 * i];
		double y = corners.data[2 * i + 1];
		cv::circle(tmpImg, cv::Point(cvRound(x), cvRound(y)), 3,
				cv::Scalar(r, g, b), 1, CV_AA);
	}

	// copy the rendered pixels out to the caller's buffer
	rgb.resize(img.cols, img.rows);
	memcpy(rgb.data, tmpImg.data, 3 * img.rows * img.cols);
}
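
The final memcpy assumes the cv::Mat is continuous, which holds here because tmpImg was freshly allocated with its own buffer; a view into another matrix would need a row-by-row copy instead. A minimal sketch of the general case (copyOut is a hypothetical helper):

#include <opencv2/opencv.hpp>
#include <cstring>

void copyOut(const cv::Mat &m, unsigned char *dst) {
    if (m.isContinuous()) {
        std::memcpy(dst, m.data, m.total() * m.elemSize());
    } else {
        const size_t rowBytes = m.cols * m.elemSize();
        for (int r = 0; r < m.rows; ++r)
            std::memcpy(dst + r * rowBytes, m.ptr(r), rowBytes);
    }
}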
Example #7
void showAnCalChrRes::updColSenHoriz(){
    //Accumulate values in each color
    //..
    QGraphicsView *tmpCanvas = ui->canvasCroped;

    tmpCanvas->scene()->clear();
    QImage tmpImg( "./tmpImages/tmpCropped.ppm" );
    // per-column channel sums, zero-initialized
    QVector<int> Red(tmpImg.width(), 0);
    QVector<int> Green(tmpImg.width(), 0);
    QVector<int> Blue(tmpImg.width(), 0);
    int r, c, maxR, maxG, maxB, xR, xG, xB;
    maxR = 0;
    maxG = 0;
    maxB = 0;
    xR   = 0;
    xG   = 0;
    xB   = 0;
    QRgb pixel;
    for(c=0;c<tmpImg.width();c++){
        Red[c]   = 0;
        Green[c] = 0;
        Blue[c]  = 0;
        for(r=0;r<tmpImg.height();r++){
            if(!tmpImg.valid(QPoint(c,r))){
                qDebug() << "Invalid r: " << r << "c: "<<c;
                qDebug() << "tmpImg.width(): " << tmpImg.width();
                qDebug() << "tmpImg.height(): " << tmpImg.height();
                return;
            }
            pixel     = tmpImg.pixel(QPoint(c,r));
            Red[c]   += qRed(pixel);
            Green[c] += qGreen(pixel);
            Blue[c]  += qBlue(pixel);
        }
        Red[c]   = round((float)Red[c]/tmpImg.height());
        Green[c] = round((float)Green[c]/tmpImg.height());
        Blue[c]  = round((float)Blue[c]/tmpImg.height());
        if( Red[c] > maxR ){
            maxR = Red[c];
            xR = c;
        }
        if( Green[c] > maxG ){
            maxG = Green[c];
            xG = c;
        }
        if( Blue[c] > maxB ){
            maxB = Blue[c];
            xB = c;
        }
        //qDebug() << "xR: " << xR << "xG: " << xG << "xB: " << xB;
    }
    int maxRGB = (maxR>maxG)?maxR:maxG;
    maxRGB = (maxB>maxRGB)?maxB:maxRGB;
    float upLimit = (float)tmpImg.height() * 0.7;

    //qDebug() << "c" << c << "maxR:"<<maxR<<" maxG:"<<maxG<<" maxB:"<<maxB;
    //qDebug() << "c" << c << "xR:"<<xR<<" xG:"<<xG<<" xB:"<<xB;
    //qDebug() << "tmpImg.width(): " << tmpImg.width();
    //qDebug() << "tmpImg.height(): " << tmpImg.height();

    //Draw camera's sensitivities
    //..
    int tmpPoint1, tmpPoint2, tmpHeight;
    tmpHeight = tmpImg.height();
    for(c=1;c<tmpImg.width();c++){
        if( ui->chbRed->isChecked() ){
            tmpPoint1 = ((float)Red[c-1]/((float)maxRGB)) * upLimit;
            tmpPoint2 = ((float)Red[c]/((float)maxRGB)) * upLimit;
            tmpPoint1 = tmpHeight - tmpPoint1;
            tmpPoint2 = tmpHeight - tmpPoint2;
            tmpCanvas->scene()->addLine( c, tmpPoint1, c+1, tmpPoint2, QPen(QColor("#FF0000")) );
        }
        if( ui->chbGreen->isChecked() ){
            tmpPoint1 = ((float)Green[c-1]/((float)maxRGB)) * upLimit;
            tmpPoint2 = ((float)Green[c]/((float)maxRGB)) * upLimit;
            tmpPoint1 = tmpHeight - tmpPoint1;
            tmpPoint2 = tmpHeight - tmpPoint2;
            tmpCanvas->scene()->addLine( c, tmpPoint1, c+1, tmpPoint2, QPen(QColor("#00FF00")) );            
        }
        if( ui->chbBlue->isChecked() ){
            tmpPoint1 = ((float)Blue[c-1]/((float)maxRGB)) * upLimit;
            tmpPoint2 = ((float)Blue[c]/((float)maxRGB)) * upLimit;
            tmpPoint1 = tmpHeight - tmpPoint1;
            tmpPoint2 = tmpHeight - tmpPoint2;
            tmpCanvas->scene()->addLine( c, tmpPoint1, c+1, tmpPoint2, QPen(QColor("#0000FF")) );
        }
    }

    //Draw RGB peaks
    //..
    addLine2CanvasInPos(true,xR,Qt::red);
    globalRedLine = globalTmpLine;
    addLine2CanvasInPos(true,xG,Qt::green);
    globalGreenLine = globalTmpLine;
    addLine2CanvasInPos(true,xB,Qt::blue);
    globalBlueLine = globalTmpLine;

    globalRedLine->parameters.orientation   = _VERTICAL;
    globalGreenLine->parameters.orientation = _VERTICAL;
    globalBlueLine->parameters.orientation  = _VERTICAL;

    /*
    QPoint p1,p2;
    p1.setX(0);
    p1.setY(0);
    p2.setX(0);
    p2.setY(tmpImg.height());
    customLine *redPeak = new customLine(p1,p2,QPen(Qt::red));
    customLine *greenPeak = new customLine(p1,p2,QPen(Qt::green));
    customLine *bluePeak = new customLine(p1,p2,QPen(Qt::blue));

    redPeak->setX(xR);
    greenPeak->setX(xG);
    bluePeak->setX(xB);
    if(ui->chbRedLine->isChecked()){
        tmpCanvas->scene()->addItem(redPeak);
    }
    if(ui->chbGreenLine->isChecked()){
        tmpCanvas->scene()->addItem(greenPeak);
    }
    if(ui->chbBlueLine->isChecked()){
        tmpCanvas->scene()->addItem(bluePeak);
    }
    globalRedLine = redPeak;
    globalGreenLine = greenPeak;
    globalBlueLine = bluePeak;
    */
}
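
Per-pixel QImage::pixel() calls are convenient but slow on large images; constScanLine() gives direct read-only row access. A minimal sketch of the same red-channel column averaging done that way (columnMeanRed is a hypothetical helper):

#include <QImage>
#include <QVector>

QVector<int> columnMeanRed(const QImage &src) {
    QImage img = src.convertToFormat(QImage::Format_RGB32);
    QVector<int> red(img.width(), 0);
    for (int r = 0; r < img.height(); ++r) {
        const QRgb *row = reinterpret_cast<const QRgb *>(img.constScanLine(r));
        for (int c = 0; c < img.width(); ++c)
            red[c] += qRed(row[c]);
    }
    for (int c = 0; c < img.width(); ++c)
        red[c] = qRound(float(red[c]) / img.height());
    return red;
}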
Example #8
int video_thread(void *arg)
{
    ZW_LOG_WARNING(QString("TTTTTTTV"));
    VideoState *is = (VideoState *) arg;
    AVPacket pkt1, *packet = &pkt1;

    int ret, got_picture, numBytes;

    double video_pts = 0; // pts of the current video frame
    double audio_pts = 0; // audio pts


    /// state used for decoding the video
    AVFrame *pFrame, *pFrameRGB;
    uint8_t *out_buffer_rgb; // decoded RGB data
    struct SwsContext *img_convert_ctx;  // converts the pixel format of decoded frames

    AVCodecContext *pCodecCtx = is->video_st->codec; // video decoder context

    pFrame = av_frame_alloc();
    pFrameRGB = av_frame_alloc();

    /// here we convert the decoded YUV data to RGB32
    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
            pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
            AV_PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);

    numBytes = avpicture_get_size(AV_PIX_FMT_RGB32, pCodecCtx->width,pCodecCtx->height);

    out_buffer_rgb = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
    avpicture_fill((AVPicture *) pFrameRGB, out_buffer_rgb, AV_PIX_FMT_RGB32,
            pCodecCtx->width, pCodecCtx->height);

    while(1)
    {
        if (is->quit)
        {
            ZW_LOG_WARNING(QString("TTTTTTTV"));
            break;
        }

        if (is->isPause) // paused
        {
            ZW_LOG_WARNING(QString("TTTTTTTV"));
            SDL_Delay(10);
            continue;
        }

        if (packet_queue_get(&is->videoq, packet, 0) <= 0)
        {
            ZW_LOG_WARNING(QString("TTTTTTTV"));
            if (is->readFinished)
            {// the queue is empty and reading has finished
                ZW_LOG_WARNING(QString("TTTTTTTV"));
                break;
            }
            else
            {
                ZW_LOG_WARNING(QString("TTTTTTTV"));
                SDL_Delay(1); // the queue is only temporarily empty
                continue;
            }
        }
        ZW_LOG_WARNING(QString("TTTTTTTV"));

        // receiving this packet means a seek was just performed; the decoder's buffers must be flushed
        if(strcmp((char*)packet->data,FLUSH_DATA) == 0)
        {
            avcodec_flush_buffers(is->video_st->codec);
            av_free_packet(packet);
            continue;
        }
        ZW_LOG_WARNING(QString("TTTTTTTV"));

        ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);

        if (ret < 0) {
            qDebug() << "decode error.";
            av_free_packet(packet);
            continue;
        }
        ZW_LOG_WARNING(QString("TTTTTTTV"));

        if (packet->dts == AV_NOPTS_VALUE && pFrame->opaque && *(uint64_t*) pFrame->opaque != AV_NOPTS_VALUE)
        {
            video_pts = *(uint64_t *) pFrame->opaque;
        }
        else if (packet->dts != AV_NOPTS_VALUE)
        {
            video_pts = packet->dts;
        }
        else
        {
            video_pts = 0;
        }
        ZW_LOG_WARNING(QString("TTTTTTTV"));

        video_pts *= av_q2d(is->video_st->time_base);
        video_pts = synchronize_video(is, pFrame, video_pts);

        if (is->seek_flag_video)
        {
            // a seek occurred: skip the frames from the keyframe up to the target time
           if (video_pts < is->seek_time)
           {
               ZW_LOG_WARNING(QString("TTTTTTTV"));
               av_free_packet(packet);
               continue;
           }
           else
           {
               ZW_LOG_WARNING(QString("TTTTTTTV"));
               is->seek_flag_video = 0;
           }
        }

        while(1)
        {
            if (is->quit)
            {
                ZW_LOG_WARNING(QString("TTTTTTTV"));
                break;
            }

            audio_pts = is->audio_clock;

            // when seeking, video_clock was reset to 0, so video_pts must be
            // refreshed here; otherwise a backwards seek would get stuck in
            // this loop
            video_pts = is->video_clock;
            ZW_LOG_WARNING(QString("TTTTTTTVaudio_pts=%1,video_pts=%2").arg(audio_pts).arg(video_pts));


            if (video_pts <= audio_pts) break;
//            if (video_pts >= audio_pts) break;

            int delayTime = (video_pts - audio_pts) * 1000;

            delayTime = delayTime > 5 ? 5 : delayTime; // wait at most 5 ms per check
            ZW_LOG_WARNING(QString("TTTTTTTV"));

            SDL_Delay(delayTime);
        }

        if (got_picture) {
            sws_scale(img_convert_ctx,
                    (uint8_t const * const *) pFrame->data,
                    pFrame->linesize, 0, pCodecCtx->height, pFrameRGB->data,
                    pFrameRGB->linesize);

            // load the RGB data into a QImage
            QImage tmpImg((uchar *)out_buffer_rgb,pCodecCtx->width,pCodecCtx->height,QImage::Format_RGB32);
            QImage image = tmpImg.copy(); // deep-copy the frame to hand to the UI
            ZW_LOG_WARNING(QString("TTTTTTTV"));
            is->player->disPlayVideo(image); // emits the signal that displays the frame
        }
        ZW_LOG_WARNING(QString("TTTTTTTV"));

        av_free_packet(packet);
        ZW_LOG_WARNING(QString("TTTTTTTV"));

    }

    av_free(pFrame);
    av_free(pFrameRGB);
    av_free(out_buffer_rgb);

    if (!is->quit)
    {
        ZW_LOG_WARNING(QString("TTTTTTTV"));
        is->quit = true;
    }

    ZW_LOG_WARNING(QString("TTTTTTTV"));
    is->videoThreadFinished = true;

    return 0;
}
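
The tmpImg.copy() inside the decode loop is essential: the QImage built from out_buffer_rgb is only a shallow wrapper, and sws_scale overwrites that buffer on the next frame, so a deep copy must be taken before the image crosses the thread boundary. A minimal sketch of the pattern (snapshotFrame is a hypothetical helper):

#include <QImage>

QImage snapshotFrame(const uchar *buf, int w, int h) {
    QImage view(buf, w, h, QImage::Format_RGB32); // shallow, non-owning view
    return view.copy();                           // deep copy owns its pixels
}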
Example #9
void drwnTextonFilterBank::filter(const cv::Mat& img, std::vector<cv::Mat>& response) const
{
    // check input
    DRWN_ASSERT(img.data != NULL);
    if (response.empty()) {
        response.resize(NUM_FILTERS);
    }
    DRWN_ASSERT((int)response.size() == NUM_FILTERS);
    DRWN_ASSERT((img.channels() == 3) && (img.depth() == CV_8U));
    for (int i = 0; i < NUM_FILTERS; i++) {
        if ((response[i].rows != img.rows) || (response[i].cols != img.cols)) {
            response[i] = cv::Mat(img.rows, img.cols, CV_32FC1);
        }
        DRWN_ASSERT((response[i].channels() == 1) && (response[i].depth() == CV_32F));
    }

    int k = 0;

    // color convert
    DRWN_LOG_DEBUG("Color converting image...");
    cv::Mat imgCIELab8U(img.rows, img.cols, CV_8UC3);
    cv::cvtColor(img, imgCIELab8U, CV_BGR2Lab);
    cv::Mat imgCIELab(img.rows, img.cols, CV_32FC3);
    imgCIELab8U.convertTo(imgCIELab, CV_32F, 1.0 / 255.0);

    cv::Mat greyImg(img.rows, img.cols, CV_32FC1);
    const int from_to[] = {0, 0};
    cv::mixChannels(&imgCIELab, 1, &greyImg, 1, from_to, 1);

    // gaussian filter on all color channels
    DRWN_LOG_DEBUG("Generating gaussian filter responses...");
    cv::Mat gImg32f(img.rows, img.cols, CV_32FC3);
    for (double sigma = 1.0; sigma <= 4.0; sigma *= 2.0) {
        const int h = 2 * (int)(_kappa * sigma) + 1;
        cv::GaussianBlur(imgCIELab, gImg32f, cv::Size(h, h), 0);
        cv::split(gImg32f, &response[k]);
        k += 3;
    }

    // derivatives of gaussians on just greyscale image
    DRWN_LOG_DEBUG("Generating derivative of gaussian filter responses...");
    for (double sigma = 2.0; sigma <= 4.0; sigma *= 2.0) {
        // x-direction
        cv::Sobel(greyImg, response[k++], CV_32F, 1, 0, 1);
        cv::GaussianBlur(response[k - 1], response[k - 1],
            cv::Size(2 * (int)(_kappa * sigma) + 1, 2 * (int)(3.0 * _kappa * sigma) + 1), 0);

        // y-direction
        cv::Sobel(greyImg, response[k++], CV_32F, 0, 1, 1);
        cv::GaussianBlur(response[k - 1], response[k - 1],
            cv::Size(2 * (int)(3.0 * _kappa * sigma) + 1, 2 * (int)(_kappa * sigma) + 1), 0);
    }

    // laplacian of gaussian on just greyscale image
    DRWN_LOG_DEBUG("Generating laplacian of gaussian filter responses...");
    cv::Mat tmpImg(img.rows, img.cols, CV_32FC1);
    for (double sigma = 1.0; sigma <= 8.0; sigma *= 2.0) {
        const int h = 2 * (int)(_kappa * sigma) + 1;
        cv::GaussianBlur(greyImg, tmpImg, cv::Size(h, h), 0);
        cv::Laplacian(tmpImg, response[k++], CV_32F, 3);
    }

    DRWN_ASSERT_MSG(k == NUM_FILTERS, k << " != " << NUM_FILTERS);
}
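
For reference, the count that the final assertion checks works out as 3 Gaussian scales (sigma = 1, 2, 4) on 3 CIELab channels, plus 2 derivative scales (sigma = 2, 4) in 2 directions, plus 4 Laplacian scales (sigma = 1, 2, 4, 8). Assuming that is what NUM_FILTERS encodes, the arithmetic is:

static_assert(3 * 3 + 2 * 2 + 4 == 17, "9 Gaussian + 4 DoG + 4 LoG responses");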