コード例 #1
0
void QgsEffectStackPropertiesWidget::updatePreview()
{
  QPainter painter;
  QImage previewImage( 150, 150, QImage::Format_ARGB32 );
  previewImage.fill( Qt::transparent );
  painter.begin( &previewImage );
  painter.setRenderHint( QPainter::Antialiasing );
  QgsRenderContext context = QgsSymbolLayerV2Utils::createRenderContext( &painter );
  if ( !mPreviewPicture )
  {
    QPicture previewPic;
    QPainter previewPicPainter;
    previewPicPainter.begin( &previewPic );
    previewPicPainter.setPen( Qt::red );
    previewPicPainter.setBrush( QColor( 255, 100, 100, 255 ) );
    previewPicPainter.drawEllipse( QPoint( 75, 75 ), 30, 30 );
    previewPicPainter.end();
    mStack->render( previewPic, context );
  }
  else
  {
    context.painter()->translate( 35, 35 );
    mStack->render( *mPreviewPicture, context );
  }
  painter.end();

  lblPreview->setPixmap( QPixmap::fromImage( previewImage ) );
}
コード例 #2
0
ファイル: fqterm_http.cpp プロジェクト: ashang/fqterm
void FQTermHttp::getLink(const QUrl& url, bool preview) {
  QUrl u=url;
  isExisting_ = false;
  isPreview_ = preview;
  previewEmitted = false;
  lastPercent_ = 0;
  if (u.isRelative() || u.scheme() == "file") {
    emit previewImage(cacheFileName_, false, true);
    emit done(this);
    return ;
  }

  if (QFile::exists(getPath(USER_CONFIG) + "hosts.cfg")) {
    config_ = new FQTermConfig(getPath(USER_CONFIG) + "hosts.cfg");
    QString strTmp = config_->getItemValue("hosts", u.host().toLocal8Bit());
    if (!strTmp.isEmpty()) {
      QString strUrl = u.toString();
      strUrl.replace(QRegExp(u.host(), Qt::CaseInsensitive), strTmp);
      u = strUrl;
    }
  }
  if (!(netreply_ && netreply_->hasRawHeader("Location"))) {
    cacheFileName_ = QFileInfo(u.path()).fileName();
  }
  if(netreply_){
    netreply_->blockSignals(true);
    netreply_.take()->deleteLater();
  }

  netreply_.reset(nam_->get(QNetworkRequest(u)));
  FQ_VERIFY(connect(netreply_.data(), SIGNAL(finished()), this, SLOT(httpDone())));
  FQ_VERIFY(connect(netreply_.data(), SIGNAL(downloadProgress(qint64, qint64)),this, SLOT(httpRead(qint64, qint64))));
  FQ_VERIFY(connect(netreply_.data(), SIGNAL(error( QNetworkReply::NetworkError)), this, SLOT(httpError(QNetworkReply::NetworkError))));
  FQ_VERIFY(connect(netreply_.data(), SIGNAL(metaDataChanged()), this, SLOT(httpResponse())));
}
コード例 #3
0
ファイル: UiUtil.cpp プロジェクト: xtuer/Qt
// 下载预览 url 的图片
void UiUtil::previewImage(const QString &url, const QString &dir) {
    // 1. 计算缓存封面图片的路径: dir/${url-md5}.${image-suffix}
    // 2. 预览图片
    //    2.1 如果图片已经缓存,则使用缓存的图片
    //    2.2 如果没有缓存过,则从网络下载缓存到本地,缩放到适合的大小,然后显示

    // [1] 计算缓存封面图片的路径: temp/${url-md5}.${image-suffix}
    QByteArray hex = QCryptographicHash::hash(url.toUtf8(), QCryptographicHash::Md5).toHex();
    QString    md5(hex.constData());
    QFileInfo  info(QUrl(url).fileName());
    QString    previewImagePath = QString("%1/%2.%3").arg(dir).arg(md5).arg(info.suffix());
    QFile      previewImage(previewImagePath);

    if (previewImage.exists()) {
        // [2.1] 如果预览图片已经缓存就用缓存的,否则下载最新的
        MessageBox::message(QString("<img src='%1'>").arg(previewImagePath));
    } else {
        HttpClient(url).debug(true).download(previewImagePath, [=] (const QString &) {
            // [2.2] 如果没有缓存过,则从网络下载缓存到本地,缩放到适合的大小,然后显示
            QImage image = QImage(previewImagePath).scaledToWidth(256, Qt::SmoothTransformation);
            image.save(previewImagePath);

            MessageBox::message(QString("<img src='%1'>").arg(previewImagePath));
        }, [] (const QString &error) {
            // 显示下载错误
            MessageBox::message(error);
        });
    }
}
コード例 #4
0
ファイル: exportimagedialog.cpp プロジェクト: KDE/kstars
// Constructs the dialog for exporting a sky image of `size` to `url`.
// `imgExporter` may be supplied to reuse an existing exporter; when null,
// a new ImageExporter owned by this dialog is created.
ExportImageDialog::ExportImageDialog(const QString &url, const QSize &size, ImageExporter *imgExporter)
    : QDialog((QWidget*) KStars::Instance()), m_KStars(KStars::Instance()), m_Url(url), m_Size(size)
{
#ifdef Q_OS_OSX
    // On macOS keep the dialog floating above the main window.
    setWindowFlags(Qt::Tool| Qt::WindowStaysOnTopHint);
#endif
    m_DialogUI = new ExportImageDialogUI(this);

    QVBoxLayout *mainLayout = new QVBoxLayout;
    mainLayout->addWidget(m_DialogUI);
    setLayout(mainLayout);

    QDialogButtonBox *buttonBox = new QDialogButtonBox(QDialogButtonBox::Ok|QDialogButtonBox::Cancel);
    mainLayout->addWidget(buttonBox);
    connect(buttonBox, SIGNAL(accepted()), this, SLOT(accept()));
    connect(buttonBox, SIGNAL(rejected()), this, SLOT(reject()));

    // OK both accepts the dialog (above) and triggers the export; Cancel
    // both rejects and closes it.
    connect(buttonBox, SIGNAL(accepted()), this, SLOT(exportImage()));
    connect(buttonBox, SIGNAL(rejected()), this, SLOT(close()));

    // Extra action button to preview the image before exporting.
    QPushButton *previewB = new QPushButton(i18n("Preview image"));
    buttonBox->addButton(previewB, QDialogButtonBox::ActionRole);
    connect(previewB, SIGNAL(clicked()), this, SLOT(previewImage()));

    // The preview button's enabled state follows the legend checkbox.
    connect(m_DialogUI->addLegendCheckBox, SIGNAL(toggled(bool)), this, SLOT(switchLegendEnabled(bool)));
    connect(m_DialogUI->addLegendCheckBox, SIGNAL(toggled(bool)), previewB, SLOT(setEnabled(bool)));

    m_ImageExporter = ( ( imgExporter ) ? imgExporter : new ImageExporter( this ) );

    setWindowTitle(i18n("Export sky image"));

    setupWidgets();
}
コード例 #5
0
// Wires KDialog-style button signals to the export/preview actions and
// ties the User1 (preview) button's enabled state to the legend checkbox.
// NOTE(review): this appears to be the KDialog-era counterpart of the
// QDialogButtonBox wiring done in the constructor — confirm which is used.
void ExportImageDialog::setupConnections()
{
    connect(this, SIGNAL(okClicked()), this, SLOT(exportImage()));
    connect(this, SIGNAL(cancelClicked()), this, SLOT(close()));
    connect(this, SIGNAL(user1Clicked()), this, SLOT(previewImage()));

    connect(m_DialogUI->addLegendCheckBox, SIGNAL(toggled(bool)), this, SLOT(switchLegendEnabled(bool)));
    connect(m_DialogUI->addLegendCheckBox, SIGNAL(toggled(bool)), button(KDialog::User1), SLOT(setEnabled(bool)));
}
コード例 #6
0
// Converts a raw RGB888 video frame into `image`.  Rows are copied one at
// a time because the source rows are frame.lineSize bytes apart, which may
// differ from the packed width*3 bytes the QImage row needs.
void ImageWriter::writeFrame(VideoFrame& frame,  QImage& image)
{
    QImage converted(frame.width, frame.height, QImage::Format_RGB888);
    const quint32 bytesPerRow = frame.width * 3;
    for (quint32 row = 0; row < frame.height; ++row) {
        const uchar *srcLine = &frame.frameData[row * frame.lineSize];
        memcpy(converted.scanLine(row), srcLine, bytesPerRow);
    }

    image = converted;
}
コード例 #7
0
ファイル: fqterm_http.cpp プロジェクト: ashang/fqterm
// Finishes a completed HTTP transfer: removes the bookkeeping entry for
// the cache file, emits the final preview or a success message, and then
// signals overall completion.
void FQTermHttp::httpDone() {
    mutex_.lock();
    downloadMap_.remove(cacheFileName_);
    mutex_.unlock();

    if (!isPreview_) {
      emit message("Download one file successfully");
    } else {
      // Final preview: the file is now complete (second flag = finished).
      emit previewImage(cacheFileName_, true, true);
    }
    emit done(this);
}
コード例 #8
0
// Shows a preview for the currently selected positive item.  The selected
// row is expected to expose five columns: path, x, y, width, height.  When
// the four numeric columns parse as integers, the preview is restricted to
// that section; otherwise the whole image is previewed.
void ClassifierTrainer::show_image_preview_for_positive_item(const QModelIndex &index)
{
    // section
    QModelIndexList selectedIndices = positivesTreeView->selectionModel()->selectedIndexes();
    // Guard: indexing [0]..[4] below requires a complete 5-column selection
    // (an empty or partial selection would otherwise crash).
    if (selectedIndices.size() < 5)
        return;

    QString path = positivesModel->itemFromIndex(selectedIndices[0])->text();
    bool xOK = false, yOK = false, widthOK = false, heightOK = false;
    int x = positivesModel->itemFromIndex(selectedIndices[1])->text().toInt(&xOK),
        y = positivesModel->itemFromIndex(selectedIndices[2])->text().toInt(&yOK),
        width = positivesModel->itemFromIndex(selectedIndices[3])->text().toInt(&widthOK),
        height = positivesModel->itemFromIndex(selectedIndices[4])->text().toInt(&heightOK);

    if (xOK && yOK && widthOK && heightOK)
    {
        Section s(path, x, y, width, height);
        previewImage(path, &s);
    }
    else
    {
        previewImage(path);
    }
}
コード例 #9
0
ファイル: fqterm_http.cpp プロジェクト: ashang/fqterm
// Progress slot for the active download: appends the newly received bytes
// to the cache file, emits percent() updates, and — for previewed JPEGs —
// emits progressive previewImage() signals at most every 10 percent.
void FQTermHttp::httpRead(qint64 done, qint64 total) {
  // Drain whatever the reply has buffered and append it to the cache file.
  QByteArray ba = netreply_->readAll();
  QFile file(cacheFileName_);
  if (file.open(QIODevice::ReadWrite | QIODevice::Append)) {
    QDataStream ds(&file);
    ds.writeRawData(ba, ba.size());
    file.close();
  }
  // total == 0 means the size is unknown; skip progress reporting then.
  if (total != 0) {
	//m_pDialog->setProgress(done,total);
    int p = done *100 / total;
    // Progressive preview only for .jpg files, throttled to 10% steps.
    if (p - lastPercent_ >= 10 && isPreview_ && QFileInfo(cacheFileName_).suffix().toLower() == "jpg") {
      if (!previewEmitted) {
        // First partial preview (second flag: file not yet complete).
        emit previewImage(cacheFileName_,true, false);
        previewEmitted = true;
      } else {
        emit previewImage(cacheFileName_,false, false);
      }
      lastPercent_ = p;
    }
    emit percent(p);
  }
}
コード例 #10
0
// This method is the main workhorse, and is run by the camera thread.
// Each loop iteration: (1) drains parameter-change requests queued by the
// Android app and applies them to the current shot state, (2) programs the
// FCam sensor for the next preview shot, and (3) post-processes the captured
// frame (auto-exposure/WB/focus, face detection, histogram, frame buffer)
// before publishing it via the triple buffer.
static void *FCamAppThread(void *ptr) {
    FCAM_INTERFACE_DATA *tdata = (FCAM_INTERFACE_DATA *)ptr;
    Timer timer;
    JNIEnv *env;
    tdata->javaVM->AttachCurrentThread(&env, 0);
    writer = 0; // Initialized on the first PARAM_OUTPUT_DIRECTORY set request.

    // Initialize FCam devices.
    FCam::Tegra::Sensor sensor;
    FCam::Tegra::Lens lens;
    FCam::Tegra::Flash flash;
    sensor.attach(&lens);
    sensor.attach(&flash);
    MyAutoFocus autofocus(&lens);
    MyFaceDetector faceDetector("/data/fcam/face.xml");

    FCam::Image previewImage(PREVIEW_IMAGE_WIDTH, PREVIEW_IMAGE_HEIGHT, FCam::YUV420p);
    FCam::Tegra::Shot shot;

    // Initialize FPS stat calculation.
    tdata->captureFps = 30; // assuming 30hz
    double fpsUpdateTime = timer.get();
    int frameCount = 0;

    // Local task queue that processes messages from the Android application.
    std::queue<ParamSetRequest> taskQueue;
    ParamSetRequest task;

    for (;;) {
        FCAM_SHOT_PARAMS *currentShot = &tdata->currentShot;
        FCAM_SHOT_PARAMS *previousShot = &tdata->previousShot;
        // Copy tasks to local queue
        sAppData->requestQueue.consumeAll(taskQueue);

        // Parse all tasks from the Android applications.
        while (!taskQueue.empty()) {
            task = taskQueue.front();
            taskQueue.pop();

            bool prevValue;
            // Task id is packed into the low 16 bits; high bits carry the
            // burst picture index the request applies to.
            int taskId = task.getId() & 0xffff;
            int *taskData = (int *)task.getData();
            int pictureId = task.getId() >> 16;

            switch (taskId) {
            case PARAM_SHOT:
                // Note: Exposure is bounded below at 1/1000 (FCam bug?)
                currentShot->captureSet[pictureId].exposure = taskData[SHOT_PARAM_EXPOSURE] < 1000 ? 1000 : taskData[SHOT_PARAM_EXPOSURE];
                currentShot->captureSet[pictureId].focus = taskData[SHOT_PARAM_FOCUS];
                currentShot->captureSet[pictureId].gain = taskData[SHOT_PARAM_GAIN];
                currentShot->captureSet[pictureId].wb = taskData[SHOT_PARAM_WB];
                currentShot->captureSet[pictureId].flashOn = taskData[SHOT_PARAM_FLASH];
                break;
            case PARAM_PREVIEW_EXPOSURE:
                currentShot->preview.user.exposure = taskData[0];
                break;
            case PARAM_PREVIEW_FOCUS:
                currentShot->preview.user.focus = taskData[0];
                break;
            case PARAM_PREVIEW_GAIN:
                currentShot->preview.user.gain = taskData[0];
                break;
            case PARAM_PREVIEW_WB:
                currentShot->preview.user.wb = taskData[0];
                break;
            case PARAM_PREVIEW_AUTO_EXPOSURE_ON:
                prevValue = currentShot->preview.autoExposure;
                currentShot->preview.autoExposure = taskData[0] != 0;
                // NOTE(review): precedence makes this parse as
                // `prevValue ^ (flag != 0)`; with bool operands the overall
                // test reduces to "the auto flag just switched on".  The
                // same pattern repeats for focus/gain/WB below — consider
                // explicit parentheses for clarity.
                if (!prevValue && prevValue ^ currentShot->preview.autoExposure != 0) {
                    // Auto turned on: seed the evaluator from the manual value.
                    previousShot->preview.evaluated.exposure = currentShot->preview.user.exposure;
                } else {
                    // Auto turned off (or unchanged): carry over the last
                    // evaluated value as the new manual setting.
                    currentShot->preview.user.exposure = previousShot->preview.evaluated.exposure;
                }
                break;
            case PARAM_PREVIEW_AUTO_FOCUS_ON:
                prevValue = currentShot->preview.autoFocus;
                currentShot->preview.autoFocus = taskData[0] != 0;
                if (!prevValue && prevValue ^ currentShot->preview.autoFocus != 0) {
                    previousShot->preview.evaluated.focus = currentShot->preview.user.focus;
                } else {
                    currentShot->preview.user.focus = previousShot->preview.evaluated.focus;
                }
                break;
            case PARAM_PREVIEW_AUTO_GAIN_ON:
                prevValue = currentShot->preview.autoGain;
                currentShot->preview.autoGain = taskData[0] != 0;
                if (!prevValue && prevValue ^ currentShot->preview.autoGain != 0) {
                    previousShot->preview.evaluated.gain = currentShot->preview.user.gain;
                } else {
                    currentShot->preview.user.gain = previousShot->preview.evaluated.gain;
                }
                break;
            case PARAM_PREVIEW_AUTO_WB_ON:
                prevValue = currentShot->preview.autoWB;
                currentShot->preview.autoWB = taskData[0] != 0;
                if (!prevValue && prevValue ^ currentShot->preview.autoWB != 0) {
                    previousShot->preview.evaluated.wb = currentShot->preview.user.wb;
                } else {
                    currentShot->preview.user.wb = previousShot->preview.evaluated.wb;
                }
                break;
            case PARAM_RESOLUTION:
                break;
            case PARAM_BURST_SIZE:
                currentShot->burstSize = taskData[0];
                break;
            case PARAM_OUTPUT_FORMAT:
                break;
            case PARAM_VIEWER_ACTIVE:
                tdata->isViewerActive = taskData[0] != 0;
                break;
            case PARAM_OUTPUT_DIRECTORY:
                if (writer == 0) {
                    writer = new AsyncImageWriter((char *)task.getData());
                    writer->setOnFileSystemChangedCallback(OnFileSystemChanged);
                }
                break;
            case PARAM_OUTPUT_FILE_ID:
                AsyncImageWriter::SetFreeFileId(taskData[0]);
                break;
            case PARAM_TAKE_PICTURE:
                if (writer != 0 && task.getDataAsInt() != 0) { // Don't take picture if we can't write out.
                    // capture begin
                    tdata->isCapturing = true;
                    // notify capture start
                    env->CallVoidMethod(tdata->fcamInstanceRef, tdata->notifyCaptureStart);
                    OnCapture(tdata, writer, sensor, flash, lens);
                    // capture done
                    tdata->isCapturing = false;
                    // notify capture completion
                    env->CallVoidMethod(tdata->fcamInstanceRef, tdata->notifyCaptureComplete);
                }
                break;
            case PARAM_PRIV_FS_CHANGED:
                if (taskData[0] != 0) {
                    // notify fs change
                    env->CallVoidMethod(tdata->fcamInstanceRef, tdata->notifyFileSystemChange);
                }
                break;
            /* [CS478]
             * You will probably want extra cases here, to handle messages
             * that request autofocus to be activated. Define any new
             * message types in ParamSetRequestion.h.
             */
            case PARAM_AUTO_FOCUS_LOCAL_REG:
                //LOG("MYFOCUS local focus switch\n");
                autofocus.state = AUTO_FOCUS_FOCUS;
                autofocus.setRect(taskData[0] - RECT_EDGE_LEN / 2, taskData[1] - RECT_EDGE_LEN / 2);//hack TODO
                autofocus.startSweep();
                break;
            case PARAM_AUTO_FOCUS_GLOBAL:
                //LOG("MYFOCUS global focus switch\n");
                autofocus.state = AUTO_FOCUS_FOCUS;
                autofocus.setRect(0, 0, PREVIEW_IMAGE_WIDTH, PREVIEW_IMAGE_HEIGHT);
                autofocus.startSweep();
                break;

            /* [CS478] Assignment #2
             * You will probably yet another extra case here to handle face-
             * based autofocus. Recall that it might be useful to add a new
             * message type in ParamSetRequest.h
             */
            case PARAM_AUTO_FOCUS_FACE:
                LOG("MYFOCUS face focus switch\n");
                autofocus.state = AUTO_FOCUS_FACE_DETECT;
                autofocus.fdWait();
                //autofocus.startFaceDetect();
                break;
            // TODO TODO TODO
            default:
                ERROR("TaskDispatch(): received unsupported task id (%i)!", taskId);
            }
        }

        if (!tdata->isViewerActive) continue; // Viewer is inactive, so skip capture.

        // Setup preview shot parameters.
        shot.exposure = currentShot->preview.autoExposure ? previousShot->preview.evaluated.exposure : currentShot->preview.user.exposure;
        shot.gain = currentShot->preview.autoGain ? previousShot->preview.evaluated.gain : currentShot->preview.user.gain;
        shot.whiteBalance = currentShot->preview.autoWB ? previousShot->preview.evaluated.wb : currentShot->preview.user.wb;
        shot.image = previewImage;
        shot.histogram.enabled = true;
        shot.histogram.region = FCam::Rect(0, 0, PREVIEW_IMAGE_WIDTH, PREVIEW_IMAGE_HEIGHT);
        shot.sharpness.enabled = currentShot->preview.autoFocus;
        shot.sharpness.size = FCam::Size(16, 12);
        shot.fastMode = true;
        shot.clearActions();

        // If in manual focus mode, and the lens is not at the right place, add an action to move it.
        if (!currentShot->preview.autoFocus && previousShot->preview.user.focus != currentShot->preview.user.focus) {
            shot.clearActions();
            FCam::Lens::FocusAction focusAction(&lens);
            focusAction.time = 0;
            focusAction.focus = currentShot->preview.user.focus;
            shot.addAction(focusAction);
        }

        // Send the shot request to FCam.
        sensor.stream(shot);

        // Fetch the incoming frame from FCam.
        FCam::Frame frame = sensor.getFrame();

        // Process the incoming frame. If autoExposure or autoGain is enabled, update parameters based on the frame.
        if (currentShot->preview.autoExposure || currentShot->preview.autoGain) {
            FCam::autoExpose(&shot, frame, sensor.maxGain(), sensor.maxExposure(), sensor.minExposure(), 0.3);
            currentShot->preview.evaluated.exposure = shot.exposure;
            currentShot->preview.evaluated.gain = shot.gain;
        }

        // Process the incoming frame. If autoWB is enabled, update parameters based on the frame.
        if (currentShot->preview.autoWB) {
            FCam::autoWhiteBalance(&shot, frame);
            currentShot->preview.evaluated.wb = shot.whiteBalance;
        }

        // Face-detect mode: draw a 1px border around each detected face
        // directly into the frame, then hand the rectangles to autofocus.
        if (autofocus.state == AUTO_FOCUS_FACE_DETECT) {
            std::vector<cv::Rect> facesFound = faceDetector.detectFace(frame.image());
            for (unsigned int i = 0; i < facesFound.size(); i++) {
                cv::Rect r = facesFound[i];
                for (int x = 0; x < r.width; x++) {
                    frame.image()(r.x + x, r.y)[0] = 254u;
                    frame.image()(r.x + x, r.y + r.height)[0] = 254u;
                }
                for (int y = 0; y < r.height; y++) {
                    frame.image()(r.x, r.y + y)[0] = 254u;
                    frame.image()(r.x + r.width, r.y + y)[0] = 254u;
                }
            }
            if (facesFound.size() != 0)
                autofocus.setRects(facesFound);

            autofocus.fdWait();
        }
        /* [CS478] Assignment #2
         * Above, facesFound contains the list of detected faces, for the given frame.
         * If applicable, you may pass these values to the MyAutoFocus instance.
         *
         * e.g. autofocus.setTarget(facesFound);
         * Note that MyAutoFocus currently has no setTarget method. You'd have
         * to write the appropriate interface.
         *
         * You should also only run faceDetector.detectFace(...) if it
         * is necessary (to save compute), so change "true" above to something else
         * appropriate.
         */
        // TODO TODO TODO

        /* [CS478] Assignment #1
         * You should process the incoming frame for autofocus, if necessary.
         * Your autofocus (MyAutoFocus.h) has a function called update(...).
         */

        if(autofocus.state == AUTO_FOCUS_FOCUS)
        {
            autofocus.update(frame);
            //LOG("MYFOCUS update called\n");
        }
        if(currentShot->preview.autoFocus)
        {
            currentShot->preview.evaluated.focus = (float) frame["lens.focus"];
        }
        // TODO TODO TODO

        // Update histogram data (64 bins, 4 floats per bin: value + RGB slots).
        const FCam::Histogram &histogram = frame.histogram();
        int maxBinValue = 1;
        for (int i = 0; i < 64; i++) {
            int currBinValue = histogram(i);
            maxBinValue = (currBinValue > maxBinValue) ? currBinValue : maxBinValue;
            currentShot->histogramData[i * 4] = currBinValue;
        }
        // Normalize bin heights to [0, 1] against the tallest bin.
        float norm = 1.0f / maxBinValue;
        for (int i = 0; i < 64; i++) {
            currentShot->histogramData[i * 4] *= norm;
            currentShot->histogramData[i * 4 + 1] = 0.0f;
            currentShot->histogramData[i * 4 + 2] = 0.0f;
            currentShot->histogramData[i * 4 + 3] = 0.0f;
        }

        // Update the frame buffer.
        uchar *src = (uchar *)frame.image()(0, 0);
        FCam::Tegra::Hal::SharedBuffer *captureBuffer = tdata->tripleBuffer->getBackBuffer();
        uchar *dest = (uchar *)captureBuffer->lock();

        // Note: why do we need to shuffle U and V channels? It seems to be a bug.
        memcpy(dest, src, PI_PLANE_SIZE);
        memcpy(dest + PI_U_OFFSET, src + PI_V_OFFSET, PI_PLANE_SIZE >> 2);
        memcpy(dest + PI_V_OFFSET, src + PI_U_OFFSET, PI_PLANE_SIZE >> 2);
        captureBuffer->unlock();
        tdata->tripleBuffer->swapBackBuffer();

        // Frame capture complete, copy current shot data to previous one
        pthread_mutex_lock(&tdata->currentShotLock);
        memcpy(&tdata->previousShot, &tdata->currentShot, sizeof(FCAM_SHOT_PARAMS));
        pthread_mutex_unlock(&tdata->currentShotLock);
        frameCount++;

        // Update FPS
        double time = timer.get();
        double dt = time - fpsUpdateTime;
        if (dt > FPS_UPDATE_PERIOD) {
            float fps = frameCount * (1000.0 / dt);
            fpsUpdateTime = time;
            frameCount = 0;
            tdata->captureFps = fps;
        }
    }

    // NOTE(review): the for(;;) loop above contains no break, so the
    // teardown below is unreachable as written — confirm the intended
    // shutdown path for this thread.
    tdata->javaVM->DetachCurrentThread();

    // delete instance ref
    env->DeleteGlobalRef(tdata->fcamInstanceRef);

    return 0;
}
コード例 #11
0
//Preview selected file
void QxMainWindow::preview()
{
	QListWidgetItem *pCurrentItem = m_pFileListWidget->currentItem();
	if (!pCurrentItem)
	{
		return;
	}
	QString strFileName = pCurrentItem->text();
	QFile file(strFileName);
	if (!file.open(QIODevice::ReadOnly))
	{
		return;
	}
	QByteArray rawData = file.readAll();
	file.close();

	cv::Size imageSize(640,448);	//the image used to preview some of the characters
	const unsigned uCharacterWidth = 64;	//the width of each character in the preview image
	const unsigned uCharacterHeight = 64;	//the height of each character in the preview image
	cv::Size smallerSize(54, 54);; //to guarantee a certain distance among characters, characters are presented in smaller size than they are.
	const quint32 uInterval = 5;  //this should be calculated by (characterSize.width-smallerSize.width)/2;
	quint32 uCharacterPerRow = imageSize.width / uCharacterWidth;
	quint32 uCharacterPerCol = imageSize.height / uCharacterHeight;
	cv::Mat img = 255 * cv::Mat::ones(imageSize, CV_8UC1);

	quint32 uDecodedByteNum = 0;	//As the data is read as a stream, this variable tells how many bytes are processed already
	for (quint32 i = 0; i != uCharacterPerRow; ++i)
	{
		for (quint32 j = 0; j != uCharacterPerCol; ++j)
		{
			quint32 uWidth = uchar(rawData.at(6 + uDecodedByteNum)) + quint32(uchar(rawData.at(7 + uDecodedByteNum))) * (1 << 8);
			quint32 uHeight = uchar(rawData.at(8 + uDecodedByteNum)) + quint32(uchar(rawData.at(9 + uDecodedByteNum))) * (1 << 8);
			quint32 uArcLen = uWidth > uHeight ? uWidth : uHeight;
			uDecodedByteNum += 10;

			// save data to a pre-defined white image(all pixel values are pre-defined to be 255)
			cv::Mat characterImage = 255 * cv::Mat::ones(uArcLen, uArcLen, CV_8UC1);
			quint32 uHalfPadRowNum = (uArcLen - uHeight) / 2;
			quint32 uHalfPadColNum = (uArcLen - uWidth) / 2;
			for (quint32 row = uHalfPadRowNum; row != uHeight + uHalfPadRowNum; ++row)
			{
				uchar *pRow = characterImage.ptr<uchar>(row);
				for (quint32 col = uHalfPadColNum; col != uWidth + uHalfPadColNum; ++col)
				{
					pRow[col] = uchar(rawData.at(uDecodedByteNum++));
				}
			}
			// image normalization and filling
			cv::resize(characterImage, characterImage, smallerSize);
			cv::Range rowRange(j*uCharacterWidth, (j + 1)*uCharacterWidth);
			cv::Range colRange(i*uCharacterHeight, (i + 1)*uCharacterHeight);
			cv::Mat roi = img(rowRange, colRange);
			//cv::Mat::copyTo() calls cv::Mat::create(), which breaks the original image.
			//Also, cv::Mat::clone() fails to copy the values properly somehow, thus a pixelwise copy is implemented here.
            for (int row = 0; row != smallerSize.height; ++row)
			{
				uchar *pDst = roi.ptr<uchar>(row + uInterval);
				uchar *pSrc = characterImage.ptr<uchar>(row);
                for (int col = 0; col != smallerSize.width; ++col)
				{
					pDst[col + uInterval] = pSrc[col];
				}
			}
		}
	}

	QImage previewImage(img.data, img.cols, img.rows, img.step, QImage::Format_Grayscale8);
	QPixmap pixmap = QPixmap::fromImage(previewImage);
	m_pPreviewLabel->setPixmap(pixmap);
	m_pPreviewLabel->show();
}
コード例 #12
0
// Builds the trainer's main UI: a "Positives" group (image list with
// add/delete image and add/delete section controls), a "Negatives" group
// (image list with add/delete controls), and a fixed-width preview pane.
// Also wires every button and tree-view click to its handler slot.
void ClassifierTrainer::initialize()
{
    QHBoxLayout *mainLayout = new QHBoxLayout;
    QVBoxLayout *imagesListLayout = new QVBoxLayout;

    // construct positives section
    positivesGroup = new QGroupBox("Positives", this);
    positivesProgressBar = new QProgressBar(this);
    positivesProgressBar->hide();
    QHBoxLayout *positivesControlLayout = new QHBoxLayout;
    QVBoxLayout *positivesGroupLayout = new QVBoxLayout;
    positivesTreeView = new QTreeView(positivesGroup);
    updatePositivesGroup();

    addImageToPositivesBtn = new QPushButton(QIcon(":/image_add.png"), "Add Image", this);
    delImageFromPositivesBtn = new QPushButton(QIcon(":/image_delete.png"), "Delete Image", this);
    delImageFromPositivesBtn->setShortcut(QKeySequence(tr("Ctrl+D")));
    addSelectionToPositivesBtn = new QPushButton(QIcon(":/note_add.png"), "Add Section", this);
    // NOTE(review): variable is named "...ToNegatives..." but the button is
    // placed in the positives group and deletes from positives — confirm
    // the name is just historical.
    delSelectionToNegativesBtn = new QPushButton(QIcon(":/note_delete.png"), "Delete Section", this);

    connect(addImageToPositivesBtn, SIGNAL(clicked()), this, SLOT(add_image_to_positives_slot()));
    connect(delImageFromPositivesBtn, SIGNAL(clicked()), this, SLOT(del_image_from_positives_slot()));
    connect(addSelectionToPositivesBtn, SIGNAL(clicked()), this, SLOT(add_selection_to_positives_slot()));
    connect(delSelectionToNegativesBtn, SIGNAL(clicked()), this, SLOT(del_selection_from_positives_slot()));
    connect(positivesTreeView, SIGNAL(clicked(QModelIndex)), this, SLOT(show_image_preview_for_positive_item(QModelIndex)));

    positivesControlLayout->addWidget(addImageToPositivesBtn);
    positivesControlLayout->addWidget(delImageFromPositivesBtn);
    positivesControlLayout->addWidget(addSelectionToPositivesBtn);
    positivesControlLayout->addWidget(delSelectionToNegativesBtn);

    positivesGroupLayout->addLayout(positivesControlLayout);
    positivesGroupLayout->addWidget(positivesProgressBar);
    positivesGroupLayout->addWidget(positivesTreeView);

    positivesGroup->setLayout(positivesGroupLayout);

    // construct negatives section
    negativesGroup = new QGroupBox("Negatives", this);
    negativesProgressBar = new QProgressBar(this);
    negativesProgressBar->hide();
    QHBoxLayout *negativesControlLayout = new QHBoxLayout;
    QVBoxLayout *negativesGroupLayout = new QVBoxLayout;
    negativesTreeView = new QTreeView(this);
    updateNegativesGroup();

    addImageToNegativesBtn = new QPushButton(QIcon(":/image_add.png"), "Add Image", this);
    delImageFromNegativesBtn = new QPushButton(QIcon(":/image_delete.png"), "Delete Image", this);
    delImageFromNegativesBtn->setShortcut(QKeySequence(tr("Ctrl+E")));

    connect(addImageToNegativesBtn, SIGNAL(clicked()), this, SLOT(add_image_to_negatives_slot()));
    connect(delImageFromNegativesBtn, SIGNAL(clicked()), this, SLOT(del_image_from_negatives_slot()));
    connect(negativesTreeView, SIGNAL(clicked(QModelIndex)), this, SLOT(show_image_preview_for_negative_item(QModelIndex)));

    negativesControlLayout->addWidget(addImageToNegativesBtn);
    negativesControlLayout->addWidget(delImageFromNegativesBtn);

    negativesGroupLayout->addLayout(negativesControlLayout);
    negativesGroupLayout->addWidget(negativesProgressBar);
    negativesGroupLayout->addWidget(negativesTreeView);

    negativesGroup->setLayout(negativesGroupLayout);

    // create the preview plane; start it empty (no image path selected yet)
    previewPaneGroup = new QGroupBox("Image Preview", this);
    previewPaneGroup->setFixedWidth(500);
    previewImage("");

    // set main layout
    imagesListLayout->addWidget(positivesGroup);
    imagesListLayout->addWidget(negativesGroup);

    mainLayout->addLayout(imagesListLayout);
    mainLayout->addWidget(previewPaneGroup);

    // finalize gui
    setLayout(mainLayout);
    QString title = QString("Cascade Classifier Trainer - %1").arg(projectName);
    setWindowTitle(title);
}
コード例 #13
0
// Slot: show the negative-sample image that was clicked in the negatives tree view.
// Looks up the clicked row in negativesModel and feeds its text (the image path)
// to previewImage().
void ClassifierTrainer::show_image_preview_for_negative_item(const QModelIndex &index)
{
    // QStandardItemModel::item() returns nullptr when no item exists at that
    // position (e.g. a stale or out-of-range index) — guard before dereferencing.
    QStandardItem *item = negativesModel->item(index.row());
    if (!item)
        return;

    previewImage(item->text());
}
コード例 #14
0
ファイル: mainwindow.cpp プロジェクト: 3nids/gliner
// Construct the main window: build alignment flag maps, wire up all GUI
// signal/slot connections (most edits trigger previewImage()), populate the
// combo boxes, and restore the previous session's settings via QSettings.
MainWindow::MainWindow(QWidget *parent ) :
  QMainWindow(parent),
  ui(new Ui::MainWindow),
  mImage( QPixmap() ),
  mFileDownloader( new QNetworkAccessManager ),
  mReply( 0 ),
  mRequestDelay( 0.05 )
{
  ui->setupUi(this);

  // definition of flags for alignment (display label -> Qt alignment flag)
  mHalignMap << QPair<QString,int>( "gauche", Qt::AlignLeft )
             << QPair<QString,int>( QString::fromUtf8("centré"), Qt::AlignHCenter )
             << QPair<QString,int>( "droite", Qt::AlignRight );
  mValignMap << QPair<QString,int>( "haut", Qt::AlignTop )
             << QPair<QString,int>( "milieu", Qt::AlignVCenter )
             << QPair<QString,int>( "bas", Qt::AlignBottom );

  // gui connection
  connect( ui->mActionOpen, SIGNAL(triggered()), this, SLOT(selectFile()) );
  connect( ui->mActionHelp, SIGNAL(triggered()), this, SLOT(showHelp()) );
  connect( ui->mActionAbout, SIGNAL(triggered()), this, SLOT(showAbout()) );
  connect( ui->systemComboBox, SIGNAL(activated(int)), this, SLOT(fillTable()) );
  connect( ui->markerColorButton, SIGNAL(clicked()), this, SLOT(markerColorPicker()) ); // will also reload the image
  connect( ui->lineColorButton, SIGNAL(clicked()), this, SLOT(lineColorPicker()) ); // will also reload the image
  connect( ui->labelColorButton, SIGNAL(clicked()), this, SLOT(labelColorPicker()) ); // will also reload the image
  connect( ui->labelFontButton, SIGNAL(clicked()), this, SLOT(labelFontPicker()) ); // will also reload the image
  connect( ui->destinationButton, SIGNAL(clicked()), this, SLOT(selectDestination()) );
  connect( ui->realSizeCheckBox, SIGNAL(clicked()), this, SLOT( displayImage()) );
  connect( ui->runButton, SIGNAL(clicked()), this, SLOT(run()) );
  connect( ui->stopButton, SIGNAL(clicked()), this, SLOT(stop()) );
  // auto reload image whenever a preview-relevant option changes
  connect( ui->previewCheckBox, SIGNAL(clicked()), this, SLOT(previewImage()) );
  connect( ui->pointTable, SIGNAL(itemSelectionChanged()), this, SLOT(previewImage()) );
  connect( ui->mapTypeComboBox, SIGNAL(activated(int)), this, SLOT(previewImage(int)) );
  connect( ui->formatComboBox, SIGNAL(activated(int)), this, SLOT(previewImage(int)) );
  connect( ui->imageHeightSpinBox, SIGNAL(valueChanged(int)), this, SLOT(previewImage(int)) );
  connect( ui->imageWidthSpinBox, SIGNAL(valueChanged(int)), this, SLOT(previewImage(int)) );
  connect( ui->levelSpinBox, SIGNAL(valueChanged(int)), this, SLOT(previewImage(int)) );
  connect( ui->scaleSpinBox, SIGNAL(valueChanged(int)), this, SLOT(previewImage(int)) );
  connect( ui->markerGroupBox, SIGNAL(clicked()), this, SLOT(previewImage()) );
  connect( ui->markerSizeComboBox, SIGNAL(activated(int)), this, SLOT(previewImage(int)) );
  connect( ui->lineGroupBox, SIGNAL(clicked()), this, SLOT(previewImage()) );
  connect( ui->lineWeightSpinBox, SIGNAL(valueChanged(int)), this, SLOT(previewImage(int)) );
  connect( ui->lineLengthSpinBox, SIGNAL(valueChanged(int)), this, SLOT(previewImage(int)) );
  connect( ui->writeLabelGroupBox, SIGNAL(clicked()), this, SLOT(previewImage()) );
  connect( ui->labelHalignComboBox, SIGNAL(activated(int)), this, SLOT(previewImage(int)) );
  connect( ui->labelValignComboBox, SIGNAL(activated(int)), this, SLOT(previewImage(int)) );
  connect( ui->labelMarginSpinBox, SIGNAL(valueChanged(int)), this, SLOT(previewImage(int)) );

  // prepare table
  ui->pointTable->setSelectionMode(QAbstractItemView::SingleSelection);
  ui->pointTable->setSelectionBehavior(QAbstractItemView::SelectRows);
  ui->pointTable->setColumnCount(0);
  ui->pointTable->setRowCount(0);
  ui->pointTable->horizontalHeader()->setMinimumSectionSize(15);
  ui->pointTable->verticalHeader()->setVisible(true);
  ui->pointTable->verticalHeader()->setDefaultSectionSize(25);

  // fill gui (display text, userData used when building the request)
  ui->mapTypeComboBox->addItem( "satellite", "satellite");
  ui->mapTypeComboBox->addItem( "terrain", "terrain");
  ui->mapTypeComboBox->addItem( QString::fromUtf8("carte routière"), "roadmap");
  ui->mapTypeComboBox->addItem( "hybride", "hybrid");
  ui->markerSizeComboBox->addItem( "mini", "tiny" );
  ui->markerSizeComboBox->addItem( "petit", "small" );
  ui->markerSizeComboBox->addItem( "moyen", "mid" );
  ui->markerSizeComboBox->addItem( "grand", "normal" );
  ui->formatComboBox->addItem( "png8", ".png" );
  ui->formatComboBox->addItem( "png32", ".png" );
  ui->formatComboBox->addItem( "jpg", ".jpg" );
  ui->formatComboBox->addItem( "gif", ".gif" );
  QList<QPair<QString, int> >::iterator it;
  for (it = mHalignMap.begin(); it != mHalignMap.end(); ++it)
    ui->labelHalignComboBox->addItem( it->first );
  for (it = mValignMap.begin(); it != mValignMap.end(); ++it)
    ui->labelValignComboBox->addItem( it->first );

  // finalize gui stuff
  ui->dnlProgressBar->hide();
  ui->globalProgressBar->hide();
  ui->stopButton->hide();

  // restore settings from the previous session
  QSettings settings;
  QColor markerColor = settings.value("/gliner/markerColor", QColor(Qt::blue) ).value<QColor>();
  setMarkerColorButton( markerColor );
  QColor lineColor = settings.value("/gliner/lineColor", QColor( 0, 0, 255, 100 ) ).value<QColor>();
  setLineColorButton( lineColor );
  QColor labelColor = settings.value("/gliner/labelColor", QColor(Qt::yellow) ).value<QColor>();
  setLabelColorButton( labelColor );
  QFont labelFont = settings.value("/gliner/labelFont", QFont("Helvetica", 12)).value<QFont>();
  setLabelFontButton( labelFont );
  ui->destinationPath->setText(settings.value("/gliner/destinationPath", "").toString());
  ui->mapTypeComboBox->setCurrentIndex(settings.value("/gliner/mapType", 1).toInt());
  ui->formatComboBox->setCurrentIndex(settings.value("/gliner/imageFormat", 1).toInt());
  ui->imageHeightSpinBox->setValue(settings.value("/gliner/imageHeight", 512).toInt());
  ui->imageWidthSpinBox->setValue(settings.value("/gliner/imageWidth", 512).toInt());
  ui->levelSpinBox->setValue(settings.value("/gliner/zoomLevel", 16).toInt());
  ui->scaleSpinBox->setValue(settings.value("/gliner/scale", 1).toInt());
  ui->markerGroupBox->setChecked(settings.value("/gliner/useMarker", true).toBool());
  // BUG FIX: was .toBool(), which collapsed the stored combo index to 0/1 and
  // turned the default of 2 into 1 — a combo index must be read as an int.
  ui->markerSizeComboBox->setCurrentIndex(settings.value("/gliner/markerSize", 2).toInt());
  ui->lineGroupBox->setChecked(settings.value("/gliner/useLine", true).toBool());
  ui->lineWeightSpinBox->setValue(settings.value("/gliner/lineWeight", 5).toInt());
  ui->lineLengthSpinBox->setValue(settings.value("/gliner/lineLength", 7).toInt());
  ui->writeLabelGroupBox->setChecked(settings.value("/gliner/placeText", true).toBool());
  ui->labelHalignComboBox->setCurrentIndex(settings.value("/gliner/labelHalign", 1).toInt());
  ui->labelValignComboBox->setCurrentIndex(settings.value("/gliner/labelValign", 1).toInt());
  ui->labelMarginSpinBox->setValue(settings.value("/gliner/labelMargin", 10).toInt());
}
コード例 #15
0
ファイル: stagepreview.cpp プロジェクト: Voidious/BerryBots
// Load the named stage, render a preview image of it, and populate the preview
// window (positioned at x,y) with the rendered bitmap, stage metadata (size,
// walls, zones, ships, ...) and the stage's description text.
void StagePreview::showPreview(sf::RenderWindow *window, const char *stageName,
                               int x, int y) {
  if (stageName_ != 0) {
    delete[] stageName_;  // BUG FIX: allocated with new char[], must use delete[]
  }
  stageName_ = new char[strlen(stageName) + 1];
  strcpy(stageName_, stageName);

  SetPosition(wxPoint(x, y));
  infoSizer_->Clear(true);
  descSizer_->Clear(true);
  
  BerryBotsEngine *engine = new BerryBotsEngine(0, fileManager_, 0);
  Stage *stage = engine->getStage();
  try {
    engine->initStage(getStagesDir().c_str(), stageName, getCacheDir().c_str());
  } catch (EngineException *e) {
    // Stage failed to load: report and bail out without touching the preview.
    wxMessageDialog errorMessage(NULL, e->what(), "Preview failure",
                                 wxOK | wxICON_EXCLAMATION);
    errorMessage.ShowModal();
    delete engine;
    delete e;
    return;
  }
  SetTitle(wxString::Format(wxT("%s"), stage->getName()));

  unsigned int targetWidth;
  unsigned int targetHeight;
  char *previewFilename =
      savePreviewImage(window, engine, targetWidth, targetHeight);
  wxImage previewImage(previewFilename);
  // BUG FIX: scalar delete on a char* buffer — assumes savePreviewImage()
  // allocates with new char[] like the other buffers here; verify in its impl.
  delete[] previewFilename;

  // On HiDPI displays the rendered image is larger than the logical size.
  double backingScale = getBackingScaleFactor();
  if (backingScale > 1) {
    targetWidth /= backingScale;
    targetHeight /= backingScale;
  }
  visualPreview_->SetMinSize(wxSize(targetWidth, targetHeight));
  visualPreview_->SetMaxSize(wxSize(targetWidth, targetHeight));

#ifdef __WXOSX__
  int padding = 4;
#else
  int padding = 8;
#endif

  // Two-column grid of "label: value" stage facts.
  wxSizer *infoGrid = new wxFlexGridSizer(2, 0, padding);
  addInfo(infoGrid, "Name:", stage->getName());
  addInfo(infoGrid, "Size:",
      wxString::Format(wxT("%i x %i"), stage->getWidth(), stage->getHeight()));
  if (engine->getTeamSize() > 1) {
    addInfo(infoGrid, "Team size:", engine->getTeamSize());
  }
  addInfo(infoGrid, "Walls:", (stage->getWallCount() - 4));  // exclude the 4 border walls
  addInfo(infoGrid, "Zones:", stage->getZoneCount());
  addInfo(infoGrid, "Starts:", stage->getStartCount());
  int numStageShips = stage->getStageShipCount();
  if (numStageShips > 0) {
    // Collapse duplicate ship names into "name xN" entries; duplicates are
    // nulled out in the (local) stageShips array as they are counted.
    char **stageShips = stage->getStageShips();
    for (int x = 0; x < numStageShips; x++) {
      const char *shipName = stageShips[x];
      if (shipName != 0) {
        int count = 1;
        for (int y = x + 1; y < numStageShips; y++) {
          const char *shipName2 = stageShips[y];
          if (shipName2 != 0 && strcmp(shipName, shipName2) == 0) {
            count++;
            stageShips[y] = 0;
          }
        }
        wxString wxShipName = (count == 1) ? wxString(stageShips[x])
            : wxString::Format(wxT("%s x%i"), shipName, count);
        addInfo(infoGrid, (x == 0 ? "Ships:" : ""), wxShipName);
      }
    }
  }
  infoSizer_->Add(infoGrid);

  char *description = fileManager_->getStageDescription(
      getStagesDir().c_str(), stageName, getCacheDir().c_str());
  if (description == 0) {
    std::string descstr("<No description>");
    description = new char[descstr.length() + 1];
    strcpy(description, descstr.c_str());
  }
  wxStaticText *descCtrl = new wxStaticText(mainPanel_, wxID_ANY, description);
  descSizer_->Add(descCtrl);
  // BUG FIX: the fallback branch allocates with new char[], so delete[] is
  // required; assumes getStageDescription() allocates the same way — verify.
  delete[] description;

  mainPanel_->GetSizer()->SetSizeHints(mainPanel_);
  mainPanel_->Layout();
  Fit();
  mainPanel_->SetFocus();

  wxBitmap bitmap;
#ifdef __WINDOWS__
  bitmap = wxBitmap(previewImage);
#else
  // Draw at logical scale so HiDPI previews are not doubled in size.
  bitmap.CreateScaled(targetWidth, targetHeight, wxBITMAP_SCREEN_DEPTH,
                      backingScale);
  wxMemoryDC dc(bitmap);
  double logicalScale = (backingScale > 1) ? (1.0 / backingScale) : 1;
  dc.SetLogicalScale(logicalScale, logicalScale);
  dc.DrawBitmap(wxBitmap(previewImage), 0, 0);
#endif

  // On Windows, if we set the bitmap before the Layout/Fit stuff, we get visual
  // artifacts when paging through the stages with up/down keys.
  visualPreview_->SetBitmap(bitmap);

  delete engine;
}