void DecodeVideo::run() { int frameFinished = 0; AVFrame *pFrame = avcodec_alloc_frame(); SDL_LockMutex(mutex); avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,packet.data,packet.size); SDL_UnlockMutex(mutex); AVFrame *pFrameRGB; pFrameRGB = avcodec_alloc_frame(); avpicture_fill((AVPicture *)pFrameRGB, bufferRGB, PIX_FMT_RGB24,pCodecCtx->width, pCodecCtx->height); /* * 最後再整理一次,要使用swscale, * 只要使用 sws_getContext() 進行初始化、 * sws_scale() 進行主要轉換、 * sws_freeContext() 結束,即可完成全部動作。 */ /* * SwsContext * * sws_getContext(int srcW, int srcH, enum PixelFormat srcFormat, * int dstW, int dstH, enum PixelFormat dstFormat, * int flags, SwsFilter *srcFilter, * SwsFilter *dstFilter, const double *param) * 總共有十個參數,其中,較重要的是前七個; * 前三個參數分別代表 原视频 的寬、高及PixelFormat; * 四到六個參數分別代表 目标视频 的寬、高及PixelFormat; * 第七個參數則代表要使用哪種scale的方法;此參數可用的方法可在 libswscale/swscale.h 內找到。 * * * 个人建议,如果对图像的缩放,要追求高效,比如说是视频图像的处理,在不明确是放大还是缩小时, * 直接使用 SWS_FAST_BILINEAR 算法即可。 * 如果明确是要缩小并显示,建议使用Point算法,如果是明确要放大并显示,其实使用CImage的Strech更高效。 * 当然,如果不计速度追求画面质量。在上面的算法中,选择帧率最低的那个即可,画面效果一般是最好的。 * * 最後三個參數,如無使用,可以都填上NULL。 */ SwsContext *convert_ctx = sws_getContext(width,height,pix_fmt, width,height,PIX_FMT_RGB24, SWS_BICUBIC, // SWS_FAST_BILINEAR or SWS_BICUBIC NULL,NULL,NULL); /* * int * sws_scale(SwsContext *c, * uint8_t* src[], * int srcStride[], * int srcSliceY, * int srcSliceH, * uint8_t* dst[], * int dstStride[]); * 總共有七個參數; * 第一個參數即是由 sws_getContext 所取得的參數。 * * 第二個 src 及第六個 dst 分別指向input 和 output 的 buffer。 * 第三個 srcStride 及第七個 dstStride 分別指向 input 及 output 的 stride * * 如果不知道什麼是 stride,姑且可以先把它看成是每一列的 byte 數。 * 第四個 srcSliceY,就註解的意思來看,是指第一列要處理的位置;這裡我是從頭處理,所以直接填0。 * 想知道更詳細說明的人,可以參考 swscale.h 的註解。 * 第五個srcSliceH指的是 source slice 的高度。 */ sws_scale(convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, height, pFrameRGB->data, pFrameRGB->linesize); //! 
这里正式获取视频帧 QImage tmpImage((uchar *)bufferRGB,width,height,QImage::Format_RGB888); emit readOneFrame(QPixmap::fromImage(tmpImage),width,height); av_free(pFrameRGB); sws_freeContext(convert_ctx); av_free_packet(&packet); }
void Image::scale(int sizeX, int sizeY) { Image tmpImage(sizeX, sizeY); int ip, jp,m,n; float x, y, dx, dy; int i, j; for (ip = 0; ip < sizeY; ip++) { for (jp = 0; jp < sizeX; jp++) { x = (float)jp * (float)width / (float)sizeX; dx = x - floor(x); j = (int)floor(x); y = (float)ip * (float)height / (float)sizeY; dy = y - floor(y); i = (int)floor(y); float res = 0; for (m = -1; m <= 2; m++) { for (n = -1; n <= 2; n++) { unsigned char val; int yy = i + m; int xx = j + n; //xx = xx < 0 ? 0 : xx; if (xx < 0) xx = 0; xx = xx >= width ? width - 1 : xx; yy = yy < 0 ? 0 : yy; yy = yy >= height ? height - 1 : yy; res += (*this)(xx, yy) * R(n - dx) * R(m - dy); } } tmpImage(jp,ip) = (unsigned char)res; } } (*this) = tmpImage; }
void StandbyState::transformSelection(Editor* editor, MouseMessage* msg, HandleType handle)
{
  try {
    Document* document = editor->document();
    EditorCustomizationDelegate* delegate = editor->getCustomizationDelegate();

    // Grab the selected pixels and start a pixels-movement transaction.
    base::UniquePtr<Image> maskImage(new_image_from_mask(editor->getSite()));
    gfx::Point maskOrigin = document->mask()->bounds().getOrigin();
    int fullOpacity = 255;
    PixelsMovementPtr movement(
      new PixelsMovement(UIContext::instance(), editor->getSite(),
                         maskImage, maskOrigin, fullOpacity, "Transformation"));

    // If the Ctrl key is pressed start dragging a copy of the selection
    bool dragCopy = delegate && delegate->isCopySelectionKeyPressed();
    if (dragCopy)
      movement->copyMask();
    else
      movement->cutMask();

    editor->setState(EditorStatePtr(new MovingPixelsState(editor, msg, movement, handle)));
  }
  catch (const LockedDocumentException&) {
    // Another editor holds the document lock.
    // TODO steal the PixelsMovement of the other editor and use it for this one.
    StatusBar::instance()->showTip(1000, "The sprite is locked in other editor");
    ui::set_mouse_cursor(kForbiddenCursor);
  }
  catch (const std::bad_alloc&) {
    StatusBar::instance()->showTip(1000, "Not enough memory to transform the selection");
    ui::set_mouse_cursor(kForbiddenCursor);
  }
}
void StandbyState::transformSelection(Editor* editor, MouseMessage* msg, HandleType handle)
{
  try {
    Document* document = editor->document();
    EditorCustomizationDelegate* delegate = editor->getCustomizationDelegate();

    // Copy the masked pixels into a temporary image for the transformation.
    base::UniquePtr<Image> maskImage(NewImageFromMask(editor->getDocumentLocation()));
    int maskX = document->mask()->bounds().x;
    int maskY = document->mask()->bounds().y;
    int fullOpacity = 255;
    Sprite* sprite = editor->sprite();
    Layer* layer = editor->layer();
    PixelsMovementPtr movement(
      new PixelsMovement(UIContext::instance(), document, sprite, layer,
                         maskImage, maskX, maskY, fullOpacity, "Transformation"));

    // If the Ctrl key is pressed start dragging a copy of the selection
    bool dragCopy = delegate && delegate->isCopySelectionKeyPressed();
    if (dragCopy)
      movement->copyMask();
    else
      movement->cutMask();

    editor->setState(EditorStatePtr(new MovingPixelsState(editor, msg, movement, handle)));
  }
  catch (const LockedDocumentException&) {
    // Another editor is locking the document.
    // TODO steal the PixelsMovement of the other editor and use it for this one.
  }
}
void FrameObject::loadImage(int frameIdx)
{
    // Load frame `frameIdx` from the source into the pixmap cache (keyed by
    // source name + frame index), converting YUV input to RGB888 on the way.
    // TODO:
    // this method gets called way too many times even
    // if just a single parameter was changed
    if (frameIdx==INT_INVALID || frameIdx >= numFrames())
    {
        p_displayImage = QPixmap();
        return;
    }

    if (p_source == NULL)
        return;

    // check if we have this frame index in our cache already
    CacheIdx cIdx(p_source->getName(), frameIdx);
    QPixmap* cachedFrame = frameCache.object(cIdx);
    if(cachedFrame == NULL)    // load the corresponding frame from yuv file into the frame buffer
    {
        // add new QPixmap to cache and use its data buffer
        // NOTE(review): if the conversion below fails, this QPixmap is never
        // inserted into frameCache and leaks on the early return — consider
        // deleting it on that path.
        cachedFrame = new QPixmap();

        if (p_source->pixelFormat() != YUVC_24RGBPixelFormat)
        {
            // read YUV444 frame from file - 16 bit LE words
            p_source->getOneFrame(&p_tmpBufferYUV444, frameIdx);

            // if requested, do some YUV math
            if( doApplyYUVMath() )
                applyYUVMath(&p_tmpBufferYUV444, p_width, p_height, p_source->pixelFormat());

            // convert from YUV444 (planar) - 16 bit words to RGB888 (interleaved) color format (in place)
            convertYUV2RGB(&p_tmpBufferYUV444, &p_PixmapConversionBuffer, YUVC_24RGBPixelFormat, p_source->pixelFormat());
        }
        else
        {
            // read RGB24 frame from file
            p_source->getOneFrame(&p_PixmapConversionBuffer, frameIdx);
        }

        if (p_PixmapConversionBuffer.size() == 0)
        {
            // Conversion failed. This can happen for example when the pixel format could not be determined.
            QString pixelFmtName = p_source->pixelFormatList()[p_source->pixelFormat()].name();
            QString errTxt = "Error converting image from pixel format type " + pixelFmtName + ".";
            setInfo(errTxt, true);
            p_displayImage = QPixmap();
            return;
        }

        // add this frame into our cache, use MBytes as cost
        int sizeInMB = p_PixmapConversionBuffer.size() >> 20;

        // Convert the image in p_PixmapConversionBuffer to a QPixmap
        // (the QImage only wraps the buffer; convertFromImage() copies it).
        QImage tmpImage((unsigned char*)p_PixmapConversionBuffer.data(),p_width,p_height,QImage::Format_RGB888);
        //QImage tmpImage((unsigned char*)p_PixmapConversionBuffer.data(),p_width,p_height,QImage::Format_RGB30);
        cachedFrame->convertFromImage(tmpImage);

        frameCache.insert(cIdx, cachedFrame, sizeInMB);
    }
void blurImage(const QImage &source, QImage &dest, int r) { /* Blur in two steps. First horizontal and then vertical. Todo: search * for more optimization. */ QImage tmpImage(source.size(), QImage::Format_ARGB32); blurImageHorizontal(source, tmpImage, r); blurImageVertical(tmpImage, dest, r); }
void TilesetEditor::changeSelectedTile (QPersistentModelIndex newTile)
{
    // Nothing to do unless a tile slot is currently selected.
    if (tileSelectedId < 0 || tileSelectedId >= tiles.size())
        return;

    // Only accept a valid index that actually points at a project tile.
    if (!newTile.isValid() || m_projectWidget->typeOfIndex(newTile) != ProjectWidget::Tile)
        return;

    // Replace the selected slot's image and remember the linked tile id.
    QImage tileImage(newTile.data(ProjectWidget::TileRole).value<QImage>());
    tiles[tileSelectedId]->setPixmap(QPixmap::fromImage(tileImage));
    tilesId[tileSelectedId] = newTile.data(ProjectWidget::IdRole).toInt();
}
void ImagePropertyDialog::onFilePathChanged(const QString &path) { QImage tmpImage(path); if(tmpImage.isNull()){ enableControls(false, tmpImage); }else{ enableControls(true, tmpImage); } }
void run(cv::Mat& image,cv::Mat& outImage,cv::Mat& outMinTrace,cv::Mat& outDeletedLine) { cv::Mat image_gray(image.rows,image.cols,CV_8U,cv::Scalar(0)); cv::cvtColor(image,image_gray,CV_BGR2GRAY); //彩色图像转换为灰度图像 cv::Mat gradiant_H(image.rows,image.cols,CV_32F,cv::Scalar(0));//水平梯度矩阵 cv::Mat gradiant_V(image.rows,image.cols,CV_32F,cv::Scalar(0));//垂直梯度矩阵 cv::Mat kernel_H = (cv::Mat_<float>(3,3) << 0, 0, 0, 0, 1, -1, 0, 0, 0); //求水平梯度所使用的卷积核(赋初始值) cv::Mat kernel_V = (cv::Mat_<float>(3,3) << 0, 0, 0, 0, 1, 0, 0, -1, 0); //求垂直梯度所使用的卷积核(赋初始值) cv::filter2D(image_gray,gradiant_H,gradiant_H.depth(),kernel_H); cv::filter2D(image_gray,gradiant_V,gradiant_V.depth(),kernel_V); cv::Mat gradMag_mat(image.rows,image.rows,CV_32F,cv::Scalar(0)); cv::add(cv::abs(gradiant_H),cv::abs(gradiant_V),gradMag_mat);//水平与垂直滤波结果的绝对值相加,可以得到近似梯度大小 ////如果要显示梯度大小这个图,因为gradMag_mat深度是CV_32F,所以需要先转换为CV_8U //cv::Mat testMat; //gradMag_mat.convertTo(testMat,CV_8U,1,0); //cv::imshow("Image Show Window2",testMat); //计算能量线 cv::Mat energyMat(image.rows,image.cols,CV_32F,cv::Scalar(0));//累计能量矩阵 cv::Mat traceMat(image.rows,image.cols,CV_32F,cv::Scalar(0));//能量最小轨迹矩阵 calculateEnergy(gradMag_mat,energyMat,traceMat); //找出最小能量线 cv::Mat minTrace(image.rows,1,CV_32F,cv::Scalar(0));//能量最小轨迹矩阵中的最小的一条的轨迹 getMinEnergyTrace(energyMat,traceMat,minTrace); //显示最小能量线 cv::Mat tmpImage(image.rows,image.cols,image.type()); image.copyTo(tmpImage); for (int i = 0;i < image.rows;i++) { int k = minTrace.at<float>(i,0); tmpImage.at<cv::Vec3b>(i,k)[0] = 0; tmpImage.at<cv::Vec3b>(i,k)[1] = 0; tmpImage.at<cv::Vec3b>(i,k)[2] = 255; } cv::imshow("Image Show Window (缩小)",tmpImage); //删除一列 cv::Mat image2(image.rows,image.cols-1,image.type()); cv::Mat beDeletedLine(image.rows,1,CV_8UC3);//记录被删掉的那一列的值 delOneCol(image,image2,minTrace,beDeletedLine); cv::imshow("Image Show Window",image2); image2.copyTo(outImage); minTrace.copyTo(outMinTrace); beDeletedLine.copyTo(outDeletedLine); }
void TilesetEditor::tileModified(const QPersistentModelIndex &mTile) { if (m_projectWidget->projectOfIndex(mTile) == m_projectWidget->projectOfIndex(m_tilesetIndex)) { int mId = mTile.data(ProjectWidget::IdRole).toInt(); int i=0; while (i<tilesId.size() && tilesId.at(i) != mId) i++; if (i<tilesId.size() && tilesId.at(i) == mId) { QImage tmpImage(mTile.data(ProjectWidget::TileRole).value<QImage>()); if (tmpImage.size() == QSize(wm_tilesWidth->value(), wm_tilesHeight->value())) tiles.at(i)->setPixmap(QPixmap::fromImage(tmpImage)); else { tilesId[i] = -1; tiles.at(i)->clearTile(); } } } }
void StandbyState::transformSelection(Editor* editor, MouseMessage* msg, HandleType handle) { Document* document = editor->document(); for (auto docView : UIContext::instance()->getAllDocumentViews(document)) { if (docView->editor()->isMovingPixels()) { // TODO Transfer moving pixels state to this editor docView->editor()->dropMovingPixels(); } } try { // Clear brush preview, as the extra cel will be replaced with the // transformed image. editor->brushPreview().hide(); EditorCustomizationDelegate* customization = editor->getCustomizationDelegate(); base::UniquePtr<Image> tmpImage(new_image_from_mask(editor->getSite())); PixelsMovementPtr pixelsMovement( new PixelsMovement(UIContext::instance(), editor->getSite(), tmpImage, document->mask(), "Transformation")); // If the Ctrl key is pressed start dragging a copy of the selection if ((customization) && int(customization->getPressedKeyAction(KeyContext::TranslatingSelection) & KeyAction::CopySelection)) pixelsMovement->copyMask(); else pixelsMovement->cutMask(); editor->setState(EditorStatePtr(new MovingPixelsState(editor, msg, pixelsMovement, handle))); } catch (const LockedDocumentException&) { // Other editor is locking the document. // TODO steal the PixelsMovement of the other editor and use it for this one. StatusBar::instance()->showTip(1000, "The sprite is locked in other editor"); editor->showMouseCursor(kForbiddenCursor); } catch (const std::bad_alloc&) { StatusBar::instance()->showTip(1000, "Not enough memory to transform the selection"); editor->showMouseCursor(kForbiddenCursor); } }
void egami::ImageMono::resize(const ivec2& _size, const ivec2& _startPos) { if (_size == m_size) { // same size == > nothing to do ... return; } // grow size : egami::ImageMono tmpImage(*this); m_size=_size; uint8_t tmpBg(0); m_data.resize(m_size.x()*m_size.y(), tmpBg); for (int32_t jjj=0; jjj<m_size.y(); jjj++) { for (int32_t iii=0; iii<m_size.y(); iii++) { ivec2 tmppos(iii,jjj); set(tmppos, tmpImage.get(tmppos)); } } }
void StatisticsObject::loadImage(int frameIdx) { if (frameIdx==INT_INVALID || frameIdx >= numFrames()) { p_displayImage = QPixmap(); return; } // create empty image QImage tmpImage(internalScaleFactor()*width(), internalScaleFactor()*height(), QImage::Format_ARGB32); tmpImage.fill(qRgba(0, 0, 0, 0)); // clear with transparent color p_displayImage.convertFromImage(tmpImage); // draw statistics drawStatisticsImage(frameIdx); p_lastIdx = frameIdx; }
void System3d::setWebcamImage(sensor_msgs::Image msg)
{
    // Convert the ROS image message into an OpenCV BGR8 image.
    cv_bridge::CvImagePtr cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    //webcamImageTmp = cv_ptr->image;

    // Transpose followed by a horizontal flip rotates the frame 90 degrees.
    Mat transposed, rotated;
    transpose(cv_ptr->image, transposed);
    flip(transposed, rotated, 1);

    Size imgSize(8*IMG_HEIGHT, 8*IMG_WIDTH);
    //Rect area( imgSize.width, 0, imgSize.height);
    int rowStart = 50;
    int colStart = 0;
    Range rowRange(rowStart, rowStart + imgSize.height);
    Range colRange(colStart, colStart + imgSize.width);
    // Keep the cropped region of interest (a view into `rotated`).
    webcamImageTmp = rotated(rowRange, colRange);
}
void TilesetEditor::refresh()
{
    // Rebuild the whole tile grid from the tileset model index: clears the
    // scene, re-reads tile ids and per-tile properties from the serialized
    // model data, and re-links tile images from the project.
    if (m_tilesetIndex != QPersistentModelIndex() && m_tilesetIndex.isValid())
    {
        // Drop all existing tile widgets and start from a clean scene.
        for (int i=0; i<tiles.size(); i++)
            delete tiles.at(i);
        tiles.clear();
        tilesId.clear();
        m_scene->clear();
        drawGrid();

        // Extract the list of tiles of the tileset:
        QByteArray data = m_tilesetIndex.data(ProjectWidget::TilesetRole).toByteArray();
        QDataStream stream(&data, QIODevice::ReadOnly);
        QVector<int> tmpTilesId;
        stream >> tmpTilesId;

        // ... and the per-tile property bytes.
        QByteArray pData = m_tilesetIndex.data(ProjectWidget::TilesetPropertiesRole).toByteArray();
        QDataStream pStream(&pData, QIODevice::ReadOnly);
        QVector<unsigned char> tmpProperties;
        pStream >> tmpProperties;

        QPersistentModelIndex projectIndex = m_projectWidget->projectOfIndex(m_tilesetIndex);
        for (int i=0; i<tmpTilesId.size(); i++)
        {
            addNewTile(tmpTilesId.at(i));
            // Bit 0 of the property byte flags a blocking tile.
            if ((tmpProperties.at(i) & 0x01) == 0x01)
                tiles.at(i)->setBlocking(true);
            else
                tiles.at(i)->setBlocking(false);
            // If the tile is linked to a project tile, replace its image with
            // the one from the corresponding project tile.
            if (tilesId.at(i) >= 0)
            {
                QPersistentModelIndex tileIndex = m_projectWidget->itemFromTileId(tilesId.at(i), projectIndex);
                if (tileIndex != QPersistentModelIndex())
                {
                    QImage tmpImage(tileIndex.data(ProjectWidget::TileRole).value<QImage>());
                    // Only accept images matching the configured tile size;
                    // otherwise unlink the slot.
                    if (tmpImage.size() == QSize(wm_tilesWidth->value(), wm_tilesHeight->value()))
                        tiles.at(i)->setPixmap(QPixmap::fromImage(tmpImage));
                    else
                    {
                        tilesId[i] = -1;
                    }
                }
            }
        }
        //wm_view->setFixedHeight(((tiles.size()-1)/6+1)*oldTilesHeight*2+10);
        wm_view->setFixedWidth(oldTilesWidth*6*2+26);
        // Scene is laid out 6 tiles per row.
        m_scene->setSceneRect(0, 0, oldTilesWidth*6, ((tiles.size()-1)/6+1)*oldTilesHeight);
    }
}
void DecodeVideo::run() { int frameFinished = 0; AVFrame *pFrame = avcodec_alloc_frame(); SDL_LockMutex(mutex); avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,packet.data,packet.size); SDL_UnlockMutex(mutex); AVFrame *pFrameRGB; pFrameRGB = avcodec_alloc_frame(); avpicture_fill((AVPicture *)pFrameRGB, bufferRGB, PIX_FMT_RGB24,pCodecCtx->width, pCodecCtx->height); SwsContext *convert_ctx = sws_getContext(width,height,pix_fmt,width,height,PIX_FMT_RGB24,SWS_BICUBIC, NULL,NULL,NULL); sws_scale(convert_ctx,(const uint8_t* const*)pFrame->data,pFrame->linesize,0,height,pFrameRGB->data,pFrameRGB->linesize); QImage tmpImage((uchar *)bufferRGB,width,height,QImage::Format_RGB888); QImage image = tmpImage.copy(); av_free(pFrameRGB); sws_freeContext(convert_ctx); emit readOneFrame(image); av_free_packet(&packet); }
void recoverOneLine(cv::Mat& inImage,cv::Mat&inTrace,cv::Mat& inDeletedLine,cv::Mat& outImage) { cv::Mat recorvedImage(inImage.rows,inImage.cols+1,CV_8UC3); for (int i = 0; i < inImage.rows; i++) { int k = inTrace.at<float>(i); for (int j = 0; j < k; j++) { recorvedImage.at<cv::Vec3b>(i,j)[0] = inImage.at<cv::Vec3b>(i,j)[0]; recorvedImage.at<cv::Vec3b>(i,j)[1] = inImage.at<cv::Vec3b>(i,j)[1]; recorvedImage.at<cv::Vec3b>(i,j)[2] = inImage.at<cv::Vec3b>(i,j)[2]; } recorvedImage.at<cv::Vec3b>(i,k)[0] = inDeletedLine.at<cv::Vec3b>(i,0)[0]; recorvedImage.at<cv::Vec3b>(i,k)[1] = inDeletedLine.at<cv::Vec3b>(i,0)[1]; recorvedImage.at<cv::Vec3b>(i,k)[2] = inDeletedLine.at<cv::Vec3b>(i,0)[2]; for (int j = k + 1;j < inImage.cols + 1; j++) { recorvedImage.at<cv::Vec3b>(i,j)[0] = inImage.at<cv::Vec3b>(i,j-1)[0]; recorvedImage.at<cv::Vec3b>(i,j)[1] = inImage.at<cv::Vec3b>(i,j-1)[1]; recorvedImage.at<cv::Vec3b>(i,j)[2] = inImage.at<cv::Vec3b>(i,j-1)[2]; } } //显示恢复的轨迹 cv::Mat tmpImage(recorvedImage.rows,recorvedImage.cols,recorvedImage.type()); recorvedImage.copyTo(tmpImage); for (int i = 0;i < tmpImage.rows;i++) { int k = inTrace.at<float>(i,0); tmpImage.at<cv::Vec3b>(i,k)[0] = 0; tmpImage.at<cv::Vec3b>(i,k)[1] = 255; tmpImage.at<cv::Vec3b>(i,k)[2] = 0; } cv::imshow("Image Show Window (放大)",tmpImage); recorvedImage.copyTo(outImage); }
void QgsMapHitTest::run()
{
  // Render-less "hit test": walk all visible vector layers and record, per
  // layer, the set of symbols that would be used — without drawing anything.
  // TODO: do we need this temp image?
  QImage tmpImage( mSettings.outputSize(), mSettings.outputImageFormat() );
  // 25.4 converts dots-per-inch to dots-per-millimeter... times 1000 = dots per meter.
  tmpImage.setDotsPerMeterX( mSettings.outputDpi() * 25.4 );
  tmpImage.setDotsPerMeterY( mSettings.outputDpi() * 25.4 );
  QPainter painter( &tmpImage );

  QgsRenderContext context = QgsRenderContext::fromMapSettings( mSettings );
  context.setPainter( &painter ); // we are not going to draw anything, but we still need a working painter

  Q_FOREACH ( const QString& layerID, mSettings.layers() )
  {
    QgsVectorLayer* vl = qobject_cast<QgsVectorLayer*>( QgsMapLayerRegistry::instance()->mapLayer( layerID ) );
    if ( !vl || !vl->rendererV2() )
      continue;

    // Layers outside their scale-visibility range get an empty symbol set.
    if ( vl->hasScaleBasedVisibility() && ( mSettings.scale() < vl->minimumScale() || mSettings.scale() > vl->maximumScale() ) )
    {
      mHitTest[vl] = SymbolV2Set(); // no symbols -> will not be shown
      continue;
    }

    if ( mSettings.hasCrsTransformEnabled() )
    {
      // Reproject the visible extent into the layer's CRS.
      context.setCoordinateTransform( mSettings.layerTransform( vl ) );
      context.setExtent( mSettings.outputExtentToLayerExtent( vl, mSettings.visibleExtent() ) );
    }

    context.expressionContext() << QgsExpressionContextUtils::layerScope( vl );
    SymbolV2Set& usedSymbols = mHitTest[vl];
    runHitTestLayer( vl, usedSymbols, context );
  }

  painter.end();
}
void GLImageDrawable::updateShadow()
{
	// Rebuild the drop-shadow texture for this drawable: render the source
	// image into a padded buffer, tint it with the shadow color, blur it, and
	// hand the result to m_shadowDrawable (drawn as a separate layer below
	// the real image).
	if(!m_shadowDrawable)
		return;

	QImage sourceImg = m_imageWithBorder.isNull() ? m_image : m_imageWithBorder;
	QSizeF originalSizeWithBorder = sourceImg.size();

	// Current view scale from the GL widget transform; small zooms are
	// treated as 1:1.
	QPointF scale = m_glw ? QPointF(m_glw->transform().m11(), m_glw->transform().m22()) : QPointF(1.,1.);
	if(scale.x() < 1.25 && scale.y() < 1.25)
		scale = QPointF(1,1);

	double radius = m_shadowBlurRadius;

	// create temporary pixmap to hold a copy of the text
	double radiusSpacing = radius;// / 1.75;// * 1.5;
	double radius2 = radius * 2;
	// double offx = 0; //fabs(m_shadowOffset.x());
	// double offy = 0; //fabs(m_shadowOffset.y());
	// Pad the buffer by the blur radius on every side so the blur has room.
	double newWidth = originalSizeWithBorder.width() + radius2 * scale.x();// blur on both sides //+ offx * scale.x();
	double newHeight = originalSizeWithBorder.height() + radius2 * scale.y();// blur on both sides //+ offy * scale.y();
	QSizeF blurSize(newWidth,newHeight);
	// blurSize.rwidth() *= scale.x();
	// blurSize.rheight() *= scale.y();
	//qDebug() << "GLImageDrawable::applyBorderAndShadow(): Blur size:"<<blurSize<<", originalSizeWithBorder:"<<originalSizeWithBorder<<", radius:"<<radius<<", radius2:"<<radius2<<", m_shadowOffset:"<<m_shadowOffset<<", scale:"<<scale;

	QImage tmpImage(blurSize.toSize(),QImage::Format_ARGB32_Premultiplied);
	memset(tmpImage.scanLine(0),0,tmpImage.byteCount()); // clear to transparent

	// render the source image into a temporary buffer for bluring
	QPainter tmpPainter(&tmpImage);
	//tmpPainter.scale(scale.x(),scale.y());

	tmpPainter.save();
	QPointF translate1(radiusSpacing, radiusSpacing);
	translate1.rx() *= scale.x();
	translate1.ry() *= scale.y();
	//qDebug() << "stage1: radiusSpacing:"<<radiusSpacing<<", m_shadowOffset:"<<m_shadowOffset<<", translate1:"<<translate1;
	//qDebug() << "GLImageDrawable::updateShadow(): translate1:"<<translate1<<", scale:"<<scale;
	tmpPainter.translate(translate1);
	tmpPainter.drawImage(0,0,sourceImg);
	tmpPainter.restore();

	// color the original image by applying a color to the copy using a
	// QPainter::CompositionMode_SourceIn operation.
	// This produces a homogeneously-colored pixmap.
	QRect imgRect = tmpImage.rect();
	tmpPainter.setCompositionMode(QPainter::CompositionMode_SourceIn);
	QColor color = m_shadowColor;
	// clamp m_shadowOpacity to 1.0 because we handle values >1.0 by repainting the blurred image over itself (m_shadowOpacity-1) times.
	color.setAlpha((int)(255.0 * (m_shadowOpacity > 1.0 ? 1.0 : m_shadowOpacity)));
	tmpPainter.fillRect(imgRect, color);
	tmpPainter.end();

	// blur the colored text
	ImageFilters::blurImage(tmpImage, (int)(radius * scale.x()));

	// Opacity > 1.0: darken the shadow by stacking the blurred image over
	// itself (times) extra passes, the last pass at the fractional opacity.
	if(m_shadowOpacity > 1.0)
	{
		QPainter painter2(&tmpImage);
		int times = (int)(m_shadowOpacity - 1.0);
		// Cap at 10 - an arbitrary cap just to prevent the user from taxing the CPU too much.
		if(times > 10)
			times = 10;
		double finalOpacity = m_shadowOpacity - ((int)m_shadowOpacity);
		if(finalOpacity < 0.001)
			finalOpacity = 1.0;
		QImage copy = tmpImage.copy();
		for(int i=0; i<times-1; i++)
			painter2.drawImage(0,0,copy);
		//qDebug() << "Overpaint feature: times:"<<times<<", finalOpacity:"<<finalOpacity;
		painter2.setOpacity(finalOpacity);
		painter2.drawImage(0,0,copy);
		painter2.setOpacity(1.0);
	}

	// {
	// 	QPainter painter2(&tmpImage);
	// 	painter2.setPen(Qt::yellow);
	//
	// 	QPointF translate1(radiusSpacing, radiusSpacing);
	// 	translate1.rx() *= scale.x();
	// 	translate1.ry() *= scale.y();
	//
	// 	painter2.translate(translate1);
	// 	painter2.drawImage(0,0,sourceImg);
	// 	painter2.drawRect(sourceImg.rect());
	// }

	// Notice: Older versions of this shadow code drew the sourceImg back on top of the shadow -
	// Since we are drawing the drop shadow as a separate texture below the real image in the
	// m_shadowDrawable, we are not going to draw the sourceImg on top now.

	//qDebug() << "GLImageDrawable::updateShadow(): shadow location:"<<point<<", size:"<<tmpImage.size()<<", rect().topLeft():"<<rect().topLeft()<<", m_shadowOffset:"<<m_shadowOffset<<", radiusSpacing:"<<radiusSpacing;

	// Text drawables are never rescaled; everything else maps the source rect
	// onto the target rect.
	bool scaleFlag = dynamic_cast<GLTextDrawable*>(this) == NULL;
	// double scale_w = scaleFlag ? fabs((double)(rect().width() - sourceImg.width())) / sourceImg.width() : 1.0;
	// double scale_h = scaleFlag ? fabs((double)(rect().height() - sourceImg.height())) / sourceImg.height() : 1.0;
	double scale_w = scaleFlag ? m_targetRect.width() / m_sourceRect.width() : 1.0;
	double scale_h = scaleFlag ? m_targetRect.height() / m_sourceRect.height() : 1.0;
	// scale_w *= 2;
	// scale_h *= 2;
	QSizeF size(((double)tmpImage.width()) * scale_w,
	            ((double)tmpImage.height()) * scale_h);
	// Place the shadow at the drawable's origin plus the shadow offset,
	// pulled back by the blur radius (to compensate for the padding above).
	QPointF point = rect().topLeft() +
		QPointF(m_shadowOffset.x() * scale_w, m_shadowOffset.y() * scale_h) -
		QPointF(m_shadowBlurRadius * scale_w,m_shadowBlurRadius * scale_h);
	//qDebug() << "GLImageDrawable::updateShadow: "<<(QObject*)this<<" m_targetRect:"<<m_targetRect.size()<<", m_sourceRect:"<<m_sourceRect.size()<<", scale:"<<scale_w<<"x"<<scale_h<<", tmpImage:"<<tmpImage.size()<<", new size:"<<size<<", point:"<<point;
	m_shadowDrawable->setRect(QRectF(point, size));
	m_shadowDrawable->setImage(tmpImage);
	//updateGL();
}
// Reference ("ground truth") Gaussian blur of an A8 mask: a straightforward
// separable convolution done in float. Slow but exact — used to validate the
// fast blur implementations. Returns false for non-A8 input or when an
// intermediate allocation size computes to zero (too big).
bool SkBlurMask::BlurGroundTruth(SkMask* dst, const SkMask& src, SkScalar provided_radius, Style style, SkIPoint* margin)
{
    if (src.fFormat != SkMask::kA8_Format) {
        return false;
    }

    float radius = SkScalarToFloat(SkScalarMul(provided_radius, kBlurRadiusFudgeFactor));
    float stddev = SkScalarToFloat(radius) /2.0f;
    float variance = stddev * stddev;

    int windowSize = SkScalarCeil(stddev*4);
    // round window size up to nearest odd number
    windowSize |= 1;

    SkAutoTMalloc<float> gaussWindow(windowSize);

    int halfWindow = windowSize >> 1;

    gaussWindow[halfWindow] = 1;

    // Fill the symmetric Gaussian window around the center tap.
    float windowSum = 1;
    for (int x = 1 ; x <= halfWindow ; ++x) {
        float gaussian = expf(-x*x / variance);
        gaussWindow[halfWindow + x] = gaussWindow[halfWindow-x] = gaussian;
        windowSum += 2*gaussian;
    }

    // leave the filter un-normalized for now; we will divide by the normalization
    // sum later;

    int pad = halfWindow;
    if (margin) {
        margin->set( pad, pad );
    }

    // The destination grows by the filter half-window on every side.
    dst->fBounds = src.fBounds;
    dst->fBounds.outset(pad, pad);

    dst->fRowBytes = dst->fBounds.width();
    dst->fFormat = SkMask::kA8_Format;
    dst->fImage = NULL;

    if (src.fImage) {

        size_t dstSize = dst->computeImageSize();
        if (0 == dstSize) {
            return false;   // too big to allocate, abort
        }

        int srcWidth = src.fBounds.width();
        int srcHeight = src.fBounds.height();
        int dstWidth = dst->fBounds.width();

        const uint8_t* srcPixels = src.fImage;
        uint8_t* dstPixels = SkMask::AllocImage(dstSize);
        SkAutoTCallVProc<uint8_t, SkMask_FreeImage> autoCall(dstPixels);

        // do the actual blur. First, make a padded copy of the source.
        // use double pad so we never have to check if we're outside anything

        int padWidth = srcWidth + 4*pad;
        int padHeight = srcHeight;
        int padSize = padWidth * padHeight;

        SkAutoTMalloc<uint8_t> padPixels(padSize);
        memset(padPixels, 0, padSize);

        for (int y = 0 ; y < srcHeight; ++y) {
            uint8_t* padptr = padPixels + y * padWidth + 2*pad;
            const uint8_t* srcptr = srcPixels + y * srcWidth;
            memcpy(padptr, srcptr, srcWidth);
        }

        // blur in X, transposing the result into a temporary floating point buffer.
        // also double-pad the intermediate result so that the second blur doesn't
        // have to do extra conditionals.

        int tmpWidth = padHeight + 4*pad;
        int tmpHeight = padWidth - 2*pad;
        int tmpSize = tmpWidth * tmpHeight;

        SkAutoTMalloc<float> tmpImage(tmpSize);
        memset(tmpImage, 0, tmpSize*sizeof(tmpImage[0]));

        for (int y = 0 ; y < padHeight ; ++y) {
            uint8_t *srcScanline = padPixels + y*padWidth;
            for (int x = pad ; x < padWidth - pad ; ++x) {
                float *outPixel = tmpImage + (x-pad)*tmpWidth + y + 2*pad; // transposed output
                uint8_t *windowCenter = srcScanline + x;
                for (int i = -pad ; i <= pad ; ++i) {
                    *outPixel += gaussWindow[pad+i]*windowCenter[i];
                }
                // Normalize by the window sum (the filter itself is unnormalized).
                *outPixel /= windowSum;
            }
        }

        // blur in Y; now filling in the actual desired destination. We have to do
        // the transpose again; these transposes guarantee that we read memory in
        // linear order.

        for (int y = 0 ; y < tmpHeight ; ++y) {
            float *srcScanline = tmpImage + y*tmpWidth;
            for (int x = pad ; x < tmpWidth - pad ; ++x) {
                float *windowCenter = srcScanline + x;
                float finalValue = 0;
                for (int i = -pad ; i <= pad ; ++i) {
                    finalValue += gaussWindow[pad+i]*windowCenter[i];
                }
                finalValue /= windowSum;
                uint8_t *outPixel = dstPixels + (x-pad)*dstWidth + y; // transposed output
                // Round to nearest and clamp into the valid A8 range [0, 255].
                int integerPixel = int(finalValue + 0.5f);
                *outPixel = SkClampMax( SkClampPos(integerPixel), 255 );
            }
        }

        dst->fImage = dstPixels;
        // if need be, alloc the "real" dst (same size as src) and copy/merge
        // the blur into it (applying the src)
        if (style == kInner_Style) {
            // now we allocate the "real" dst, mirror the size of src
            size_t srcSize = src.computeImageSize();
            if (0 == srcSize) {
                return false;   // too big to allocate, abort
            }
            dst->fImage = SkMask::AllocImage(srcSize);
            merge_src_with_blur(dst->fImage, src.fRowBytes,
                srcPixels, src.fRowBytes,
                dstPixels + pad*dst->fRowBytes + pad,
                dst->fRowBytes, srcWidth, srcHeight);
            SkMask::FreeImage(dstPixels);
        } else if (style != kNormal_Style) {
            clamp_with_orig(dstPixels + pad*dst->fRowBytes + pad,
                dst->fRowBytes, srcPixels, src.fRowBytes, srcWidth, srcHeight, style);
        }
        (void)autoCall.detach();
    }

    if (style == kInner_Style) {
        dst->fBounds = src.fBounds; // restore trimmed bounds
        dst->fRowBytes = src.fRowBytes;
    }

    return true;
}
// Extract the connected white (255) region containing pixel (x, y) via an
// iterative flood fill. Connectivity is 8-way when eightWay is true, else
// 4-way. Returns a newly allocated Region cropped to the fill's bounding box
// (caller owns it), or NULL if (x, y) is out of bounds or not white.
Region* Image::extract(unsigned int x, unsigned int y, bool eightWay)
{
    Image tmpImage(width, height);
    // NOTE(review): x and y are unsigned, so the (x < 0)/(y < 0) tests are
    // always false — harmless dead checks.
    if ((x < 0) || (y < 0) || ( x >= width) || (y >= height))
        return NULL;
    if (this->operator ()(x, y) != 255) {
        return NULL;
    }
    //if the pixel at (x, y) is white, then we can do a flood fill

    // tmpImage doubles as the visited mask (255 = visited).
    tmpImage.clear(0);
    int minX, maxX, minY, maxY;
    std::stack<Point> floodStack;
    Point p(x, y);
    minX = p.x;
    maxX = p.x;
    minY = p.y;
    maxY = p.y;
    tmpImage(p.x, p.y) = 255;
    floodStack.push(p);
    do {
        p = floodStack.top();
        floodStack.pop();
        //now push all unvisited neighbours on stack
        int i, j;
        for (i = -1; i <= 1; i++) {
            for (j = -1; j <= 1; j++) {
                // Skip neighbours outside the image (signed cast guards the
                // unsigned wrap-around at 0).
                if (((signed int)(p.x + i) < 0) || ((signed int)(p.y + j) < 0) || ( (p.x + i) >= width) || ((p.y + j) >= height)) {
                    continue;
                }
                // 4-way mode skips diagonal neighbours.
                if (!eightWay) {
                    if ((i != 0) && (j != 0)) {
                        continue;
                    }
                }
                //now add that point
                if ((tmpImage(p.x + i, p.y + j) != 255) && (this->operator ()(p.x + i, p.y + j) == 255)) {
                    //mark it as visited
                    tmpImage(p.x + i, p.y + j) = 255;
                    //and add it
                    floodStack.push(Point(p.x + i, p.y + j));
                    //now update the min and max
                    if (p.x + i < minX) minX = p.x + i;
                    if (p.x + i > maxX) maxX = p.x + i;
                    if (p.y + j < minY) minY = p.y + j;
                    if (p.y + j > maxY) maxY = p.y + j;
                }
            }
        }
    }while (!floodStack.empty());
    //now we know the flood fill is included in the rectangle delimited by minX, maxX, minY, maxY
    //This will be the region that we'll return
    Region* r = new Region(maxX - minX + 1, maxY - minY + 1, minX, minY);
    int i, j;
    // Copy the visited mask (cropped to the bounding box) into the region.
    for (j = minY; j <= maxY; j++) {
        for (i = minX; i <= maxX; i++) {
            (*r)(i - minX, j - minY) = tmpImage(i, j);
        }
    }
    return r;
}
QImage RichTextRenderer::renderText()
{
	// Render html() into an ARGB32 image (stored in m_image and emitted via
	// textRendered): lays out the text with QTextDocument, applies the
	// optional outline, then draws the optional shadow (cheap offset copy or
	// a blurred one) underneath the text.
// 	qDebug()<<itemName()<<"TextBoxWarmingThread::run(): htmlCode:"<<htmlCode;
	//qDebug() << "RichTextRenderer::renderText(): HTML:"<<html();
	//qDebug() << "RichTextRenderer::update(): Update Start...";
	//qDebug() << "RichTextRenderer::renderText(): \t in thread:"<<QThread::currentThreadId();
	if(m_updateTimer.isActive())
		m_updateTimer.stop();

	QTime renderTime;
	renderTime.start();

	// Two documents: one for the visible text, one reused for the shadow.
	QTextDocument doc;
	QTextDocument shadowDoc;

	if (Qt::mightBeRichText(html()))
	{
		doc.setHtml(html());
		shadowDoc.setHtml(html());
	}
	else
	{
		doc.setPlainText(html());
		shadowDoc.setPlainText(html());
	}

	int textWidth = m_textWidth;
	doc.setTextWidth(textWidth);
	shadowDoc.setTextWidth(textWidth);

	// Apply outline pen to the html
	QTextCursor cursor(&doc);
	cursor.select(QTextCursor::Document);

	QTextCharFormat format;

	QPen p(Qt::NoPen);
	if(outlineEnabled())
	{
		p = outlinePen();
		p.setJoinStyle(Qt::MiterJoin);
	}

	format.setTextOutline(p);
	//format.setForeground(fillEnabled() ? fillBrush() : Qt::NoBrush); //Qt::white);

	cursor.mergeCharFormat(format);

	// Setup the shadow text formatting if enabled
	if(shadowEnabled())
	{
		// Only the "cheap" (non-blurred) shadow path draws the shadow
		// document directly, so only then recolor it.
		if(shadowBlurRadius() <= 0.05)
		{
			QTextCursor cursor(&shadowDoc);
			cursor.select(QTextCursor::Document);

			QTextCharFormat format;
			format.setTextOutline(Qt::NoPen);
			format.setForeground(shadowBrush());

			cursor.mergeCharFormat(format);
		}
	}

	// Final canvas size = document + shadow offset + padding, scaled.
	QSizeF shadowSize = shadowEnabled() ? QSizeF(shadowOffsetX(),shadowOffsetY()) : QSizeF(0,0);
	QSizeF docSize = doc.size();
	QSizeF padSize(12.,12.);
	QSizeF sumSize = (docSize + shadowSize + padSize);//.toSize();

	QSizeF scaledSize = QSizeF(sumSize.width() * m_scaling.x(), sumSize.height() * m_scaling.y());
	if(m_scaling.x() != 1. || m_scaling.y() != 1.)
	{
		//qDebug() << "RichTextRenderer::renderText(): Orig size:"<<sumSize<<", scaled size:"<<scaledSize<<", scaling:"<<m_scaling;
		// Remember the unscaled size when rendering at a non-1:1 scale.
		m_rawSize = sumSize;
	}
	//qDebug() << "RichTextRenderer::update(): textWidth: "<<textWidth<<", shadowSize:"<<shadowSize<<", docSize:"<<docSize<<", sumSize:"<<sumSize;
	QImage cache(scaledSize.toSize(),QImage::Format_ARGB32); //_Premultiplied);
	memset(cache.scanLine(0),0,cache.byteCount()); // clear to transparent

	double padSizeHalfX = padSize.width() / 2;
	double padSizeHalfY = padSize.height() / 2;

	QPainter textPainter(&cache);
	textPainter.scale(m_scaling.x(), m_scaling.y());
	//textPainter.fillRect(cache.rect(),Qt::transparent);

	QAbstractTextDocumentLayout::PaintContext pCtx;

	//qDebug() << "RichTextRenderer::renderText(): shadowEnabled():"<<shadowEnabled()<<", shadowBlurRadius():"<<shadowBlurRadius();
	if(shadowEnabled())
	{
		if(shadowBlurRadius() <= 0.05)
		{
			// render a "cheap" version of the shadow using the shadow text document
			textPainter.save();

			textPainter.translate(shadowOffsetX(),shadowOffsetY());
			shadowDoc.documentLayout()->draw(&textPainter, pCtx);

			textPainter.restore();
		}
		else
		{
			double radius = shadowBlurRadius();

			// create temporary pixmap to hold a copy of the text
			QSizeF blurSize = ImageFilters::blurredSizeFor(doc.size(), (int)radius);
			QSizeF scaledBlurSize = QSize(blurSize.width() * m_scaling.x(), blurSize.height() * m_scaling.y());
			//QSize docSize = doc.size();
			//qDebug() << "RichTextRenderer::renderText(): [shadow] radius:"<<radius<<" blurSize:"<<blurSize<<", scaling:"<<m_scaling<<", scaledBlurSize:"<<scaledBlurSize;
			//qDebug() << "Blur size:"<<blurSize<<", doc:"<<doc.size()<<", radius:"<<radius;
			QImage tmpImage(scaledBlurSize.toSize(),QImage::Format_ARGB32_Premultiplied);
			memset(tmpImage.scanLine(0),0,tmpImage.byteCount());

			// render the text
			QPainter tmpPainter(&tmpImage);
			tmpPainter.scale(m_scaling.x(), m_scaling.y());

			tmpPainter.save();
			tmpPainter.translate(radius + padSizeHalfX, radius + padSizeHalfY);
			doc.documentLayout()->draw(&tmpPainter, pCtx);
			tmpPainter.restore();

			// blacken the text by applying a color to the copy using a QPainter::CompositionMode_DestinationIn operation.
			// This produces a homogeneously-colored pixmap.
			QRect rect = tmpImage.rect();
			tmpPainter.setCompositionMode(QPainter::CompositionMode_SourceIn);
			tmpPainter.fillRect(rect, shadowBrush().color());
			tmpPainter.end();

			// blur the colored text
			ImageFilters::blurImage(tmpImage, (int)radius);

			// render the blurred text at an offset into the cache
			textPainter.save();
			textPainter.translate(shadowOffsetX() - radius, shadowOffsetY() - radius);
			textPainter.drawImage(0, 0, tmpImage);
			textPainter.restore();
		}
	}

	// Finally draw the real text on top, inset by half the padding.
	textPainter.translate(padSizeHalfX, padSizeHalfY);
	doc.documentLayout()->draw(&textPainter, pCtx);

	textPainter.end();

	m_image = cache.convertToFormat(QImage::Format_ARGB32);
	emit textRendered(m_image);

	//qDebug() << "RichTextRenderer::renderText(): Render finished, elapsed:"<<renderTime.elapsed()<<"ms";
	//m_image.save("debug-text.png");
	return m_image;
}
//! creates/loads an animated mesh from the file. //! \return Pointer to the created mesh. Returns 0 if loading failed. //! If you no longer need the mesh, you should call IAnimatedMesh::drop(). //! See IReferenceCounted::drop() for more information. IAnimatedMesh* COCTLoader::createMesh(io::IReadFile* file) { if (!file) return 0; octHeader header; file->read(&header, sizeof(octHeader)); octVert * verts = new octVert[header.numVerts]; octFace * faces = new octFace[header.numFaces]; octTexture * textures = new octTexture[header.numTextures]; octLightmap * lightmaps = new octLightmap[header.numLightmaps]; octLight * lights = new octLight[header.numLights]; file->read(verts, sizeof(octVert) * header.numVerts); file->read(faces, sizeof(octFace) * header.numFaces); //TODO: Make sure id is in the legal range for Textures and Lightmaps u32 i; for (i = 0; i < header.numTextures; i++) { octTexture t; file->read(&t, sizeof(octTexture)); textures[t.id] = t; } for (i = 0; i < header.numLightmaps; i++) { octLightmap t; file->read(&t, sizeof(octLightmap)); lightmaps[t.id] = t; } file->read(lights, sizeof(octLight) * header.numLights); //TODO: Now read in my extended OCT header (flexible lightmaps and vertex normals) // This is the method Nikolaus Gebhardt used in the Q3 loader -- create a // meshbuffer for every possible combination of lightmap and texture including // a "null" texture and "null" lightmap. Ones that end up with nothing in them // will be removed later. 
SMesh * Mesh = new SMesh(); for (i=0; i<(header.numTextures+1) * (header.numLightmaps+1); ++i) { scene::SMeshBufferLightMap* buffer = new scene::SMeshBufferLightMap(); buffer->Material.MaterialType = video::EMT_LIGHTMAP; buffer->Material.Lighting = false; Mesh->addMeshBuffer(buffer); buffer->drop(); } // Build the mesh buffers for (i = 0; i < header.numFaces; i++) { if (faces[i].numVerts < 3) continue; const f32* const a = verts[faces[i].firstVert].pos; const f32* const b = verts[faces[i].firstVert+1].pos; const f32* const c = verts[faces[i].firstVert+2].pos; const core::vector3df normal = core::plane3df(core::vector3df(a[0],a[1],a[2]), core::vector3df(b[0],c[1],c[2]), core::vector3df(c[0],c[1],c[2])).Normal; const u32 textureID = core::min_(s32(faces[i].textureID), s32(header.numTextures - 1)) + 1; const u32 lightmapID = core::min_(s32(faces[i].lightmapID),s32(header.numLightmaps - 1)) + 1; SMeshBufferLightMap * meshBuffer = (SMeshBufferLightMap*)Mesh->getMeshBuffer(lightmapID * (header.numTextures + 1) + textureID); const u32 base = meshBuffer->Vertices.size(); // Add this face's verts u32 v; for (v = 0; v < faces[i].numVerts; ++v) { octVert * vv = &verts[faces[i].firstVert + v]; video::S3DVertex2TCoords vert; vert.Pos.set(vv->pos[0], vv->pos[1], vv->pos[2]); vert.Color = video::SColor(0,255,255,255); vert.Normal.set(normal); if (textureID == 0) { // No texture -- just a lightmap. Thus, use lightmap coords for texture 1. // (the actual texture will be swapped later) vert.TCoords.set(vv->lc[0], vv->lc[1]); } else { vert.TCoords.set(vv->tc[0], vv->tc[1]); vert.TCoords2.set(vv->lc[0], vv->lc[1]); } meshBuffer->Vertices.push_back(vert); } // Now add the indices // This weird loop turns convex polygons into triangle strips. // I do it this way instead of a simple fan because it usually looks a lot better in wireframe, for example. // High, Low u32 h = faces[i].numVerts - 1; u32 l = 0; for (v = 0; v < faces[i].numVerts - 2; ++v) { const u32 center = (v & 1)? 
h - 1: l + 1; meshBuffer->Indices.push_back(base + h); meshBuffer->Indices.push_back(base + l); meshBuffer->Indices.push_back(base + center); if (v & 1) --h; else ++l; } } // load textures core::array<video::ITexture*> tex; tex.reallocate(header.numTextures + 1); tex.push_back(0); const core::stringc relpath = FileSystem->getFileDir(file->getFileName())+"/"; for (i = 1; i < (header.numTextures + 1); i++) { core::stringc path(textures[i-1].fileName); path.replace('\\','/'); if (FileSystem->existFile(path)) tex.push_back(SceneManager->getVideoDriver()->getTexture(path)); else // try to read in the relative path of the OCT file tex.push_back(SceneManager->getVideoDriver()->getTexture( (relpath + path) )); } // prepare lightmaps core::array<video::ITexture*> lig; lig.set_used(header.numLightmaps + 1); lig[0] = 0; const u32 lightmapWidth = 128; const u32 lightmapHeight = 128; const core::dimension2d<u32> lmapsize(lightmapWidth, lightmapHeight); bool oldMipMapState = SceneManager->getVideoDriver()->getTextureCreationFlag(video::ETCF_CREATE_MIP_MAPS); SceneManager->getVideoDriver()->setTextureCreationFlag(video::ETCF_CREATE_MIP_MAPS, false); video::CImage tmpImage(video::ECF_R8G8B8, lmapsize); for (i = 1; i < (header.numLightmaps + 1); ++i) { core::stringc lightmapname = file->getFileName(); lightmapname += ".lightmap."; lightmapname += (int)i; const octLightmap* lm = &lightmaps[i-1]; for (u32 x=0; x<lightmapWidth; ++x) { for (u32 y=0; y<lightmapHeight; ++y) { tmpImage.setPixel(x, y, video::SColor(255, lm->data[x][y][2], lm->data[x][y][1], lm->data[x][y][0])); } } lig[i] = SceneManager->getVideoDriver()->addTexture(lightmapname.c_str(), &tmpImage); } SceneManager->getVideoDriver()->setTextureCreationFlag(video::ETCF_CREATE_MIP_MAPS, oldMipMapState); // Free stuff delete [] verts; delete [] faces; delete [] textures; delete [] lightmaps; delete [] lights; // attach materials for (i = 0; i < header.numLightmaps + 1; i++) { for (u32 j = 0; j < header.numTextures + 1; j++) { 
u32 mb = i * (header.numTextures + 1) + j; SMeshBufferLightMap * meshBuffer = (SMeshBufferLightMap*)Mesh->getMeshBuffer(mb); meshBuffer->Material.setTexture(0, tex[j]); meshBuffer->Material.setTexture(1, lig[i]); if (meshBuffer->Material.getTexture(0) == 0) { // This material has no texture, so we'll just show the lightmap if there is one. // We swapped the texture coordinates earlier. meshBuffer->Material.setTexture(0, meshBuffer->Material.getTexture(1)); meshBuffer->Material.setTexture(1, 0); } if (meshBuffer->Material.getTexture(1) == 0) { // If there is only one texture, it should be solid and lit. // Among other things, this way you can preview OCT lights. meshBuffer->Material.MaterialType = video::EMT_SOLID; meshBuffer->Material.Lighting = true; } } } // delete all buffers without geometry in it. i = 0; while(i < Mesh->MeshBuffers.size()) { if (Mesh->MeshBuffers[i]->getVertexCount() == 0 || Mesh->MeshBuffers[i]->getIndexCount() == 0 || Mesh->MeshBuffers[i]->getMaterial().getTexture(0) == 0) { // Meshbuffer is empty -- drop it Mesh->MeshBuffers[i]->drop(); Mesh->MeshBuffers.erase(i); } else { ++i; } } // create bounding box for (i = 0; i < Mesh->MeshBuffers.size(); ++i) { Mesh->MeshBuffers[i]->recalculateBoundingBox(); } Mesh->recalculateBoundingBox(); // Set up an animated mesh to hold the mesh SAnimatedMesh* AMesh = new SAnimatedMesh(); AMesh->Type = EAMT_OCT; AMesh->addMesh(Mesh); AMesh->recalculateBoundingBox(); Mesh->drop(); return AMesh; }
// Paints the model's drop shadow behind the text.
// Two paths: with a zero blur radius the pre-colored shadow document is
// simply drawn at the shadow offset ("cheap" shadow); otherwise the text is
// rendered to a scratch image, tinted with the shadow color, blurred, and
// composited into the target painter.
void TextBoxContent::renderShadow(QPainter *painter, QAbstractTextDocumentLayout::PaintContext *pCtx)
{
	AbstractVisualItem *model = modelItem();

	const double blurRadius = model->shadowBlurRadius();

	if(qFuzzyIsNull(blurRadius))
	{
		// Cheap shadow: the shadow text document is already colored,
		// so just draw it offset from the real text.
		painter->save();
		painter->translate(model->shadowOffsetX(), model->shadowOffsetY());
		m_shadowText->documentLayout()->draw(painter, *pCtx);
		painter->restore();
		return;
	}

	// Scratch image sized to fit the text plus room for the blur.
	const QSizeF scratchSize = ImageFilters::blurredSizeFor(model->contentsRect().size(), (int)blurRadius);
	QImage scratch(scratchSize.toSize(), QImage::Format_ARGB32_Premultiplied);
	memset(scratch.scanLine(0), 0, scratch.byteCount());

	// Render the text into the scratch image, inset by the radius so the
	// blur has space to spread on all sides.
	QPainter scratchPainter(&scratch);
	scratchPainter.save();
	scratchPainter.translate(blurRadius, blurRadius);
	m_text->documentLayout()->draw(&scratchPainter, *pCtx);
	scratchPainter.restore();

	// Tint the rendered text: CompositionMode_SourceIn keeps the text's
	// alpha but replaces its color, yielding a homogeneously-colored image.
	const QRect tintArea = scratch.rect();
	scratchPainter.setCompositionMode(QPainter::CompositionMode_SourceIn);
	scratchPainter.fillRect(tintArea, model->shadowBrush().color());
	scratchPainter.end();

	// Blur the tinted text in place.
	ImageFilters::blurImage(scratch, (int)blurRadius);

	// Composite the blurred shadow at the configured offset, compensating
	// for the radius inset applied above.
	painter->save();
	painter->translate(model->shadowOffsetX() - blurRadius,
	                   model->shadowOffsetY() - blurRadius);
	painter->drawImage(0, 0, scratch);
	painter->restore();
}
void TrafficLightDetector::brightnessDetect(const cv::Mat &input) { cv::Mat tmpImage; input.copyTo(tmpImage); /* contrast correction */ cv::Mat tmp; cvtColor(tmpImage, tmp, CV_BGR2HSV); std::vector<cv::Mat> hsv_channel; split(tmp, hsv_channel); float correction_factor = 10.0; uchar lut[256]; for (int i=0; i<256; i++) { lut[i] = 255.0 / (1 + exp(-correction_factor*(i-128)/255)); } LUT(hsv_channel[2], cv::Mat(cv::Size(256, 1), CV_8U, lut), hsv_channel[2]); merge(hsv_channel, tmp); cvtColor(tmp, tmpImage, CV_HSV2BGR); for (int i = 0; i < static_cast<int>(contexts.size()); i++) { Context context = contexts.at(i); if (context.topLeft.x > context.botRight.x) continue; /* extract region of interest from input image */ cv::Mat roi = tmpImage(cv::Rect(context.topLeft, context.botRight)); /* convert color space (BGR -> HSV) */ cv::Mat roi_HSV; cvtColor(roi, roi_HSV, CV_BGR2HSV); /* search the place where traffic signals seem to be */ cv::Mat signalMask = signalDetect_inROI(roi_HSV, input.clone(), context.lampRadius, context.topLeft); /* detect which color is dominant */ cv::Mat extracted_HSV; roi.copyTo(extracted_HSV, signalMask); // extracted_HSV.copyTo(roi); // imshow("tmpImage", tmpImage); // waitKey(5); cvtColor(extracted_HSV, extracted_HSV, CV_BGR2HSV); int red_pixNum = 0; int yellow_pixNum = 0; int green_pixNum = 0; int valid_pixNum = 0; for (int y=0; y<extracted_HSV.rows; y++) { for (int x=0; x<extracted_HSV.cols; x++) { /* extract H, V value from pixel */ double hue = Actual_Hue(extracted_HSV.at<cv::Vec3b>(y, x)[0]); uchar val = extracted_HSV.at<cv::Vec3b>(y, x)[2]; if (val == 0) { continue; // this is masked pixel } valid_pixNum++; /* search which color is actually bright */ if (IsRange(thSet.Red.Hue.lower, thSet.Red.Hue.upper, hue)) { red_pixNum++; } if (IsRange(thSet.Yellow.Hue.lower, thSet.Yellow.Hue.upper, hue)) { yellow_pixNum++; } if (IsRange(thSet.Green.Hue.lower, thSet.Green.Hue.upper, hue)) { green_pixNum++; } } } // std::cout << "(green, yellow, red) / 
valid = (" << green_pixNum << ", " << yellow_pixNum << ", " << red_pixNum << ") / " << valid_pixNum <<std::endl; bool isRed_bright; bool isYellow_bright; bool isGreen_bright; if (valid_pixNum > 0) { isRed_bright = ( ((double)red_pixNum / valid_pixNum) > 0.45) ? true : false; // detect red a little largely isYellow_bright = ( ((double)yellow_pixNum / valid_pixNum) > 0.5) ? true : false; isGreen_bright = ( ((double)green_pixNum / valid_pixNum) > 0.5) ? true : false; } else { isRed_bright = false; isYellow_bright = false; isGreen_bright = false; } int currentLightsCode = getCurrentLightsCode(isRed_bright, isYellow_bright, isGreen_bright); contexts.at(i).lightState = determineState(contexts.at(i).lightState, currentLightsCode, &(contexts.at(i).stateJudgeCount)); roi.setTo(cv::Scalar(0)); } }
//static
// Solves the Poisson equation whose Laplacian is given by rLaplaceImage_p,
// writing the reconstructed image into rSolution_p (allocated here to the
// source's dimensions, channel count and color space).
//
// Two compile-time solver backends:
//   USE_FFTW  -- per channel: forward DCT (FFTW REDFT10), divide each
//                frequency bin by the Poisson eigenvalues, zero the DC term,
//                inverse DCT (REDFT01).
//   USE_PIFFT -- per channel: copy the channel out and delegate to the
//                PixInsight FFT solver __SolvePoisson().
void GradientsBase::solveImage(imageType_t const &rLaplaceImage_p, imageType_t &rSolution_p)
{
	int const nRows=rLaplaceImage_p.Height();
	int const nCols=rLaplaceImage_p.Width();
	int const nChannels=rLaplaceImage_p.NumberOfChannels();
	imageType_t::color_space colorSpace=rLaplaceImage_p.ColorSpace();

#ifdef USE_FFTW
	// adapted from http://www.umiacs.umd.edu/~aagrawal/software.html,
	AssertColImage(rLaplaceImage_p);
	// just in case we accidentally change this, because code below believes in double...
	Assert(typeid(realType_t)==typeid(double));
	// check assumption of row major format
	Assert(rLaplaceImage_p.PixelAddress(0,0)+1==rLaplaceImage_p.PixelAddress(1,0));
	rSolution_p.AllocateData(nCols,nRows,nChannels,colorSpace);
	rSolution_p.ResetSelections();
	rSolution_p.Black();
#ifdef USE_THREADS
	// threaded version
	int const nElements=nRows*nCols;
	int const nThreads=Thread::NumberOfThreads(nElements);
	if(fftw_init_threads()==0){
		throw Error("Problem initilizing threads");
	}
	fftw_plan_with_nthreads(nThreads);
#endif
	for(int chan=0;chan<nChannels;++chan){
		// scoped timer: reports elapsed time for this channel on destruction
		TimeMessage startSolver(String("FFTW Solver, Channel ")+String(chan));
		// FIXME see if fttw_allocate gives us something...
		// fcos holds the DCT coefficients; note (nCols,nRows) argument order.
		imageType_t fcos(nCols,nRows);
#if 0
		// During experiment, the higher optimization did not give us anything except for an additional delay. May change later.
		fftw_plan pForward= fftw_plan_r2r_2d(nRows, nCols, const_cast<double *>(rLaplaceImage_p.PixelData(chan)), fcos.PixelData(), FFTW_REDFT10, FFTW_REDFT10, FFTW_MEASURE);
		fftw_plan pInverse = fftw_plan_r2r_2d(nRows, nCols, fcos.PixelData(), rSolution_p.PixelData(chan), FFTW_REDFT01, FFTW_REDFT01, FFTW_ESTIMATE);
#else
		// NOTE(review): FFTW_MEASURE planning may overwrite the in/out
		// arrays during planning (per FFTW docs), and pForward is planned
		// directly on the const input channel's data before fftw_execute.
		// Looks like this relies on planning not clobbering the input --
		// verify, or plan with FFTW_ESTIMATE / on a scratch buffer.
		fftw_plan pForward= fftw_plan_r2r_2d(nRows, nCols, const_cast<double *>(rLaplaceImage_p.PixelData(chan)), fcos.PixelData(), FFTW_REDFT10, FFTW_REDFT10, FFTW_MEASURE);
		fftw_plan pInverse = fftw_plan_r2r_2d(nRows, nCols, fcos.PixelData(), rSolution_p.PixelData(chan), FFTW_REDFT01, FFTW_REDFT01, FFTW_ESTIMATE);
#endif
		// find DCT
		fftw_execute(pForward);

		// Divide each DCT bin by the eigenvalue of the discrete Laplacian,
		// (2cos(pi*col/nCols)-2)+(2cos(pi*row/nRows)-2), to invert it in
		// the frequency domain.
		realType_t const pi=pcl::Pi();
		for(int row = 0 ; row < nRows; ++row){
			for(int col = 0 ; col < nCols; ++col){
				fcos.Pixel(col,row) /= 2*cos(pi*col/( (double) nCols)) - 2 + 2*cos(pi*row/((double) nRows)) - 2;
			}
		}
		// DC term is divided by zero above; pin it to 0 (solution is
		// determined only up to a constant offset anyway).
		fcos.Pixel(0,0)=0.0;
		// Inverse DCT
		fftw_execute(pInverse);
		fftw_destroy_plan(pForward);
		fftw_destroy_plan(pInverse);
	}
#endif
#ifdef USE_PIFFT
	// use PI FFT based solver by Carlos Milovic F.
	rLaplaceImage_p.ResetSelections();
	rSolution_p.AllocateData(nCols,nRows,nChannels,colorSpace);
	rSolution_p.ResetSelections();
	// current solver handles only one channel per run.
	for(int chan=0;chan<nChannels;++chan){
		TimeMessage startSolver(String("PIFFT Solver, Channel ")+String(chan));
		// copy the channel into a single-channel scratch image for the solver
		imageType_t tmpImage(nCols,nRows);
		rLaplaceImage_p.SelectChannel(chan);
		tmpImage.Assign(rLaplaceImage_p);
		__SolvePoisson(tmpImage);
		// move the solved channel into the result image
		rSolution_p.SelectChannel(chan);
		rSolution_p.Mov(tmpImage);
	}
#endif
}
void TextBoxWarmingThread::run() { if(!m_model) { qDebug()<<"TextBoxWarmingThread::run(): m_model is null"; return; } //qDebug()<<"TextBoxWarmingThread::run(): model ptr:"<<m_model<<", attempting to dynamic cast"; TextBoxItem * model = dynamic_cast<TextBoxItem*>((AbstractVisualItem*)m_model); //int sleepTime = (int)(((float)qrand()) / ((float)RAND_MAX) * 10000.0 + 2000.0); //qDebug()<<"TextBoxWarmingThread::run(): modelItem:"<<model->itemName();//<<": Cache redraw, sleep: "<<sleepTime; // Sleep doesnt work - if I sleep, then it seems the cache is never updated! //sleep((unsigned long)sleepTime); //sleep(1000); QString htmlCode = model->text(); // qDebug()<<model->itemName()<<"TextBoxWarmingThread::run(): htmlCode:"<<htmlCode; QTextDocument doc; QTextDocument shadowDoc; doc.setHtml(htmlCode); shadowDoc.setHtml(htmlCode); int textWidth = model->contentsRect().toRect().width(); doc.setTextWidth(textWidth); shadowDoc.setTextWidth(textWidth); // Apply outline pen to the html QTextCursor cursor(&doc); cursor.select(QTextCursor::Document); QTextCharFormat format; QPen p(Qt::NoPen); if(model && model->outlineEnabled()) { p = model->outlinePen(); p.setJoinStyle(Qt::MiterJoin); } format.setTextOutline(p); format.setForeground(model && model->fillType() == AbstractVisualItem::Solid ? model->fillBrush() : Qt::NoBrush); //Qt::white); cursor.mergeCharFormat(format); #if QT46_SHADOW_ENAB == 0 // Setup the shadow text formatting if enabled if(model && model->shadowEnabled()) { if(qFuzzyIsNull(model->shadowBlurRadius())) { QTextCursor cursor(&shadowDoc); cursor.select(QTextCursor::Document); QTextCharFormat format; format.setTextOutline(Qt::NoPen); format.setForeground(model ? model->shadowBrush() : Qt::black); cursor.mergeCharFormat(format); } } #endif QSizeF shadowSize = model->shadowEnabled() ? 
QSizeF(model->shadowOffsetX(),model->shadowOffsetY()) : QSizeF(0,0); QImage *cache = new QImage((model->contentsRect().size()+shadowSize).toSize(),QImage::Format_ARGB32_Premultiplied); memset(cache->scanLine(0),0,cache->byteCount()); QPainter textPainter(cache); textPainter.fillRect(cache->rect(),Qt::transparent); QAbstractTextDocumentLayout::PaintContext pCtx; #if QT46_SHADOW_ENAB == 0 if(model->shadowEnabled()) { if(qFuzzyIsNull(model->shadowBlurRadius())) { // render a "cheap" version of the shadow using the shadow text document textPainter.save(); textPainter.translate(model->shadowOffsetX(),model->shadowOffsetY()); shadowDoc.documentLayout()->draw(&textPainter, pCtx); textPainter.restore(); } else { // double radius = model->shadowBlurRadius(); // double radiusSquared = radius*radius; // // // create temporary pixmap to hold a copy of the text // double blurSize = (int)(radiusSquared*2); // QSize shadowSize(blurSize,blurSize); // QImage tmpImage((model->contentsRect().size()+shadowSize).toSize(),QImage::Format_ARGB32); // memset(tmpImage.scanLine(0),0,tmpImage.byteCount()); // // // render the text // QPainter tmpPainter(&tmpImage); // tmpPainter.fillRect(tmpImage.rect(),Qt::transparent); // // tmpPainter.save(); // tmpPainter.translate(radiusSquared, radiusSquared); // doc.documentLayout()->draw(&tmpPainter, pCtx); // tmpPainter.restore(); // // // blacken the text by applying a color to the copy using a QPainter::CompositionMode_DestinationIn operation. // // This produces a homogeneously-colored pixmap. 
// QRect rect = tmpImage.rect(); // tmpPainter.setCompositionMode(QPainter::CompositionMode_SourceIn); // tmpPainter.fillRect(rect, model->shadowBrush().color()); // tmpPainter.end(); // // // blur the colored text // QImage blurredImage = ImageFilters::blurred(tmpImage, rect, (int)radius); // // // render the blurred text at an offset into the cache // textPainter.save(); // textPainter.translate(model->shadowOffsetX() - radiusSquared, // model->shadowOffsetY() - radiusSquared); // textPainter.drawImage(0, 0, blurredImage.copy(blurredImage.rect())); // textPainter.restore(); // New method of rendering shadows double radius = model->shadowBlurRadius(); // create temporary pixmap to hold a copy of the text QSizeF blurSize = ImageFilters::blurredSizeFor(model->contentsRect().size(), (int)radius); //qDebug() << "Blur size:"<<blurSize<<", doc:"<<doc.size()<<", radius:"<<radius; QImage tmpImage(blurSize.toSize(),QImage::Format_ARGB32_Premultiplied); memset(tmpImage.scanLine(0),0,tmpImage.byteCount()); // render the text QPainter tmpPainter(&tmpImage); tmpPainter.save(); tmpPainter.translate(radius, radius); doc.documentLayout()->draw(&tmpPainter, pCtx); tmpPainter.restore(); // blacken the text by applying a color to the copy using a QPainter::CompositionMode_DestinationIn operation. // This produces a homogeneously-colored pixmap. QRect rect = tmpImage.rect(); tmpPainter.setCompositionMode(QPainter::CompositionMode_SourceIn); tmpPainter.fillRect(rect, model->shadowBrush().color()); tmpPainter.end(); // blur the colored text ImageFilters::blurImage(tmpImage, (int)radius); // render the blurred text at an offset into the cache textPainter.save(); textPainter.translate(model->shadowOffsetX() - radius, model->shadowOffsetY() - radius); textPainter.drawImage(0, 0, tmpImage); textPainter.restore(); } } #endif doc.documentLayout()->draw(&textPainter, pCtx); textPainter.end(); emit renderDone(cache); }