void renderWatermark(QImage & image, const QString & wmText, const QFont & wmFont, const unsigned int wmOpacity, double pA, double pB, double pC, double pD) { const double pi = 3.14159265358979323846; double w = ((double)image.width() - pA); double h = ((double)image.height() - pB); double theta = (pi/-2.0) + atan(w / h); double l = sqrt((w * w) + (h * h)); const double sintheta = sin(theta); const double costheta = cos(theta); double margin_width = pC; double margin_height = pD; int offset = (int)(l * 0.05); int l2 = (int)(l * 0.9); int x = (int)(sintheta * h) + offset; int y = (int)(costheta * h); QFont fnt = wmFont; QFontMetrics fm = QFontMetrics(fnt); QFontInfo fi(fnt); QString family = fi.family(); QList<int> sizes = QFontDatabase().pointSizes(family); qSort(sizes); for(int i = sizes.size() - 1; i > 0; i--) { fnt.setPointSize(sizes[i]); fm = QFontMetrics(fnt); if(fm.boundingRect(wmText).width() < l2) break; } int fh = fm.height(); y = y - (fh/2); //NB QPixmap not safe outside of main thread, using QImage instead QImage wm(image.width(), image.height(), QImage::Format_RGB32); wm.fill(0xFFFFFFFF); QPainter pPainter; pPainter.begin(&wm); pPainter.setFont(fnt); pPainter.translate(margin_width, margin_height); pPainter.rotate((theta/pi)*180); pPainter.drawText(x, y, l2, fh, Qt::AlignCenter, wmText); pPainter.end(); double opacity = wmOpacity / 255.0; double opacity_inv = 1.0 - opacity; QRgb s = 0; QRgb d = 0; for(y = 0; y < image.height(); y++) { for(x = 0; x < image.width(); x++) { s = wm.pixel(x, y); if((s & 0x00ffffff) == 0x00ffffff) continue; // if it's white just skip it d = image.pixel(x, y); image.setPixel(x, y, qRgb( (int)((qRed(s) * opacity) + (qRed(d) * opacity_inv)), (int)((qGreen(s) * opacity) + (qGreen(d) * opacity_inv)), (int)((qBlue(s) * opacity) + (qBlue(d) * opacity_inv)) )); } } }
// Draws 'image' onto the canvas pixel buffer, transformed by 'matrix',
// clipped both to 'clippingPolygon' (the image border in device space) and
// to the shape's own clip path, honouring the style's opacity and any SVG
// mask attached to the shape. Does nothing if 'style' is not a shape.
void LibartCanvas::drawImage(QImage image, SVGStylableImpl *style, const SVGMatrixImpl *matrix, const KSVGPolygon& clippingPolygon)
{
    SVGShapeImpl *shape = dynamic_cast<SVGShapeImpl *>(style);
    if(shape)
    {
        // The libart blit below reads 32-bit pixel data; convert if needed.
        if(image.depth() != 32)
            image = image.convertDepth(32);

        // Combined clip: polygon outline intersected with the shape's clip.
        ArtSVP *imageBorder = svpFromPolygon(clippingPolygon);
        ArtSVP *clipSvp = clipSingleSVP(imageBorder, shape);

        ArtDRect bbox;
        art_drect_svp(&bbox, clipSvp);

        // clamp to viewport
        int x0 = int(bbox.x0);
        int y0 = int(bbox.y0);
        // Use inclusive coords for x1/y1 for clipToBuffer
        int x1 = int(ceil(bbox.x1)) - 1;
        int y1 = int(ceil(bbox.y1)) - 1;

        // Skip entirely off-screen draws.
        if(x0 < int(m_width) && y0 < int(m_height) && x1 >= 0 && y1 >= 0)
        {
            clipToBuffer(x0, y0, x1, y1);

            QRect screenBBox(x0, y0, x1 - x0 + 1, y1 - y0 + 1);
            QByteArray mask = SVGMaskElementImpl::maskRectangle(shape, screenBBox);

            double affine[6];
            KSVGHelper::matrixToAffine(matrix, affine);

            // NOTE(review): source stride is passed as width * 4 — assumes the
            // 32-bit QImage's scanlines are tightly packed (true for depth 32).
            ksvg_art_rgb_affine_clip(clipSvp, m_buffer + x0 * nrChannels() + y0 * rowStride(),
                                     x0, y0, x1 + 1, y1 + 1,
                                     rowStride(), nrChannels(),
                                     image.bits(), image.width(), image.height(), image.width() * 4,
                                     affine,
                                     int(style->getOpacity() * 255),
                                     (const art_u8 *)mask.data());
        }

        // Both SVPs are owned here; free in all paths.
        art_svp_free(imageBorder);
        art_svp_free(clipSvp);
    }
}
void qAnimationDlg::render() { if (!m_view3d) { assert(false); return; } QString outputFilename = outputFileLineEdit->text(); //save to persistent settings { QSettings settings; settings.beginGroup("qAnimation"); settings.setValue("filename", outputFilename); settings.endGroup(); } setEnabled(false); //count the total number of frames int frameCount = countFrames(0); int fps = fpsSpinBox->value(); int superRes = superResolutionSpinBox->value(); //show progress dialog QProgressDialog progressDialog(QString("Frames: %1").arg(frameCount), "Cancel", 0, frameCount, this); progressDialog.setWindowTitle("Render"); progressDialog.show(); QApplication::processEvents(); #ifdef QFFMPEG_SUPPORT //get original viewport size QSize originalViewSize = m_view3d->size(); //hack: as the encoder requires that the video dimensions are multiples of 8, we resize the window a little bit... { //find the nearest multiples of 8 QSize customSize = originalViewSize; if (originalViewSize.width() % 8 || originalViewSize.height() % 8) { if (originalViewSize.width() % 8) customSize.setWidth((originalViewSize.width() / 8 + 1) * 8); if (originalViewSize.height() % 8) customSize.setHeight((originalViewSize.height() / 8 + 1) * 8); m_view3d->resize(customSize); QApplication::processEvents(); } } int bitrate = bitrateSpinBox->value() * 1024; int gop = fps; QVideoEncoder encoder(outputFilename, m_view3d->width(), m_view3d->height(), bitrate, gop, static_cast<unsigned>(fpsSpinBox->value())); QString errorString; if (!encoder.open(&errorString)) { QMessageBox::critical(this, "Error", QString("Failed to open file for output: %1").arg(errorString)); setEnabled(true); return; } #endif bool lodWasEnabled = m_view3d->isLODEnabled(); m_view3d->setLODEnabled(false); int frameIndex = 0; bool success = true; size_t vp1 = 0, vp2 = 0; while (getNextSegment(vp1, vp2)) { Step& step1 = m_videoSteps[vp1]; Step& step2 = m_videoSteps[vp2]; ViewInterpolate interpolator(step1.viewport, step2.viewport); int frameCount = 
static_cast<int>( fps * step1.duration_sec ); interpolator.setMaxStep(frameCount); cc2DViewportObject current_params; while ( interpolator.nextView( current_params ) ) { applyViewport ( ¤t_params ); //render to image QImage image = m_view3d->renderToImage(superRes, false, false, true ); if (image.isNull()) { QMessageBox::critical(this, "Error", "Failed to grab the screen!"); success = false; break; } if (superRes > 1) { image = image.scaled(image.width()/superRes, image.height()/superRes, Qt::IgnoreAspectRatio, Qt::SmoothTransformation); } #ifdef QFFMPEG_SUPPORT if (!encoder.encodeImage(image, frameIndex, &errorString)) { QMessageBox::critical(this, "Error", QString("Failed to encode frame #%1: %2").arg(frameIndex+1).arg(errorString)); success = false; break; } #else QString filename = QString("frame_%1.png").arg(frameIndex, 6, 10, QChar('0')); QString fullPath = QDir(outputFilename).filePath(filename); if (!image.save(fullPath)) { QMessageBox::critical(this, "Error", QString("Failed to save frame #%1").arg(frameIndex+1)); success = false; break; } #endif ++frameIndex; progressDialog.setValue(frameIndex); QApplication::processEvents(); if (progressDialog.wasCanceled()) { QMessageBox::warning(this, "Warning", QString("Process has been cancelled")); success = false; break; } } if (!success) { break; } if (vp2 == 0) { //stop loop here! break; } vp1 = vp2; } m_view3d->setLODEnabled(lodWasEnabled); #ifdef QFFMPEG_SUPPORT encoder.close(); //hack: restore original size m_view3d->resize(originalViewSize); QApplication::processEvents(); #endif progressDialog.hide(); QApplication::processEvents(); if (success) { QMessageBox::information(this, "Job done", "The animation has been saved successfully"); } setEnabled(true); }
void ImageOperations::featureScale( const OpGrayImage& img, OpGrayImage& imgRet ) { QImage qimg = img.getQtImage(); QImage qimgRet; qimgRet.create(qimg.width()/2, qimg.height()/2, 32); for( int y=0; y<qimg.height(); y+=2 ) { for( int x=0; x<qimg.width(); x+=2 ) { if( (int(x/2) < qimgRet.width()) && (int(y/2) < qimgRet.height())) { if( (qRed (qimg.pixel(x,y)) == 0) && // If pixel in (qGreen(qimg.pixel(x,y)) == 0) && // image contains feature (qBlue (qimg.pixel(x,y)) == 0)) { // set in scaled qimgRet.setPixel(int(x/2), int(y/2), qRgb(0,0,0)); } else { if ((x < qimg.width()-1) && // Else, if pixel ((qRed (qimg.pixel(x+1,y)) == 0) && // to the right (qGreen(qimg.pixel(x+1,y)) == 0) && // contains feature (qBlue (qimg.pixel(x+1,y)) == 0))) { // set in scaled. qimgRet.setPixel(int(x/2), int(y/2), qRgb(0,0,0)); } else { if( (y < qimg.height()-1) && // Else if pixel ((qRed (qimg.pixel(x,y+1)) == 0) && // below contains (qGreen(qimg.pixel(x,y+1)) == 0) && // feature, set (qBlue (qimg.pixel(x,y+1)) == 0))) { // in scaled. qimgRet.setPixel(int(x/2), int(y/2), qRgb(0,0,0)); } else { // Else if pixel if( ((x < qimg.width()-1) && // to the right (y < qimg.height()-1)) && // and below ((qRed (qimg.pixel(x,y+1)) == 0) && // contains (qGreen(qimg.pixel(x,y+1)) == 0) && // feature, set (qBlue (qimg.pixel(x,y+1)) == 0))) { // it in scaled. qimgRet.setPixel(int(x/2), int(y/2), qRgb(0,0,0)); } else { // Else set qimgRet.setPixel(int(x/2), int(y/2), // non-feature in qRgb(255,255,255)); // scaled. } } } } } } } OpGrayImage tmp(qimgRet); imgRet = tmp; }
void ImageOperations::colorCanny( const OpRGBImage& img, OpGrayImage& imgRet, float dThreshLow, float dThreshHigh, float dSigma ) { //printf("ImageOperations::colorCanny() ...\n"); QImage qimg = img.getQtImage(); int w = qimg.width(); int h = qimg.height(); //- create a YCC color-opponent image from the input image -// OpGrayImage imgY ( w, h ); OpGrayImage imgC1( w, h ); OpGrayImage imgC2( w, h ); for (int y=0; y < h; y++) { for (int x=0; x < w; x++) { // img is the original QImage // uint *p = (uint *) qimg.scanLine(y) + x; //-- convert rgb to YCC --// imgY(x,y) = (1.0/3.0)*(qRed(*p) + qGreen(*p) + qBlue(*p)); imgC1(x,y) = 2.0*(0.5*qRed(*p) - 0.5*qGreen(*p)); imgC2(x,y) = 2.0*(0.5*qRed(*p) + 0.5*qGreen(*p) - qBlue(*p)); } } //-- apply the filter --// OpGrayImage imgDx( w, h ); OpGrayImage imgDy( w, h ); //-- fast gauss --// OpGrayImage imgYDx( w, h ); OpGrayImage imgYDy( w, h ); imgY.opFastGaussDxDy( dSigma, imgYDx, imgYDy ); OpGrayImage imgC1Dx( w, h ); OpGrayImage imgC1Dy( w, h ); imgC1.opFastGaussDxDy( dSigma, imgC1Dx, imgC1Dy ); OpGrayImage imgC2Dx( w, h ); OpGrayImage imgC2Dy( w, h ); imgC2.opFastGaussDxDy( dSigma, imgC2Dx, imgC2Dy ); for (int y=0; y < h; y++) { for (int x=0; x < w; x++) { imgDx(x,y) = sqrt( imgYDx(x,y).value()*imgYDx(x,y).value() + imgC1Dx(x,y).value()*imgC1Dx(x,y).value() + imgC2Dx(x,y).value()*imgC2Dx(x,y).value() ); imgDy(x,y) = sqrt( imgYDy(x,y).value()*imgYDy(x,y).value() + imgC1Dy(x,y).value()*imgC1Dy(x,y).value() + imgC2Dy(x,y).value()*imgC2Dy(x,y).value() ); } } /* //-- slow gauss --// OpGrayImage imgYDx = imgY.opGaussDerivGx( m_dSigma ); OpGrayImage imgYDy = imgY.opGaussDerivGy( m_dSigma ); OpGrayImage imgC1Dx = imgC1.opGaussDerivGx( m_dSigma ); OpGrayImage imgC1Dy = imgC1.opGaussDerivGy( m_dSigma ); OpGrayImage imgC2Dx = imgC2.opGaussDerivGx( m_dSigma ); OpGrayImage imgC2Dy = imgC2.opGaussDerivGy( m_dSigma ); for (int y=0; y < m_img.height(); y++) for (int x=0; x < m_img.width(); x++) { imgDx(x,y) = sqrt( 
imgYDx(x,y).value()*imgYDx(x,y).value() + imgC1Dx(x,y).value()*imgC1Dx(x,y).value() + imgC2Dx(x,y).value()*imgC2Dx(x,y).value() ); imgDy(x,y) = sqrt( imgYDy(x,y).value()*imgYDy(x,y).value() + imgC1Dy(x,y).value()*imgC1Dy(x,y).value() + imgC2Dy(x,y).value()*imgC2Dy(x,y).value() ); } */ //-- apply the Canny operator --// OpGrayImage resultImg( w, h ); imgRet = resultImg; imgRet = cannyEdgesDxDy( imgDx, imgDy, dThreshLow, dThreshHigh ); }
// Paints the layer, preferring OpenGL when available: directly when the
// target widget is a QGLWidget, via an offscreen FBO when caching is on and
// FBOs are supported, and falling back to plain Qt rendering otherwise.
void Layer::paint(QPainter *painter, const QStyleOptionGraphicsItem *option, QWidget *widget)
{
    QtPainter *qvpainter = NULL;
#ifdef QT_OPENGL_LIB
    // fboPainter is only created (and later deleted) on the FBO path below;
    // 'fbo' doubles as the flag for that path.
    QPainter *fboPainter;
    QGLFramebufferObject *fbo = NULL;
    QGLWidget *qglWidget = qobject_cast<QGLWidget *>(widget);
    if (qglWidget) { // on-screen OpenGL
        QGLContext *context = const_cast<QGLContext *>(qglWidget->context());
        qvpainter = new OpenGLPainter(painter, context);
    } else if (cacheMode() != QGraphicsItem::NoCache &&
               QGLFramebufferObject::hasOpenGLFramebufferObjects())
    { // caching, try FBO
        // if we have direct rendering and FBO support, make use of
        // FBO, but this could still just be in software
        // NOTE: In Qt 4.6, 'painter' would already target an FBO, if we
        // were using the 'OpenGL2' paint engine. We have decided to stick
        // with the original engine for now, as the OpenGL2 engine relies
        // heavily on shaders, which is slow for our use case.
        // Apparently, we must use the QGLContext associated with
        // the view being painted. Thus, PlotView tracks whether it is
        // inside a paintEvent, so we can get the current QGLWidget.
        OverlayScene *overlayScene = qobject_cast<OverlayScene *>(scene());
        if (overlayScene)
            qglWidget = qobject_cast<QGLWidget *>(overlayScene->view()->viewport());
        else {
            // Find the view currently being painted, if any.
            QList<QGraphicsView *> views = scene()->views();
            for (int i = 0; i < views.size() && !qglWidget; i++) {
                PlotView *view = qobject_cast<PlotView *>(views[i]);
                if (view && view->isPainting())
                    qglWidget = qobject_cast<QGLWidget *>(view->viewport());
            }
        }
        if (qglWidget) {
            QSize size(painter->device()->width(), painter->device()->height());
            QGLContext *context = const_cast<QGLContext *>(qglWidget->context());
            // GC during paint callback may have reset this
            if (qglWidget->context() != QGLContext::currentContext())
                qglWidget->makeCurrent();
            // NOTE: need Qt 4.6 for antialiasing to work with FBOs
#if QT_VERSION >= 0x40600
            // Try a multisampled FBO first; fboDebugMsgCatcher records the
            // failure into fboMultisamplingFailed so we never retry it.
            if (!fboMultisamplingFailed) {
                QGLFramebufferObjectFormat fboFormat;
                fboFormat.setAttachment(QGLFramebufferObject::CombinedDepthStencil);
                fboFormat.setSamples(4); // 4X antialiasing should be enough?
                qInstallMsgHandler(fboDebugMsgCatcher);
                fbo = new QGLFramebufferObject(size, fboFormat);
                qInstallMsgHandler(0);
                if (fboMultisamplingFailed) {
                    delete fbo;
                    fbo = NULL;
                }
            }
#endif
            if (!fbo)
                fbo = new QGLFramebufferObject(size);
            // clear the FBO
            fboPainter = new QPainter(fbo);
            fboPainter->setCompositionMode(QPainter::CompositionMode_Source);
            fboPainter->fillRect(0, 0, size.width(), size.height(), Qt::transparent);
            fboPainter->setCompositionMode(QPainter::CompositionMode_SourceOver);
            qvpainter = new OpenGLPainter(fboPainter, context);
            qvpainter->setTransform(painter->worldTransform());
        }
    }
#endif

    if (!qvpainter) // fallback to Qt renderer
        qvpainter = new QtPainter(painter);

    // NOTE: in QT 4.6 exposedRect will just be the bounding rect, by default
    paintPlot(qvpainter, option->exposedRect);
    delete qvpainter;

#ifdef QT_OPENGL_LIB
    if (fbo) { // silliness: download to image, only to upload to texture
        painter->setWorldMatrixEnabled(false);
        qglWidget->makeCurrent(); // gc during callback may have cleared this
        // need to tell Qt that 'fboImage' is actually premultiplied
        QImage fboImage = fbo->toImage();
        const uchar *data = fboImage.bits(); // no deep copy
        QImage premultImage = QImage(data,
                                     fboImage.width(),
                                     fboImage.height(),
                                     QImage::Format_ARGB32_Premultiplied);
        // Not sure why this can't be (0, 0)...
        painter->drawImage(QPointF(1, -1), premultImage);
        delete fboPainter;
        delete fbo;
    }
#endif
}
// Handles a click at page position 'p' while building a font.
// In "arbitrary selection" mode the first click stores a corner and the
// second click completes the rectangle; otherwise the click is matched
// against the pre-computed letter rectangles. In both cases the cropped
// glyph image is stored for the currently-checked letter radio button and
// the previous button (descending button id) is selected next.
void BuildFontDialog::pickLetter(const QPoint & p)
{
    if (selectionArbitrary->isChecked())
    {
        // First click of the pair: remember the corner (x < 0 marks "unset").
        if(arbitraryLastPoint.x() < 0)
        {
            arbitraryLastPoint = p;
        }
        else
        {
            QRect rect(arbitraryLastPoint, p);
            // Degenerate rectangles get an arbitrary 10px extent.
            if(rect.height() <= 0)
                rect.setHeight(10);
            if(rect.width() <= 0)
                rect.setWidth(10); //TODO Code smell!!
            QImage picked = originalPage.copy(rect);
            QLabel* label = radio2label[this->buttonGroup->checkedButton()];
            //TODO duplicate code!!! (same sequence as the rect-matching branch below)
            label->setPixmap(QPixmap::fromImage(picked));
            label->adjustSize();
            FontLetterInfo info;
            info.img = QPixmap::fromImage(picked);
            // No baseline known for arbitrary picks: anchor at the image bottom.
            info.topLeft = QPoint(-1,-picked.height()) ;
            info.width = info.img.width();
            char letter = radioBtn2Char[(QRadioButton*)(this->buttonGroup->checkedButton())];
            builtFont[letter] = info;
            // Advance to the previous button id (buttons are numbered descending).
            QRadioButton *next= (QRadioButton *)(this->buttonGroup->button(this->buttonGroup->id(this->buttonGroup->checkedButton())-1));
            if(next != NULL)
            {
                next->setChecked(true);
            }
            // Reset for the next two-click selection.
            arbitraryLastPoint.setX(-1);
        }
        return;
    }

    // Rectangle-matching mode: find the detected letter rect containing 'p'.
    for(int i = 0; i < letterRects.size(); i++)
    {
        QRect rect = letterRects[i];
        if(p.x() >= rect.left() && p.x() <= rect.right() && p.y() >= rect.top() && p.y() <= rect.bottom())
        {
            //qDebug() << "wrapped in" << p << rect;
            QImage picked = originalPage.copy(rect);
            QLabel* label = radio2label[this->buttonGroup->checkedButton()];
            label->setPixmap(QPixmap::fromImage(picked));
            label->adjustSize();
            // NOTE(review): assumes baselines[] is at least as long as
            // letterRects[] — no bounds check here.
            int baseline = baselines[i]; //TODO : vulnerable!!!
            char letter = radioBtn2Char[(QRadioButton*)(this->buttonGroup->checkedButton())];
            FontLetterInfo info;
            info.img = QPixmap::fromImage(picked);
            qDebug() << "baseline" << info.img.height() << baseline;
            info.topLeft = QPoint(-1,-baseline) ;
            info.width = info.img.width();
            builtFont[letter] = info;
            qDebug() << "add to map" << letter;
            QRadioButton *next= (QRadioButton *)(this->buttonGroup->button(this->buttonGroup->id(this->buttonGroup->checkedButton())-1));
            if(next != NULL)
            {
                next->setChecked(true);
            }
            break;
        }
    }
}
// Redraws the whole scene with immediate-mode OpenGL: ground and camera,
// then all picture objects, then all lens objects (two-pass front/back
// rendering with different height-map textures).
void GLWidget::paintGL()
{
    int i;
    QList<Point3D> points;
    QImage image;

    //Set backgroung
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
    rotateCamera();
    drawGround( 10.0f, 1.0f, 0);

    //Using stack of matrix
    glPushMatrix();

    //Draw all objects
    glEnable(GL_TEXTURE_2D);
    glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
    //colors of glObject are replaced by texture. other - GL_REPLACE - combination
    glTexEnvi(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_MODULATE);

    float refl;
    int shine;

    //need to draw pictures then objects
    //Drawing images
    for( i = 0; i < Scene::Instance().stub_objects().size(); i++)
    {
        //checking type of object - lense or picture
        LensObjectStub* n = dynamic_cast<LensObjectStub*>(Scene::Instance().stub_objects()[i]);
        if (!(n))
        {
            // Picture objects: dull material, opaque quad.
            shine = 10;
            refl = 0.5;
            //textures
            PictureObjectStub* picobj = (PictureObjectStub*)Scene::Instance().stub_objects()[i];
            image = convertToGLFormat(picobj->image());
            glTexImage2D(GL_TEXTURE_2D, 0, 3, (GLsizei)image.width(), (GLsizei)image.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, image.bits());
            // Selected objects are tinted blue (modulated with the texture).
            if (Scene::Instance().stub_objects()[i]->selected())
            {
                glColor4f( 0.1, 0.1, 1.0, 1.0);
            }
            else
            {
                glColor4f( 1.0, 1.0, 1.0, 1.0);
            }
            float specref[] = { refl, refl, refl};
            glMaterialfv( GL_FRONT_AND_BACK, GL_SPECULAR, specref);
            glMaterialf( GL_FRONT_AND_BACK, GL_SHININESS, shine); //0..128 - reflection
            points = Scene::Instance().stub_objects()[i]->getPoints();
            //qDebug() << points;
            //Drawing pictures
            glBegin( GL_QUADS);
            //Draw normals for lighting TODO
            //glNormal3f( x, y ,z);
            glTexCoord2f(0, 1); glVertex3f( points[0].x, points[0].y, points[0].z);
            glTexCoord2f(1, 1); glVertex3f( points[1].x, points[1].y, points[1].z);
            glTexCoord2f(1, 0); glVertex3f( points[2].x, points[2].y, points[2].z);
            glTexCoord2f(0, 0); glVertex3f( points[3].x, points[3].y, points[3].z);
            glEnd();
        }
    }

    //Drawing lenses
    for( i = 0; i < Scene::Instance().stub_objects().size(); i++)
    {
        //checking type of object - lense or picture
        LensObjectStub* n = dynamic_cast<LensObjectStub*>(Scene::Instance().stub_objects()[i]);
        if (n)
        {
            // Lens objects: shiny, semi-transparent, drawn in two culled
            // passes so front and back faces get different height maps.
            shine = 128;
            refl = 1.0;
            glEnable(GL_CULL_FACE);
            glCullFace(GL_FRONT);
            image = convertToGLFormat(n->heightMap1());
            glTexImage2D(GL_TEXTURE_2D, 0, 3, (GLsizei)image.width(), (GLsizei)image.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, image.bits());
            if (Scene::Instance().stub_objects()[i]->selected())
            {
                glColor4f( 0.1, 0.1, 1.0, 0.5);
            }
            else
            {
                glColor4f( 1.0, 1.0, 1.0, 0.5);
            }
            float specref[] = { refl, refl, refl};
            glMaterialfv( GL_FRONT_AND_BACK, GL_SPECULAR, specref);
            glMaterialf( GL_FRONT_AND_BACK, GL_SHININESS, shine); //0..128 - reflection
            points = Scene::Instance().stub_objects()[i]->getPoints();
            //qDebug() << points;
            //Drawing pictures
            glBegin( GL_QUADS);
            //Draw normals for lighting TODO
            //glNormal3f( x, y ,z);
            glTexCoord2f(0, 1); glVertex3f( points[0].x, points[0].y, points[0].z);
            glTexCoord2f(1, 1); glVertex3f( points[1].x, points[1].y, points[1].z);
            glTexCoord2f(1, 0); glVertex3f( points[2].x, points[2].y, points[2].z);
            glTexCoord2f(0, 0); glVertex3f( points[3].x, points[3].y, points[3].z);
            glEnd();

            //draw back with diff texture
            glCullFace(GL_BACK);
            image = convertToGLFormat(n->heightMap2());
            glTexImage2D(GL_TEXTURE_2D, 0, 3, (GLsizei)image.width(), (GLsizei)image.height(), 0, GL_RGBA, GL_UNSIGNED_BYTE, image.bits());
            glBegin( GL_QUADS);
            //Draw again wi other texture
            glTexCoord2f(0, 1); glVertex3f( points[0].x, points[0].y, points[0].z);
            glTexCoord2f(1, 1); glVertex3f( points[1].x, points[1].y, points[1].z);
            glTexCoord2f(1, 0); glVertex3f( points[2].x, points[2].y, points[2].z);
            glTexCoord2f(0, 0); glVertex3f( points[3].x, points[3].y, points[3].z);
            glEnd();
            glCullFace(GL_FRONT_AND_BACK);
            glDisable(GL_CULL_FACE);
        }
    }

    /*glPushMatrix();
    glBegin(GL_LINES);
    glVertex3f(8, -9, 5);
    glVertex3f(0, 0, 0);
    glEnd();
    glPopMatrix();*/

    glPopMatrix();
    drawCamera();
    //clear drawing command stack
    swapBuffers();
}
/*! Returns a pixmap with size \a s and mode \a m, generating one if needed. Generated pixmaps are cached. */ QPixmap QIconSet::pixmap( Size s, Mode m ) const { if ( !d ) { QPixmap r; return r; } QImage i; QIconSetPrivate * p = ((QIconSet *)this)->d; QPixmap * pm = 0; if ( s == Large ) { switch( m ) { case Normal: if ( !p->large.pm ) { ASSERT( p->small.pm ); i = p->small.pm->convertToImage(); i = i.smoothScale( i.width() * 3 / 2, i.height() * 3 / 2 ); p->large.pm = new QPixmap; p->large.generated = TRUE; p->large.pm->convertFromImage( i ); if ( !p->large.pm->mask() ) { i = i.createHeuristicMask(); QBitmap tmp; tmp.convertFromImage( i, Qt::MonoOnly + Qt::ThresholdDither ); p->large.pm->setMask( tmp ); } } pm = p->large.pm; break; case Active: if ( !p->largeActive.pm ) { p->largeActive.pm = new QPixmap( pixmap( Large, Normal ) ); p->largeActive.generated = TRUE; } pm = p->largeActive.pm; break; case Disabled: if ( !p->largeDisabled.pm ) { #if defined(_WS_QWS_) && !defined(QT_NO_DEPTH_32) p->largeDisabled.pm = newDisablePixmap(pixmap(Large,Normal)); #else QBitmap tmp; if ( p->large.generated && !p->smallDisabled.generated && p->smallDisabled.pm && !p->smallDisabled.pm->isNull() ) { // if there's a hand-drawn disabled small image, // but the normal big one is generated, use the // hand-drawn one to generate this one. 
i = p->smallDisabled.pm->convertToImage(); i = i.smoothScale( i.width() * 3 / 2, i.height() * 3 / 2 ); p->largeDisabled.pm = new QPixmap; p->largeDisabled.pm->convertFromImage( i ); if ( !p->largeDisabled.pm->mask() ) { i = i.createHeuristicMask(); tmp.convertFromImage( i, Qt::MonoOnly + Qt::ThresholdDither ); } } else { if (pixmap( Large, Normal).mask()) tmp = *pixmap( Large, Normal).mask(); else { QPixmap conv = pixmap( Large, Normal ); if ( !conv.isNull() ) { i = conv.convertToImage(); i = i.createHeuristicMask(); tmp.convertFromImage( i, Qt::MonoOnly + Qt::ThresholdDither ); } } p->largeDisabled.pm = new QPixmap( p->large.pm->width()+1, p->large.pm->height()+1); QColorGroup dis( QApplication::palette().disabled() ); p->largeDisabled.pm->fill( dis.background() ); QPainter painter( p->largeDisabled.pm ); painter.setPen( dis.base() ); painter.drawPixmap( 1, 1, tmp ); painter.setPen( dis.foreground() ); painter.drawPixmap( 0, 0, tmp ); } if ( !p->largeDisabled.pm->mask() ) { if ( !tmp.mask() ) tmp.setMask( tmp ); QBitmap mask( d->largeDisabled.pm->size() ); mask.fill( Qt::color0 ); QPainter painter( &mask ); painter.drawPixmap( 0, 0, tmp ); painter.drawPixmap( 1, 1, tmp ); painter.end(); p->largeDisabled.pm->setMask( mask ); } #endif p->largeDisabled.generated = TRUE; } pm = p->largeDisabled.pm; break; } } else { switch( m ) { case Normal: if ( !p->small.pm ) { ASSERT( p->large.pm ); i = p->large.pm->convertToImage(); i = i.smoothScale( i.width() * 2 / 3, i.height() * 2 / 3 ); p->small.pm = new QPixmap; p->small.generated = TRUE; p->small.pm->convertFromImage( i ); if ( !p->small.pm->mask() ) { i = i.createHeuristicMask(); QBitmap tmp; tmp.convertFromImage( i, Qt::MonoOnly + Qt::ThresholdDither ); p->small.pm->setMask( tmp ); } } pm = p->small.pm; break; case Active: if ( !p->smallActive.pm ) { p->smallActive.pm = new QPixmap( pixmap( Small, Normal ) ); p->smallActive.generated = TRUE; } pm = p->smallActive.pm; break; case Disabled: if ( !p->smallDisabled.pm ) { 
#if defined(_WS_QWS_) && !defined(QT_NO_DEPTH_32) p->smallDisabled.pm = newDisablePixmap(pixmap(Small,Normal)); #else QBitmap tmp; if ( p->small.generated && !p->largeDisabled.generated && p->largeDisabled.pm && !p->largeDisabled.pm->isNull() ) { // if there's a hand-drawn disabled large image, // but the normal small one is generated, use the // hand-drawn one to generate this one. i = p->largeDisabled.pm->convertToImage(); i = i.smoothScale( i.width() * 3 / 2, i.height() * 3 / 2 ); p->smallDisabled.pm = new QPixmap; p->smallDisabled.pm->convertFromImage( i ); if ( !p->smallDisabled.pm->mask() ) { i = i.createHeuristicMask(); tmp.convertFromImage( i, Qt::MonoOnly + Qt::ThresholdDither ); } } else { if ( pixmap( Small, Normal).mask()) tmp = *pixmap( Small, Normal).mask(); else { QPixmap conv = pixmap( Small, Normal ); if ( !conv.isNull() ) { i = conv.convertToImage(); i = i.createHeuristicMask(); tmp.convertFromImage( i, Qt::MonoOnly + Qt::ThresholdDither ); } } p->smallDisabled.pm = new QPixmap( p->small.pm->width()+1, p->small.pm->height()+1); QColorGroup dis( QApplication::palette().disabled() ); p->smallDisabled.pm->fill( dis.background() ); QPainter painter( p->smallDisabled.pm ); painter.setPen( dis.base() ); painter.drawPixmap( 1, 1, tmp ); painter.setPen( dis.foreground() ); painter.drawPixmap( 0, 0, tmp ); } if ( !p->smallDisabled.pm->mask() ) { if ( !tmp.mask() ) tmp.setMask( tmp ); QBitmap mask( d->smallDisabled.pm->size() ); mask.fill( Qt::color0 ); QPainter painter( &mask ); painter.drawPixmap( 0, 0, tmp ); painter.drawPixmap( 1, 1, tmp ); painter.end(); p->smallDisabled.pm->setMask( mask ); } #endif p->smallDisabled.generated = TRUE; } pm = p->smallDisabled.pm; break; } } ASSERT( pm ); return *pm; }
void KisBrush::generateMaskAndApplyMaskOrCreateDab(KisFixedPaintDeviceSP dst, ColoringInformation* coloringInformation, double scaleX, double scaleY, double angle, const KisPaintInformation& info_, double subPixelX, double subPixelY, qreal softnessFactor) const { Q_ASSERT(valid()); Q_UNUSED(info_); Q_UNUSED(softnessFactor); angle += d->angle; // Make sure the angle stay in [0;2*M_PI] if (angle < 0) angle += 2 * M_PI; if (angle > 2 * M_PI) angle -= 2 * M_PI; scaleX *= d->scale; scaleY *= d->scale; double scale = 0.5 * (scaleX + scaleY); prepareBrushPyramid(); QImage outputImage = d->brushPyramid->createImage(scale, -angle, subPixelX, subPixelY); qint32 maskWidth = outputImage.width(); qint32 maskHeight = outputImage.height(); dst->setRect(QRect(0, 0, maskWidth, maskHeight)); dst->initialize(); quint8* color = 0; if (coloringInformation) { if (dynamic_cast<PlainColoringInformation*>(coloringInformation)) { color = const_cast<quint8*>(coloringInformation->color()); } } const KoColorSpace *cs = dst->colorSpace(); qint32 pixelSize = cs->pixelSize(); quint8 *dabPointer = dst->data(); quint8 *rowPointer = dabPointer; quint8 *alphaArray = new quint8[maskWidth]; bool hasColor = this->hasColor(); for (int y = 0; y < maskHeight; y++) { #if QT_VERSION >= 0x040700 const quint8* maskPointer = outputImage.constScanLine(y); #else const quint8* maskPointer = outputImage.scanLine(y); #endif if (coloringInformation) { for (int x = 0; x < maskWidth; x++) { if (color) { memcpy(dabPointer, color, pixelSize); } else { memcpy(dabPointer, coloringInformation->color(), pixelSize); coloringInformation->nextColumn(); } dabPointer += pixelSize; } } if (hasColor) { const quint8 *src = maskPointer; quint8 *dst = alphaArray; for (int x = 0; x < maskWidth; x++) { const QRgb *c = reinterpret_cast<const QRgb*>(src); *dst = KoColorSpaceMaths<quint8>::multiply(255 - qGray(*c), qAlpha(*c)); src += 4; dst++; } } else { const quint8 *src = maskPointer; quint8 *dst = alphaArray; for (int x = 0; x < 
maskWidth; x++) { const QRgb *c = reinterpret_cast<const QRgb*>(src); *dst = KoColorSpaceMaths<quint8>::multiply(255 - *src, qAlpha(*c)); src += 4; dst++; } } cs->applyAlphaU8Mask(rowPointer, alphaArray, maskWidth); rowPointer += maskWidth * pixelSize; dabPointer = rowPointer; if (!color && coloringInformation) { coloringInformation->nextRow(); } } delete alphaArray; }
/**
   Take data from image, draw text at x|y with specified parameters.
   If destPainter is null, draw to image,
   if destPainter is not null, draw directly using the painter.
   Returns modified area of image.

   When x == y == -1 the position is chosen automatically: first from the
   stored position hint (relative placement remembered from the last use),
   otherwise centered.
*/
QRect InsertTextWidget::composeImage(DImg* const image, QPainter* const destPainter,
                                     int x, int y,
                                     QFont font, float pointSize, int textRotation,
                                     QColor textColor, int textOpacity,
                                     int alignMode, const QString& textString,
                                     bool transparentBackground, QColor backgroundColor,
                                     BorderMode borderMode, int borderWidth, int spacing,
                                     float fontScale)
{
    /*
        The problem we have to solve is that we have no pixel access to font rendering,
        we have to let Qt do the drawing. On the other hand we need to support 16 bit, which cannot be done
        with QPixmap.
        The current solution cuts out the text area, lets Qt do its drawing, converts back and blits to original.
    */
    int maxWidth, maxHeight;

    if (x == -1 && y == -1)
    {
        maxWidth  = image->width();
        maxHeight = image->height();
    }
    else
    {
        maxWidth  = image->width()  - x;
        maxHeight = image->height() - y;
    }

    // guard against zero/negative scale
    fontScale = qMax(0.01f, fontScale);

    // find out size of the area that we are drawing to
    font.setPointSizeF(pointSize);
    QFontMetrics fontMt(font);
    // bounding rect is measured in unscaled font space, then scaled back up
    QRect fontRect = fontMt.boundingRect(0, 0,
                                         qRound(maxWidth / fontScale),
                                         qRound(maxHeight / fontScale),
                                         alignMode, textString);

    fontRect.setWidth(qRound(fontRect.width()   * fontScale));
    fontRect.setHeight(qRound(fontRect.height() * fontScale));

    if (!fontRect.isValid())
    {
        return QRect();
    }

    // swap width/height for 90/270 degree rotations
    int fontWidth, fontHeight;

    switch (textRotation)
    {
        case ROTATION_NONE:
        case ROTATION_180:
        default:
            fontWidth  = fontRect.width();
            fontHeight = fontRect.height();
            break;
        case ROTATION_90:
        case ROTATION_270:
            fontWidth  = fontRect.height();
            fontHeight = fontRect.width();
            break;
    }

    // x, y == -1 means that we have to find a good initial position for the text here

    if (x == -1 && y == -1)
    {
        // total size of the text box including border and spacing
        int boxWidth  = fontWidth  + 2 * borderWidth + 2 * spacing;
        int boxHeight = fontHeight + 2 * borderWidth + 2 * spacing;

        // was a valid position hint stored from last use?

        if (d->positionHint.isValid())
        {
            // We assume that people tend to orient text along the edges,
            // so we do some guessing so that positions such as "in the lower right corner"
            // will be remembered across different image sizes.

            // get relative positions (hint coordinates are per-10000)
            float fromTop    =       (float)d->positionHint.top()    / 10000.0;
            float fromBottom = 1.0 - (float)d->positionHint.bottom() / 10000.0;
            float fromLeft   =       (float)d->positionHint.left()   / 10000.0;
            float fromRight  = 1.0 - (float)d->positionHint.right()  / 10000.0;

            // calculate horizontal position
            if (fromLeft < fromRight)
            {
                x = qRound(fromLeft * maxWidth);

                // we are placing from the smaller distance,
                // so if now the larger distance is actually too small,
                // fall back to standard placement, nothing to lose.
                if (x + boxWidth > maxWidth)
                {
                    x = qMax( (maxWidth - boxWidth) / 2, 0);
                }
            }
            else
            {
                x = maxWidth - qRound(fromRight * maxWidth) - boxWidth;

                if ( x < 0 )
                {
                    x = qMax( (maxWidth - boxWidth) / 2, 0);
                }
            }

            // calculate vertical position
            if (fromTop < fromBottom)
            {
                y = qRound(fromTop * maxHeight);

                if (y + boxHeight > maxHeight)
                {
                    y = qMax( (maxHeight - boxHeight) / 2, 0);
                }
            }
            else
            {
                y = maxHeight - qRound(fromBottom * maxHeight) - boxHeight;

                if ( y < 0 )
                {
                    y = qMax( (maxHeight - boxHeight) / 2, 0);
                }
            }

            if (! QRect(x, y, boxWidth, boxHeight).intersects(QRect(0, 0, maxWidth, maxHeight)) )
            {
                // emergency fallback - nothing is visible
                x = qMax( (maxWidth - boxWidth) / 2, 0);
                y = qMax( (maxHeight - boxHeight) / 2, 0);
            }

            // invalidate position hint, use only once
            d->positionHint = QRect();
        }
        else
        {
            // use standard position
            x = qMax( (maxWidth - boxWidth) / 2, 0);
            y = qMax( (maxHeight - boxHeight) / 2, 0);
        }
    }

    // create a rectangle relative to image
    QRect drawRect( x, y, fontWidth + 2 * borderWidth + 2 * spacing, fontHeight + 2 * borderWidth  + 2 * spacing);

    // create a rectangle relative to textArea, excluding the border
    QRect textAreaBackgroundRect( borderWidth, borderWidth, fontWidth + 2 * spacing, fontHeight + 2 * spacing);

    // create a rectangle relative to textArea, excluding the border and spacing
    QRect textAreaTextRect( borderWidth + spacing, borderWidth + spacing, fontWidth, fontHeight );

    // create a rectangle relative to textArea, including the border,
    // for drawing the rectangle, taking into account that the width of the QPen goes in and out in equal parts
    QRect textAreaDrawRect( borderWidth / 2, borderWidth / 2, fontWidth + borderWidth + 2 * spacing, fontHeight + borderWidth + 2 * spacing );

    // cut out the text area
    DImg textArea = image->copy(drawRect);

    if (textArea.isNull())
    {
        return QRect();
    }

    // compose semi-transparent background over textArea
    DColorComposer* composer = DColorComposer::getComposer(DColorComposer::PorterDuffNone);

    if (transparentBackground)
    {
        DImg transparentLayer(textAreaBackgroundRect.width(), textAreaBackgroundRect.height(), textArea.sixteenBit(), true);
        DColor transparent(backgroundColor);
        transparent.setAlpha(d->transparency);

        if (image->sixteenBit())
        {
            transparent.convertToSixteenBit();
        }

        transparentLayer.fill(transparent);
        textArea.bitBlendImage(composer, &transparentLayer, 0, 0, transparentLayer.width(), transparentLayer.height(),
                               textAreaBackgroundRect.x(), textAreaBackgroundRect.y());
    }

    DImg textNotDrawn;

    // keep an 8-bit copy of the area before the text is drawn
    if (textArea.sixteenBit())
    {
        textNotDrawn = textArea.copy();
        textNotDrawn.convertToEightBit();
    }
    else
    {
        textNotDrawn = textArea;
    }

    // We have no direct pixel access to font rendering, so now we need to use Qt/X11 for the drawing

    // convert text area to pixmap
    QPixmap pixmap;

    if (destPainter)
    {
        // We working on tool preview, deal with CM as well
        pixmap = d->iface->convertToPixmap(textNotDrawn);
    }
    else
    {
        // We working on target image. Do no apply double CM adjustment here.
        pixmap = textNotDrawn.convertToPixmap();
    }

    // render the text at unscaled font size, then scale the result
    int fontScaleWidth  = qRound(fontWidth  / fontScale);
    int fontScaleHeight = qRound(fontHeight / fontScale);

    QPixmap textPixmap(fontScaleWidth, fontScaleHeight);
    textPixmap.fill(Qt::transparent);

    QPainter tp(&textPixmap);
    tp.setOpacity((qreal)textOpacity / 100.0);
    tp.setPen(QPen(textColor, 1));
    tp.setFont(font);

    switch (textRotation)
    {
        case ROTATION_NONE:
            tp.drawText(0, 0, fontScaleWidth, fontScaleHeight, alignMode, textString);
            break;
        case ROTATION_90:
            tp.translate(fontScaleWidth, 0);
            tp.rotate(90.0);
            tp.drawText(0, 0, fontScaleHeight, fontScaleWidth, alignMode, textString);
            break;
        case ROTATION_180:
            tp.translate(fontScaleWidth, fontScaleHeight);
            tp.rotate(180.0);
            tp.drawText(0, 0, fontScaleWidth, fontScaleHeight, alignMode, textString);
            break;
        case ROTATION_270:
            tp.translate(0, fontScaleHeight);
            tp.rotate(270.0);
            tp.drawText(0, 0, fontScaleHeight, fontScaleWidth, alignMode, textString);
            break;
    }

    tp.end();

    // paint on pixmap
    QPainter p(&pixmap);

    p.drawPixmap(textAreaTextRect, textPixmap.scaled(fontWidth, fontHeight, Qt::IgnoreAspectRatio, Qt::SmoothTransformation));

    // Drawing rectangle around text.

    if (borderMode == BORDER_NORMAL)      // Decorative border using text color.
    {
        p.setPen( QPen(textColor, borderWidth, Qt::SolidLine, Qt::SquareCap, Qt::RoundJoin) ) ;
        p.drawRect(textAreaDrawRect);
    }
    else if (borderMode == BORDER_SUPPORT)  // Make simple dot line border to help user.
    {
        p.setPen(QPen(Qt::white, 1, Qt::SolidLine));
        p.drawRect(textAreaDrawRect);
        p.setPen(QPen(Qt::red, 1, Qt::DotLine));
        p.drawRect(textAreaDrawRect);
    }

    p.end();

    if (!destPainter)
    {
        // convert to QImage, then to DImg
        QImage pixmapImage = pixmap.toImage();
        DImg textDrawn(pixmapImage.width(), pixmapImage.height(), false, true, pixmapImage.bits());

        // This does not work: during the conversion, colors are altered significantly (diffs of 1 to 10 in each component),
        // so we cannot find out which pixels have actually been touched.
        /*
        // Compare the result of drawing with the previous version.
        // Set all unchanged pixels to transparent
        DColor color, ncolor;
        uchar *ptr, *nptr;
        ptr = textDrawn.bits();
        nptr = textNotDrawn.bits();
        int bytesDepth = textDrawn.bytesDepth();
        int numPixels = textDrawn.width() * textDrawn.height();
        for (int i = 0; i < numPixels; ++i, ptr+= bytesDepth, nptr += bytesDepth)
        {
            color.setColor(ptr, false);
            ncolor.setColor(nptr, false);
            if ( color.red()   == ncolor.red() &&
                color.green() == ncolor.green() &&
                color.blue()  == ncolor.blue())
            {
                color.setAlpha(0);
                color.setPixel(ptr);
            }
        }
        // convert to 16 bit if needed
        */
        textDrawn.convertToDepthOfImage(&textArea);

        // now compose to original: only pixels affected by drawing text and border are changed, not whole area
        textArea.bitBlendImage(composer, &textDrawn, 0, 0, textDrawn.width(), textDrawn.height(), 0, 0);

        // copy result to original image
        image->bitBltImage(&textArea, drawRect.x(), drawRect.y());
    }
    else
    {
        destPainter->drawPixmap(drawRect.x(), drawRect.y(), pixmap, 0, 0, pixmap.width(), pixmap.height());
    }

    delete composer;

    return drawRect;
}
// return average value of fft power iterating over r values QVector<float> FFT::extract(const QImage &data, const int &x, const int &y) const { // the array created here might be bigger than image size - it has to be // a square of side length 2^n const int w = mSize.width(); const int h = mSize.height(); const int dw = data.width(); const int dh = data.height(); ComplexArray *ca = new ComplexArray(boost::extents[w][h]); // fill only the data that exists in the image for (int ay = 0; ay < h; ay++) { int py = (y - h / 2 + ay) + dh; while (py >= dh) { py -= dh; } const float wY = mWindow.at(ay); #ifdef HAS_IMAGE_CONSTSCANLINE const uchar *d = data.constScanLine(py); #else const uchar *d = data.scanLine(py); #endif for (int ax = 0; ax < w; ax++) { const float wX = mWindow.at(ax); int px = (x - w / 2 + ax) + dw; while (px >= dw) { px -= dw; } (*ca)[ay][ax] = Complex(d[px] * wX * wY, 0); } } perform(ca); float minm = 0; float maxm = 0; for (int j = 0; j < w; j++) { for (int k = 0; k < h; k++) { float magnitude = (*ca)[j][k].abs(); if (magnitude > maxm) { maxm = magnitude; } else if (magnitude < minm) { minm = magnitude; } } } float c = 255.0 / log(1.0 + abs(maxm - minm)); QVector<float> result; const int maxR = w / 2; #ifdef HAS_VECTOR_RESERVE result.reserve(maxR); #endif for (int r = 1; r < maxR; r++) { int count = 0; float sum = 0; for (int i = 0; i < 360; i += 5) { const int x = r * cos(i) + maxR; const int y = r * sin(i) + maxR; float p = (*ca)[x][y].abs(); p = c * log(1.0 + p); sum += p; count++; } result.append(sum / count); } delete ca; return result; }
// Draws a cached pixmap with shadow
//
// Renders `icon` at rect's size with a blurred, `color`-tinted drop shadow
// (blur radius `radius`, icon shifted by `offset` inside the composition)
// and caches the composed result in QPixmapCache, keyed on the icon's cache
// key, mode and height, so repeated paints are cheap.
void StyleHelper::drawIconWithShadow(const QIcon &icon, const QRect &rect, QPainter *p, QIcon::Mode iconMode, int radius, const QColor &color, const QPoint &offset)
{
    QPixmap cache;
    QString pixmapName = QString("icon %0 %1 %2").arg(icon.cacheKey()).arg(iconMode).arg(rect.height());

    if (!QPixmapCache::find(pixmapName, cache)) {
        // Cache miss: compose icon + shadow into `cache` (icon size plus a
        // `radius` border on every side for the blur to bleed into).
        QPixmap px = icon.pixmap(rect.size());
        cache = QPixmap(px.size() + QSize(radius * 2, radius * 2));
        cache.fill(Qt::transparent);

        QPainter cachePainter(&cache);
        if (iconMode == QIcon::Disabled) {
            // Desaturate: replace each pixel with its gray intensity while
            // preserving alpha. (`char` may be signed on this platform, but
            // qRgba truncates each component to 8 bits, so values > 127
            // still come out correctly.)
            QImage im = px.toImage().convertToFormat(QImage::Format_ARGB32);
            for (int y=0; y<im.height(); ++y) {
                QRgb *scanLine = (QRgb*)im.scanLine(y);
                for (int x=0; x<im.width(); ++x) {
                    QRgb pixel = *scanLine;
                    char intensity = qGray(pixel);
                    *scanLine = qRgba(intensity, intensity, intensity, qAlpha(pixel));
                    ++scanLine;
                }
            }
            px = QPixmap::fromImage(im);
        }

        // Draw shadow: paint the icon into a transparent scratch image.
        QImage tmp(px.size() + QSize(radius * 2, radius * 2 + 1), QImage::Format_ARGB32_Premultiplied);
        tmp.fill(Qt::transparent);
        QPainter tmpPainter(&tmp);
        tmpPainter.setCompositionMode(QPainter::CompositionMode_Source);
        tmpPainter.drawPixmap(QPoint(radius, radius), px);
        tmpPainter.end();

        // blur the alpha channel
        QImage blurred(tmp.size(), QImage::Format_ARGB32_Premultiplied);
        blurred.fill(Qt::transparent);
        QPainter blurPainter(&blurred);
        qt_blurImage(&blurPainter, tmp, radius, false, true);
        blurPainter.end();
        tmp = blurred;

        // blacken the image...
        // NOTE(review): this SourceIn fill is applied twice. With a fully
        // opaque `color` the second pass changes nothing; with a translucent
        // color it deepens the shadow. Confirm it is intentional before
        // deduplicating.
        tmpPainter.begin(&tmp);
        tmpPainter.setCompositionMode(QPainter::CompositionMode_SourceIn);
        tmpPainter.fillRect(tmp.rect(), color);
        tmpPainter.end();

        tmpPainter.begin(&tmp);
        tmpPainter.setCompositionMode(QPainter::CompositionMode_SourceIn);
        tmpPainter.fillRect(tmp.rect(), color);
        tmpPainter.end();

        // draw the blurred drop shadow...
        cachePainter.drawImage(QRect(0, 0, cache.rect().width(), cache.rect().height()), tmp);

        // Draw the actual pixmap...
        cachePainter.drawPixmap(QPoint(radius, radius) + offset, px);
        QPixmapCache::insert(pixmapName, cache);
    }

    // Center the cached composition on the target rect; `offset` shifted the
    // icon inside the cached image, so subtract it again when placing.
    QRect targetRect = cache.rect();
    targetRect.moveCenter(rect.center());
    p->drawPixmap(targetRect.topLeft() - offset, cache);
}
void ORPrintRender::renderPage(ORODocument * pDocument, int pageNb, QPainter *painter, qreal xDpi, qreal yDpi, QSize margins, int printResolution) { OROPage * p = pDocument->page(pageNb); if(((!p->backgroundImage().isNull()) && (p->backgroundOpacity() != 0)) || ((!p->watermarkText().isEmpty()) && (p->watermarkOpacity() != 0))) { // Do some simple processing used by both Background and Watermark const int resolution = 100; bool doBgWm = false; int printMarginWidth = margins.width(); int printMarginHeight = margins.height(); QString pageSize = pDocument->pageOptions().getPageSize(); int pageWidth = 0; int pageHeight = 0; if(pageSize == "Custom") { // if this is custom sized sheet of paper we will just use those values pageWidth = (int)(pDocument->pageOptions().getCustomWidth() * resolution); pageHeight = (int)(pDocument->pageOptions().getCustomHeight() * resolution); } else { // lookup the correct size information for the specified size paper PageSizeInfo pi = PageSizeInfo::getByName(pageSize); if(!pi.isNull()) { pageWidth = (int)((pi.width() / 100.0) * resolution); pageHeight = (int)((pi.height() / 100.0) * resolution); } } if(!pDocument->pageOptions().isPortrait()) { int tmp = pageWidth; pageWidth = pageHeight; pageHeight = tmp; } if(pageWidth < 1 || pageHeight < 1) { // whoops we couldn't find it.... we will use the values from the painter // and add in the margins of the printer to get what should be the correct // size of the sheet of paper we are printing to. 
pageWidth = (int)(((painter->viewport().width() + printMarginWidth + printMarginWidth) / xDpi) * resolution); pageHeight = (int)(((painter->viewport().height() + printMarginHeight + printMarginHeight) / yDpi) * resolution); } QImage image = QImage(pageWidth, pageHeight, QImage::Format_RGB32); QPainter gPainter; if(gPainter.begin(&image)) gPainter.fillRect(gPainter.viewport(), QColor(Qt::white)); // Render Background if((!p->backgroundImage().isNull()) && (p->backgroundOpacity() != 0)) { doBgWm = true; QPointF ps = p->backgroundPosition(); QSizeF sz = p->backgroundSize(); QRectF rc = QRectF(ps.x() * resolution, ps.y() * resolution, sz.width() * resolution, sz.height() * resolution); renderBackground(image, p->backgroundImage(), rc.toRect(), p->backgroundScale(), p->backgroundScaleMode(), p->backgroundAlign(), p->backgroundOpacity()); } // Render Watermark if((!p->watermarkText().isEmpty()) && (p->watermarkOpacity() != 0)) { doBgWm = true; renderWatermark(image, p->watermarkText(), p->watermarkFont(), p->watermarkOpacity(), ((pDocument->pageOptions().getMarginLeft() + pDocument->pageOptions().getMarginRight()) * resolution), ((pDocument->pageOptions().getMarginTop() + pDocument->pageOptions().getMarginBottom()) * resolution), pDocument->pageOptions().getMarginLeft() * resolution, pDocument->pageOptions().getMarginTop() * resolution); } if(doBgWm) { QRectF target(-printMarginWidth, -printMarginHeight, (painter->viewport().width() + printMarginWidth + printMarginWidth), (painter->viewport().height() + printMarginHeight + printMarginHeight)); QRectF source(0, 0, image.width(), image.height()); painter->drawImage(target, image, source); } } // Render Page Objects for(int i = 0; i < p->primitives(); i++) { OROPrimitive * prim = p->primitive(i); QPen pen(prim->pen()); painter->save(); painter->setPen(pen); painter->setBrush(prim->brush()); QPointF ps = prim->position(); if(prim->rotationAxis().isNull()) { painter->translate(ps.x() * xDpi, ps.y() * yDpi); 
painter->rotate(prim->rotation()); // rotation around the origin of the primitive (not the center) } else { // rotation around the defined axis qreal xRot = prim->rotationAxis().x(); qreal yRot = prim->rotationAxis().y(); painter->translate(xRot * xDpi, yRot * yDpi); painter->rotate(prim->rotation()); painter->translate((ps.x() - xRot) * xDpi, (ps.y() - yRot) * yDpi); } if(prim->type() == OROTextBox::TextBox) { OROTextBox * tb = (OROTextBox*)prim; painter->setFont(tb->font()); QSizeF sz = tb->size(); QRectF rc = QRectF(0, 0, sz.width() * xDpi, sz.height() * yDpi); painter->drawText(rc, tb->flags(), tb->text()); } else if(prim->type() == OROLine::Line) { OROLine * ln = (OROLine*)prim; QPointF s = ln->startPoint(); QPointF e = ln->endPoint(); pen.setWidthF((ln->weight() / 100) * printResolution); painter->setPen(pen); painter->drawLine(QLineF(0, 0, (e.x()-s.x()) * xDpi, (e.y()-s.y()) * yDpi)); } else if(prim->type() == OROImage::Image) { OROImage * im = (OROImage*)prim; QSizeF sz = im->size(); QRectF rc = QRectF(0, 0, sz.width() * xDpi, sz.height() * yDpi); QImage img = im->image(); if(im->scaled()) img = img.scaled(rc.size().toSize(), (Qt::AspectRatioMode)im->aspectRatioMode(), (Qt::TransformationMode)im->transformationMode()); QRectF sr = QRectF(QPointF(0.0, 0.0), rc.size().boundedTo(img.size())); painter->drawImage(rc.topLeft(), img, sr); } else if(prim->type() == ORORect::Rect) { ORORect * re = (ORORect*)prim; QSizeF sz = re->size(); QRectF rc = QRectF(0, 0, sz.width() * xDpi, sz.height() * yDpi); pen.setWidthF((re->weight() / 100) * printResolution); painter->setPen(pen); painter->drawRect(rc); } else { qDebug("unrecognized primitive type"); } painter->restore(); } }
/*
 * Writes `sourceImage` to `out` in binary PBM/PGM/PPM format.
 *
 * `sourceFormat` selects the flavor: "pbm" (bitmap), "pgm" (grayscale) or
 * anything else for "ppm" (color); a trailing "RAW" suffix is ignored. The
 * image is first converted to a depth the writer can handle (1, 8 or 32
 * bits). Returns false on a short write or on an unsupported depth.
 */
static bool write_pbm_image(QIODevice *out, const QImage &sourceImage, const QByteArray &sourceFormat)
{
    QByteArray str;
    QImage image = sourceImage;
    QByteArray format = sourceFormat;
    format = format.left(3);                        // ignore RAW part
    bool gray = format == "pgm";

    if (format == "pbm") {
        image = image.convertToFormat(QImage::Format_Mono);
    } else if (image.depth() == 1) {
        image = image.convertToFormat(QImage::Format_Indexed8);
    } else {
        // Normalize the packed RGB formats to a 32-bit layout.
        switch (image.format()) {
        case QImage::Format_RGB16:
        case QImage::Format_RGB666:
        case QImage::Format_RGB555:
        case QImage::Format_RGB888:
        case QImage::Format_RGB444:
            image = image.convertToFormat(QImage::Format_RGB32);
            break;
        case QImage::Format_ARGB8565_Premultiplied:
        case QImage::Format_ARGB6666_Premultiplied:
        case QImage::Format_ARGB8555_Premultiplied:
        case QImage::Format_ARGB4444_Premultiplied:
            image = image.convertToFormat(QImage::Format_ARGB32);
            break;
        default:
            break;
        }
    }

    if (image.depth() == 1 && image.colorCount() == 2) {
        if (qGray(image.color(0)) < qGray(image.color(1))) {
            // 0=dark/black, 1=light/white - invert
            image.detach();
            for (int y=0; y<image.height(); y++) {
                uchar *p = image.scanLine(y);
                uchar *end = p + image.bytesPerLine();
                while (p < end)
                    *p++ ^= 0xff;
            }
        }
    }

    uint w = image.width();
    uint h = image.height();

    // Header skeleton; the magic number digit ('4'/'5'/'6') is inserted
    // after the 'P' below once the output depth is known.
    str = "P\n";
    str += QByteArray::number(w);
    str += ' ';
    str += QByteArray::number(h);
    str += '\n';

    switch (image.depth()) {
    case 1: {
            // P4: packed bitmap, 8 pixels per byte.
            str.insert(1, '4');
            if (out->write(str, str.length()) != str.length())
                return false;
            w = (w+7)/8;
            for (uint y=0; y<h; y++) {
                uchar* line = image.scanLine(y);
                if (w != (uint)out->write((char*)line, w))
                    return false;
            }
        }
        break;

    case 8: {
            // P5/P6: indexed image resolved through its color table.
            str.insert(1, gray ? '5' : '6');
            str.append("255\n");
            if (out->write(str, str.length()) != str.length())
                return false;
            QVector<QRgb> color = image.colorTable();
            uint bpl = w*(gray ? 1 : 3);
            uchar *buf = new uchar[bpl];
            for (uint y=0; y<h; y++) {
                uchar *b = image.scanLine(y);
                uchar *p = buf;
                uchar *end = buf+bpl;
                if (gray) {
                    while (p < end) {
                        uchar g = (uchar)qGray(color[*b++]);
                        *p++ = g;
                    }
                } else {
                    while (p < end) {
                        QRgb rgb = color[*b++];
                        *p++ = qRed(rgb);
                        *p++ = qGreen(rgb);
                        *p++ = qBlue(rgb);
                    }
                }
                if (bpl != (uint)out->write((char*)buf, bpl)) {
                    delete [] buf;          // BUG FIX: buffer leaked on short write
                    return false;
                }
            }
            delete [] buf;
        }
        break;

    case 32: {
            // P5/P6: truecolor image read directly from the scanlines.
            str.insert(1, gray ? '5' : '6');
            str.append("255\n");
            if (out->write(str, str.length()) != str.length())
                return false;
            uint bpl = w*(gray ? 1 : 3);
            uchar *buf = new uchar[bpl];
            for (uint y=0; y<h; y++) {
                QRgb *b = (QRgb*)image.scanLine(y);
                uchar *p = buf;
                uchar *end = buf+bpl;
                if (gray) {
                    while (p < end) {
                        uchar g = (uchar)qGray(*b++);
                        *p++ = g;
                    }
                } else {
                    while (p < end) {
                        QRgb rgb = *b++;
                        *p++ = qRed(rgb);
                        *p++ = qGreen(rgb);
                        *p++ = qBlue(rgb);
                    }
                }
                if (bpl != (uint)out->write((char*)buf, bpl)) {
                    delete [] buf;          // BUG FIX: buffer leaked on short write
                    return false;
                }
            }
            delete [] buf;
        }
        break;

    default:
        return false;
    }

    return true;
}
/*
 * Builds a static AviSynth video frame from a QImage.
 *
 * The pixel type is derived from the image format: the ARGB32 variants map
 * to BGR32, everything else to BGR24. The image is flipped vertically before
 * blitting. NOTE(review): for the BGR24 case the QImage is assumed to
 * already be laid out with 3 bytes per pixel matching the frame - confirm
 * with the callers.
 */
StillImage::StillImage(const VideoInfo &backgroundVideoInfo, const QImage &image, IScriptEnvironment* env)
    : m_videoInfo(backgroundVideoInfo)
{
    m_videoInfo.pixel_type = (image.format() == QImage::Format_ARGB32
                              || image.format() == QImage::Format_ARGB32_Premultiplied)
        ? VideoInfo::CS_BGR32
        : VideoInfo::CS_BGR24;
    m_frame = env->NewVideoFrame(m_videoInfo);
    unsigned char* frameBits = m_frame->GetWritePtr();

    // Keep the flipped copy in a named variable so the source pointer
    // clearly stays valid for the duration of the blit.
    QImage flipped = image.mirrored(false, true);

    // BUG FIX: the source-pitch argument must describe the QImage rows
    // (bytesPerLine()), not the destination frame's pitch - the two differ
    // whenever the frame rows are padded for alignment, which previously
    // made BitBlt step through the source with the wrong stride.
    env->BitBlt(frameBits, m_frame->GetPitch(),
                flipped.bits(), flipped.bytesPerLine(),
                image.bytesPerLine(), image.height());
}
/** * Program entrypoint. */ int main(int argc, char **argv) { QApplication app(argc, argv); QWidget window; unsigned char *raw_data; unsigned char *buf; int *color_freqs; int *unique_pals; int unique_pals_count; int *pal_indexes; unsigned char *pal_data; int *pal_sizes; unsigned char *tile_data; int tile_data_sz; unsigned char *attrib_data; int attrib_data_sz; const char *input_filename = 0; const char *chr_output_filename = 0; const char *pal_output_filename = 0; const char *attrib_output_filename = 0; int width = -1; int height = -1; /* Process arguments. */ if (!process_args(argv, &input_filename, &chr_output_filename, &pal_output_filename, &attrib_output_filename, &width, &height)) { return(-1); } /* check arguments */ if (!input_filename) { fprintf(stderr, "img2nes: no filename given\n"); return(-1); } #if 0 if (width == -1 || height == -1) { fprintf(stderr, "img2nes: please specify width and height of image\n"); return(-1); } if (width % 16 || height % 16) { fprintf(stderr, "img2nes: width and height must be multiples of 16\n"); return(-1); } #endif QImage originalImage = QImage(input_filename).convertToFormat(QImage::Format_RGB32); if (originalImage.isNull()) { fprintf(stderr, "img2nes: unable to read image '%s'\n", input_filename); return(-1); } width = originalImage.width(); height = originalImage.height(); /* allocate some buffers */ raw_data = (unsigned char *)malloc(width * height * 3); buf = (unsigned char *)malloc(width * height); tile_data_sz = (height / 8) * (width / 8) * 16 * sizeof(unsigned char); tile_data = (unsigned char *)malloc(tile_data_sz); if (!raw_data || !buf || !tile_data) { fprintf(stderr, "img2nes: failed to allocate memory for image data\n"); return(0); } #if 0 /* read input image */ if (!read_image(input_filename, width, height, /*has_alpha=*/0, raw_data)) return(-1); #endif /* convert raw RGB to nes palette indices */ convert_to_nes_colors(originalImage.bits(), width, height, /*has_alpha=*/1, buf); QImage originalNesImage = 
to_qimage(buf, width, height); QImage reducedColorsImage; QImage reducedPalettesImage; /* that was the easy part */ { int i, j; int cw = width / 16; int ch = height / 16; color_freqs = (int*)calloc(cw * ch, 64 * sizeof(int)); pal_data = (unsigned char *)malloc(cw * ch * 4 * sizeof(unsigned char)); pal_sizes = (int*)malloc(cw * ch * sizeof(int)); /* process all 16x16 blocks */ for (i = 0; i < ch; ++i) { for (j = 0; j < cw; ++j) { int num_colors; unsigned char pal[64]; int *f = &color_freqs[(i*cw+j)*64]; /* count frequency of colors */ count_nes_color_frequencies(&buf[i*16*width+(j*16)], 16, 16, width, f); f[0x0F] = INT_MAX; /* record the colors that are actually used */ nes_palette_from_color_frequencies(f, pal, &num_colors); #if 0 { printf("palette: "); for (int k = 0; k < num_colors; ++k) { printf("%.2X ", pal[k]); } printf("\n"); } #endif /* reduce number of colors if necessary */ reduce_colors(&buf[i*16*width+(j*16)], 16, 16, width, f, pal, &num_colors); /* sort the palette so that different palettes can be easily compared */ qsort(pal, num_colors, sizeof(unsigned char), compare_nes_colors); #if 0 { printf("sorted palette: "); for (int k = 0; k < num_colors; ++k) { printf("%.2X ", pal[k]); } printf("\n"); } #endif /* store the final palette for this 16x16 block */ assert(num_colors <= 4); memcpy(&pal_data[(i*cw+j)*4], pal, num_colors * sizeof(unsigned char)); pal_sizes[i*cw+j] = num_colors; } } reducedColorsImage = to_qimage(buf, width, height); /* now each 16x16 block uses max 4 unique colors the next step is to reduce the number of 4-color palettes, if necessary */ unique_pals = (int*)malloc(cw * ch * sizeof(int)); unique_pals_count = 0; pal_indexes = (int*)malloc(cw * ch * sizeof(int)); /* find unique palettes and record their usage */ find_unique_palettes(pal_data, cw * ch, pal_sizes, unique_pals, &unique_pals_count, pal_indexes); /* printf("unique palettes: %d\n", unique_pals_count); */ /* reduce number of palettes used if necessary */ reduce_palettes(buf, 
width, height, pal_data, pal_sizes, unique_pals, &unique_pals_count, pal_indexes); reducedPalettesImage = to_qimage(buf, width, height); /* encode tiles */ encode_nes_tiles(buf, width, height, pal_data, pal_sizes, unique_pals, pal_indexes, tile_data); /* write tiles */ write_tiles(chr_output_filename, tile_data, tile_data_sz); /* write palette data */ write_palettes(pal_output_filename, pal_data, pal_sizes, unique_pals, unique_pals_count); /* encode attribute data */ attrib_data_sz = (cw/2)*(ch/2); attrib_data = (unsigned char *)malloc(attrib_data_sz); encode_nes_attributes(cw, ch, pal_indexes, attrib_data); /* write attribute data */ write_attributes(attrib_output_filename, attrib_data, attrib_data_sz); } QHBoxLayout *box = new QHBoxLayout(&window); box->addWidget(create_label(originalImage)); box->addWidget(create_label(originalNesImage)); box->addWidget(create_label(reducedColorsImage)); box->addWidget(create_label(reducedPalettesImage)); window.show(); app.exec(); /* Cleanup */ free(raw_data); free(buf); free(tile_data); free(pal_data); free(pal_sizes); free(unique_pals); free(pal_indexes); free(color_freqs); free(attrib_data); /* All done. */ return 0; }
// Populates the season widget for the given show and season.
//
// For each artwork slot (poster, backdrop, banner) the lookup order is:
// cached image bytes on the show, then the media center's image file on
// disk, then a placeholder. Controls are disabled while downloads for the
// show are still running.
void TvShowWidgetSeason::setSeason(TvShow *show, int season)
{
    onClear();
    m_show = show;
    m_season = season;
    emit sigSetActionSearchEnabled(false, WidgetTvShows);
    ui->title->setText(QString(show->name()) + " - " + tr("Season %1").arg(season));

    // --- poster -----------------------------------------------------------
    if (!m_show->seasonPosterImage(season).isNull()) {
        QImage posterImg = QImage::fromData(m_show->seasonPosterImage(season));
        ui->poster->setPixmap(QPixmap::fromImage(posterImg).scaledToWidth(200, Qt::SmoothTransformation));
        ui->posterResolution->setText(QString("%1x%2").arg(posterImg.width()).arg(posterImg.height()));
        ui->buttonPreviewPoster->setEnabled(true);
        m_currentPoster = posterImg;
    } else if (!Manager::instance()->mediaCenterInterfaceTvShow()->seasonPosterImageName(m_show, season).isEmpty()) {
        QPixmap posterPix(Manager::instance()->mediaCenterInterfaceTvShow()->seasonPosterImageName(m_show, season));
        ui->poster->setPixmap(posterPix.scaledToWidth(200, Qt::SmoothTransformation));
        ui->posterResolution->setText(QString("%1x%2").arg(posterPix.width()).arg(posterPix.height()));
        ui->buttonPreviewPoster->setEnabled(true);
        m_currentPoster = posterPix.toImage();
    } else {
        ui->poster->setPixmap(QPixmap(":/img/poster.png"));
        ui->posterResolution->clear();
        ui->buttonPreviewPoster->setEnabled(false);
    }

    // --- backdrop ---------------------------------------------------------
    if (!m_show->seasonBackdropImage(season).isNull()) {
        QImage backdropImg = QImage::fromData(m_show->seasonBackdropImage(season));
        ui->backdrop->setPixmap(QPixmap::fromImage(backdropImg).scaledToWidth(200, Qt::SmoothTransformation));
        ui->backdropResolution->setText(QString("%1x%2").arg(backdropImg.width()).arg(backdropImg.height()));
        ui->buttonPreviewBackdrop->setEnabled(true);
        m_currentBackdrop = backdropImg;
    } else if (!Manager::instance()->mediaCenterInterfaceTvShow()->seasonBackdropImageName(m_show, season).isEmpty()) {
        QPixmap backdropPix(Manager::instance()->mediaCenterInterfaceTvShow()->seasonBackdropImageName(m_show, season));
        ui->backdrop->setPixmap(backdropPix.scaledToWidth(200, Qt::SmoothTransformation));
        ui->backdropResolution->setText(QString("%1x%2").arg(backdropPix.width()).arg(backdropPix.height()));
        ui->buttonPreviewBackdrop->setEnabled(true);
        m_currentBackdrop = backdropPix.toImage();
    } else {
        ui->backdrop->setPixmap(QPixmap(":/img/missing_art.png").scaled(64, 64, Qt::KeepAspectRatio, Qt::SmoothTransformation));
        ui->backdropResolution->clear();
        ui->buttonPreviewBackdrop->setEnabled(false);
    }

    // --- banner -----------------------------------------------------------
    if (!m_show->seasonBannerImage(season).isNull()) {
        QImage bannerImg = QImage::fromData(m_show->seasonBannerImage(season));
        ui->banner->setPixmap(QPixmap::fromImage(bannerImg).scaledToWidth(200, Qt::SmoothTransformation));
        ui->bannerResolution->setText(QString("%1x%2").arg(bannerImg.width()).arg(bannerImg.height()));
        ui->buttonPreviewBanner->setEnabled(true);
        m_currentBanner = bannerImg;
    } else if (!Manager::instance()->mediaCenterInterfaceTvShow()->seasonBannerImageName(m_show, season).isEmpty()) {
        QPixmap bannerPix(Manager::instance()->mediaCenterInterfaceTvShow()->seasonBannerImageName(m_show, season));
        ui->banner->setPixmap(bannerPix.scaledToWidth(200, Qt::SmoothTransformation));
        ui->bannerResolution->setText(QString("%1x%2").arg(bannerPix.width()).arg(bannerPix.height()));
        ui->buttonPreviewBanner->setEnabled(true);
        m_currentBanner = bannerPix.toImage();
    } else {
        ui->banner->setPixmap(QPixmap(":/img/missing_art_small.png"));
        ui->bannerResolution->clear();
        ui->buttonPreviewBanner->setEnabled(false);
    }

    onSetEnabled(!show->downloadsInProgress());
    emit sigSetActionSaveEnabled(!show->downloadsInProgress(), WidgetTvShows);
}
/**
 * Loads the thumbnail from the metadata.
 * If no thumbnail is embedded (or a full/new thumbnail is forced), the whole
 * image is loaded downscaled via QImageReader in a fast manner.
 * @param filePath the file to be loaded
 * @param ba the file buffer (can be empty)
 * @param forceLoad the loading flag (e.g. exif only, force full, force save)
 * @param maxThumbSize the maximal thumbnail size to be loaded
 * @param minThumbSize the minimal thumbnail size to be loaded
 * @return QImage the loaded image. Null if no image could be loaded at all.
 **/
QImage DkThumbNail::computeIntern(const QString& filePath, const QSharedPointer<QByteArray> ba, int forceLoad, int maxThumbSize, int minThumbSize)
{
    DkTimer dt;
    //qDebug() << "[thumb] file: " << file.absoluteFilePath();

    // see if we can read the thumbnail from the exif data
    QImage thumb;
    DkMetaDataT metaData;
    QSharedPointer<QByteArray> baZip = QSharedPointer<QByteArray>();
#ifdef WITH_QUAZIP
    // NOTE(review): this checks the member mFile while everything else uses
    // the filePath parameter - confirm that is intentional.
    if (QFileInfo(mFile).dir().path().contains(DkZipContainer::zipMarker()))
        baZip = DkZipContainer::extractImage(DkZipContainer::decodeZipFile(filePath), DkZipContainer::decodeImageFile(filePath));
#endif
    try {
        // Prefer the zip-extracted buffer, then the caller's buffer, then
        // the file itself.
        if (baZip && !baZip->isEmpty())
            metaData.readMetaData(filePath, baZip);
        else if (!ba || ba->isEmpty())
            metaData.readMetaData(filePath);
        else
            metaData.readMetaData(filePath, ba);

        // read the full image if we want to create new thumbnails
        if (forceLoad != force_save_thumb)
            thumb = metaData.getThumbnail();
    }
    catch(...) {
        // do nothing - we'll load the full file
    }
    removeBlackBorder(thumb);

    // Exif-only mode: give up if the metadata had no usable thumbnail.
    if (thumb.isNull() && forceLoad == force_exif_thumb)
        return QImage();

    bool exifThumb = !thumb.isNull();
    int orientation = metaData.getOrientation();
    int imgW = thumb.width();
    int imgH = thumb.height();
    int tS = minThumbSize;

    // as found at: http://olliwang.com/2010/01/30/creating-thumbnail-images-in-qt/
    // Resolve symlinks so the reader opens the real file.
    QFileInfo fInfo(filePath);
    QString lFilePath = fInfo.isSymLink() ? fInfo.symLinkTarget() : filePath;
    fInfo = lFilePath;

    QImageReader* imageReader = 0;
    if (!ba || ba->isEmpty())
        imageReader = new QImageReader(lFilePath);
    else {
        // NOTE(review): `buffer` goes out of scope at the end of this else
        // block while imageReader keeps a pointer to it - looks like a
        // dangling-device bug. Also, setData(ba->data()) passes a char*,
        // which would stop at the first NUL byte of binary data - confirm
        // whether setData(*ba) was intended.
        QBuffer buffer;
        buffer.setData(ba->data());
        buffer.open(QIODevice::ReadOnly);
        imageReader = new QImageReader(&buffer, fInfo.suffix().toStdString().c_str());
        buffer.close();
    }

    // If the exif thumb is missing or too small, take the dimensions from
    // the reader (reads the header only).
    if (thumb.isNull() || (thumb.width() < tS && thumb.height() < tS)) {
        imgW = imageReader->size().width();     // crash detected: unhandled exception at 0x66850E9A (msvcr110d.dll) in nomacs.exe: 0xC0000005: Access violation reading location 0x0000C788.
        imgH = imageReader->size().height();    // locks the file!
    }

    // Clamp the target size to maxThumbSize while keeping the aspect ratio.
    if (forceLoad != DkThumbNailT::force_exif_thumb && (imgW > maxThumbSize || imgH > maxThumbSize)) {
        if (imgW > imgH) {
            imgH = qRound((float)maxThumbSize / imgW * imgH);
            imgW = maxThumbSize;
        }
        else if (imgW < imgH) {
            imgW = qRound((float)maxThumbSize / imgH * imgW);
            imgH = maxThumbSize;
        }
        else {
            imgW = maxThumbSize;
            imgH = maxThumbSize;
        }
    }

    bool rescale = forceLoad == force_save_thumb;

    // NOTE(review): && binds tighter than ||, so the middle term is
    // (width < tS && height < tS) - the "// braces" marker suggests this was
    // already flagged; confirm the intended grouping.
    if (forceLoad != force_exif_thumb && (thumb.isNull() || thumb.width() < tS && thumb.height() < tS || forceLoad == force_full_thumb || forceLoad == force_save_thumb)) { // braces

        // flip size if the image is rotated by 90°
        if (metaData.isTiff() && abs(orientation) == 90) {
            int tmpW = imgW;
            imgW = imgH;
            imgH = tmpW;
            qDebug() << "EXIF size is flipped...";
        }

        QSize initialSize = imageReader->size();    // NOTE(review): unused

        // Let the reader decode directly to the target size (fast path).
        imageReader->setScaledSize(QSize(imgW, imgH));
        thumb = imageReader->read();    // try to read the image

        // Fallback: full load through DkBasicLoader if the reader failed.
        if (thumb.isNull()) {
            DkBasicLoader loader;
            if (baZip && !baZip->isEmpty()) {
                if (loader.loadGeneral(lFilePath, baZip, true, true))
                    thumb = loader.image();
            }
            else {
                if (loader.loadGeneral(lFilePath, ba, true, true))
                    thumb = loader.image();
            }
        }

        // the image is not scaled correctly yet
        if (rescale && !thumb.isNull() && (imgW == -1 || imgH == -1)) {
            imgW = thumb.width();
            imgH = thumb.height();
            if (imgW > maxThumbSize || imgH > maxThumbSize) {
                if (imgW > imgH) {
                    imgH = qRound((float)maxThumbSize / imgW * imgH);
                    imgW = maxThumbSize;
                }
                else if (imgW < imgH) {
                    imgW = qRound((float)maxThumbSize / imgH * imgW);
                    imgH = maxThumbSize;
                }
                else {
                    imgW = maxThumbSize;
                    imgH = maxThumbSize;
                }
            }
            // Two-step scale: fast 2x downscale first, then a smooth pass.
            thumb = thumb.scaled(QSize(imgW*2, imgH*2), Qt::KeepAspectRatio, Qt::FastTransformation);
            thumb = thumb.scaled(QSize(imgW, imgH), Qt::KeepAspectRatio, Qt::SmoothTransformation);
        }

        // is there a nice solution to do so??
        imageReader->setFileName("josef");  // image reader locks the file -> but there should not be one so we just set it to another file...
    }
    else if (rescale) {
        thumb = thumb.scaled(QSize(imgW, imgH), Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
    }

    if (imageReader)
        delete imageReader;

    // Apply the EXIF rotation for formats that store orientation.
    if (orientation != -1 && orientation != 0 && (metaData.isJpg() || metaData.isRaw())) {
        QTransform rotationMatrix;
        rotationMatrix.rotate((double)orientation);
        thumb = thumb.transformed(rotationMatrix);
    }

    // save the thumbnail if the caller either forces it, or the save thumb is requested and the image did not have any before
    if (rescale || (forceLoad == save_thumb && !exifThumb)) {
        try {
            // Store the thumb un-rotated, since the orientation tag stays
            // in the metadata.
            QImage sThumb = thumb.copy();
            if (orientation != -1 && orientation != 0) {
                QTransform rotationMatrix;
                rotationMatrix.rotate(-(double)orientation);
                sThumb = sThumb.transformed(rotationMatrix);
            }
            metaData.setThumbnail(sThumb);
            if (!ba || ba->isEmpty())
                metaData.saveMetaData(lFilePath);
            else
                metaData.saveMetaData(lFilePath, ba);
            qDebug() << "[thumb] saved to exif data";
        }
        catch(...) {
            qDebug() << "Sorry, I could not save the metadata";
        }
    }

    if (!thumb.isNull())
        qDebug() << "[thumb] " << fInfo.fileName() << "(" << thumb.width() << " x " << thumb.height() << ") loaded in: " << dt.getTotal() << ((exifThumb) ? " from EXIV" : " from File");

    return thumb;
}
/* Main method. * Opening file. * Creating binary pixel array. * Counting the number of silhouettes on the image by calling main logic method. * Printing the binary array into the file if debug == true. */ int main(){ /* Open image */ if (FILENAME == " ") { cout << " Please enter the path to your file: "; getline(cin, FILENAME); } if (debug) { cout << "d: Initiated opening the image file." << endl; } QImage* image = loadImage(); if (debug) { cout << "d: Image file successfully open." << endl; } if(debug){ cout << "d: Image width: " << image->width()<<endl; cout << "d: Image height: " << image->height()<<endl; } int imgHeight = image->height(); int imgWidth = image->width(); /* Calculating the MINIMUM_SILHOUETTE_SIZE given the image size */ MINIMUM_SILHOUETTE_SIZE = (imgHeight*imgWidth) * MINIMUM_SILHOUETTE_SIZE; /* Creating pixel array */ if (debug) { cout << "d: Initiated the creation of the array.." << endl; } //initializing int **pixelArray; pixelArray = new int *[imgHeight]; for (int i=0; i < imgHeight; i++) { pixelArray[i] = new int[imgWidth]; } //filling //imageBinarization(pixelArray, imgHeight, imgWidth); /// NOTE! QImage pixel( col , row ) for (int i = 0; i < imgHeight; i++) { //rows for (int j = 0; j < imgWidth; j++) { //cols /* First binarization method.*/ QColor curPixCol = image->pixel(j, i); if (curPixCol.lightness() >= LIGHTNESS) { pixelArray[i][j] = 0; } else { pixelArray[i][j] = 1; } } } if (debug) { cout << "d: Array created successfully." << endl; } /* Counting the number of objects on the image */ if (debug) { cout << "d: Initiated the counting of the number of silhouettes.." << endl; } int count = getCountOfObj(pixelArray, imgHeight, imgWidth); if (debug) { cout << "d: Counting of the number completed." << endl; } if (debug && createFile_debug) { cout << "d: cf: Initiated the writing binary array into file.." << endl; ///just to see how it looks. 
QFile file("silhouettes.txt"); QTextStream out(&file); if(file.open(QIODevice::Append)) { for (int i = 0; i < imgHeight; i++) { for (int j = 0; j < imgWidth; j++) { if (pixelArray[i][j] == 0) { out << "-"; } else { out << pixelArray[i][j]; } } out << endl; } } file.close(); cout << "d: cf: Binary array successfully written into the file.." << endl; } /* Clear memory */ if (debug) { cout << "d: Initiated the clearing the memory.." << endl; } for (int i=0; i < imgHeight; i++) { delete pixelArray[i]; } delete [] pixelArray; delete image; if (debug) { cout << "d: Memory successfully cleared.." << endl; } /* Done! */ if (debug) { cout << endl << endl; } cout << " The number of silhouettes: " << count << endl; system("pause"); return 0; }
//===========================================================================// // void ImageOperations::subsample(QImage image, QImage* ret, // // bool resize = true) // //===========================================================================// // INPUTS: image: QImage to be sub-sampled by factor 2. // // resize: flag to indicate whether the size of the image should // // be decreased by factor 2 as part of sub-sampling or // // pixel values duplicated to maintain the original size. // // OUTPUTS: QImage: Sub-sampled image of either equal or half the original // // size. // // OPERATION: The average of a pixel and its right, lower and lower-right // // neighbours is computed and used as value for the pixel(s) in // // the sub-sampled image. // //===========================================================================// void ImageOperations::subsample( const QImage& qimg, QImage& qimgRet, bool bResize) { int subs_w, subs_h; if( bResize ) { // Compute the subs_w = int(qimg.width()/2); // size of the subs_h = int(qimg.height()/2); // output image. } else { subs_w = qimg.width(); subs_h = qimg.height(); } qimgRet.create(subs_w, subs_h, 32); int col_r, col_g, col_b; for (int y=0; y<qimg.height(); y+=2) { // Traverse image for (int x=0; x<qimg.width(); x+=2) { // and compute col_r = qRed(qimg.pixel(x,y)); // average of col_g = qGreen(qimg.pixel(x,y)); // the pixel and col_b = qBlue(qimg.pixel(x,y)); // its neighbours int sum = 1; // to the right, if (x < qimg.width()-1) { // the bottom as col_r += qRed (qimg.pixel(x+1, y)); // well as the col_g += qGreen(qimg.pixel(x+1,y)); // bottom-right. col_b += qBlue (qimg.pixel(x+1,y)); // Consider image sum++; // borders. 
} if (y < qimg.height()-1) { col_r += qRed (qimg.pixel(x,y+1)); col_g += qGreen(qimg.pixel(x,y+1)); col_b += qBlue (qimg.pixel(x,y+1)); sum++; } if ((x<qimg.width()-1) && (y<qimg.height()-1)) { col_r += qRed (qimg.pixel(x+1, y+1)); col_g += qGreen(qimg.pixel(x+1, y+1)); col_b += qBlue (qimg.pixel(x+1, y+1)); sum++; } col_r /= sum; col_g /= sum; col_b /= sum; if (bResize) { // Set the output if ((int(x/2) < subs_w) && (int(y/2) < subs_h)) { // image's pixels qimgRet.setPixel(int(x/2), int(y/2), // to the computed qRgb(col_r, col_g, col_b)); // values. } } else { // In the case that qimgRet.setPixel(x, y, qRgb(col_r, col_g, // the image is col_b)); // not being re- if (x < qimg.width()-1) { // sized, four qimgRet.setPixel(x+1, y, qRgb(col_r, col_g, // pixels hold col_b)); // the same value } // (with the if (y < qimg.height()-1) { // exception of qimgRet.setPixel(x, y+1, qRgb(col_r, col_g, // the borders). col_b)); } if ((x<qimg.width()-1) && (y<qimg.height()-1)) { qimgRet.setPixel(x+1, y+1, qRgb(col_r, col_g, col_b)); } } } } }
// Builds a shadow image for the given text pixmap: every pixel inside the
// thickness margin gets the background colour with an alpha computed by the
// configured decay algorithm, clamped to the configured maximum opacity.
// Returns a 32-bit QImage with an alpha buffer enabled.
QImage KShadowEngine::makeShadow(const QPixmap &textPixmap, const QColor &bgColor)
{
    QImage result;

    // create a new image for for the shaddow
    int w = textPixmap.width();
    int h = textPixmap.height();

    // avoid calling these methods for every pixel
    int bgRed = bgColor.red();
    int bgGreen = bgColor.green();
    int bgBlue = bgColor.blue();

    int thick = m_shadowSettings->thickness() >> 1;

    // The decay algorithm and the opacity cap are loop-invariant settings;
    // read them once instead of once per pixel.
    const int algorithm = m_shadowSettings->algorithm();
    const double maxOpacity = m_shadowSettings->maxOpacity();

    double alphaShadow;

    /*
     * This is the source pixmap
     */
    QImage img = textPixmap.convertToImage().convertDepth(32);

    /*
     * Resize the image if necessary
     */
    if((result.width() != w) || (result.height() != h))
    {
        result.create(w, h, 32);
    }

    result.fill(0); // all black
    result.setAlphaBuffer(true);

    for(int i = thick; i < w - thick; i++)
    {
        for(int j = thick; j < h - thick; j++)
        {
            switch(algorithm)
            {
                case KShadowSettings::DoubleLinearDecay:
                    alphaShadow = doubleLinearDecay(img, i, j);
                    break;
                case KShadowSettings::RadialDecay:
                    alphaShadow = radialDecay(img, i, j);
                    break;
                case KShadowSettings::NoDecay:
                    alphaShadow = noDecay(img, i, j);
                    break;
                case KShadowSettings::DefaultDecay:
                default:
                    alphaShadow = defaultDecay(img, i, j);
            }

            // clamp to the configured maximum opacity
            alphaShadow = (alphaShadow > maxOpacity) ? maxOpacity : alphaShadow;

            // update the shadow's i,j pixel.
            result.setPixel(i, j, qRgba(bgRed, bgGreen, bgBlue, (int)alphaShadow));
        }
    }
    return result;
}
//===========================================================================// // void ImageOperations::edgeSobel(QImage image, QImage* ret, int thresh) // //===========================================================================// // INPUTS: image: QImage whose edges are to be detected. // // thresh: integer value above which a gradient difference will // // be classified as an edge. // // OUTPUTS: QImage: image containing the detected edges as feature points // // which are marked by a value of RGB = 000. All other regions // // of the output image have the value 255/255/255. // // OPERATION: The approximation to the two Sobel convolution matrixes is // // used which computes the gradient value of a pixel in a single // // pass. Then the passed threshold is applied to those values and // // the output image generated accordingly. // //===========================================================================// void ImageOperations::edgeSobel( const QImage& qimg, QImage& qimgRet, int nThresh ) { qimgRet = qimg.copy(); for( int y=0; y < qimg.height(); y++ ) { // Traverse image for( int x=0; x < qimg.width(); x++ ) { // and compute // grayscale int val = 0; // gradient value if( (x>0) && (y>0) ) // taking into val += qGray(qimg.pixel(x-1, y-1)); // consideration if( y>0 ) // the edges. val += 2*qGray(qimg.pixel(x, y-1)); // (later the if( (x<qimg.width()-1) && (y>0) ) // edge values val += qGray(qimg.pixel(x+1, y-1)); // will be over- if( (x>0) && (y<qimg.height()-1) ) // written by non- val -= qGray(qimg.pixel(x-1, y+1)); // features). 
if( y<qimg.height()-1 ) // val -= 2*qGray(qimg.pixel(x, y+1)); // if( (x<qimg.width()-1) && (y<qimg.height()-1) ) // val -= qGray(qimg.pixel(x+1, y+1)); // val = abs(val); // int val2 = 0; if( (x<qimg.width()-1) && (y>0) ) val2 += qGray(qimg.pixel(x+1, y-1)); if( x<qimg.width()-1 ) val2 += 2*qGray(qimg.pixel(x+1, y)); if( (x<qimg.width()-1) && (y<qimg.height()-1) ) val2 += qGray(qimg.pixel(x+1, y+1)); if( (x>0) && (y>0) ) val2 -= qGray(qimg.pixel(x-1, y-1)); if( x>0 ) val2 -= 2*qGray(qimg.pixel(x-1, y)); if( (x>0) && (y<qimg.height()-1) ) val2 -= qGray(qimg.pixel(x-1, y+1)); val2 = abs(val2); val += val2; if (val > 255) val = 255; if (nThresh == 0) { // If thresh=0, qimgRet.setPixel(x, y, qRgb(val, val, val)); // output value. } else { // Normal case: if( (val >= nThresh) && // If Threshold (x != 0) && (x != qimg.width()-1) && // surpassed and (y != 0) && (y != qimg.height()-1)) { // not on the qimgRet.setPixel(x, y, qRgb(0, 0, 0)); // border, set // feature. } else { // Else set non- qimgRet.setPixel(x, y, qRgb(255, 255, 255)); // feature. } } } } }
// Emits a C++ "image collection" source file for `project` onto the `out`
// stream: one static byte array (plus an optional colour table) per image
// in `images`, an embed_image_vec[] lookup table, a uic_findImage() helper
// and a Q3MimeSourceFactory subclass that serves the embedded images by
// name, together with the init/cleanup plumbing.
// NOTE(review): every emitted string literal is generated source text and
// must stay byte-exact.
void Ui3Reader::embed(const char *project, const QStringList &images)
{
    QString cProject = convertToCIdentifier( project );
    QStringList::ConstIterator it;

    // --- banner comment of the generated file ---
    out << "/****************************************************************************\n";
    out << "** Image collection for project '" << project << "'.\n";
    out << "**\n";
    out << "** Generated from reading image files: \n";
    for ( it = images.begin(); it != images.end(); ++it )
        out << "** " << *it << "\n";
    out << "**\n";
    out << "** Created: " << QDateTime::currentDateTime().toString() << "\n";
    out << "** by: The User Interface Compiler for Qt version " << QT_VERSION_STR << "\n";
    out << "**\n";
    out << "** WARNING! All changes made in this file will be lost!\n";
    out << "****************************************************************************/\n";
    out << "\n";
    out << "#include <qimage.h>\n";
    out << "#include <qmime.h>\n";
    out << "#include <q3mimefactory.h>\n";
    out << "#include <q3dragobject.h>\n";
    out << "\n";

    QList<EmbedImage*> list_image;
    int image_count = 0;

    // --- one data array (and colour table, if paletted) per loadable image ---
    for ( it = images.begin(); it != images.end(); ++it ) {
        QImage img;
        if ( !img.load( *it ) ) {
            // unloadable images are reported and skipped, not fatal
            fprintf( stderr, "uic: cannot load image file %s\n", (*it).latin1() );
            continue;
        }
        // record the image's metadata for the lookup table emitted below
        EmbedImage *e = new EmbedImage;
        e->width = img.width();
        e->height = img.height();
        e->depth = img.depth();
        e->numColors = img.colorCount();
        e->colorTable = new QRgb[e->numColors];
        e->alpha = img.hasAlphaBuffer();
        QVector<QRgb> ct = img.colorTable();
        memcpy(e->colorTable, ct.constData(), e->numColors*sizeof(QRgb));
        QFileInfo fi( *it );
        e->name = fi.fileName();
        e->cname = QString::fromLatin1("image_%1").arg( image_count++);
        list_image.append( e );
        out << "// " << *it << "\n";
        QString s;
        // 1-bit images are normalised to big-endian bit order before dumping
        if ( e->depth == 1 )
            img = img.convertBitOrder(QImage::BigEndian);
        out << s.sprintf( "static const unsigned char %s_data[] = {", e->cname.latin1() );
#ifndef QT_NO_IMAGE_COLLECTION_COMPRESSION
        // embedData returns the compressed size; remember it for the table
        e->compressed =
#endif
            embedData( out, img.bits(), img.byteCount() );
        out << "\n};\n\n";
        if ( e->numColors ) {
            out << s.sprintf( "static const QRgb %s_ctable[] = {", e->cname.latin1() );
            embedData( out, e->colorTable, e->numColors );
            out << "\n};\n\n";
        }
    }

    if ( !list_image.isEmpty() ) {
        // --- lookup table over all embedded images ---
        out << "static const struct EmbedImage {\n"
            " int width, height, depth;\n"
            " const unsigned char *data;\n"
#ifndef QT_NO_IMAGE_COLLECTION_COMPRESSION
            " ulong compressed;\n"
#endif
            " int numColors;\n"
            " const QRgb *colorTable;\n"
            " bool alpha;\n"
            " const char *name;\n"
            "} embed_image_vec[] = {\n";
        EmbedImage *e = 0;
        int i;
        for (i = 0; i < list_image.count(); ++i) {
            e = list_image.at(i);
            out << " { "
                << e->width << ", "
                << e->height << ", "
                << e->depth << ", "
                << "(const unsigned char*)" << e->cname << "_data, "
#ifndef QT_NO_IMAGE_COLLECTION_COMPRESSION
                << e->compressed << ", "
#endif
                << e->numColors << ", ";
            if ( e->numColors )
                out << e->cname << "_ctable, ";
            else
                out << "0, ";
            if ( e->alpha )
                out << "true, ";
            else
                out << "false, ";
            out << '\"' << e->name << "\" },\n";
            // entry emitted; the EmbedImage record is no longer needed
            delete e;
        }
        // null terminator row so uic_findImage() can stop on .data == 0
#ifndef QT_NO_IMAGE_COLLECTION_COMPRESSION
        out << " { 0, 0, 0, 0, 0, 0, 0, 0, 0 }\n};\n";
#else
        out << " { 0, 0, 0, 0, 0, 0, 0, 0 }\n};\n";
#endif

        // --- uic_findImage(): linear scan of the table by image name ---
        out << "\n"
            "static QImage uic_findImage( const QString& name )\n"
            "{\n"
            " for ( int i=0; embed_image_vec[i].data; i++ ) {\n"
            " if ( QString::fromUtf8(embed_image_vec[i].name) == name ) {\n"
#ifndef QT_NO_IMAGE_COLLECTION_COMPRESSION
            " QByteArray baunzip;\n"
            " baunzip = qUncompress( embed_image_vec[i].data, \n"
            " embed_image_vec[i].compressed );\n"
            " QImage img((uchar*)baunzip.data(),\n"
            " embed_image_vec[i].width,\n"
            " embed_image_vec[i].height,\n"
            " embed_image_vec[i].depth,\n"
            " (QRgb*)embed_image_vec[i].colorTable,\n"
            " embed_image_vec[i].numColors,\n"
            " QImage::BigEndian\n"
            " );\n"
            " img = img.copy();\n"
#else
            " QImage img((uchar*)embed_image_vec[i].data,\n"
            " embed_image_vec[i].width,\n"
            " embed_image_vec[i].height,\n"
            " embed_image_vec[i].depth,\n"
            " (QRgb*)embed_image_vec[i].colorTable,\n"
            " embed_image_vec[i].numColors,\n"
            " QImage::BigEndian\n"
            " );\n"
#endif
            " if ( embed_image_vec[i].alpha )\n"
            " img.setAlphaBuffer(true);\n"
            " return img;\n"
            " }\n"
            " }\n"
            " return QImage();\n"
            "}\n\n";

        // --- mime source factory that serves the embedded images by name ---
        out << "class MimeSourceFactory_" << cProject << " : public Q3MimeSourceFactory\n";
        out << "{\n";
        out << "public:\n";
        out << " MimeSourceFactory_" << cProject << "() {}\n";
        out << " ~MimeSourceFactory_" << cProject << "() {}\n";
        out << " const QMimeSource* data( const QString& abs_name ) const {\n";
        out << "\tconst QMimeSource* d = Q3MimeSourceFactory::data( abs_name );\n";
        out << "\tif ( d || abs_name.isNull() ) return d;\n";
        out << "\tQImage img = uic_findImage( abs_name );\n";
        out << "\tif ( !img.isNull() )\n";
        out << "\t ((Q3MimeSourceFactory*)this)->setImage( abs_name, img );\n";
        out << "\treturn Q3MimeSourceFactory::data( abs_name );\n";
        out << " };\n";
        out << "};\n\n";

        // --- init/cleanup helpers and a static registrar object ---
        out << "static Q3MimeSourceFactory* factory = 0;\n";
        out << "\n";
        out << "void qInitImages_" << cProject << "()\n";
        out << "{\n";
        out << " if ( !factory ) {\n";
        out << "\tfactory = new MimeSourceFactory_" << cProject << ";\n";
        out << "\tQ3MimeSourceFactory::defaultFactory()->addFactory( factory );\n";
        out << " }\n";
        out << "}\n\n";
        out << "void qCleanupImages_" << cProject << "()\n";
        out << "{\n";
        out << " if ( factory ) {\n";
        out << "\tQ3MimeSourceFactory::defaultFactory()->removeFactory( factory );\n";
        out << "\tdelete factory;\n";
        out << "\tfactory = 0;\n";
        out << " }\n";
        out << "}\n\n";
        out << "class StaticInitImages_" << cProject << "\n";
        out << "{\n";
        out << "public:\n";
        out << " StaticInitImages_" << cProject << "() { qInitImages_" << cProject << "(); }\n";
        out << "#if defined(Q_OS_SCO) || defined(Q_OS_UNIXWARE)\n";
        out << " ~StaticInitImages_" << cProject << "() { }\n";
        out << "#else\n";
        out << " ~StaticInitImages_" << cProject << "() { qCleanupImages_" << cProject << "(); }\n";
        out << "#endif\n";
        out << "};\n\n";
        out << "static StaticInitImages_" << cProject << " staticImages;\n";
    }
}
void ThumbGenerator::loadFile(QImage& image, const QFileInfo& fi) { static int sequence = 0; if (GalleryUtil::IsMovie(fi.filePath())) { bool thumbnailCreated = false; QDir tmpDir("/tmp/mythgallery"); if (!tmpDir.exists()) { if (!tmpDir.mkdir(tmpDir.absolutePath())) { LOG(VB_GENERAL, LOG_ERR, "Unable to create temp dir for movie thumbnail creation: " + tmpDir.absolutePath()); } } if (tmpDir.exists()) { QString thumbFile = QString("%1.png") .arg(++sequence,8,10,QChar('0')); QString cmd = "mythpreviewgen"; QStringList args; args << logPropagateArgs.split(" ", QString::SkipEmptyParts); args << "--infile" << '"' + fi.absoluteFilePath() + '"'; args << "--outfile" << '"' + tmpDir.filePath(thumbFile) + '"'; MythSystem ms(cmd, args, kMSRunShell); ms.SetDirectory(tmpDir.absolutePath()); ms.Run(); if (ms.Wait() == GENERIC_EXIT_OK) { QFileInfo thumb(tmpDir.filePath(thumbFile)); if (thumb.exists()) { QImage img(thumb.absoluteFilePath()); image = img; thumbnailCreated = true; } } } if (!thumbnailCreated) { QImage *img = GetMythUI()->LoadScaleImage("gallery-moviethumb.png"); if (img) { image = *img; } } } else { #ifdef EXIF_SUPPORT // Try to get thumbnail from exif data ExifData *ed = exif_data_new_from_file(fi.absoluteFilePath() .toLocal8Bit().constData()); if (ed && ed->data) { image.loadFromData(ed->data, ed->size); } if (ed) exif_data_free(ed); if (image.width() > m_width && image.height() > m_height) return; #endif #ifdef DCRAW_SUPPORT QString extension = fi.suffix(); QSet<QString> dcrawFormats = DcrawFormats::getFormats(); int rotateAngle; if (dcrawFormats.contains(extension) && (rotateAngle = DcrawHandler::loadThumbnail(&image, fi.absoluteFilePath())) != -1 && image.width() > m_width && image.height() > m_height) { if (rotateAngle != 0) { QMatrix matrix; matrix.rotate(rotateAngle); image = image.transformed(matrix); } return; } #endif image.load(fi.absoluteFilePath()); } }
// Writes `image` as one PNG frame to the device attached to this writer.
// quality_in maps to zlib compression level 0-9 (negative = library default);
// `description` becomes PNG text chunks; off_x_in/off_y_in are added to the
// image's own offset and stored as an oFFs chunk.  Animation bookkeeping
// (looping, ms_delay, disposal, frames_written) is emitted as gIFx/gIFg
// chunks.  Returns false on any libpng setup/write failure.
// quality_in is `volatile` because locals must survive the setjmp/longjmp
// error path below.
bool Q_INTERNAL_WIN_NO_THROW QPNGImageWriter::writeImage(const QImage& image, volatile int quality_in, const QString &description, int off_x_in, int off_y_in)
{
    QPoint offset = image.offset();
    int off_x = off_x_in + offset.x();
    int off_y = off_y_in + offset.y();

    png_structp png_ptr;
    png_infop info_ptr;

    png_ptr = png_create_write_struct(PNG_LIBPNG_VER_STRING,0,0,0);
    if (!png_ptr) {
        return false;
    }

    png_set_error_fn(png_ptr, 0, 0, qt_png_warning);
    info_ptr = png_create_info_struct(png_ptr);
    if (!info_ptr) {
        png_destroy_write_struct(&png_ptr, 0);
        return false;
    }

    // libpng reports fatal errors via longjmp back to this point.
    if (setjmp(png_jmpbuf(png_ptr))) {
        png_destroy_write_struct(&png_ptr, &info_ptr);
        return false;
    }

    // quality 0-9 maps directly onto the zlib compression level
    int quality = quality_in;
    if (quality >= 0) {
        if (quality > 9) {
            qWarning("PNG: Quality %d out of range", quality);
            quality = 9;
        }
        png_set_compression_level(png_ptr, quality);
    }

    png_set_write_fn(png_ptr, (void*)this, qpiw_write_fn, qpiw_flush_fn);

    // Pick the PNG colour type from the QImage format.
    int color_type = 0;
    if (image.colorCount()) {
        if (image.isGrayscale())
            color_type = PNG_COLOR_TYPE_GRAY;
        else
            color_type = PNG_COLOR_TYPE_PALETTE;
    }
    else if (image.format() == QImage::Format_Grayscale8)
        color_type = PNG_COLOR_TYPE_GRAY;
    else if (image.hasAlphaChannel())
        color_type = PNG_COLOR_TYPE_RGB_ALPHA;
    else
        color_type = PNG_COLOR_TYPE_RGB;

    png_set_IHDR(png_ptr, info_ptr, image.width(), image.height(),
                 image.depth() == 1 ? 1 : 8, // per channel
                 color_type, 0, 0, 0);       // sets #channels

    if (gamma != 0.0) {
        png_set_gAMA(png_ptr, info_ptr, 1.0/gamma);
    }

    if (image.format() == QImage::Format_MonoLSB)
        png_set_packswap(png_ptr);

    if (color_type == PNG_COLOR_TYPE_PALETTE) {
        // Paletted: copy the colour table and collect per-entry alpha
        // for an optional tRNS chunk (num_trans = last translucent + 1).
        int num_palette = qMin(256, image.colorCount());
        png_color palette[256];
        png_byte trans[256];
        int num_trans = 0;
        for (int i=0; i<num_palette; i++) {
            QRgb rgba=image.color(i);
            palette[i].red = qRed(rgba);
            palette[i].green = qGreen(rgba);
            palette[i].blue = qBlue(rgba);
            trans[i] = qAlpha(rgba);
            if (trans[i] < 255) {
                num_trans = i+1;
            }
        }
        png_set_PLTE(png_ptr, info_ptr, palette, num_palette);
        if (num_trans) {
            png_set_tRNS(png_ptr, info_ptr, trans, num_trans, 0);
        }
    }

    // Swap ARGB to RGBA (normal PNG format) before saving on
    // BigEndian machines
    if (QSysInfo::ByteOrder == QSysInfo::BigEndian) {
        png_set_swap_alpha(png_ptr);
    }

    // Qt==ARGB==Big(ARGB)==Little(BGRA). But RGB888 is RGB regardless
    if (QSysInfo::ByteOrder == QSysInfo::LittleEndian
        && image.format() != QImage::Format_RGB888) {
        png_set_bgr(png_ptr);
    }

    if (off_x || off_y) {
        png_set_oFFs(png_ptr, info_ptr, off_x, off_y, PNG_OFFSET_PIXEL);
    }

    // Subsequent animation frames skip the 8-byte PNG signature.
    if (frames_written > 0)
        png_set_sig_bytes(png_ptr, 8);

    if (image.dotsPerMeterX() > 0 || image.dotsPerMeterY() > 0) {
        png_set_pHYs(png_ptr, info_ptr,
                     image.dotsPerMeterX(), image.dotsPerMeterY(),
                     PNG_RESOLUTION_METER);
    }

    set_text(image, png_ptr, info_ptr, description);

    png_write_info(png_ptr, info_ptr);

    if (image.depth() != 1)
        png_set_packing(png_ptr);

    // RGB data in a 32-bit QImage carries a filler byte for the unused
    // alpha slot; tell libpng where it sits so it can strip it.
    if (color_type == PNG_COLOR_TYPE_RGB && image.format() != QImage::Format_RGB888)
        png_set_filler(png_ptr, 0,
                       QSysInfo::ByteOrder == QSysInfo::BigEndian ?
                       PNG_FILLER_BEFORE : PNG_FILLER_AFTER);

    // Loop count for animated output, only on the first frame.
    if (looping >= 0 && frames_written == 0) {
        uchar data[13] = "NETSCAPE2.0";
        //                0123456789aBC
        data[0xB] = looping%0x100;
        data[0xC] = looping/0x100;
        png_write_chunk(png_ptr, const_cast<png_bytep>((const png_byte *)"gIFx"), data, 13);
    }
    // Per-frame delay/disposal chunk.
    if (ms_delay >= 0 || disposal!=Unspecified) {
        uchar data[4];
        data[0] = disposal;
        data[1] = 0;
        data[2] = (ms_delay/10)/0x100; // hundredths
        data[3] = (ms_delay/10)%0x100;
        png_write_chunk(png_ptr, const_cast<png_bytep>((const png_byte *)"gIFg"), data, 4);
    }

    int height = image.height();
    int width = image.width();
    switch (image.format()) {
    case QImage::Format_Mono:
    case QImage::Format_MonoLSB:
    case QImage::Format_Indexed8:
    case QImage::Format_Grayscale8:
    case QImage::Format_RGB32:
    case QImage::Format_ARGB32:
    case QImage::Format_RGB888:
        {
            // These formats can be written straight from the scanlines.
            png_bytep* row_pointers = new png_bytep[height];
            for (int y=0; y<height; y++)
                row_pointers[y] = const_cast<png_bytep>(image.constScanLine(y));
            png_write_image(png_ptr, row_pointers);
            delete [] row_pointers;
        }
        break;
    default:
        {
            // Other formats are converted one row at a time to keep the
            // temporary-memory footprint to a single scanline.
            QImage::Format fmt = image.hasAlphaChannel() ? QImage::Format_ARGB32 : QImage::Format_RGB32;
            QImage row;
            png_bytep row_pointers[1];
            for (int y=0; y<height; y++) {
                row = image.copy(0, y, width, 1).convertToFormat(fmt);
                row_pointers[0] = const_cast<png_bytep>(row.constScanLine(0));
                png_write_rows(png_ptr, row_pointers, 1);
            }
        }
        break;
    }

    png_write_end(png_ptr, info_ptr);
    frames_written++;

    png_destroy_write_struct(&png_ptr, &info_ptr);
    return true;
}
// Builds a composer legend item for the given symbol: the item text comes
// from the symbol's label (or its value / value range), the icon from the
// symbol's rendered image with the requested opacity applied, and a deep
// copy of the symbol is attached as user data.
// Returns 0 for unsupported symbol types; otherwise a heap-allocated item
// owned by the caller.
QStandardItem* QgsLegendModel::itemFromSymbol( QgsSymbol* s, int opacity, const QString& layerID )
{
    //label: prefer the explicit label, then the single value, then the range
    QString itemText;
    QString label;

    QString lowerValue = s->lowerValue();
    QString upperValue = s->upperValue();

    label = s->label();

    //Take the label as item text if it is there
    if ( !label.isEmpty() )
    {
        itemText = label;
    }
    //take single value
    else if ( lowerValue == upperValue || upperValue.isEmpty() )
    {
        itemText = lowerValue;
    }
    else //or value range
    {
        itemText = lowerValue + " - " + upperValue;
    }

    //icon item
    QImage symbolImage;
    switch ( s->type() )
    {
        case QGis::Point:
            symbolImage =  s->getPointSymbolAsImage();
            break;
        case QGis::Line:
            symbolImage = s->getLineSymbolAsImage();
            break;
        case QGis::Polygon:
            symbolImage = s->getPolygonSymbolAsImage();
            break;
        default:
            return 0; //unsupported symbol type
    }

    if ( opacity != 255 )
    {
        //rewrite the alpha channel pixel by pixel, keeping the RGB values
        QRgb oldColor;
        for ( int i = 0; i < symbolImage.height(); ++i )
        {
            QRgb* scanLineBuffer = ( QRgb* ) symbolImage.scanLine( i );
            for ( int j = 0; j < symbolImage.width(); ++j )
            {
                oldColor = symbolImage.pixel( j, i );
                scanLineBuffer[j] = qRgba( qRed( oldColor ), qGreen( oldColor ), qBlue( oldColor ), opacity );
            }
        }
    }

    QgsComposerSymbolItem* currentSymbolItem = new QgsComposerSymbolItem( itemText );
    // NOTE: the former "if (!currentSymbolItem) return 0;" check was dead
    // code — it ran only after the pointer had already been dereferenced,
    // and operator new throws instead of returning 0 — so it was removed.
    if ( mHasTopLevelWindow )//only use QIcon / QPixmap if we have a running x-server
    {
        currentSymbolItem->setIcon( QIcon( QPixmap::fromImage( symbolImage ) ) );
    }

    //Pass deep copy of QgsSymbol as user data. Cast to void* necessary such that QMetaType handles it
    QgsSymbol* symbolCopy = new QgsSymbol( *s );
    currentSymbolItem->setSymbol( symbolCopy );
    currentSymbolItem->setFlags( Qt::ItemIsEnabled | Qt::ItemIsSelectable );
    currentSymbolItem ->setLayerID( layerID );
    return currentSymbolItem;
}
// Renders an 8-bit depth map (CV_8UC1) into destImg as a yellow-tinted
// visualisation.  A cumulative histogram over the non-zero depth values is
// used to equalise brightness (nearer/denser values come out brighter);
// zero-depth pixels become fully transparent black.
// Returns false when either input is empty/null.
// NOTE(review): destImg is written through 4-byte pixel steps, so it is
// assumed to be a 32-bit format (RGB32/ARGB32) — confirm at call sites.
bool ConvertDepthCvMat8uToYellowQImage(const cv::Mat & srcDepthMat, QImage & destImg)
{
    if (srcDepthMat.empty() || destImg.isNull())
    {
        return false;
    }

    assert (srcDepthMat.size().height == destImg.height()
        && srcDepthMat.size().width == destImg.width());
    assert (srcDepthMat.type() == CV_8UC1);

    const int width = srcDepthMat.size().width;
    const int height = srcDepthMat.size().height;
    const int srcStep = srcDepthMat.step;

    // Pass 1: histogram of the non-zero depth values.
    float cumulative[256];
    memset(cumulative, 0, 256 * sizeof(float));
    unsigned int validCount = 0;

    const uchar * rowPtr = srcDepthMat.data;
    for (int y = 0; y < height; ++y, rowPtr += srcStep)
    {
        const uchar * srcPtr = rowPtr;
        for (int x = 0; x < width; ++x, ++srcPtr)
        {
            uchar depth = *srcPtr;
            if ( depth )
            {
                ++cumulative[depth];
                ++validCount;
            }
        }
    }

    // Convert counts into a cumulative distribution, then invert and scale
    // so frequent/high depth values map to darker output.
    for (int i = 1; i < 256; ++i)
    {
        cumulative[i] += cumulative[i-1];
    }
    if (validCount > 0)
    {
        for (int i = 1; i < 256; ++i)
        {
            cumulative[i] = 256.0f * (1.0f - cumulative[i] / (float)(validCount));
        }
    }

    // Pass 2: paint each pixel; equal R and G with zero B gives yellow.
    rowPtr = srcDepthMat.data;
    for (int y = 0; y < height; ++y, rowPtr += srcStep)
    {
        const uchar * srcPtr = rowPtr;
        uchar * dstPtr = destImg.scanLine(y);
        for (int x = 0; x < width; ++x, ++srcPtr, dstPtr += 4)
        {
            uchar depth = *srcPtr;
            if ( depth )
            {
                dstPtr[0] = 0;                  // blue
                dstPtr[1] = cumulative[depth];  // green
                dstPtr[2] = cumulative[depth];  // red
                dstPtr[3] = 0xff;               // alpha: opaque
            }
            else
            {
                // no depth reading: fully transparent black
                dstPtr[3] = dstPtr[2] = dstPtr[1] = dstPtr[0] = 0;
            }
        }
    }
    return true;
}
// Constructs a halftone rendering of `src`: samples the source on a grid of
// params.m_step, draws one white-on-black circle per sample into `dest`
// (magnified by `scale`), and — when generateGCode is set — appends the CNC
// tool moves for each dot to m_gCode.  m_cutCount tracks the number of
// non-empty dots.
// NOTE(review): getDotSize() is assumed to return a 0..1 dot-size fraction
// (0 = no cut) — confirm against its definition.
Halftoner::Halftoner( const QPixmap& src, QImage& dest, int scale, bool generateGCode, const CNCParameters& params )
    : m_cutCount(0)
{
    QImage src_img( src.toImage() );
    int offset = params.m_step/2;   // x start of the row; toggled for zig-zag
    int radius = params.m_step/2;   // sample/dot radius in source pixels
    double max_dot_size( params.m_fullToolWidth * params.m_maxCutPercent );
    double scale_factor( scale );
    bool write_y(true);             // emit the Y coordinate only once per row
    dest.fill( qRgb(0, 0, 0 ) );

    // Basic approach: Step through the source image and convert each
    // point to a circle in the destination image and a tool cut in the
    // g code. Every other row is offset by half a step to achieve the
    // zig-zag pattern of a typical halftone image.
    for ( int y = params.m_step/2, cy = src_img.height()/params.m_step;
          y < src_img.height();
          y+=params.m_step, --cy, write_y = true )
    {
        for ( int x = offset, cx = 1; x < src_img.width(); x+=params.m_step, ++cx )
        {
            double ds( getDotSize( src_img, x, y, radius ) );

            // Simple optimization: if the dot size is zero, just fill the destination
            // area with black pixels and don't generate any g code.
            if ( ds == 0 )
            {
                for ( int i = scale_factor*(x - radius); i < scale_factor*(x + radius); ++i )
                {
                    for ( int j = scale_factor*(y - radius); j < scale_factor*(y + radius); ++j )
                    {
                        if ( i >= 0 && i < dest.width() && j >= 0 && j < dest.height() )
                        {
                            dest.setPixel( i, j, qRgb( 0, 0, 0 ) );
                        }
                    }
                }
            }
            else
            {
                // Draw a circle and generate some tool movement g code.
                ++m_cutCount;
                if ( generateGCode )
                {
                    // Write the g code to cut this dot.
                    // Lift tool to safe 'fast z' depth.
                    m_gCode += "G00Z" + QString::number( params.m_fastZ ) + "\n";
                    // Move tool to cut location.
                    double cut_x( cx * ( max_dot_size + params.m_minDotGap ) );
                    // offset rows shift their cut positions by half a dot pitch
                    if ( offset )
                        cut_x -= max_dot_size / 2.0;
                    m_gCode += "G00X" + QString::number( cut_x );
                    if ( write_y )
                    {
                        double cut_y( cy * ( max_dot_size + params.m_minDotGap ) );
                        m_gCode += "Y" + QString::number( cut_y );
                        write_y = false;
                    }
                    m_gCode += "\n";
                    // Move tool to cut depth.
                    m_gCode += "G01Z" + QString::number( - params.m_fullToolDepth * params.m_maxCutPercent * ds ) + "\n";
                }

                // Draw a circle in the preview image.
                // ds2 is the squared circle radius in destination pixels.
                double ds2( radius*radius*ds*ds*scale_factor*scale_factor );
                for ( int i = scale_factor*(x - radius); i < scale_factor*(x + radius); ++i )
                {
                    for ( int j = scale_factor*(y - radius); j < scale_factor*(y + radius); ++j )
                    {
                        if ( i >= 0 && i < dest.width() && j >= 0 && j < dest.height() )
                        {
                            int dx( i - scale_factor*x ), dy( j - scale_factor*y );
                            if ( dx * dx + dy * dy < ds2 - 0.5 )
                                dest.setPixel(i, j, qRgb(255, 255, 255) );
                            // Make the border pixels grey to improve the appearance a bit.
                            else if ( dx * dx + dy * dy < ds2 + 0.5 )
                                dest.setPixel(i, j, qRgb(127, 127, 127) );
                            else
                            {
                                dest.setPixel(i, j, qRgb(0, 0, 0) );
                            }
                        }
                    }
                }
            }
        }
        // Zig-zag in x.
        if ( offset )
            offset = 0;
        else
            offset = params.m_step/2;
    }

    // Finally, make sure the tool is parked at a safe depth.
    m_gCode += "G00Z" + QString::number( params.m_fastZ ); // Lift tool to safe 'fast z' depth.
    m_gCode += "\n";
}
bool PunchFilter::Punch(const QImage &img, QImage *outputImage, const QRect &clipRect /*= QRect()*/ ) const { *outputImage = img; int top = 0; int bottom = img.height(); int left = 0; int right = img.width(); if (!clipRect.isNull()) { // If we have a cliprect, set our coordinates to our cliprect // and make sure it is within the boundaries of the image // After that, an optimization where we adjust the cliprect to only be the bounding // rect of the circle we are manipulating, (so that we don't have to run the filter on // the whole image if the default cliprect was given). top = qMax(top, clipRect.top()); top = qMax(top, (int)(ceil(m_Center.y() - 1) - m_Radius)); bottom = qMin(bottom, clipRect.bottom()); bottom = qMin(bottom, (int)(floor(m_Center.y() + 1) + m_Radius)); left = qMax(left, clipRect.left()); left = qMax(left, (int)(ceil(m_Center.x() - 1) - m_Radius)); right = qMin(right, clipRect.right()); right = qMin(right, (int)(floor(m_Center.x() + 1) + m_Radius)); } qreal centerx = m_Center.x(); qreal centery = m_Center.y(); int x; int y; double amplitude = m_Force/3.2; // scale down //amplitude = qBound(-0.3125, amplitude, 0.3125); for (y = top; y < bottom; y++) { for (x = left; x < right; x++) { qreal dx = x - centerx; qreal dy = y - centery; double distance = sqrt(dx * dx + dy * dy); QRgb rgb; if (distance <= m_Radius + M_SQRT2) { // M_SQRT2 is the maximum "width" of a pixel. (If measured diagonally) // we must evaluate this area "outside" the radius also to reduce aliasing effects. double distort = distance / m_Radius; if (distort > 0.0 && distort < 1.0) { distort = punch_xform(distort, amplitude); } // Normalize the distance vector and find the length after distortion if (dx != 0 || dy != 0) { double mag = m_Radius/sqrt(dx * dx + dy * dy); mag *= distort; dx *= mag; dy *= mag; } double tx = centerx + dx; double ty = centery + dy; // Crop off any overflows. 
This happens since we are adding M_SQRT2 to the radius to evaluate a small area outside the radius circle. if (tx > img.width() || tx < 0) tx = x; if (ty > img.height() || ty < 0) ty = y; rgb = getSubpixel(img, tx, ty); outputImage->setPixel(x, y, rgb); } } } return true; }