// Test stub: instead of grabbing a real camera frame, paint a random
// solid rectangle into a persistent 640x480 image so callers see
// something dynamic.  The unnamed bool parameter (the copy flag in the
// real implementation) is ignored here.
void VideoCapture::getCameraImage(QImage & img, bool) {
    // Just generate something dynamic (rectangles)
    static QImage cimg;          // persists across calls
    int x, y, w, h;
    if (cimg.isNull()) {
        // First call: create the backing image and fill it entirely.
        x = y = 0;
        w = 640;
        h = 480;
        cimg.create(w, h, 32);
    } else {
        // Pick a random rectangle at least 10 pixels wide/high that
        // still fits inside the image.
        w = nextrand() % (cimg.width() - 10) + 10;
        h = nextrand() % (cimg.height() - 10) + 10;
        x = nextrand() % (cimg.width() - w);
        y = nextrand() % (cimg.height() - h);
    }
    // Random fill colour.  NOTE(review): "% 255" yields 0..254, so pure
    // white (255) can never be produced - harmless for a test stub.
    QRgb c = qRgb(nextrand() % 255, nextrand() % 255, nextrand() % 255);
    for (int j = 0; j < h; j++) {
        QRgb *l = (QRgb *) cimg.scanLine(y + j) + x;
        for (int i = 0; i < w; i++)
            l[i] = c;
    }
    img = cimg;                  // shallow copy out to the caller
}
/*!
  Returns the QImage called \a name.  You should avoid including
  any filename type extension (e.g. .png, .xpm).

  If the image is not found on disk and \a name ends in "_disabled",
  a greyed-out, alpha-faded version of the corresponding enabled
  image is generated on the fly (32-bpp builds only).
*/
QImage Resource::loadImage( const QString &name)
{
#ifndef QT_NO_DEPTH_32 // have alpha-blended pixmaps
    // One-entry cache: the enabled image loaded while generating a
    // "_disabled" variant is remembered, so an immediately following
    // request for that enabled image skips the disk.
    static QImage last_enabled;
    static QString last_enabled_name;
    if ( name == last_enabled_name )
        return last_enabled;
#endif
    QImage img = load_image(name);
#ifndef QT_NO_DEPTH_32 // have alpha-blended pixmaps
    if ( img.isNull() ) {
        // No file, try generating
        if ( name[name.length()-1]=='d' && name.right(9)=="_disabled" ) {
            // Load the enabled counterpart ("foo" for "foo_disabled").
            last_enabled_name = name.left(name.length()-9);
            last_enabled = load_image(last_enabled_name);
            if ( last_enabled.isNull() ) {
                last_enabled_name = QString::null;
            } else {
                // Build the disabled look: greyscale pixels at one
                // third of the original alpha.
                img.detach();
                img.create( last_enabled.width(), last_enabled.height(), 32 );
                for ( int y = 0; y < img.height(); y++ ) {
                    for ( int x = 0; x < img.width(); x++ ) {
                        QRgb p = last_enabled.pixel( x, y );
                        int a = qAlpha(p)/3;
                        int g = qGray(qRed(p),qGreen(p),qBlue(p));
                        img.setPixel( x, y, qRgba(g,g,g,a) );
                    }
                }
                img.setAlphaBuffer( TRUE );
            }
        }
    }
#endif
    return img;
}
// Rebuilds a QImage for frame number \a frame from the per-channel
// byte planes stored in the frame list.
//
// NOTE(review): \a pic is passed BY VALUE, so the image created and
// filled here is discarded when the function returns - the caller's
// QImage is never modified.  A QImage& parameter is presumably
// intended; verify against the call sites.
// NOTE(review): the loops iterate to the member variables w/h while
// the image is created with ptr->w/ptr->h; if those ever differ this
// reads out of bounds or leaves pixels unset.
void MyWidget:: loadPic(QImage pic,int frame)
{
    ptrFrame *ptr;
    ptr = list->at(frame);
    long r,g,b,i,j,temp;
    pic.create(ptr->w,ptr->h,32,0,QImage::IgnoreEndian);
    for (i=0;i<w;i++) {
        for (j=0;j<h;j++) {
            r=0;g=0;b=0;
            //r1=0;g1=0;b1=0;
            // Fetch one byte from each colour plane.  The single-byte
            // memcpy into a zeroed long only sets the low-order byte
            // on little-endian hosts - TODO confirm portability needs.
            temp = (long)ptr->r + i*ptr->w+j;
            memcpy(&r,(void*)temp,1);
            temp = (long)ptr->g + i*ptr->w+j;
            memcpy(&g,(void*)temp,1);
            temp = (long)ptr->b + i*ptr->w+j;
            memcpy(&b,(void*)temp,1);
            pic.setPixel(i,j,qRgb(r,g,b));
        }
    }
}
// Grabs one camera frame into \a img using the V4L (video4linux 1)
// memory-mapped capture interface.
//
// \param img   receives the frame (32 bpp, width x height).
// \param copy  if false, \a img is made to reference the kernel's
//              mmap'ed frame buffer directly (zero-copy); if true the
//              pixel data is memcpy'ed into a freshly created image.
void VideoCapture::getCameraImage(QImage & img, bool copy)
{
    if (fd == -1) {
        // Device not open: hand back a blank image (created once).
        if (img.isNull()) {
            img.create(width, height, 32);
        }
        return;
    }

    // Start capturing the next frame (we alternate between 0 and 1).
    int frame = currentFrame;
    struct video_mmap capture;
    if (mbuf.frames > 1) {
        // Double-buffering: queue the other buffer before syncing on
        // the current one.
        currentFrame = !currentFrame;
        capture.frame = currentFrame;
        capture.width = width;
        capture.height = height;
        capture.format = VIDEO_PALETTE_RGB32;
        ioctl(fd, VIDIOCMCAPTURE, &capture);
    }

    // Wait for the current frame to complete.
    ioctl(fd, VIDIOCSYNC, &frame);

    // Create an image that refers directly to the kernel's
    // frame buffer, to avoid having to copy the data.
    if (!copy) {
        img = QImage(frames + mbuf.offsets[frame], width, height,
                     32, 0, 0, QImage::IgnoreEndian);
    } else {
        img.create(width, height, 32);
        memcpy(img.bits(), frames + mbuf.offsets[frame],
               width * height * 4);
    }

    // Queue up another frame if the device only supports one at a time.
    if (mbuf.frames <= 1) {
        capture.frame = currentFrame;
        capture.width = width;
        capture.height = height;
        capture.format = VIDEO_PALETTE_RGB32;
        ioctl(fd, VIDIOCMCAPTURE, &capture);
    }
}
QImage ShadowEngine::makeShadow(const QPixmap& textPixmap, const QColor &bgColor) { QImage result; // create a new image for for the shaddow int w = textPixmap.width(); int h = textPixmap.height(); // avoid calling these methods for every pixel int bgRed = bgColor.red(); int bgGreen = bgColor.green(); int bgBlue = bgColor.blue(); float alphaShadow; /* * This is the source pixmap */ QImage img = textPixmap.convertToImage().convertDepth(32); /* * Resize the image if necessary */ if ((result.width() != w) || (result.height() != h)) { result.create(w, h, 32); } result.fill(0); // all black result.setAlphaBuffer(true); for (int i = thickness_; i < w - thickness_; i++) { for (int j = thickness_; j < h - thickness_; j++) { alphaShadow = decay(img, i, j); alphaShadow = (alphaShadow > 180.0) ? 180.0 : alphaShadow; // update the shadow's i,j pixel. result.setPixel(i,j, qRgba(bgRed, bgGreen , bgBlue, (int) alphaShadow)); } } return result; }
bool ossimQtStaticTileImageCache::getTile(const ossimIpt& pt, QImage& image)const { bool result = false; ossimIpt tileOrigin = getTileOrigin(pt); ossimIrect cacheRect = getCacheRect(); if((image.width() != theTileSize.x)|| (image.height() != theTileSize.y)) { image.create(theTileSize.x, theTileSize.y, 32); } if(cacheRect.pointWithin(tileOrigin)) { ossimIpt delta(tileOrigin.x - cacheRect.ul().x, tileOrigin.y - cacheRect.ul().y); if((delta.x >= 0)&&(delta.y >= 0)) { image = theCache.copy(tileOrigin.x - cacheRect.ul().x, tileOrigin.y - cacheRect.ul().y, theTileSize.x, theTileSize.y); ossim_int32 idx = getTileIndex(pt); if(idx >=0) { result = theValidTileArray[idx]; } } else { image.fill(0); } } else { image.fill(0); } return result; }
// Copies the src_width x src_height region at (src_x, src_y) of
// *originalPixmap into *destPixmap, optionally applying the
// flip/rotate transform encoded in the TRANSFORM_X_FLIP,
// TRANSFORM_Y_FLIP and TRANSFORM_INVERTED_AXES bits of \a transform.
// \a hasAlpha indicates the source pixmap carries meaningful alpha.
void get_transformed_pixmap(QPixmap* originalPixmap, QPixmap* destPixmap,
                            int src_x, int src_y,
                            int src_width, int src_height,
                            int transform, bool hasAlpha)
{
    QImage originalImage = originalPixmap->convertToImage();

    if ( hasAlpha ) {
        // Qt's handling of the alpha channel in the conversion
        // process between QPixmap and QImage is buggy.
        // If the pixmap's pixels only have alpha values 0x00 and 0xFF
        // then the resulting QImage from conversion will return
        // false for hasAlphaBuffer().
        // so we set our own flag instead of depending on Qt to
        // maintain alpha information.
        originalImage.setAlphaBuffer(TRUE);
    }

    /* Qt gives us this useful API that returns a section of a QImage */
    QImage sectionImage = originalImage.copy(src_x, src_y, src_width, src_height);

    /* Skip this pixel-by-pixel copy if there is no transform */
    if (0 != transform) {
        QImage sectionImage32bpp = sectionImage.convertDepth(32);
        QImage processedImage;
        int nXOriginSrc = 0;
        int nYOriginSrc = 0;
        int nWidth = src_width;
        int nHeight = src_height;
        /* scan length of the source image */
        int imageWidth = src_width;
        /* number of rows of the source image */
        int imageHeight = src_height;
        // NOTE(review): imgLen and srcImgLen are computed below but
        // never used afterwards.
        int imgLen;
        int srcImgLen;
        int t_width;
        int t_height;
        int srcX;
        int srcY;
        int xStart;
        int yStart;
        int xIncr;
        int yIncr;
        int destX;
        int destY;
        int yCounter;
        int xCounter;
        int srcIndex;
        int destIndex;
        uchar* srcBits = NULL;
        uchar* destBits = NULL;
        uchar* srcBitsPtr = NULL;
        uchar* destBitsPtr = NULL;

        /* set dimensions of image being created, depending on transform */
        if (transform & TRANSFORM_INVERTED_AXES) {
            t_width = src_height;
            t_height = src_width;
        } else {
            t_width = src_width;
            t_height = src_height;
        }

        /* width * height * 4 gives us the size of a 32 bpp image */
        imgLen = nWidth * nHeight << 2;
        srcImgLen = imageWidth * imageHeight << 2;

        /* Qt specific */
        processedImage.create(t_width, t_height, 32);
        srcBits = sectionImage32bpp.bits();
        destBits = processedImage.bits();
        /* ----------- */

        // Choose scan direction for each axis from the flip bits.
        if (transform & TRANSFORM_Y_FLIP) {
            yStart = nHeight-1;
            yIncr = -1;
        } else {
            yStart = 0;
            yIncr = +1;
        }

        if (transform & TRANSFORM_X_FLIP) {
            xStart = nWidth-1;
            xIncr = -1;
        } else {
            xStart = 0;
            xIncr = +1;
        }

        srcBitsPtr = srcBits;
        destBitsPtr = destBits;

        /* increment srcX,Y regular. increment destX,Y according to
           transform. this makes handling of mask and alpha values
           easier */
        for (srcY = nYOriginSrc, destY = yStart, yCounter = 0;
             yCounter < nHeight;
             srcY++, destY += yIncr, yCounter++) {
            /* in the current implementation we have source bitmap
               dimension as the width of the image and the height of the
               region destination bitmap is of the dimensions of the
               region */
            for (srcX = nXOriginSrc, destX = xStart, xCounter = 0;
                 xCounter < nWidth;
                 srcX++, destX += xIncr, xCounter++) {
                // With inverted axes the destination row index comes
                // from destX (the source column).
                if ( transform & TRANSFORM_INVERTED_AXES ) {
                    destIndex = ( ( (destX) * t_width) + (destY) );
                } else {
                    destIndex = ( ( (destY) * t_width) + (destX) );
                }
                destBitsPtr = destBits + (destIndex * 4) ;
                srcIndex = (((srcY) * imageWidth) + (srcX));
                srcBitsPtr = srcBits + (srcIndex * 4);

                /* copy the pixel that is pointed to */
                *((int *)destBitsPtr) = *((int *)srcBitsPtr);
            } /* for x */
        } /* for y */
        /* ---------- */

        // Carry the section's alpha flag over to the transformed image.
        if(TRUE == sectionImage.hasAlphaBuffer() ) {
            processedImage.setAlphaBuffer(TRUE);
        } else {
            processedImage.setAlphaBuffer(FALSE);
        }

        destPixmap->convertFromImage(processedImage);
    } else {
        /* No transform, just copy the image sub-section */
        destPixmap->convertFromImage(sectionImage);
    }
}
/*
 * Converts the pixmap to a QImage in device coordinate space.
 * 15/16-bpp pixmaps are expanded to 32 bpp by hand (optionally merging
 * the mask into an alpha channel through a QGfx blit); other depths
 * are blitted through a QGfx into an image of a matching little-endian
 * depth.  For depths <= 8 the colour table is copied out, and the
 * result is finally mapped back from device to logical orientation.
 */
QImage QPatchedPixmap::convertToImage() const
{
    QImage image;
    if ( isNull() ) {
#if defined(CHECK_NULL)
        qWarning( "QPixmap::convertToImage: Cannot convert a null pixmap" );
#if defined(NASTY)
        abort();
#endif
#endif
        return image;
    }

    // Dimensions in device coordinates (the screen may be rotated).
    int w = qt_screen->mapToDevice( QSize(width(), height()) ).width();
    int h = qt_screen->mapToDevice( QSize(width(), height()) ).height();
    int d = depth();
    bool mono = d == 1;

    if( d == 15 || d == 16 ) {
#ifndef QT_NO_QWS_DEPTH_16
        d = 32;
        // Convert here because we may not have a 32bpp gfx
        image.create( w,h,d,0, QImage::IgnoreEndian );
        for ( int y=h-1; y>=0; y-- ) {
            // for each scan line...
            register uint *p = (uint *)image.scanLine(y);
            ushort *s = (ushort*)scanLine(y);
            for (int i=w;i>0;i--)
                *p++ = qt_conv16ToRgb( *s++ );
        }
        const QBitmap *mymask = mask();
        if(mymask!=NULL) {
            // Blit through a QGfx so the mask becomes an alpha channel.
            QImage maskedimage(width(), height(), 32);
            maskedimage.fill(0);
            QGfx * mygfx=maskedimage.graphicsContext();
            if(mygfx) {
                mygfx->setAlphaSource(mymask->scanLine(0), mymask->bytesPerLine());
                mygfx->setSource(&image);
                mygfx->setAlphaType(QGfx::LittleEndianMask);
                mygfx->setLineStep(maskedimage.bytesPerLine());
                mygfx->blt(0,0,width(),height(),0,0);
            } else {
                qWarning("No image gfx for convertToImage!");
            }
            delete mygfx;   // deleting a null pointer is safe
            maskedimage.setAlphaBuffer(TRUE);
            image.reset();
            image=maskedimage;
        }
#endif
    } else {
        // We can only create little-endian pixmaps
        if ( d == 4 )
            image.create(w,h,8,0, QImage::IgnoreEndian );
        else if ( d == 24 )
            image.create(w,h,32,0, QImage::IgnoreEndian );
        else
            image.create(w,h,d,0, mono ? QImage::LittleEndian : QImage::IgnoreEndian );//####### endianness

        QGfx * mygfx=image.graphicsContext();
        const QBitmap *mymask = mask();
        if(mygfx) {
            QGfx::AlphaType at = QGfx::IgnoreAlpha;
            if(mymask!=NULL) {
                at = QGfx::LittleEndianMask;
                mygfx->setAlphaSource(mymask->scanLine(0), mymask->bytesPerLine());
                image.fill(0);
            }
            mygfx->setSource(this);
            mygfx->setAlphaType(at);
            mygfx->setLineStep(image.bytesPerLine());
            mygfx->blt(0,0,width(),height(),0,0);
        } else {
            qWarning("No image gfx for convertToImage!");
        }
        delete mygfx;
        image.setAlphaBuffer(mymask==NULL?data->hasAlpha:TRUE);
    }

    if ( mono ) {
        // bitmap: fixed black/white colour table
        image.setNumColors( 2 );
        image.setColor( 0, qRgb(255,255,255) );
        image.setColor( 1, qRgb(0,0,0) );
    } else if ( d <= 8 ) {
        // Copy the pixmap's colour lookup table into the image.
        image.setNumColors( numCols() );
        for ( int i = 0; i < numCols(); i++ )
            image.setColor( i, clut()[i] );
    }

    // Map back from device to logical (possibly rotated) orientation.
    image = qt_screen->mapFromDevice( image );

    return image;
}
// QImageIO read handler for JPEG: decodes the stream behind \a iio
// into a QImage (32 bpp for 3/4-component output, 8-bpp greyscale for
// 1-component output) using libjpeg, then stores the image and a
// success status in \a iio.  On a libjpeg error the setjmp handler
// jumps past the decode, leaving the status unset.
static void read_jpeg_image(QImageIO* iio)
{
    QImage image;
    struct jpeg_decompress_struct cinfo;

    struct my_jpeg_source_mgr *iod_src = new my_jpeg_source_mgr(iio);
    struct my_error_mgr jerr;

    jpeg_create_decompress(&cinfo);

    cinfo.src = iod_src;

    // Route libjpeg fatal errors through longjmp instead of exit().
    cinfo.err = jpeg_std_error(&jerr);
    jerr.error_exit = my_error_exit;

    if (!setjmp(jerr.setjmp_buffer)) {
#if defined(_OS_UNIXWARE7_)
        (void) jpeg_read_header(&cinfo, B_TRUE);
#else
        (void) jpeg_read_header(&cinfo, TRUE);
#endif

        (void) jpeg_start_decompress(&cinfo);

        if ( cinfo.output_components == 3 || cinfo.output_components == 4) {
            image.create( cinfo.output_width, cinfo.output_height, 32 );
        } else if ( cinfo.output_components == 1 ) {
            // Greyscale: 8 bpp with an identity grey colour table.
            image.create( cinfo.output_width, cinfo.output_height, 8, 256 );
            for (int i=0; i<256; i++)
                image.setColor(i, qRgb(i,i,i));
        } else {
            // Unsupported format
        }

        if (!image.isNull()) {
            uchar** lines = image.jumpTable();
            // jpeg_read_scanlines may return fewer lines than asked
            // for; loop until the whole image is decoded.
            while (cinfo.output_scanline < cinfo.output_height)
                (void) jpeg_read_scanlines(&cinfo,
                                           lines + cinfo.output_scanline,
                                           cinfo.output_height);
            (void) jpeg_finish_decompress(&cinfo);
        }

        if ( cinfo.output_components == 3 ) {
            // Expand 24->32 bpp in place, walking right-to-left so the
            // packed RGB data is not overwritten before it is read.
            for (uint j=0; j<cinfo.output_height; j++) {
                uchar *in = image.scanLine(j) + cinfo.output_width * 3;
                QRgb *out = (QRgb*)image.scanLine(j);
                for (uint i=cinfo.output_width; i--; ) {
                    in-=3;
                    out[i] = qRgb(in[0], in[1], in[2]);
                }
            }
        }

        iio->setImage(image);
        iio->setStatus(0);
    }

    jpeg_destroy_decompress(&cinfo);
    delete iod_src;
}
// Slot: prompts for an MPEG file, loads it frame by frame via the
// Mpeg loader while keeping the GUI responsive with processEvents(),
// then resizes and lays out the widgets to match the first frame.
void MyWidget::open_file()
{
    //pops up the dialog
    QString s = QFileDialog::getOpenFileName("./","Mpeg files (*.mpg *.mpeg)",this,"open file dialog","Choose a file" );

    //was a file selected
    if (s != NULL) {
        status->setText("Loading...");
        // NOTE(review): "in" is a fixed buffer - a long path would
        // overflow this sprintf.  Confirm its size or bound the copy.
        char *inp = c2c(s);
        sprintf(in,"%s",inp);

        //remove previous info
        cleanup();
        framecount = 1;

        //load the video
        // NOTE(review): "loader" is never deleted below - this leaks
        // one Mpeg object per open.  Verify ownership before changing.
        Mpeg *loader = new Mpeg(list,in);
        loader->start();
        loader->load();
        //qApp->notify(this,&paintEvent());
        char count[120];
        // Busy-wait on the loader, pumping the event loop so the GUI
        // stays responsive and the progress text keeps updating.
        while (loader->Processing()) {
            qApp->processEvents();
            sprintf(count,"Loading... %d",framecount);
            status->setText(count);
        }

        // First frame determines the display dimensions.
        ptrFrame* first = list->at(0);
        w = first->w;
        h = first->h;

        //adapt the gui
        img.create(w,h,32,0,QImage::IgnoreEndian);
        setFixedSize(w+160,h+50);
        move(0,0);
        b_play->setGeometry(w+10, 10, 75, 30);
        b_open->setGeometry(w+10, 50, 100, 30);
        b_save->setGeometry(w+10, 90, 125, 30);
        l_thres->setGeometry(w+10,130,100,30);
        s_thres->setGeometry(w+10,150,100,30);
        l_skin->setGeometry(w+10,170,100,30);
        s_skin->setGeometry(w+10,190,100,30);
        status->setGeometry(w+10,h-10,100,30);
        s_frame->setGeometry(10,h+10,w-10,30);
        s_frame->setMinValue(1);
        s_frame->setMaxValue(framecount-1);
        s_frame->setTickInterval(1);
        s_frame->setValue(1);
        l_algo->setGeometry(w+10,210,140,30);
        reload = true;
        status->setText("File Loaded");
    }
}
//===========================================================================// // void ImageOperations::subsample(QImage image, QImage* ret, // // bool resize = true) // //===========================================================================// // INPUTS: image: QImage to be sub-sampled by factor 2. // // resize: flag to indicate whether the size of the image should // // be decreased by factor 2 as part of sub-sampling or // // pixel values duplicated to maintain the original size. // // OUTPUTS: QImage: Sub-sampled image of either equal or half the original // // size. // // OPERATION: The average of a pixel and its right, lower and lower-right // // neighbours is computed and used as value for the pixel(s) in // // the sub-sampled image. // //===========================================================================// void ImageOperations::subsample( const QImage& qimg, QImage& qimgRet, bool bResize) { int subs_w, subs_h; if( bResize ) { // Compute the subs_w = int(qimg.width()/2); // size of the subs_h = int(qimg.height()/2); // output image. } else { subs_w = qimg.width(); subs_h = qimg.height(); } qimgRet.create(subs_w, subs_h, 32); int col_r, col_g, col_b; for (int y=0; y<qimg.height(); y+=2) { // Traverse image for (int x=0; x<qimg.width(); x+=2) { // and compute col_r = qRed(qimg.pixel(x,y)); // average of col_g = qGreen(qimg.pixel(x,y)); // the pixel and col_b = qBlue(qimg.pixel(x,y)); // its neighbours int sum = 1; // to the right, if (x < qimg.width()-1) { // the bottom as col_r += qRed (qimg.pixel(x+1, y)); // well as the col_g += qGreen(qimg.pixel(x+1,y)); // bottom-right. col_b += qBlue (qimg.pixel(x+1,y)); // Consider image sum++; // borders. 
} if (y < qimg.height()-1) { col_r += qRed (qimg.pixel(x,y+1)); col_g += qGreen(qimg.pixel(x,y+1)); col_b += qBlue (qimg.pixel(x,y+1)); sum++; } if ((x<qimg.width()-1) && (y<qimg.height()-1)) { col_r += qRed (qimg.pixel(x+1, y+1)); col_g += qGreen(qimg.pixel(x+1, y+1)); col_b += qBlue (qimg.pixel(x+1, y+1)); sum++; } col_r /= sum; col_g /= sum; col_b /= sum; if (bResize) { // Set the output if ((int(x/2) < subs_w) && (int(y/2) < subs_h)) { // image's pixels qimgRet.setPixel(int(x/2), int(y/2), // to the computed qRgb(col_r, col_g, col_b)); // values. } } else { // In the case that qimgRet.setPixel(x, y, qRgb(col_r, col_g, // the image is col_b)); // not being re- if (x < qimg.width()-1) { // sized, four qimgRet.setPixel(x+1, y, qRgb(col_r, col_g, // pixels hold col_b)); // the same value } // (with the if (y < qimg.height()-1) { // exception of qimgRet.setPixel(x, y+1, qRgb(col_r, col_g, // the borders). col_b)); } if ((x<qimg.width()-1) && (y<qimg.height()-1)) { qimgRet.setPixel(x+1, y+1, qRgb(col_r, col_g, col_b)); } } } } }
// Renders a drop shadow for the given text pixmap: returns a 32-bpp
// image of the same size whose RGB channels are bgColor and whose
// alpha channel is produced per pixel by the decay algorithm selected
// in m_shadowSettings, clamped to the configured maximum opacity.
QImage KShadowEngine::makeShadow(const QPixmap &textPixmap, const QColor &bgColor)
{
    QImage result;

    // create a new image for the shadow
    int w = textPixmap.width();
    int h = textPixmap.height();

    // avoid calling these methods for every pixel
    int bgRed = bgColor.red();
    int bgGreen = bgColor.green();
    int bgBlue = bgColor.blue();

    // Half the configured thickness is left untouched on each side.
    int thick = m_shadowSettings->thickness() >> 1;

    double alphaShadow;

    /*
     * This is the source pixmap, forced to 32 bpp for the decay calls.
     */
    QImage img = textPixmap.convertToImage().convertDepth(32);

    /*
     * Resize the image if necessary
     */
    if((result.width() != w) || (result.height() != h))
    {
        result.create(w, h, 32);
    }

    result.fill(0); // all black
    result.setAlphaBuffer(true);

    for(int i = thick; i < w - thick; i++)
    {
        for(int j = thick; j < h - thick; j++)
        {
            // Per-pixel alpha from the selected decay algorithm.
            switch(m_shadowSettings->algorithm())
            {
                case KShadowSettings::DoubleLinearDecay:
                    alphaShadow = doubleLinearDecay(img, i, j);
                    break;
                case KShadowSettings::RadialDecay:
                    alphaShadow = radialDecay(img, i, j);
                    break;
                case KShadowSettings::NoDecay:
                    alphaShadow = noDecay(img, i, j);
                    break;
                case KShadowSettings::DefaultDecay:
                default:
                    alphaShadow = defaultDecay(img, i, j);
            }

            // Clamp to the configured maximum opacity.
            alphaShadow = (alphaShadow > m_shadowSettings->maxOpacity()) ?
                m_shadowSettings->maxOpacity() : alphaShadow;

            // update the shadow's i,j pixel.
            result.setPixel(i, j, qRgba(bgRed, bgGreen, bgBlue, (int)alphaShadow));
        }
    }

    return result;
}
// Rebuilds the dialog's view from a scalar image and a 2D vector
// field.  With USE_RENDERING the data is handed to VTK (the image via
// a CVTKAdapter, the field as a structured grid of 3D vectors with
// z = 0); otherwise the image and field are drawn 7x zoomed into
// bufferPixmap with QPainter and handed to displayPtr as a QImage.
void CDisplayVectorDialog::updateView( TImage* imagePtr, TField2D* fieldPtr ) throw()
{
  BENCHSTART;
  FBEGIN;
#ifdef USE_RENDERING
  if ( !fieldPtr || !imagePtr )
    return;
  // NOTE(review): wrapping the raw imagePtr in a shared_ptr takes
  // ownership of it here; if the caller also owns the object this
  // leads to a double delete.  Verify the ownership contract.
  boost::shared_ptr<TImage> myImg ( imagePtr );
  CVTKAdapter myAdapter( myImg );
  vtkImageData* myImage = myAdapter.convertToExternal();
  vtkStructuredGrid* myField = vtkStructuredGrid::New();
  int dims[3];
  dims[0] = imagePtr->getExtent(0);
  dims[1] = imagePtr->getExtent(1);
  dims[2] = 1;
  myField->SetDimensions(dims);
  vtkDoubleArray *vectors = vtkDoubleArray::New();
  vtkPoints *points = vtkPoints::New();
  points->Allocate(dims[0]*dims[1]);
  vectors->SetNumberOfComponents(3);
  vectors->SetNumberOfTuples(dims[0]*dims[1]);
  size_t count = 0;
  double x[3] = {1.0,1.0,0.0};
  uint dx = 0;
  uint dy = 0;
  // Copy each 2D field vector into the VTK arrays (z component 0),
  // tracking the (dx,dy) grid position as we walk the field.
  for ( TField2D::iterator it = fieldPtr->begin(); it != fieldPtr->end(); ++it, ++count )
  {
    x[0] = static_cast<double>(dx);
    ++dx;
    if ( dx > fieldPtr->getExtent(0) )
      {dy++;dx=0;}
    x[1] = dy;
/*  if ( (*it)[0] > 0.0 )
    cerr << count << ": Setting " << x[0] << ";" << x[1] << " to " << (*it) << endl;*/
    double v[3];
    v[0] = (*it)[0];
    v[1] = (*it)[1];
    v[2] = 0.0;
    vectors->InsertTuple(count,v);
    points->InsertPoint(count,x);
  }
  myField->SetPoints(points);
  points->Delete();
  myField->GetPointData()->SetVectors(vectors);
  vectors->Delete();
  myImage->SetScalarTypeToShort();
//  displayPtr->testImageDataRange( imagePtr->getMinimum(), imagePtr->getMaximum() );
//  displayPtr->setImage( myImage );
  displayPtr->testOverlayDataRange( 0.0, 1.0 );
  displayPtr->setOverlay( myField );
  // Re-center the camera only when the image dimensions changed.
  if ( width != imagePtr->getExtent(0) || height != imagePtr->getExtent(1) )
  {
    width = imagePtr->getExtent(0);
    height = imagePtr->getExtent(1);
    displayPtr->getImage()->SetDisplayExtent(0,imagePtr->getExtent(0)-1,0,imagePtr->getExtent(1)-1,0,0);
    displayPtr->getRenderer()->GetActiveCamera()->SetPosition(
      static_cast<double>( imagePtr->getExtent(0) ) / 2.0,
      static_cast<double>( imagePtr->getExtent(1) ) / 2.0,
      0.5 * static_cast<double>( std::max( imagePtr->getExtent(0), imagePtr->getExtent(1) ) ) );
    displayPtr->getRenderer()->GetActiveCamera()->SetFocalPoint(
      static_cast<double>( imagePtr->getExtent(0) ) / 2.0,
      static_cast<double>( imagePtr->getExtent(1) ) / 2.0 , 0.0 );
    displayPtr->getRenderer()->GetActiveCamera()->ComputeViewPlaneNormal();
    displayPtr->getRenderer()->GetActiveCamera()->SetViewUp(0.0,-1.0,0.0);
    displayPtr->getRenderer()->GetActiveCamera()->OrthogonalizeViewUp();
    displayPtr->getRenderer()->ResetCamera();
  }
  displayPtr->update();
  myField->Delete();
#else
  // Draw the zoomed image
  ushort w = fieldPtr->getExtent(0);
  ushort h = fieldPtr->getExtent(1);
  bufferPixmap.resize( w * 7, h * 7 );
  QPainter p( &bufferPixmap );
  // Initialize painter to display the vector field
  if ( imagePtr && imagePtr->getExtent(0) > 0 && imagePtr->getExtent(1) > 0 )
  {
    p.scale( 7, 7 );
    QImage tmpImage;
    tmpImage.create( imagePtr->getExtent(0), imagePtr->getExtent(1), 32, 256*256*256 );
    // Scale factor mapping the image's data range onto 0..255.
    float intRange = 1.0 / static_cast<float>
      ( imagePtr->getDataRange().getMaximum() - imagePtr->getDataRange().getMinimum() + 1 ) * 256.0;
    for ( uint x = 0; x < imagePtr->getExtent(0); x++ )
      for ( uint y = 0; y < imagePtr->getExtent(1); y++ )
      {
        if ( imagePtr->getDataDimension() == 1 )
        {
          // Greyscale; rows are flipped vertically on output.
          ushort value = static_cast<int>( static_cast<float>
            ( (*imagePtr)(x,y) - imagePtr->getDataRange().getMinimum() ) * intRange );
          if ( value < 256 )
            tmpImage.setPixel( x, imagePtr->getExtent(1) - 1 - y, qRgb( value, value, value ) );
#ifdef DEBUG
          else
          {
            alog << LWARN << "\n **** Pixel value " << value << " too high ("
              << imagePtr->getDataRange().getMaximum() << ") ****" << endl;
          }
#endif
        }
        else if ( imagePtr->getDataDimension() == 3 )
          tmpImage.setPixel( x, y, qRgb( (*imagePtr)(x,imagePtr->getExtent(1) - 1 - y,0),
            (*imagePtr)(x,imagePtr->getExtent(1) - 1 - y,1),
            (*imagePtr)(x,imagePtr->getExtent(1) - 1 - y,2) ) );
        else if ( imagePtr->getDataDimension() == 4 )
          tmpImage.setPixel( x, y, qRgba( (*imagePtr)(x,imagePtr->getExtent(1) - 1 - y,0),
            (*imagePtr)(x,imagePtr->getExtent(1) - 1 - y,1),
            (*imagePtr)(x,imagePtr->getExtent(1) - 1 - y,2),
            (*imagePtr)(x,imagePtr->getExtent(1) - 1 - y,3) ) );
      }
    p.drawImage( 0, 0, tmpImage );
    p.scale( 1.0/7.0, 1.0/7.0);
  }
  else // Just clear the window
  {
    p.eraseRect( 0, 0, fieldPtr->getExtent(0) * 7, fieldPtr->getExtent(1) * 7 );
  }
  // Draw the vector field
  for ( uint x = 0; x < fieldPtr->getExtent(0); x++ )
    for ( uint y = 0; y < fieldPtr->getExtent(1); y++ )
    {
      double val = norm( (*fieldPtr)(x,imagePtr->getExtent(1) - 1 - y) );
      if ( val > 0.0 )
      {
        // Colour-code the magnitude over three stacked 0..255 bands.
        int v = static_cast<int>( val * 2550.0 );
        int v2 = 0;
        int v3 = 0;
        if ( v > 255 )
        {
          v2 = v - 255;
          if ( v2 > 255 )
            v3 = v2 - 255;
          v = std::min( v, 255 );
          v2 = std::min( v2, 255 );
          v3 = std::min( v3, 255 );
        }
        if ( v > 240 && v2 > 240 && v3 > 240 )
          // Very large vectors are shown in green
          p.setPen( Qt::green );
        else if ( v < 10 && v2 < 10 && v3 < 10 )
          // Very small vectors are shown in blue
          p.setPen( Qt::blue );
        else
          p.setPen( QColor( v, v2, v3 ) );
        // Draw a 3-pixel-long direction line plus a blue anchor dot.
        ushort ycoord = imagePtr->getExtent(1) - 1 - y;
        TVector2D draw ( (*fieldPtr)(x,ycoord) );
        draw *= ( 3.0 / val );
        p.drawLine( ( x * 7 ) + 3, ( y * 7 ) + 3,
          ( x * 7 ) + 3 + static_cast<int>( draw[0] ),
          ( y * 7 ) + 3 - static_cast<int>( draw[1] ) );
        p.setPen( Qt::blue );
        p.drawPoint( ( x * 7 ) + 3, ( y * 7 ) + 3 );
      }
    }
  displayPtr->setImage( bufferPixmap.convertToImage() );
  p.flush();
  p.end();
#endif
  BENCHSTOP;
}
/*!
  This function decodes some data into image changes.

  Returns the number of bytes consumed.

  Implementation: a byte-at-a-time state machine over the GIF stream
  (header, screen descriptor, colour maps, extensions, and LZW-coded
  image data blocks).  Decoded pixels are written straight into the
  caller's \a img; \a consumer (if non-null) is notified of size,
  changed regions, frame periods and looping.

  NOTE(review): the visible text of this function ends right after the
  switch/while closers - the trailing "return initial-length;" and the
  function's closing brace appear truncated in this chunk ("initial"
  is otherwise unused).  Confirm against the full file.
*/
int QGIFFormat::decode(QImage& img, QImageConsumer* consumer,
                       const uchar* buffer, int length)
{
    // We are required to state that
    // "The Graphics Interchange Format(c) is the Copyright property of
    // CompuServe Incorporated. GIF(sm) is a Service Mark property of
    // CompuServe Incorporated."
#define LM(l, m) (((m)<<8)|l)
    digress = FALSE;
    int initial = length;
    QRgb** line = (QRgb **)img.jumpTable();
    while (!digress && length) {
        length--;
        unsigned char ch=*buffer++;
        switch (state) {
          case Header:
            hold[count++]=ch;
            if (count==6) {
                // Header
                gif89=(hold[3]!='8' || hold[4]!='7');
                state=LogicalScreenDescriptor;
                count=0;
            }
            break;
          case LogicalScreenDescriptor:
            hold[count++]=ch;
            if (count==7) {
                // Logical Screen Descriptor
                swidth=LM(hold[0], hold[1]);
                sheight=LM(hold[2], hold[3]);
                gcmap=!!(hold[4]&0x80);
                //UNUSED: bpchan=(((hold[4]&0x70)>>3)+1);
                //UNUSED: gcmsortflag=!!(hold[4]&0x08);
                gncols=2<<(hold[4]&0x7);
                bgcol=(gcmap) ? hold[5] : -1;
                //aspect=hold[6] ? double(hold[6]+15)/64.0 : 1.0;

                trans_index = -1;
                count=0;
                ncols=gncols;
                if (gcmap) {
                    ccount=0;
                    state=GlobalColorMap;
                    globalcmap = new QRgb[gncols+1]; // +1 for trans_index
                    globalcmap[gncols] = Q_TRANSPARENT;
                } else {
                    state=Introducer;
                }
            }
            break;
          case GlobalColorMap:
          case LocalColorMap:
            // Accumulate RGB triples into the active colour map.
            hold[count++]=ch;
            if (count==3) {
                QRgb rgb = qRgb(hold[0], hold[1], hold[2]);
                if ( state == LocalColorMap ) {
                    if ( ccount < lncols )
                        localcmap[ccount] = rgb;
                } else {
                    globalcmap[ccount] = rgb;
                }
                if (++ccount >= ncols) {
                    if ( state == LocalColorMap )
                        state=TableImageLZWSize;
                    else
                        state=Introducer;
                }
                count=0;
            }
            break;
          case Introducer:
            hold[count++]=ch;
            switch (ch) {
              case ',':
                state=ImageDescriptor;
                break;
              case '!':
                state=ExtensionLabel;
                break;
              case ';':
                // Trailer: the stream is complete.
                if (consumer) {
                    if ( out_of_bounds ) // flush anything that survived
                        consumer->changed(QRect(0,0,swidth,sheight));
                    consumer->end();
                }
                state=Done;
                break;
              default:
                digress=TRUE;
                // Unexpected Introducer - ignore block
                state=Error;
            }
            break;
          case ImageDescriptor:
            hold[count++]=ch;
            if (count==10) {
                int newleft=LM(hold[1], hold[2]);
                int newtop=LM(hold[3], hold[4]);
                int width=LM(hold[5], hold[6]);
                int height=LM(hold[7], hold[8]);

                // disbelieve ridiculous logical screen sizes,
                // unless the image frames are also large.
                if ( swidth/10 > QMAX(width,200) )
                    swidth = -1;
                if ( sheight/10 > QMAX(height,200) )
                    sheight = -1;

                if ( swidth <= 0 )
                    swidth = newleft + width;
                if ( sheight <= 0 )
                    sheight = newtop + height;

                if (img.isNull()) {
                    // First frame: allocate the full logical screen.
                    img.create(swidth, sheight, 32);
                    memset( img.bits(), 0, img.numBytes() );
                    if (consumer) consumer->setSize(swidth, sheight);
                }
                img.setAlphaBuffer(trans_index >= 0);
                line = (QRgb **)img.jumpTable();

                disposePrevious( img, consumer );
                disposed = FALSE;

                left = newleft;
                top = newtop;

                // Sanity check frame size - must fit on "screen".
                if (left >= swidth) left=QMAX(0, swidth-1);
                if (top >= sheight) top=QMAX(0, sheight-1);
                if (left+width >= swidth) {
                    if ( width <= swidth )
                        left=swidth-width;
                    else
                        width=swidth-left;
                }
                if (top+height >= sheight) {
                    if ( height <= sheight )
                        top=sheight-height;
                    else
                        height=sheight-top;
                }

                right=QMAX( 0, left+width-1);
                bottom=QMAX(0, top+height-1);

                lcmap=!!(hold[9]&0x80);
                interlace=!!(hold[9]&0x40);
                //bool lcmsortflag=!!(hold[9]&0x20);
                lncols=lcmap ? (2<<(hold[9]&0x7)) : 0;
                if (lncols) {
                    if ( localcmap )
                        delete [] localcmap;
                    localcmap = new QRgb[lncols+1]; // +1 for trans_index
                    localcmap[lncols] = Q_TRANSPARENT;
                    ncols = lncols;
                } else {
                    ncols = gncols;
                }

                frame++;
                if ( frame == 0 ) {
                    if ( left || top || width!=swidth || height!=sheight ) {
                        // Not full-size image - erase with bg or transparent
                        if ( trans_index >= 0 ) {
                            fillRect(img, 0, 0, swidth, sheight, color(trans_index));
                            if (consumer) consumer->changed(QRect(0,0,swidth,sheight));
                        } else if ( bgcol>=0 ) {
                            fillRect(img, 0, 0, swidth, sheight, color(bgcol));
                            if (consumer) consumer->changed(QRect(0,0,swidth,sheight));
                        }
                    }
                }

                if ( disposal == RestoreImage ) {
                    // Snapshot the covered area so it can be restored
                    // after this frame is disposed.
                    int l = QMIN(swidth-1,left);
                    int r = QMIN(swidth-1,right);
                    int t = QMIN(sheight-1,top);
                    int b = QMIN(sheight-1,bottom);
                    int w = r-l+1;
                    int h = b-t+1;

                    if (backingstore.width() < w
                        || backingstore.height() < h) {
                        // We just use the backing store as a byte array
                        backingstore.create( QMAX(backingstore.width(), w),
                                             QMAX(backingstore.height(), h),
                                             32);
                        // NOTE(review): this clears the *display* image
                        // right after growing the backing store -
                        // presumably backingstore.bits()/numBytes() was
                        // intended here.  Verify before changing.
                        memset( img.bits(), 0, img.numBytes() );
                    }
                    for (int ln=0; ln<h; ln++) {
                        memcpy(backingstore.scanLine(ln),
                               line[t+ln]+l, w*sizeof(QRgb));
                    }
                }

                count=0;
                if (lcmap) {
                    ccount=0;
                    state=LocalColorMap;
                } else {
                    state=TableImageLZWSize;
                }
                x = left;
                y = top;
                accum = 0;
                bitcount = 0;
                sp = stack;
                needfirst = FALSE;
                out_of_bounds = FALSE;
            }
            break;
          case TableImageLZWSize: {
            // Minimum LZW code size byte: initialise the code table.
            lzwsize=ch;
            if ( lzwsize > max_lzw_bits ) {
                state=Error;
            } else {
                code_size=lzwsize+1;
                clear_code=1<<lzwsize;
                end_code=clear_code+1;
                max_code_size=2*clear_code;
                max_code=clear_code+2;
                int i;
                for (i=0; i<clear_code && i<(1<<max_lzw_bits); i++) {
                    table[0][i]=0;
                    table[1][i]=i;
                }
                for (i=clear_code; i<(1<<max_lzw_bits); i++) {
                    table[0][i]=table[1][i]=0;
                }
                state=ImageDataBlockSize;
            }
            count=0;
            break;
          } case ImageDataBlockSize:
            expectcount=ch;
            if (expectcount) {
                state=ImageDataBlock;
            } else {
                // Zero-length sub-block terminates the frame.
                if (consumer) {
                    consumer->frameDone();
                    digress = TRUE;
                }
                state=Introducer;
            }
            break;
          case ImageDataBlock:
            // LZW decode: shift the byte into the bit accumulator and
            // emit pixels for every complete code.
            count++;
            accum|=(ch<<bitcount);
            bitcount+=8;
            while (bitcount>=code_size && state==ImageDataBlock) {
                int code=accum&((1<<code_size)-1);
                bitcount-=code_size;
                accum>>=code_size;

                if (code==clear_code) {
                    // Reset the code table to its initial state.
                    if (!needfirst) {
                        int i;
                        code_size=lzwsize+1;
                        max_code_size=2*clear_code;
                        max_code=clear_code+2;
                        for (i=0; i<clear_code; i++) {
                            table[0][i]=0;
                            table[1][i]=i;
                        }
                        for (i=clear_code; i<(1<<max_lzw_bits); i++) {
                            table[0][i]=table[1][i]=0;
                        }
                    }
                    needfirst=TRUE;
                } else if (code==end_code) {
                    bitcount = -32768; // stop decoding; let the block end arrive
                } else {
                    if (needfirst) {
                        // First code after a clear: emit it literally.
                        firstcode=oldcode=code;
                        if (!out_of_bounds && line && firstcode!=trans_index)
                            line[y][x] = color(firstcode);
                        x++;
                        if (x>=swidth) out_of_bounds = TRUE;
                        needfirst=FALSE;
                        if (x>right) {
                            x=left;
                            if (out_of_bounds)
                                out_of_bounds = left>=swidth || y>=sheight;
                            nextY(img,consumer);
                        }
                    } else {
                        // Expand the code's string onto the stack.
                        incode=code;
                        if (code>=max_code) {
                            *sp++=firstcode;
                            code=oldcode;
                        }
                        while (code>=clear_code) {
                            *sp++=table[1][code];
                            if (code==table[0][code]) {
                                state=Error;
                                break;
                            }
                            if (sp-stack>=(1<<(max_lzw_bits))*2) {
                                state=Error;
                                break;
                            }
                            code=table[0][code];
                        }
                        *sp++=firstcode=table[1][code];
                        // Register the new table entry.
                        code=max_code;
                        if (code<(1<<max_lzw_bits)) {
                            table[0][code]=oldcode;
                            table[1][code]=firstcode;
                            max_code++;
                            if ((max_code>=max_code_size)
                                && (max_code_size<(1<<max_lzw_bits))) {
                                max_code_size*=2;
                                code_size++;
                            }
                        }
                        oldcode=incode;
                        // Pop the stack, writing pixels left-to-right.
                        while (sp>stack) {
                            --sp;
                            if (!out_of_bounds && *sp!=trans_index)
                                line[y][x] = color(*sp);
                            x++;
                            if (x>=swidth) out_of_bounds = TRUE;
                            if (x>right) {
                                x=left;
                                if (out_of_bounds)
                                    out_of_bounds = left>=swidth || y>=sheight;
                                nextY(img,consumer);
                            }
                        }
                    }
                }
            }
            if (count==expectcount) {
                count=0;
                state=ImageDataBlockSize;
            }
            break;
          case ExtensionLabel:
            switch (ch) {
              case 0xf9:
                state=GraphicControlExtension;
                break;
              case 0xff:
                state=ApplicationExtension;
                break;
#if 0
              case 0xfe:
                state=CommentExtension;
                break;
              case 0x01:
                break;
#endif
              default:
                state=SkipBlockSize;
            }
            count=0;
            break;
          case ApplicationExtension:
            if (count<11) hold[count]=ch;
            count++;
            if (count==hold[0]+1) {
                if (qstrncmp((char*)(hold+1), "NETSCAPE", 8)==0) {
                    // Looping extension
                    state=NetscapeExtensionBlockSize;
                } else {
                    state=SkipBlockSize;
                }
                count=0;
            }
            break;
          case NetscapeExtensionBlockSize:
            expectcount=ch;
            count=0;
            if (expectcount) state=NetscapeExtensionBlock;
            else state=Introducer;
            break;
          case NetscapeExtensionBlock:
            if (count<3) hold[count]=ch;
            count++;
            if (count==expectcount) {
                int loop = hold[0]+hold[1]*256;
                if (consumer) consumer->setLooping(loop);
                state=SkipBlockSize; // Ignore further blocks
            }
            break;
          case GraphicControlExtension:
            if (count<5) hold[count]=ch;
            count++;
            if (count==hold[0]+1) {
                disposePrevious( img, consumer );
                disposal=Disposal((hold[1]>>2)&0x7);
                //UNUSED: waitforuser=!!((hold[1]>>1)&0x1);
                int delay=count>3 ? LM(hold[2], hold[3]) : 1;
                // IE and mozilla use a minimum delay of 10. With the minumum delay of 10
                // we are compatible to them and avoid huge loads on the app and xserver.
                if ( delay < 10 )
                    delay = 10;

                bool havetrans=hold[1]&0x1;
                trans_index = havetrans ? hold[4] : -1;

                if (consumer) consumer->setFramePeriod(delay*10);
                count=0;
                state=SkipBlockSize;
            }
            break;
          case SkipBlockSize:
            expectcount=ch;
            count=0;
            if (expectcount) state=SkipBlock;
            else state=Introducer;
            break;
          case SkipBlock:
            count++;
            if (count==expectcount) state=SkipBlockSize;
            break;
          case Done:
            digress=TRUE;
            /* Netscape ignores the junk, so we do too.
            length++; // Unget
            state=Error; // More calls to this is an error
            */
            break;
          case Error:
            return -1; // Called again after done.
        }
    }
// Configure libpng's output transformations and allocate a matching QImage.
//
// Chooses a QImage depth/format from the PNG's color type and bit depth,
// installs the png_set_* transforms needed so libpng emits pixels in that
// layout, and calls png_read_update_info() so subsequent row reads reflect
// the transforms.  If QImage::create() fails the function returns early,
// leaving `image` null.
//
// screen_gamma == 0.0 (the default) disables gamma correction.
static void setup_qt( QImage& image, png_structp png_ptr, png_infop info_ptr, float screen_gamma=0.0 )
{
    // Gamma-correct only when the file records its own gamma value.
    if ( screen_gamma != 0.0 && png_get_valid(png_ptr, info_ptr, PNG_INFO_gAMA) ) {
        double file_gamma;
        png_get_gAMA(png_ptr, info_ptr, &file_gamma);
        png_set_gamma( png_ptr, screen_gamma, file_gamma );
    }

    png_uint_32 width;
    png_uint_32 height;
    int bit_depth;
    int color_type;
    png_get_IHDR(png_ptr, info_ptr, &width, &height, &bit_depth, &color_type,
        0, 0, 0);

    if ( color_type == PNG_COLOR_TYPE_GRAY ) {
        // Black & White or 8-bit grayscale
        if ( bit_depth == 1 && info_ptr->channels == 1 ) {
            // 1-bit monochrome: invert so PNG "white" lands on palette
            // index 0 of the 2-entry QImage palette set below.
            png_set_invert_mono( png_ptr );
            png_read_update_info( png_ptr, info_ptr );
            if (!image.create( width, height, 1, 2, QImage::BigEndian ))
                return;
            image.setColor( 1, qRgb(0,0,0) );
            image.setColor( 0, qRgb(255,255,255) );
        } else if (bit_depth == 16
                   && png_get_valid(png_ptr, info_ptr, PNG_INFO_tRNS)) {
            // 16-bit gray with transparency: expand tRNS into a real alpha
            // channel, strip to 8 bits, and promote gray to RGB so the data
            // fits a 32-bpp ARGB QImage.
            png_set_expand(png_ptr);
            png_set_strip_16(png_ptr);
            png_set_gray_to_rgb(png_ptr);
            if (!image.create(width, height, 32))
                return;
            image.setAlphaBuffer(TRUE);
            if (QImage::systemByteOrder() == QImage::BigEndian)
                png_set_swap_alpha(png_ptr);
            png_read_update_info(png_ptr, info_ptr);
        } else {
            // Other grayscale depths: reduce to an 8-bpp image with a
            // linear gray palette (16-bit stripped down, <8-bit unpacked
            // to one byte per pixel).
            if ( bit_depth == 16 )
                png_set_strip_16(png_ptr);
            else if ( bit_depth < 8 )
                png_set_packing(png_ptr);
            int ncols = bit_depth < 8 ? 1 << bit_depth : 256;
            png_read_update_info(png_ptr, info_ptr);
            if (!image.create(width, height, 8, ncols))
                return;
            for (int i=0; i<ncols; i++) {
                int c = i*255/(ncols-1);
                image.setColor( i, qRgba(c,c,c,0xff) );
            }
            if ( png_get_valid(png_ptr, info_ptr, PNG_INFO_tRNS) ) {
                // One gray level may be transparent: clear that palette
                // entry's alpha byte.
                const int g = info_ptr->trans_values.gray;
                if (g < ncols) {
                    image.setAlphaBuffer(TRUE);
                    image.setColor(g, image.color(g) & RGB_MASK);
                }
            }
        }
    } else if ( color_type == PNG_COLOR_TYPE_PALETTE
            && png_get_valid(png_ptr, info_ptr, PNG_INFO_PLTE)
            && info_ptr->num_palette <= 256 )
    {
        // 1-bit and 8-bit color
        if ( bit_depth != 1 )
            png_set_packing( png_ptr );
        png_read_update_info( png_ptr, info_ptr );
        // Re-read the header: png_set_packing() may have changed bit_depth.
        png_get_IHDR(png_ptr, info_ptr,
            &width, &height, &bit_depth, &color_type, 0, 0, 0);
        if (!image.create(width, height, bit_depth, info_ptr->num_palette,
            QImage::BigEndian))
            return;
        int i = 0;
        if ( png_get_valid(png_ptr, info_ptr, PNG_INFO_tRNS) ) {
            // The first num_trans palette entries carry per-entry alpha
            // from the tRNS chunk.
            image.setAlphaBuffer( TRUE );
            while ( i < info_ptr->num_trans ) {
                image.setColor(i, qRgba(
                    info_ptr->palette[i].red,
                    info_ptr->palette[i].green,
                    info_ptr->palette[i].blue,
                    info_ptr->trans[i]
                    )
                );
                i++;
            }
        }
        // Remaining palette entries are fully opaque.
        while ( i < info_ptr->num_palette ) {
            image.setColor(i, qRgba(
                info_ptr->palette[i].red,
                info_ptr->palette[i].green,
                info_ptr->palette[i].blue,
                0xff
                )
            );
            i++;
        }
    } else {
        // 32-bit
        if ( bit_depth == 16 )
            png_set_strip_16(png_ptr);

        png_set_expand(png_ptr);

        if ( color_type == PNG_COLOR_TYPE_GRAY_ALPHA )
            png_set_gray_to_rgb(png_ptr);

        if (!image.create(width, height, 32))
            return;

        // Only add filler if no alpha, or we can get 5 channel data.
        if (!(color_type & PNG_COLOR_MASK_ALPHA)
            && !png_get_valid(png_ptr, info_ptr, PNG_INFO_tRNS)) {
            png_set_filler(png_ptr, 0xff,
                QImage::systemByteOrder() == QImage::BigEndian ?
                    PNG_FILLER_BEFORE : PNG_FILLER_AFTER);
            // We want 4 bytes, but it isn't an alpha channel
        } else {
            image.setAlphaBuffer(TRUE);
        }

        if ( QImage::systemByteOrder() == QImage::BigEndian ) {
            png_set_swap_alpha(png_ptr);
        }

        png_read_update_info(png_ptr, info_ptr);
    }

    // Qt==ARGB==Big(ARGB)==Little(BGRA)
    if ( QImage::systemByteOrder() == QImage::LittleEndian ) {
        png_set_bgr(png_ptr);
    }
}
QImage splash_read_jpeg_image(FILE* f) { QImage image; struct jpeg_decompress_struct cinfo; struct my_jpeg_source_mgr *iod_src = new my_jpeg_source_mgr(f); struct my_error_mgr jerr; jpeg_create_decompress(&cinfo); cinfo.src = iod_src; cinfo.err = jpeg_std_error(&jerr); jerr.error_exit = my_error_exit; if (!setjmp(jerr.setjmp_buffer)) { #if defined(Q_OS_UNIXWARE) (void) jpeg_read_header(&cinfo, B_TRUE); #else (void) jpeg_read_header(&cinfo, TRUE); #endif (void) jpeg_start_decompress(&cinfo); { bool created = FALSE; if ( cinfo.output_components == 3 || cinfo.output_components == 4) { created = image.create( cinfo.output_width, cinfo.output_height, 32 ); } else if ( cinfo.output_components == 1 ) { created = image.create( cinfo.output_width, cinfo.output_height, 8, 256 ); for (int i=0; i<256; i++) image.setColor(i, qRgb(i,i,i)); } else { // Unsupported format } if (!created) image = QImage(); if (!image.isNull()) { uchar** lines = image.jumpTable(); while (cinfo.output_scanline < cinfo.output_height) (void) jpeg_read_scanlines(&cinfo, lines + cinfo.output_scanline, cinfo.output_height); (void) jpeg_finish_decompress(&cinfo); if ( cinfo.output_components == 3 ) { // Expand 24->32 bpp. for (uint j=0; j<cinfo.output_height; j++) { uchar *in = image.scanLine(j) + cinfo.output_width * 3; QRgb *out = (QRgb*)image.scanLine(j); for (uint i=cinfo.output_width; i--; ) { in-=3; out[i] = qRgb(in[0], in[1], in[2]); } } } } } if (!image.isNull()) { if ( cinfo.density_unit == 1 ) { image.setDotsPerMeterX( int(100. * cinfo.X_density / 2.54) ); image.setDotsPerMeterY( int(100. * cinfo.Y_density / 2.54) ); } else if ( cinfo.density_unit == 2 ) { image.setDotsPerMeterX( int(100. * cinfo.X_density) ); image.setDotsPerMeterY( int(100. * cinfo.Y_density) ); } } } jpeg_destroy_decompress(&cinfo); delete iod_src; return image; }
void ImageOperations::featureScale( const OpGrayImage& img, OpGrayImage& imgRet ) { QImage qimg = img.getQtImage(); QImage qimgRet; qimgRet.create(qimg.width()/2, qimg.height()/2, 32); for( int y=0; y<qimg.height(); y+=2 ) { for( int x=0; x<qimg.width(); x+=2 ) { if( (int(x/2) < qimgRet.width()) && (int(y/2) < qimgRet.height())) { if( (qRed (qimg.pixel(x,y)) == 0) && // If pixel in (qGreen(qimg.pixel(x,y)) == 0) && // image contains feature (qBlue (qimg.pixel(x,y)) == 0)) { // set in scaled qimgRet.setPixel(int(x/2), int(y/2), qRgb(0,0,0)); } else { if ((x < qimg.width()-1) && // Else, if pixel ((qRed (qimg.pixel(x+1,y)) == 0) && // to the right (qGreen(qimg.pixel(x+1,y)) == 0) && // contains feature (qBlue (qimg.pixel(x+1,y)) == 0))) { // set in scaled. qimgRet.setPixel(int(x/2), int(y/2), qRgb(0,0,0)); } else { if( (y < qimg.height()-1) && // Else if pixel ((qRed (qimg.pixel(x,y+1)) == 0) && // below contains (qGreen(qimg.pixel(x,y+1)) == 0) && // feature, set (qBlue (qimg.pixel(x,y+1)) == 0))) { // in scaled. qimgRet.setPixel(int(x/2), int(y/2), qRgb(0,0,0)); } else { // Else if pixel if( ((x < qimg.width()-1) && // to the right (y < qimg.height()-1)) && // and below ((qRed (qimg.pixel(x,y+1)) == 0) && // contains (qGreen(qimg.pixel(x,y+1)) == 0) && // feature, set (qBlue (qimg.pixel(x,y+1)) == 0))) { // it in scaled. qimgRet.setPixel(int(x/2), int(y/2), qRgb(0,0,0)); } else { // Else set qimgRet.setPixel(int(x/2), int(y/2), // non-feature in qRgb(255,255,255)); // scaled. } } } } } } } OpGrayImage tmp(qimgRet); imgRet = tmp; }