Example #1
0
void ScanGallery::slotExportFile()
{
    FileTreeViewItem *curr = highlightedFileTreeViewItem();
    if (curr==NULL) return;

    if (curr->isDir())
    {
        kDebug() << "Not yet implemented!";
        return;
    }

    KUrl fromUrl(curr->url());

    QString filter;
    ImageFormat format = getImgFormat(curr);
    if (format.isValid()) filter = "*."+format.extension()+"|"+format.mime()->comment()+"\n";
// TODO: do we need the below?
    filter += "*|"+i18n("All Files");

    QString initial = "kfiledialog:///exportImage/"+fromUrl.fileName();
    KUrl fileName = KFileDialog::getSaveUrl(KUrl(initial), filter, this);
    if (!fileName.isValid()) return;			// didn't get a file name
    if (fromUrl==fileName) return;			// can't save over myself

    /* Since it is asynchron, we will never know if it succeeded. */
    ImgSaver::copyImage(fromUrl, fileName);
}
Example #2
0
/**
 * Estimate the file size in bytes that the image would occupy when saved
 * in the given format.
 *
 * Starts from the raw byte count (the 'bytes' member) and applies a
 * per-format heuristic: subtract a typical fixed header size, then divide
 * by an empirical packing/compression factor.  These constants are rough
 * estimates only — TODO confirm against the actual encoders.
 *
 * @param imageFormat  the target image format
 * @return estimated size in bytes, clamped to be non-negative
 */
double ImageFileSize::bytesByFormat(const ImageFormat &imageFormat) const
{
    double fileSize = bytes;
    if (imageFormat.isBmp()) {
        fileSize -= 54;				// typical BMP header size
        fileSize /= 3;
    }
    else if (imageFormat.isPpm()) {
        fileSize -= 17;				// typical PPM text header
        fileSize /= 3;
    }
    else if (imageFormat.isIco()) {
        fileSize -= 1422;
        fileSize /= 4;
    }
    else if (imageFormat.isTiff()) {
        fileSize -= 14308;
        fileSize /= 4;
    }
    else if (imageFormat.isXbm()) {
        fileSize -= 60;
        fileSize /= 0.65;			// XBM text encoding expands the data
    }
    // For very small inputs the header subtraction can push the estimate
    // below zero; a negative file size is meaningless, so clamp it.
    if (fileSize < 0) fileSize = 0;
    return fileSize;
}
/**
 * Prepare per-camera OpenCV image headers for logging.
 *
 * For every camera in the multi-camera description this records whether
 * OpenCV pads each image row (widthStep aligned to 4/8 bytes), creates an
 * IplImage header (data allocated only when padding forces a copy), and
 * pre-allocates a 3-channel buffer for Bayer-to-colour conversion where the
 * source format is a Bayer pattern.  Compiled to a no-op without OpenCV.
 */
void
MultiCameraWriter::initMultiImageLogWriter(const orca::MultiCameraDescriptionPtr &descr)
{
#ifdef OPENCV_FOUND
    for( unsigned int i=0; i<descr->descriptions.size(); ++i ) {
	isPadded_[i] = false;
	// Get the properties of each camera
	std::string format = descr->descriptions[i]->format;
	int width = descr->descriptions[i]->width;
        int height = descr->descriptions[i]->height;
        // class to search for image format properties
        ImageFormat imageFormat = ImageFormat::find(format);
        int numChannels = imageFormat.getNumberOfChannels();
        // bits per channel, assuming channels of equal depth
        int depth = imageFormat.getBitsPerPixel() / numChannels;
        // check if opencv has padded the byte array so that the width is a multiple of 4 or 8 bytes
        orcaByteWidth_[i] = width*numChannels;
        // Don't allocate image space yet -- header only; pixel data can be
        // pointed at the incoming buffer when no padding is involved
        cvImage_[i] = cvCreateImageHeader(cvSize(width, height), depth, numChannels);
        if (orcaByteWidth_[i] != cvImage_[i]->widthStep)
        {
            isPadded_[i] = true;
        }
        if (isPadded_[i])
            // Allocate space, we will need to copy image data properly
            cvCreateData(&cvImage_[i]);
        // Allocate memory for bayer image conversion (8-bit, 3 channels)
        if (format == "BayerBG8" ||
            format == "BayerGB8"||
            format == "BayerRG8" ||
            format == "BayerGR8")
                cvBayer_[i] = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
    }
#endif
}
Example #4
0
File: util.cpp Project: nyorain/ny
/**
 * Derive an ImageFormat description from an xcb visual's color masks.
 *
 * Parses the red/green/blue masks (and an implied alpha mask when depth is
 * 32) into {channel, offset, size} runs, sorts them by bit position, and
 * emits them — with explicit 'none' filler entries for any unused bit gaps —
 * into the returned ImageFormat.
 *
 * @param v      the xcb visual whose masks describe the pixel layout
 * @param depth  the visual's depth in bits; 32 implies an alpha channel
 * @return the corresponding ImageFormat
 */
ImageFormat visualToFormat(const xcb_visualtype_t& v, unsigned int depth)
{
	//the visual does only have an alpha channel if its depth is 32 bits
	auto alphaMask = 0u;
	if(depth == 32) alphaMask = 0xFFFFFFFFu & ~(v.red_mask | v.green_mask | v.blue_mask);

	//represents a color mask channel
	struct Channel
	{
		ColorChannel color;
		unsigned int offset;
		unsigned int size;
	} channels[4];

	//Converts a given mask to a Channel struct: offset of the lowest set
	//bit and the length of the contiguous run of set bits starting there.
	auto parseMask = [](ColorChannel color, unsigned int mask) {
		auto active = false;
		Channel ret {color, 0u, 0u};

		for(auto i = 0u; i < 32; ++i)
		{
			if(mask & (1u << i))	//unsigned literal: 1 << 31 would be signed overflow
			{
				if(!active)
				{
					ret.offset = i;
					//BUGFIX: 'active' was never set, so the offset kept
					//advancing to the highest set bit and the early break
					//below never triggered.
					active = true;
				}
				ret.size++;
			}
			else if(active)
			{
				break;	//end of the contiguous run
			}
		}

		return ret;
	};

	//parse the color masks
	channels[0] = parseMask(ColorChannel::red, v.red_mask);
	channels[1] = parseMask(ColorChannel::green, v.green_mask);
	channels[2] = parseMask(ColorChannel::blue, v.blue_mask);
	channels[3] = parseMask(ColorChannel::alpha, alphaMask);

	//sort them by the order they appear
	std::sort(std::begin(channels), std::end(channels),
		[](auto& a, auto& b){ return a.offset < b.offset; });

	//insert them (with offsets if needed) into the returned ImageFormat
	ImageFormat ret {};

	//'prev' is the first bit position after the previously emitted channel,
	//so a filler is needed whenever the next channel starts later than that.
	//BUGFIX: the old 'prev + 1' comparison missed one-bit gaps and made
	//every filler one bit too small.
	auto prev = 0u;
	auto it = ret.begin();
	for(auto channel : channels)
	{
		if(channel.offset > prev) *(it++) = {ColorChannel::none, channel.offset - prev};
		*(it++) = {channel.color, channel.size};
		prev = channel.offset + channel.size;
	}

	return ret;
}
Example #5
0
/**
 * Draw an image as a screen-space textured quad.
 *
 * Chooses the cheapest upload path for the image data (CPU conversion,
 * GL/CUDA pixel buffer transfer, or plain upload), positions the quad from
 * 'pos'/'align' in the current vector-graphics transform, and optionally
 * flips it vertically for top-to-bottom images.  No-op for empty images.
 */
void GLContext::drawImage(const Image& image, const Vec4f& pos, const Vec2f& align, bool topToBottom)
{
    const Vec2i& imgSize = image.getSize();
    if (imgSize.min() <= 0)
        return;						// degenerate image, nothing to draw

    Buffer& buf = image.getBuffer();
    ImageFormat format = image.getFormat().getGLFormat();
    const ImageFormat::StaticFormat* sf = format.getStaticFormat();

    glActiveTexture(GL_TEXTURE0);
    const Vec2i& texSize = bindTempTexture(imgSize);

    // Format is not supported by GL => convert and upload.
    // (Also taken when the stride is non-contiguous, since glTexSubImage2D
    // here assumes tightly packed rows.)

    if (image.getFormat() != format || image.getStride() != imgSize.x * format.getBPP())
    {
        Image converted(imgSize, format);
        converted = image;				// assignment performs the format conversion
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, imgSize.x, imgSize.y, sf->glFormat, sf->glType, converted.getPtr());
    }

    // Data is already on the GPU => transfer to the texture.

    else if (buf.getOwner() == Buffer::GL || (buf.getOwner() == Buffer::Cuda && (buf.getHints() & Buffer::Hint_CudaGL) != 0))
    {
        // With a bound PIXEL_UNPACK_BUFFER, the data pointer is an offset (0).
        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, buf.getGLBuffer());
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, imgSize.x, imgSize.y, sf->glFormat, sf->glType, NULL);
        glBindBuffer(GL_PIXEL_UNPACK_BUFFER, 0);
    }

    // Otherwise => upload.

    else
    {
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, imgSize.x, imgSize.y, sf->glFormat, sf->glType, buf.getPtr());
    }

    // Determine orientation: transform the anchor point, scale the image
    // extent into view space, and offset by the alignment fraction.

    Vec4f posLo = m_vgXform * pos;
    Vec2f posRange = Vec2f(imgSize) * m_viewScale * posLo.w;
    posLo -= Vec4f(align * posRange, 0.0f, 0.0f);
    Vec2f posHi = posLo.getXY() + posRange;

    if (topToBottom)
        swap(posLo.y, posHi.y);				// flip vertically

    // Draw texture. The temp texture may be larger than the image, so only
    // the imgSize/texSize fraction of texture coordinates is sampled.

    glPushAttrib(GL_ENABLE_BIT);
    glDisable(GL_CULL_FACE);
    drawTexture(0, posLo, posHi, Vec2f(0.0f), Vec2f(imgSize) / Vec2f(texSize));
    glPopAttrib();
    checkErrors();
}
Example #6
0
/**
 * Update the gallery display columns and icon for a tree item.
 *
 * For file items: shows the image format name, and either the loaded
 * image's depth icon and pixel dimensions, or (when not yet loaded) a
 * placeholder icon with the on-disk file size.  Also activates the item
 * if it is the URL we have been waiting for after a scan (m_nextUrlToShow).
 */
void ScanGallery::slotDecorate(FileTreeViewItem *item)
{
    if (item==NULL) return;

    if (!item->isDir())					// dir is done in another slot
    {
        ImageFormat format = getImgFormat(item);	// this is safe for any file
        item->setText(2,(" "+format.name()+" "));

        const KookaImage *img = imageForItem(item);
        if (img!=NULL)					// image appears to be loaded
        {						// set image depth pixmap
            if (img->depth()==1) item->setIcon(0, mPixBw);
            else
            {
                if (img->isGrayscale()) item->setIcon(0, mPixGray);
                else item->setIcon(0, mPixColor);
            }
							// set image size column
            QString t = i18n(" %1 x %2", img->width(), img->height());
            item->setText(1,t);
        }
        else						// not yet loaded, show file info
        {
            if (format.isValid())			// if a valid image file
            {
                item->setIcon(0, mPixFloppy);
                const KFileItem *kfi = item->fileItem();
                if (!kfi->isNull()) item->setText(1, (" "+KIO::convertSize(kfi->size())));
            }
            else					// unrecognised, fall back to MIME icon
            {
                item->setIcon(0, KIO::pixmapForUrl(item->url(), 0, KIconLoader::Small));
            }
        }
    }

    // This code is quite similar to m_nextUrlToSelect in FileTreeView::slotNewTreeViewItems
    // When scanning a new image, we wait for the KDirLister to notice the new file,
    // and then we have the FileTreeViewItem that we need to display the image.
    if (!m_nextUrlToShow.isEmpty())
    {
        if (m_nextUrlToShow.equals(item->url(), KUrl::CompareWithoutTrailingSlash))
        {
            m_nextUrlToShow = KUrl();			// do this first to prevent recursion
            slotItemActivated(item);
            setCurrentItem(item);			// necessary in case of new file from D&D
        }
    }
}
Example #7
0
/**
 * Build the file name actually used when the user renames an image.
 *
 * cmplFilename is the name the user wishes; currFormat is the current
 * format of the image.  If the wanted name carries no extension, or the
 * same extension as the current format, the name is accepted (with the
 * current extension appended where it was missing).  A differing extension
 * would require on-the-fly format conversion, which is not supported yet:
 * the user is informed and the current extension is forced.
 */
static QString buildNewFilename(const QString &cmplFilename, const ImageFormat &currFormat)
{
   const QFileInfo wanted(cmplFilename);
   const QString stem = wanted.baseName();
   const QString wantedExt = wanted.suffix().toLower();
   const QString currentExt = currFormat.extension();

   kDebug() << "Filename wanted:"<< cmplFilename << "ext" << currentExt << "->" << wantedExt;

   if (wantedExt.isEmpty())
   {
      /* no extension given -> append the current format's extension */
      return (stem + "." + currentExt);
   }

   if (wantedExt==currentExt)
   {
      /* extension unchanged -> accept the name as entered */
      return (cmplFilename);
   }

   /* A different extension would mean converting the image format,
    * which is not implemented.  Tell the user, then force the
    * current extension. */
   KMessageBox::sorry(NULL, i18n( "You entered a file extension that differs from the existing one. That is not yet possible. Converting 'on the fly' is planned for a future release.\n"
				      "Kooka corrects the extension."),
			  i18n("On the Fly Conversion"));
   return (stem + "." + currentExt);
}
Example #8
0
/**
 * Called by the image loader when a new (animation) frame begins.
 *
 * Validates the frame dimensions, allocates the raw and pixmap planes for
 * the frame, and links the new frame into the frame chain (or makes it the
 * first/original frame).  Resets the loader scanline counter for the new
 * frame.  On an unacceptable size the whole load is aborted via loadError().
 */
void Image::notifyAppendFrame(int fwidth, int fheight, const ImageFormat& format)
{
    if (!ImageManager::isAcceptableSize(fwidth, fheight)) {
        kWarning() << "ImageLoader somehow fed us an illegal size, killing it!";
        loadError();
        return;
    }

    //Create the new frame.
    QImage image = format.makeImage (fwidth, fheight);
    //IMPORTANT: we use image.width(), etc., below for security/paranoia
    //reasons -- so we e.g. end up with a size 0 image if QImage overflow
    //checks kick in, etc. This is on top of the policy enforcement
    //enough, in case someone breaks it or such
    RawImagePlane* iplane = new RawImagePlane(image.width(), image.height());
    iplane->image         = image;
    iplane->format        = format;
    // The pixmap plane takes ownership of the raw plane.
    PixmapPlane*   plane  = new PixmapPlane  (image.width(), image.height(), iplane);

    if (loaderPlane) //Had a previous plane
    {
        //Append to the frame chain and make the new frame current.
        loaderPlane->nextFrame = plane;
        loaderPlane            = plane;
    }
    else
    {
        //Created the first one
        loaderPlane = original = plane;
    }

    //Go through the list of scaled sizes, and build frames for that.

    loaderScanline = 0;
}
bool FIPAdaptiveThreshold::init()
{
    bool rValue=ImageProcessor::init();
    //Note: SharedImageBuffer of downstream producer is initialised with storage in ImageProcessor::init.
    
    size_t maxScratchDataSize=0;
    size_t maxScratchDataValues=0;
    
    for (uint32_t i=0; i<ImagesPerSlot_; i++)
    {
        const ImageFormat imFormat=getUpstreamFormat(i);
        
        const size_t width=imFormat.getWidth();
        const size_t height=imFormat.getHeight();
        
        const size_t bytesPerPixel=imFormat.getBytesPerPixel();
        const size_t componentsPerPixel=imFormat.getComponentsPerPixel();
        
        const size_t scratchDataSize = width * height * bytesPerPixel;
        const size_t scratchDataValues = width * height * componentsPerPixel;
        
        if (scratchDataSize>maxScratchDataSize)
        {
            maxScratchDataSize=scratchDataSize;
        }
        
        if (scratchDataValues>maxScratchDataValues)
        {
            maxScratchDataValues=scratchDataValues;
        }
    }
    
    //Allocate a buffer big enough for any of the image slots.
    _noiseFilteredInputData=new uint8_t[maxScratchDataSize];
    _scratchData=new uint8_t[maxScratchDataSize];
    memset(_noiseFilteredInputData, 0, maxScratchDataSize);
    memset(_scratchData, 0, maxScratchDataSize);
    
    _integralImageScratchData=new double[maxScratchDataValues];
    memset(_integralImageScratchData, 0, maxScratchDataValues*sizeof(double));
    
    return rValue;
}
/**
 * Return the color channel stored at position _index in the given
 * format's channel ordering.
 */
typename ImageFormat::color_format::available_channels
format_channel(
	ImageFormat const &_format,
	sge::image::size_type const _index
)
{
	// The format store's 'order' array defines the channel layout.
	return (*_format.format_store().get()).order[_index];
}
/**
 * Allocate the MultiCameraData object and size each camera's pixel buffer.
 *
 * Creates one orca::ImageData per camera description and resizes its
 * pixel buffer to width * height * channels bytes, where the channel count
 * is looked up from the camera's image format.  The OpenCV image header
 * vector is sized but headers are not created here.
 */
void
MultiCameraReader::initDataStorage()
{
    // Create new MultiCameraData object
    data_ = new orca::MultiCameraData();
#ifdef OPENCV_FOUND
    // Resize variables appropriately
    cvImage_.resize(descr_->descriptions.size(), NULL);

    for( unsigned int i = 0; i < descr_->descriptions.size(); ++i)
    {
        data_->cameraDataVector.push_back( new orca::ImageData() );
        data_->cameraDataVector[i]->description = descr_->descriptions[i];
        
        // class to search for image format properties
        ImageFormat imageFormat = ImageFormat::find(descr_->descriptions[i]->format);
        int nChannels = imageFormat.getNumberOfChannels();

        // resize object buffer to fit image
        // NOTE(review): ceil() on what looks like an all-integer product is
        // a no-op — verify that width/height are integral in the IDL before
        // simplifying this expression.
        int imageSize = (int)ceil( nChannels * data_->cameraDataVector[i]->description->height *data_->cameraDataVector[i]->description->width );
        data_->cameraDataVector[i]->pixelData.resize( imageSize );
    }
#endif
}
Example #12
0
/**
 * Construct the multi-camera viewer window.
 *
 * Assumes every camera delivers identically formatted frames; the first
 * camera's description is used for all of them.  Sets up OpenCV storage
 * for the source frame (detecting any row padding OpenCV applies), a BGR8
 * display image wide enough for all camera frames side by side, the named
 * display window, the FPS timer and the overlay font.
 */
Viewer::Viewer( const orca::MultiCameraDataPtr& multiCameraData,
                const orcaice::Context& context ) :
    context_(context)
{
    isPadded_ = false;
    
    // Assume each camera is identical and the image formats are identical
    std::string format = multiCameraData->cameraDataVector.at(0)->description->format;
    int width = multiCameraData->cameraDataVector.at(0)->description->width;
    int height = multiCameraData->cameraDataVector.at(0)->description->height;
    
    // class to search for image format properties
    ImageFormat imageFormat = ImageFormat::find( format );
    int numChannels = imageFormat.getNumberOfChannels();
    // bits per channel, assuming channels of equal depth
    int depth = imageFormat.getBitsPerPixel()/numChannels;
    
    // set up opencv storage for the source image
    cvSrcImage_ = cvCreateImage( cvSize( width, height ),  depth, numChannels );
    
    // check if opencv has padded the byte array so that the width is a multiple of 4 or 8 bytes
    orcaByteWidth_ = width*numChannels;
    if ( orcaByteWidth_ != cvSrcImage_->widthStep )
    {
        isPadded_ = true;
    }

    // set up opencv storage for the display image
    std::string displayFormat = "BGR8";
    ImageFormat imageDisplayFormat = ImageFormat::find( displayFormat );
    numChannels  = imageDisplayFormat.getNumberOfChannels();
    depth = imageDisplayFormat.getBitsPerPixel()/numChannels;
    
    // The width of all the camera frames side-by-side
    int totalWidth = cvSrcImage_->width * multiCameraData->cameraDataVector.size();
    
    cvMultiDisplayImage_ = cvCreateImage( cvSize( totalWidth, height ),
                                          depth, 
                                          numChannels );
    
    // dodgy opencv needs this so it has time to resize
    cvWaitKey(100);
    
    name_ = "MultiCameraViewer";
    cvNamedWindow( name_ );
    // context_.tracer()->debug("opencv window created",5);
    
    // start the timer for calculating the number of frames per second
    // the images are being displayed at
    orcaice::setToNow( oldFrameTime_ );
    
    // initialise font for displaying fps
    cvInitFont(&font_, CV_FONT_HERSHEY_SIMPLEX, 0.5, 0.5, 0.0, 1, CV_AA);
        
}
Example #13
0
    /**
     Our implementation of the libPNG info callback: called once the IHDR
     and ancillary chunks have been read.  Validates the image size, asks
     libPNG to normalise bit depths to 8, converts tRNS transparency to a
     real alpha channel, maps the PNG color type onto our ImageFormat
     descriptors, configures filler/byte-swapping so 32-bit data arrives as
     ARGB words in native endianness, sets up interlace handling, and
     finally announces the single frame downstream.
    */
    void haveInfo()
    {
        int bitDepth, colorType, interlaceType;
    
        png_get_IHDR(pngReadStruct, pngInfoStruct, &width, &height, &bitDepth,
                     &colorType, &interlaceType, 0, 0);
                     
        if (!ImageManager::isAcceptableSize(width, height)) {
            libPngError = true;
            return;
        }
        
        //Ask libPNG to change bit depths we don't support
        if (bitDepth < 8)
#if PNG_LIBPNG_VER < 10400
            png_set_gray_1_2_4_to_8(pngReadStruct);
#else
            //renamed in libpng 1.4
            png_set_expand_gray_1_2_4_to_8(pngReadStruct);
#endif
        
        if (bitDepth > 8)
            png_set_strip_16       (pngReadStruct);
            
        //Some images (basically, only paletted ones) may have alpha
        //included as part of a tRNS chunk. We want to convert that to regular alpha
        //channel..
        bool haveTRNS = false; 
        if (png_get_valid(pngReadStruct, pngInfoStruct, PNG_INFO_tRNS))
        {
            png_set_tRNS_to_alpha(pngReadStruct);
            haveTRNS = true;
            
            //Adjust the color type to reflect the alpha channel we just
            //asked libPNG to synthesize.
            if (colorType == PNG_COLOR_TYPE_RGB)
                colorType =  PNG_COLOR_TYPE_RGB_ALPHA; //Paranoia..
            else if (colorType == PNG_COLOR_TYPE_GRAY)
                colorType = PNG_COLOR_TYPE_GRAY_ALPHA;
        }    
            
        ImageFormat imFrm;    
            
        //Prepare for mapping from colorType to our format descriptors.
        switch (colorType)
        {
            case PNG_COLOR_TYPE_GRAY:
                imFrm.greyscaleSetup();
                break;
            case PNG_COLOR_TYPE_GRAY_ALPHA:
                //We don't natively support 8-bit plus alpha, so ask libPNG to expand it out to RGB
                png_set_gray_to_rgb(pngReadStruct);
                imFrm.type = ImageFormat::Image_ARGB_32;
                break;
            case PNG_COLOR_TYPE_PALETTE:                
                //For now, we handle paletted images as RGB or ARGB
                //### TODO: handle non-alpha paletted images with a sufficiently small palette as 
                //paletted
                imFrm.type = haveTRNS ? ImageFormat::Image_ARGB_32 : ImageFormat::Image_RGB_32;
                png_set_palette_to_rgb(pngReadStruct);
                break;
            case PNG_COLOR_TYPE_RGB:
                imFrm.type = ImageFormat::Image_RGB_32;
                break;
            case PNG_COLOR_TYPE_RGB_ALPHA:
                imFrm.type = ImageFormat::Image_ARGB_32;
                break;
            default:
                //Huh? Unknown color type -- give up.
                libPngError = true;
                return;
        }
        
        //Configure padding/byte swapping if need be (32-bit images)
        //We want a 32-bit value with ARGB.
        //This means that for little-endian, in memory we should have BGRA,
        //and for big-endian, well, ARGB        
        if (imFrm.type == ImageFormat::Image_RGB_32)
        {
            //Need fillers, plus perhaps BGR swapping for non-alpha
#if Q_BYTE_ORDER == Q_BIG_ENDIAN || defined(__BIG_ENDIAN__)
            png_set_filler(pngReadStruct, 0xff, PNG_FILLER_BEFORE);
#else
            png_set_filler(pngReadStruct, 0xff, PNG_FILLER_AFTER);
            png_set_bgr   (pngReadStruct);
#endif                
        }
        else if (imFrm.type == ImageFormat::Image_ARGB_32)
        {
#if Q_BYTE_ORDER == Q_BIG_ENDIAN || defined(__BIG_ENDIAN__)
            png_set_swap_alpha(pngReadStruct); //ARGB, not RGBA
#else
            png_set_bgr   (pngReadStruct);     //BGRA
#endif
        }
        
        //Remember depth, for our own use
        //(used below to size the interlace scanline buffer, so this is
        //presumably bytes per pixel -- TODO confirm against ImageFormat::depth())
        depth = imFrm.depth();
        
        //handle interlacing        
        if (interlaceType != PNG_INTERLACE_NONE)
        {
            interlaced  = true;
            scanlineBuf = new unsigned char[depth * width];
            png_set_interlace_handling(pngReadStruct);
            
            // Give up on premultiply in this case..
            if (imFrm.type == ImageFormat::Image_ARGB_32)
                imFrm.type = ImageFormat::Image_ARGB_32_DontPremult;
        }
        
        notifySingleFrameImage(width, height, imFrm);
        
        //OK, time to start input
        png_read_update_info(pngReadStruct, pngInfoStruct);
    }
Example #14
0
/**
 * Load the image for a gallery tree item, if it is not already loaded.
 *
 * Verifies the item refers to a valid image format, loads it from its URL,
 * and for multi-page images creates one child tree item per sub-image
 * (sub-images themselves are extracted on demand).  On success the loaded
 * image is announced via slotImageArrived(); on failure an error dialogue
 * is shown.
 */
void ScanGallery::loadImageForItem(FileTreeViewItem *item)
{
    if (item==NULL) return;

    const KFileItem *kfi = item->fileItem();
    if (kfi->isNull()) return;

    kDebug() << "loading" << item->url();

    QString ret = QString::null;			// no error so far

    ImageFormat format = getImgFormat(item);		// check for valid image format
    if (!format.isValid())
    {
        ret = i18n("Not a valid image format");
    }
    else
    {
        KookaImage *img = imageForItem(item);
        if (img==NULL)					// image not already loaded
        {
            // The image needs to be loaded. Possibly it is a multi-page image.
            // If it is, the kookaImage has a subImageCount larger than one. We
            // create an subimage-item for every subimage, but do not yet load
            // them.

            img = new KookaImage();
            ret = img->loadFromUrl(item->url());
            if (ret.isEmpty())				// image loaded OK
            {
                img->setFileItem(kfi);			// store the fileitem

                kDebug() << "subImage-count" << img->subImagesCount();
                if (img->subImagesCount()>1)		// look for subimages,
                {					// create items for them
                    KIconLoader *loader = KIconLoader::global();

                    // Start at the image with index 1, that makes one less than
                    // are actually in the image. But image 0 was already created above.
                    FileTreeViewItem *prevItem = NULL;
                    for (int i = 1; i<img->subImagesCount(); i++)
                    {
                        kDebug() << "Creating subimage" << i;
                        KFileItem newKfi(*kfi);
                        FileTreeViewItem *subImgItem = new FileTreeViewItem(item,newKfi,item->branch());

                        // TODO: what's the equivalent?
                        //if (prevItem!=NULL) subImgItem->moveItem(prevItem);
                        prevItem = subImgItem;

                        subImgItem->setIcon(0, loader->loadIcon("editcopy", KIconLoader::Small));
                        subImgItem->setText(0, i18n("Sub-image %1", i));
                        KookaImage *subImgImg = new KookaImage(i, img);
                        // NOTE(review): &newKfi is the address of a loop-local;
                        // if setFileItem() stores the pointer instead of copying,
                        // it dangles after this iteration -- verify.
                        subImgImg->setFileItem(&newKfi);
                        subImgItem->setClientData(subImgImg);
                    }
                }

                if (img->isSubImage())			// this is a subimage
                {
                    kDebug() << "it is a subimage";
                    if (img->isNull())			// if not already loaded,
                    {
                        kDebug() << "extracting subimage";
                        img->extractNow();		// load it now
                    }
                }

                slotImageArrived(item, img);
            }
            else
            {
                delete img;				// nothing to load
            }
        }
    }

    if (!ret.isEmpty()) KMessageBox::error(this,	// image loading failed
                                           i18n("<qt>"
                                                "<p>Unable to load the image<br>"
                                                "<filename>%2</filename><br>"
                                                "<br>"
                                                "%1",
                                                ret,
                                                item->url().prettyUrl()),
                                           i18n("Image Load Error"));
}
/**
 * Process one slot of images: adaptive thresholding.
 *
 * For each image in the slot, when enabled, a small-kernel noise filter is
 * applied to the input (iterated _numIntegralImageLevels times) and a
 * large-kernel box filter produces a locally adaptive reference; pixels
 * whose noise-filtered value exceeds the reference by more than
 * _thresholdOffset are set high, all others low.  Supports the Y_F32 and
 * Y_8 pixel formats; when disabled, the input is copied through untouched.
 *
 * @return true when a read and a write slot were available and one slot
 *         was processed, false otherwise
 */
bool FIPAdaptiveThreshold::trigger()
{
    if ((getNumReadSlotsAvailable())&&(getNumWriteSlotsAvailable()))
    {
        std::vector<Image**> imvRead=reserveReadSlot();
        std::vector<Image**> imvWrite=reserveWriteSlot();
        
        //Start stats measurement event.
        ProcessorStats_->tick();
        
        for (size_t imgNum=0; imgNum<ImagesPerSlot_; ++imgNum)
        {
            Image const * const imReadUS = *(imvRead[imgNum]);
            Image * const imWriteDS = *(imvWrite[imgNum]);
            const ImageFormat imFormat=getDownstreamFormat(imgNum);//down stream and up stream formats are the same.
            
            if (!_enabled)
            {
                //Pass-through: copy the upstream image unchanged.
                uint8_t const * const dataReadUS=(uint8_t const * const)imReadUS->data();
                uint8_t * const dataWriteDS=(uint8_t * const)imWriteDS->data();
                const size_t bytesPerImage=imFormat.getBytesPerImage();
                memcpy(dataWriteDS, dataReadUS, bytesPerImage);
            } else
            {
                const size_t width=imFormat.getWidth();
                const size_t height=imFormat.getHeight();
                const size_t numElements=width * height * imFormat.getComponentsPerPixel();
                
                if (imFormat.getPixelFormat()==ImageFormat::FLITR_PIX_FMT_Y_F32)
                {
                    float const * const dataReadUS=(float const * const)imReadUS->data();
                    float * const dataWriteDS=(float * const)imWriteDS->data();
                    
                    //Small kernel noise filter.
                    _noiseFilter.filter((float *)_noiseFilteredInputData, dataReadUS, width, height,
                                        _integralImageScratchData, true);
                    
                    //Iterate the noise filter; each pass reads the previous
                    //result from the scratch buffer.
                    for (short i=1; i<_numIntegralImageLevels; ++i)
                    {
                        //BUGFIX: this buffer holds float pixels here, but the
                        //copy size used sizeof(uint8_t), so only a quarter of
                        //the image was carried into the next iteration.
                        memcpy(_scratchData, _noiseFilteredInputData, width*height*sizeof(float));
                        
                        _noiseFilter.filter((float *)_noiseFilteredInputData, (float *)_scratchData, width, height,
                                            _integralImageScratchData, true);
                    }
                    
                    
                    for (size_t i=0; i<numElements; ++i)
                    {
                        dataWriteDS[i]=1.0;
                    }
                    
                    
                    //Large kernel adaptive reference.
                    _boxFilter.filter(dataWriteDS, dataReadUS, width, height,
                                      _integralImageScratchData, true);
                    
                    for (short i=1; i<_numIntegralImageLevels; ++i)
                    {
                        memcpy(_scratchData, dataWriteDS, width*height*sizeof(float));
                        
                        _boxFilter.filter(dataWriteDS, (float *)_scratchData, width, height,
                                          _integralImageScratchData, true);
                    }
                    
                    
                    //Threshold: high where input exceeds the adaptive
                    //reference by more than the offset.
                    const float tovF32=_thresholdOffset * 1.0f;
                    size_t tpc=0;
                    
                    for (size_t i=0; i<numElements; ++i)
                    {
                        if (( ((float *)_noiseFilteredInputData)[i] - tovF32) > dataWriteDS[i])
                        {
                            dataWriteDS[i]=1.0;
                            ++tpc;
                        } else
                        {
                            dataWriteDS[i]=0.0;
                        }
                    }
                    
                    //NOTE(review): unlike the Y_8 branch below, this stores the
                    //raw count, not the fraction -- the division is commented
                    //out; verify which is intended.
                    _thresholdAvrg=tpc;// / double(width*height);
                } else
                    if (imFormat.getPixelFormat()==ImageFormat::FLITR_PIX_FMT_Y_8)
                    {
                        uint8_t const * const dataReadUS=(uint8_t const * const)imReadUS->data();
                        uint8_t * const dataWriteDS=(uint8_t * const)imWriteDS->data();
                        
                        //Small kernel noise filter.
                        _noiseFilter.filter((uint8_t *)_noiseFilteredInputData, dataReadUS, width, height,
                                            _integralImageScratchData, true);
                        
                        for (short i=1; i<_numIntegralImageLevels; ++i)
                        {
                            memcpy(_scratchData, _noiseFilteredInputData, width*height*sizeof(uint8_t));
                            
                            _noiseFilter.filter((uint8_t *)_noiseFilteredInputData, (uint8_t *)_scratchData, width, height,
                                                _integralImageScratchData, true);
                        }
                        
                        
                        for (size_t i=0; i<numElements; ++i)
                        {
                            dataWriteDS[i]=255;
                        }
                        
                        
                        //Large kernel adaptive reference.
                        _boxFilter.filter(dataWriteDS, dataReadUS, width, height,
                                          _integralImageScratchData, true);
                        
                        for (short i=1; i<_numIntegralImageLevels; ++i)
                        {
                            memcpy(_scratchData, dataWriteDS, width*height*sizeof(uint8_t));
                            
                            _boxFilter.filter(dataWriteDS, (uint8_t *)_scratchData, width, height,
                                              _integralImageScratchData, true);
                        }
                        
                        
                        //Threshold: high where input exceeds the adaptive
                        //reference by more than the offset.
                        const uint8_t tovUInt8=_thresholdOffset * 255.5;
                        size_t tpc=0;
                        
                        for (size_t i=0; i<numElements; ++i)
                        {
                            if (( ((uint8_t *)_noiseFilteredInputData)[i] - tovUInt8) > dataWriteDS[i])
                            {
                                dataWriteDS[i]=255;
                                ++tpc;
                            } else
                            {
                                dataWriteDS[i]=0;
                            }
                        }
                        
                        //Fraction of pixels that passed the threshold.
                        _thresholdAvrg=tpc / double(width*height);
                    }
            }
        }
        
        //Stop stats measurement event.
        ProcessorStats_->tock();
        
        releaseWriteSlot();
        releaseReadSlot();
        
        return true;
    }
    
    return false;
}
/**
 * Read an image from a "BinImage" format binary stream.
 *
 * Validates the magic string, version (must be 1), the dimensions, and
 * every channel descriptor before constructing the image and reading the
 * raw pixel data.  Returns NULL (with setError() called) on any
 * malformed input or stream error.
 */
Image* FW::importBinaryImage(InputStream& stream)
{
    // ImageHeader.

    char formatID[9];
    stream.readFully(formatID, 8);
    formatID[8] = '\0';
    if (String(formatID) != "BinImage")
    {
        setError("Not a binary image file!");
        return NULL;
    }

    S32 version;
    stream >> version;
    if (version != 1)
    {
        setError("Unsupported binary image version!");
        return NULL;
    }

    S32 width, height, bpp, numChannels;
    stream >> width >> height >> bpp >> numChannels;
    if (width < 0 || height < 0 || bpp < 0 || numChannels < 0)
    {
        setError("Corrupt binary image data!");
        return NULL;
    }

    // Array of ImageChannel.

    ImageFormat format;
    for (int i = 0; i < numChannels; i++)
    {
        S32 ctype, cformat;
        ImageFormat::Channel c;
        stream >> ctype >> cformat >> c.wordOfs >> c.wordSize >> c.fieldOfs >> c.fieldSize;
        // Reject out-of-range enums, non-power-of-two word sizes, fields
        // that overflow their word, and non-32-bit float fields.
        if (ctype < 0 || cformat < 0 || cformat >= ImageFormat::ChannelFormat_Max ||
            c.wordOfs < 0 || (c.wordSize != 1 && c.wordSize != 2 && c.wordSize != 4) ||
            c.fieldOfs < 0 || c.fieldSize <= 0 || c.fieldOfs + c.fieldSize > c.wordSize * 8 ||
            (cformat == ImageFormat::ChannelFormat_Float && c.fieldSize != 32))
        {
            setError("Corrupt binary image data!");
            return NULL;
        }

        c.type = (ImageFormat::ChannelType)ctype;
        c.format = (ImageFormat::ChannelFormat)cformat;
        format.addChannel(c);
    }

    // The declared bpp must agree with the channel layout.
    if (bpp != format.getBPP())
    {
        setError("Corrupt binary image data!");
        return NULL;
    }

    // Image data.
    // NOTE(review): width * height * bpp is computed in S32 and could
    // overflow for extreme dimensions -- verify upstream limits or widen.

    Image* image = new Image(Vec2i(width, height), format);
    stream.readFully(image->getMutablePtr(), width * height * bpp);

    // Handle errors.

    if (hasError())
    {
        delete image;
        return NULL;
    }
    return image;
}
Example #17
0
//! Executes one processing step of the DPT (Discrete Pulse Transform style)
//! filter: consumes one upstream image, builds a pixel-adjacency graph,
//! iteratively flattens and merges the smallest pulses ("pits" and "bumps")
//! until the smallest surviving pulse reaches filterPulseSize_, then renders
//! the surviving pulses into the downstream image.
//! Returns true if an image was processed, false if no work was available.
//!
//! NOTE(review): assumes 8-bit single-channel images — dataRead/dataWrite are
//! indexed one byte per pixel and the memset clears width*height bytes;
//! confirm against the upstream format negotiation.
bool FIPDPT::trigger()
{
    if ((getNumReadSlotsAvailable())&&(getNumWriteSlotsAvailable()))
    {//There are images to consume and the downstream producer has space to produce.
        std::vector<Image**> imvRead=reserveReadSlot();
        std::vector<Image**> imvWrite=reserveWriteSlot();
        
        //Start stats measurement event.
        ProcessorStats_->tick();
        
        for (size_t imgNum=0; imgNum<1; ++imgNum)//For now, only process one image in each slot.
        {
            Image const * const imRead = *(imvRead[imgNum]);
            Image * const imWrite = *(imvWrite[imgNum]);
            
            uint8_t const * const dataRead=imRead->data();
            uint8_t * const dataWrite=imWrite->data();
            
            const ImageFormat imFormat=getDownstreamFormat(imgNum);
            
            const size_t width=imFormat.getWidth();
            const size_t height=imFormat.getHeight();
            
            memset(dataWrite, 0, width*height);//Clear the downstream image.
            
            
            //=== Setup the initial nodes ===//
            // One node per pixel, initially a singleton pulse holding that
            // pixel's value. nodeVect_ is assumed to be pre-sized to at
            // least width*height elements — TODO(review) confirm.
            for (size_t y=0; y<height; ++y)
            {
                const size_t lineOffset=y * width;
                
                for (size_t x=0; x<width; ++x)
                {
                    const auto writeValue=dataRead[lineOffset + x];
                    
                    Node &node=nodeVect_[lineOffset + x];
                    node.index_=lineOffset + x;
                    node.value_=writeValue;
                    node.size_=1;
                    
#ifdef RUN_ARCS
                    node.arcIndices_.clear();
#else
                    node.neighbourIndices_.clear();
#endif
                    
                    node.pixelIndices_.clear();
                    node.pixelIndices_.push_back(lineOffset + x);
                }
            }
            //===  ==//
            
            
            //=== Setup the initial arcs or node neighbours ===//
            // 4-connectivity: horizontal edges (x-1 <-> x) and vertical
            // edges (x <-> x+width). With RUN_ARCS defined the edges are
            // stored as explicit Arc records; otherwise each node stores
            // its neighbour indices directly.
            {
#ifdef RUN_ARCS
                size_t arcIndex=0;
#endif
                
                for (size_t y=0; y<height; ++y)
                {
                    const size_t lineOffset=y * width;
                    
                    for (size_t x=1; x<width; ++x)
                    {
#ifdef RUN_ARCS
                        Arc &arc=arcVect_[arcIndex];
                        arc.index_=arcIndex;
                        arc.active_=true;
                        
                        arc.nodeIndices_[0]=lineOffset+x-1;
                        arc.nodeIndices_[1]=lineOffset+x;
                        
                        nodeVect_[arc.nodeIndices_[0]].arcIndices_.push_back(arcIndex);
                        nodeVect_[arc.nodeIndices_[1]].arcIndices_.push_back(arcIndex);
                        arcIndex++;
#else
                        nodeVect_[lineOffset+x-1].neighbourIndices_.push_back(lineOffset+x);
                        nodeVect_[lineOffset+x].neighbourIndices_.push_back(lineOffset+x-1);
#endif
                    }
                    
                    if (y<(height-1))
                    {
                        for (size_t x=0; x<width; ++x)
                        {
#ifdef RUN_ARCS
                            Arc &arc=arcVect_[arcIndex];
                            arc.index_=arcIndex;
                            arc.active_=true;
                            
                            arc.nodeIndices_[0]=lineOffset+x;
                            arc.nodeIndices_[1]=lineOffset+x+width;
                            
                            nodeVect_[arc.nodeIndices_[0]].arcIndices_.push_back(arcIndex);
                            nodeVect_[arc.nodeIndices_[1]].arcIndices_.push_back(arcIndex);
                            arcIndex++;
#else
                            nodeVect_[lineOffset+x].neighbourIndices_.push_back(lineOffset+x+width);
                            nodeVect_[lineOffset+x+width].neighbourIndices_.push_back(lineOffset+x);
#endif
                        }
                    }
                }
            }
            //=== ===
            
            size_t policyCounter=0;
            
            size_t numBumpsRemoved=0;
            size_t numPitsRemoved=0;
            
            size_t numPulsesMerged=mergeFromAll();//Initial merge.
            
            updatePotentiallyActiveNodeIndexVect();
            
            std::vector<int32_t> nodeIndicesToMerge;
            
            size_t loopCounter = 0;
            int32_t previousSmallestPulse = 0;
            
            // Main filtering loop: repeatedly find the smallest surviving
            // pulse, flatten all pulses of that size, and merge the result.
            // Stops once the smallest pulse reaches filterPulseSize_ or
            // fewer than two candidate nodes remain.
            while ((previousSmallestPulse<filterPulseSize_)&&(potentiallyActiveNodeIndexVect_.size()>1))
            {
                nodeIndicesToMerge.clear();
                
                //=== Find smallest pulse ===//
                int32_t smallestPulse=0;
                
                for (const auto & potentiallyActiveNodeIndex : potentiallyActiveNodeIndexVect_)
                {
                    Node &node=nodeVect_[potentiallyActiveNodeIndex];
                    
                    if (node.size_>0)
                    {
                        if ((smallestPulse==0)||(node.size_<smallestPulse))
                        {
                            //if (isBump(node.index_)||isPit(node.index_))
                            //if (node.size_>previousSmallestPulse)
                            {
                                smallestPulse=node.size_;
                            }
                        }
                    }
                }
                //std::cout << "Smallest pulse is " << smallestPulse << ".\n";
                //std::cout.flush();
                //=== ===//
                
                
                //=== Implement DPT pit/bump policy ===//
                // Restart the pit/bump alternation whenever the smallest
                // pulse size changes.
                if (previousSmallestPulse!=smallestPulse)
                {
                    policyCounter=0;
                }
                //=== ===//
                
                
                //=== Remove pits and bumps according to policy ===//
                // Alternate passes: even policyCounter removes pits,
                // odd removes bumps (tracked by the two counters below).
                for (const auto & potentiallyActiveNodeIndex : potentiallyActiveNodeIndexVect_)
                {
                    Node &node=nodeVect_[potentiallyActiveNodeIndex];
                    
                    if ((node.size_>0)&&(node.size_<=smallestPulse))
                    {
                        if ((policyCounter%2)==0)
                        {
                            //if (isPit(node.index_))
                            {
                                //=== Remove pits ===//
                                flattenToNearestNeighbour(node.index_);
                                //flattenToFirstNeighbour(node.index_);
                                nodeIndicesToMerge.push_back(node.index_);
                                ++numPitsRemoved;
                            }
                        } else
                        {
                            //if (isBump(node.index_))
                            {
                                //=== Remove bumps ===//
                                flattenToNearestNeighbour(node.index_);
                                //flattenToFirstNeighbour(node.index_);
                                nodeIndicesToMerge.push_back(node.index_);
                                ++numBumpsRemoved;
                            }
                        }
                    }
                }
                //=== ===//
                
                
                //std::cout << "Pulses merged = " << numPulsesMerged << ".\n";
                //std::cout << "Pits removed = " << numPitsRemoved << ".\n";
                //std::cout << "Bumps removed = " << numBumpsRemoved << ".\n";
                //std::cout << "\n";
                //std::cout.flush();
                
                
                //=== Merge over arcs of removed nodes ===//
                numPulsesMerged=mergeFromList(nodeIndicesToMerge);
                //=== ===//
                
                ++policyCounter;
                previousSmallestPulse=smallestPulse;
                
                // Refresh the candidate list only every 16 iterations to
                // amortise its cost.
                if ((loopCounter&15)==0) updatePotentiallyActiveNodeIndexVect();
                ++loopCounter;
            }
            
            
            //=============================================//
            //=============================================//
            //=============================================//
            
            
            {
                //=== Find smallest pulse ==
                // Recomputed over ALL nodes (not just candidates) for the
                // commented-out size-selective drawing path below.
                int32_t smallestPulse=0;
                for (const auto & node : nodeVect_)
                {
                    if (node.size_>0)
                    {
                        if ((smallestPulse==0)||(node.size_<smallestPulse))
                        {
                            //if (isBump(node.index_)||isPit(node.index_))
                            {
                                smallestPulse=node.size_;
                            }
                        }
                    }
                }
                //=== ===
                
                //std::cout << "Drawing pulses of size " << smallestPulse << "+.\n";
                //std::cout.flush();
                
                //=== Draw the pulses on the sceen ==
                // Every surviving pulse writes its value to all of its
                // member pixels in the downstream image.
                for (const auto & node : nodeVect_)
                {
                    if (node.size_>0)
                    {
                        //if (node.size_==smallestPulse)
                        {
                            for (const auto pixelIndex : node.pixelIndices_)
                            {
                                dataWrite[pixelIndex]=node.value_;
                            }
                        }
                    }
                }
                
                //std::cout << "\n";
                //std::cout.flush();
                //=== ===
            }
        }
        
        //Stop stats measurement event.
        ProcessorStats_->tock();
        
        releaseWriteSlot();
        releaseReadSlot();
        
        return true;
    }
    
    return false;
}
Example #18
0
// Registers a format object in the static lookup table, keyed by its
// format string. Adding a format whose string is already present
// overwrites the previous entry.
void
ImageFormat::add( ImageFormat formatObject )
{
    const auto key = formatObject.getFormatString();
    formats_[key] = formatObject;
}