Ejemplo n.º 1
0
  // Computes the region of this ImageLayer that needs repainting by comparing
  // the layer's current attributes against the values cached from the previous
  // paint (mVisibleRegion, mContainer, mFilter, mScaleToSize, mScaleMode).
  // Sets aGeometryChanged to true when any geometry-affecting attribute
  // differs; aCallback is unused in this override.
  virtual nsIntRegion ComputeChangeInternal(NotifySubDocInvalidationFunc aCallback,
                                            bool& aGeometryChanged)
  {
    // Downcast — presumably this change tracker is only attached to
    // ImageLayers, which makes the static_cast safe; confirm at the call site.
    ImageLayer* imageLayer = static_cast<ImageLayer*>(mLayer.get());
    
    // Visible region changed: invalidate the union of the old and new
    // transformed bounds so both the vacated and newly covered areas repaint.
    if (!imageLayer->GetVisibleRegion().IsEqual(mVisibleRegion)) {
      aGeometryChanged = true;
      IntRect result = NewTransformedBounds();
      result = result.Union(OldTransformedBounds());
      return result;
    }

    // Any change to the image content or its sampling/scaling parameters
    // forces a repaint of the layer's bounds.
    ImageContainer* container = imageLayer->GetContainer();
    if (mContainer != container ||
        mFilter != imageLayer->GetFilter() ||
        mScaleToSize != imageLayer->GetScaleToSize() ||
        mScaleMode != imageLayer->GetScaleMode()) {
      aGeometryChanged = true;

      if (mIsMask) {
        // Mask layers have an empty visible region, so we have to
        // use the image size instead.
        // NOTE(review): assumes 'container' is non-null on this path —
        // confirm GetContainer() cannot return null for mask layers.
        IntSize size = container->GetCurrentSize();
        IntRect rect(0, 0, size.width, size.height);
        return TransformRect(rect, mLayer->GetLocalTransform());

      } else {
        return NewTransformedBounds();
      }
    }

    // Nothing relevant changed: return an empty invalidation region.
    return IntRect();
  }
Ejemplo n.º 2
0
// Divides the image into 2x2 pixel blocks and stores them as 16-dimensional
// vectors, (A, R, G, B) * 4.
static void vectorizeARGB(const ImageContainer& images, QVector<Vec<16>>& vectors) {
	for (int i=0; i<images.imageCount(); i++) {
		const QImage& img = images.getByIndex(i);

		// Ignore images smaller than this
		if (img.width() < MIN_MIPMAP_VQ || img.height() < MIN_MIPMAP_VQ)
			continue;

		for (int y=0; y<img.height(); y+=2) {
			for (int x=0; x<img.width(); x+=2) {
				Vec<16> vec;
				uint hash = 0;
				int offset = 0;
				for (int yy=y; yy<(y+2); yy++) {
					for (int xx=x; xx<(x+2); xx++) {
						QRgb pixel = img.pixel(xx, yy);
						argb2vec(pixel, vec, offset);
						hash = combineHash(pixel, hash);
						offset += 4;
					}
				}
				vec.setHash(hash);
				vectors.push_back(vec);
			}
		}
	}
}
Ejemplo n.º 3
0
// Returns the number of frames painted for this video element, or 0 when
// no image container exists yet.
NS_IMETHODIMP nsHTMLVideoElement::GetMozPaintedFrames(PRUint32 *aMozPaintedFrames)
{
  // Painted-frame statistics are only consulted on the main thread.
  NS_ASSERTION(NS_IsMainThread(), "Should be on main thread.");
  ImageContainer* imageContainer = GetImageContainer();
  if (imageContainer) {
    *aMozPaintedFrames = imageContainer->GetPaintCount();
  } else {
    *aMozPaintedFrames = 0;
  }
  return NS_OK;
}
Ejemplo n.º 4
0
// Allocates a GstBuffer whose data pointer aliases the pixel storage of a
// freshly created PlanarYCbCrImage, so decoded video is written directly
// into the image used for presentation. On success, *aBuf receives the
// buffer and aImage the image; returns GST_FLOW_ERROR when the decoder has
// no ImageContainer. aPad, aOffset and aCaps are unused in this overload.
GstFlowReturn GStreamerReader::AllocateVideoBufferFull(GstPad* aPad,
                                                       guint64 aOffset,
                                                       guint aSize,
                                                       GstCaps* aCaps,
                                                       GstBuffer** aBuf,
                                                       nsRefPtr<PlanarYCbCrImage>& aImage)
{
  /* allocate an image using the container */
  ImageContainer* container = mDecoder->GetImageContainer();
  if (container == nullptr) {
    return GST_FLOW_ERROR;
  }
  nsRefPtr<PlanarYCbCrImage> image =
    container->CreateImage(ImageFormat::PLANAR_YCBCR).downcast<PlanarYCbCrImage>();

  /* prepare a GstBuffer pointing to the underlying PlanarYCbCrImage buffer */
  GstBuffer* buf = GST_BUFFER(gst_moz_video_buffer_new());
  GST_BUFFER_SIZE(buf) = aSize;
  /* allocate the actual YUV buffer */
  GST_BUFFER_DATA(buf) = image->AllocateAndGetNewBuffer(aSize);

  aImage = image;

  /* create a GstMozVideoBufferData to hold the image */
  GstMozVideoBufferData* bufferdata = new GstMozVideoBufferData(image);

  /* Attach bufferdata to our GstMozVideoBuffer, it will take care to free it */
  gst_moz_video_buffer_set_data(GST_MOZ_VIDEO_BUFFER(buf), bufferdata);

  *aBuf = buf;
  return GST_FLOW_OK;
}
Ejemplo n.º 5
0
// Writes all mipmap levels of 'images' to 'stream' uncompressed, one texel
// at a time in twiddled order, converting each pixel to 'pixelFormat'.
// A zero-filled mipmap offset block is emitted first when mipmaps exist.
void writeUncompressedData(QDataStream& stream, const ImageContainer& images, int pixelFormat) {
	// Mipmap offset
	if (images.hasMipmaps()) {
		writeZeroes(stream, MIPMAP_OFFSET_16BPP);
	}

	// Texture data, from smallest to largest mipmap
	for (int i=0; i<images.imageCount(); i++) {
		const QImage& img = images.getByIndex(i);
		// Hoist the dimensions: the loop below would otherwise call
		// img.width() twice per texel.
		const int width = img.width();
		const int height = img.height();

		// The 1x1 mipmap level is a bit special for YUV textures. Since there's only
		// one pixel, it can't be saved as YUV422, so save it as RGB565 instead.
		if (width == 1 && height == 1 && pixelFormat == PIXELFORMAT_YUV422) {
			convertAndWriteTexel(stream, img.pixel(0, 0), PIXELFORMAT_RGB565, true);
			continue;
		}

		const Twiddler twiddler(width, height);
		const int pixels = width * height;

		// Write all texels for this mipmap level in twiddled order
		for (int j=0; j<pixels; j++) {
			const int index = twiddler.index(j);
			const int x = index % width;
			const int y = index / width;
			convertAndWriteTexel(stream, img.pixel(x, y), pixelFormat, true);
		}
	}
}
Ejemplo n.º 6
0
// Maps each 2x2-block vector produced by vectorizeARGB to its closest
// codebook entry, producing one half-resolution 8-bit indexed image per
// mipmap level (one code index per 2x2 block), then converts the trained
// code vectors back into packed 16BPP texel quads for 'codebook'.
static void devectorizeARGB(const ImageContainer& srcImages, const QVector<Vec<16>>& vectors, const VectorQuantizer<16>& vq, int format, QVector<QImage>& indexedImages, QVector<quint64>& codebook) {
	// Running read position into 'vectors'; must advance through images in
	// exactly the same order (and with the same skips) as vectorizeARGB.
	int vindex = 0;

	for (int i=0; i<srcImages.imageCount(); i++) {
		const QSize size = srcImages.getByIndex(i).size();
		// NOTE(review): vectorizeARGB skips images with width/height <
		// MIN_MIPMAP_VQ, while this skips width/height == 1. These only
		// agree if MIN_MIPMAP_VQ == 2 — confirm, otherwise vindex
		// desynchronizes from 'vectors'.
		if (size.width() == 1 || size.height() == 1)
			continue;
		// Half resolution: one 8-bit code index per 2x2 source block.
		QImage img(size.width()/2, size.height()/2, QImage::Format_Indexed8);
		img.setColorCount(256);
		for (int y=0; y<img.height(); y++) {
			for (int x=0; x<img.width(); x++) {
				const Vec<16>& vec = vectors[vindex];
				int codeIndex = vq.findClosest(vec);
				img.setPixel(x, y, codeIndex);
				vindex++;
			}
		}
		indexedImages.push_back(img);
	}

	// Rebuild the codebook: each code vector stores four pixels as
	// (A, R, G, B) float components, packed back into a 16BPP quad.
	for (int i=0; i<vq.codeCount(); i++) {
		const Vec<16>& vec = vq.codeVector(i);
		QColor tl = QColor::fromRgbF(vec[1], vec[2], vec[3], vec[0]);
		QColor tr = QColor::fromRgbF(vec[5], vec[6], vec[7], vec[4]);
		QColor bl = QColor::fromRgbF(vec[9], vec[10], vec[11], vec[8]);
		QColor br = QColor::fromRgbF(vec[13], vec[14], vec[15], vec[12]);
		quint64 quad = packQuad(tl.rgba(), tr.rgba(), bl.rgba(), br.rgba(), format);
		codebook.push_back(quad);
	}
}
Ejemplo n.º 7
0
// Reads one image per path in 'paths' and appends it to 'imgs', preserving
// order. MAGICK_WRAP presumably translates Magick++ exceptions into this
// project's error handling — confirm at its definition.
// NOTE(review): 'typename' on non-dependent names is only valid outside a
// template in C++20; presumably this function body belongs to a template
// whose header is not visible in this snippet.
static void read_images(const PathContainer& paths, ImageContainer& imgs) {
	typedef typename PathContainer::const_iterator pc_iter;
	typedef typename ImageContainer::value_type img_t;

	for(pc_iter it = paths.begin(); it != paths.end(); ++it) {
		// Default-construct the destination slot, then read directly into it.
		imgs.push_back( img_t() );
		MAGICK_WRAP( ImOp::read(*it).call(imgs.back()) );
	}
}
Ejemplo n.º 8
0
// Replaces the PlanarYCbCrImage backing 'mem' with a freshly allocated one
// and points mem->data at the new image's pixel buffer, sized to
// mem->memory.size. Releases the previous image first, if any.
void
moz_gfx_memory_reset(MozGfxMemory *mem)
{
  // Drop our reference to the old image before overwriting the pointer.
  if (mem->image)
    mem->image->Release();

  // The allocator embeds a pointer back to the reader that owns the
  // ImageContainer. NOTE(review): 'container' is assumed non-null here —
  // confirm GetImageContainer() cannot fail on this path.
  ImageContainer* container = ((MozGfxMemoryAllocator*) mem->memory.allocator)->reader->GetImageContainer();
  // take() presumably transfers the reference from CreateImage's return
  // value to mem->image (balanced by the Release above) — confirm.
  mem->image = reinterpret_cast<PlanarYCbCrImage*>(container->CreateImage(ImageFormat::PLANAR_YCBCR).take());
  mem->data = mem->image->AllocateAndGetNewBuffer(mem->memory.size);
}
Ejemplo n.º 9
0
// This function counts how many unique 2x2 16BPP pixel blocks there are in the image.
// If there are <= maxCodes, it puts the unique blocks in 'codebook' and 'indexedImages'
// will contain images that index the 'codebook' vector, resulting in quick "lossless"
// compression, if possible.
// It will keep counting blocks even if the block count exceeds maxCodes for the sole
// purpose of reporting it back to the user.
// Returns number of unique 2x2 16BPP pixel blocks in all images.
static int encodeLossless(const ImageContainer& images, int pixelFormat, QVector<QImage>& indexedImages, QVector<quint64>& codebook, int maxCodes) {
	QHash<quint64, int> uniqueQuads; // Quad <=> index

	for (int i=0; i<images.imageCount(); i++) {
		const QImage& img = images.getByIndex(i);

		// Ignore images smaller than this
		if (img.width() < MIN_MIPMAP_VQ || img.height() < MIN_MIPMAP_VQ)
			continue;

		// Half-resolution index image: one 8-bit code index per 2x2 block.
		QImage indexedImage(img.width() / 2, img.height() / 2, QImage::Format_Indexed8);
		indexedImage.setColorCount(256);

		for (int y=0; y<img.height(); y+=2) {
			for (int x=0; x<img.width(); x+=2) {
				QRgb tl = img.pixel(x + 0, y + 0);
				QRgb tr = img.pixel(x + 1, y + 0);
				QRgb bl = img.pixel(x + 0, y + 1);
				QRgb br = img.pixel(x + 1, y + 1);
				quint64 quad = packQuad(tl, tr, bl, br, pixelFormat);

				// One hash lookup instead of the previous
				// contains() + insert() + value() (three probes per block).
				// New quads get the next sequential index.
				auto it = uniqueQuads.find(quad);
				if (it == uniqueQuads.end())
					it = uniqueQuads.insert(quad, uniqueQuads.size());

				if (uniqueQuads.size() <= maxCodes)
					indexedImage.setPixel(x / 2, y / 2, it.value());
			}
		}

		// Only add the image if we haven't hit the code limit
		if (uniqueQuads.size() <= maxCodes) {
			indexedImages.push_back(indexedImage);
		}
	}

	if (uniqueQuads.size() <= maxCodes) {
		// This texture can be losslessly compressed.
		// Copy the unique quads over to the codebook.
		// indexedImages is already done.
		codebook.resize(uniqueQuads.size());
		for (auto it = uniqueQuads.cbegin(); it != uniqueQuads.cend(); ++it)
			codebook[it.value()] = it.key();
	} else {
		// This texture needs lossy compression
		indexedImages.clear();
	}

	return uniqueQuads.size();
}
Ejemplo n.º 10
0
// Plots the alignment position of every image, one line series per frame,
// inside an embedded QChartView.
MovementGraph::MovementGraph( ImageContainer& images ) : QWidget(nullptr), images(images) {
	// Build the chart and make it fill this widget
	auto chart = new QChart();
	auto chartView = new QChartView( chart );
	chartView->setRenderHint( QPainter::Antialiasing );
	setLayout( new QHBoxLayout( this ) );
	layout()->addWidget( chartView );
	layout()->setContentsMargins( 0, 0, 0, 0 );
	
	// One line series per frame, one point per image in that frame
	auto offsets = imagesMoves( images );
	for( auto frame : images.getFrames() ){
		auto series = new QLineSeries(); //TODO: is needed to be on the heap?
		
		FrameContainer frameImages( images, frame );
		for( unsigned index=0; index<frameImages.count(); ++index )
			addPoint( *series, frameImages, index, offsets );
		
		series->setColor( getColor( frame ) );
		chart->addSeries( series );
	}
	
	// Chart cosmetics
	chart->setTitle( "Position of images" );
	chart->legend()->hide();
	chart->createDefaultAxes();
	//plot->setInteractions( QCP::iRangeDrag | QCP::iRangeZoom | QCP::iSelectPlottables );
	//TODO: replace the interaction functionality above?
	
	resize( 640, 480 );
	show();
}
Ejemplo n.º 11
0
// Writes VQ-compressed texture data: a 256-entry codebook of 2x2 16BPP
// texel blocks followed by 8-bit code indices for every mipmap level in
// twiddled order. Tries lossless encoding first and falls back to lossy
// vector quantization when there are more than 256 unique blocks.
void writeCompressedData(QDataStream& stream, const ImageContainer& images, int pixelFormat) {
	QVector<QImage> indexedImages;
	QVector<quint64> codebook;

	const int numQuads = encodeLossless(images, pixelFormat, indexedImages, codebook, 256);

	qDebug() << "Source images contain" << numQuads << "unique quads";

	if (numQuads > 256) {
		if ((pixelFormat != PIXELFORMAT_ARGB1555) && (pixelFormat != PIXELFORMAT_ARGB4444)) {
			// Opaque formats: 12-dimensional vectors, (R, G, B) * 4
			QVector<Vec<12>> vectors;
			VectorQuantizer<12> vq;
			vectorizeRGB(images, vectors);
			vq.compress(vectors, 256);
			devectorizeRGB(images, vectors, vq, pixelFormat, indexedImages, codebook);
		} else {
			// Alpha formats: 16-dimensional vectors, (A, R, G, B) * 4
			QVector<Vec<16>> vectors;
			VectorQuantizer<16> vq;
			vectorizeARGB(images, vectors);
			vq.compress(vectors, 256);
			devectorizeARGB(images, vectors, vq, pixelFormat, indexedImages, codebook);
		}
	}

	// Build the codebook: 256 codes of four 16BPP texels each.
	quint16 codes[256 * 4];
	memset(codes, 0, sizeof(codes)); // was a hard-coded 2048; keep in sync with the array
	for (int i=0; i<codebook.size(); i++) {
		const quint64& quad = codebook[i];
		// NOTE(review): texels are stored as tl, bl, tr, br (bit offsets
		// 48, 16, 32, 0) — presumably the column-major/twiddled order the
		// hardware expects; confirm before changing.
		codes[i * 4 + 0] = (quint16)((quad >> 48) & 0xFFFF);
		codes[i * 4 + 1] = (quint16)((quad >> 16) & 0xFFFF);
		codes[i * 4 + 2] = (quint16)((quad >> 32) & 0xFFFF);
		codes[i * 4 + 3] = (quint16)((quad >>  0) & 0xFFFF);
	}

	// Write the codebook (entry count derived from the array, not a magic 1024)
	const int codeWords = int(sizeof(codes) / sizeof(codes[0]));
	for (int i=0; i<codeWords; i++)
		stream << codes[i];

	// Write the 1x1 mipmap level as a single zero index byte
	if (images.imageCount() > 1)
		writeZeroes(stream, 1);

	// Write all mipmap levels in twiddled order
	for (int i=0; i<indexedImages.size(); i++) {
		const QImage& img = indexedImages[i];
		const int width = img.width();
		const Twiddler twiddler(width, img.height());
		const int pixels = width * img.height();

		for (int j=0; j<pixels; j++) {
			const int index = twiddler.index(j);
			const int x = index % width;
			const int y = index / width;
			stream << (quint8)img.pixelIndex(x, y);
		}
	}
}
Ejemplo n.º 12
0
// Aligns whole frames against each other: renders each frame of 'container'
// into a single image, aligns those rendered images with RecursiveAligner,
// and then shifts every image in each frame by the resulting offset.
void FrameAligner::align( class AContainer& container, class AProcessWatcher* watcher ) const{
	auto frames = container.getFrames();
	auto base_point = container.minPoint();
	
	// Render each frame into one combined image, so alignment operates on
	// frames as wholes rather than on individual images.
	ImageContainer images;
	for( auto& frame : frames ){
		FrameContainer current( container, frame );
		images.addImage( FloatRender( 1.0, 1.0 ).render( current ) );
		//TODO: this should be a sub-pixel precision render!
	}
	
	//TODO: also show progress for this!
	RecursiveAligner( settings, 1.0 ).align( images ); //TODO: make configurable
	
	// Apply each frame's aligned position back to its images, normalizing
	// offsets relative to the container's minimum point.
	ProgressWrapper( watcher ).loopAll( frames.size(), [&](int i){
			FrameContainer current( container, frames[i] );
			auto aligned_offset = base_point - current.minPoint();
			current.offsetAll( aligned_offset + (images.pos(i) - images.minPoint()) );
		} );
}
Ejemplo n.º 13
0
// Dispatches 16BPP texture writing based on the flags packed into
// 'textureType': strided, compressed, or plain uncompressed data.
void convert16BPP(QDataStream& stream, const ImageContainer& images, int textureType) {
	// Extract the pixel format bits from the packed texture type.
	const int pixelFormat = (textureType >> PIXELFORMAT_SHIFT) & PIXELFORMAT_MASK;

	// Strided takes priority over compressed; uncompressed is the fallback.
	if (textureType & FLAG_STRIDED) {
		writeStrideData(stream, images.getByIndex(0), pixelFormat);
		return;
	}
	if (textureType & FLAG_COMPRESSED) {
		writeCompressedData(stream, images, pixelFormat);
		return;
	}
	writeUncompressedData(stream, images, pixelFormat);
}
// Older-GStreamer variant: allocates a GstBuffer whose data pointer aliases
// the pixel storage of a freshly created PlanarYCbCrImage, so decoded video
// is written directly into the image. On new-enough GStreamer the image is
// attached to the buffer as qdata so its lifetime follows the buffer's.
// aPad, aOffset and aCaps are unused here.
// NOTE(review): unlike the gst_moz_video_buffer variant, 'container' is not
// null-checked — confirm GetImageContainer() cannot return null on this path.
GstFlowReturn GStreamerReader::AllocateVideoBufferFull(GstPad* aPad,
                                                       guint64 aOffset,
                                                       guint aSize,
                                                       GstCaps* aCaps,
                                                       GstBuffer** aBuf,
                                                       nsRefPtr<PlanarYCbCrImage>& aImage)
{
  /* allocate an image using the container */
  ImageContainer* container = mDecoder->GetImageContainer();
  ImageFormat format = PLANAR_YCBCR;
  /* CreateImage returns an owning reference; dont_AddRef adopts it so the
     refcount stays balanced. */
  PlanarYCbCrImage* img = reinterpret_cast<PlanarYCbCrImage*>(container->CreateImage(&format, 1).get());
  nsRefPtr<PlanarYCbCrImage> image = dont_AddRef(img);

  /* prepare a GstBuffer pointing to the underlying PlanarYCbCrImage buffer */
  GstBuffer* buf = gst_buffer_new();
  GST_BUFFER_SIZE(buf) = aSize;
  /* allocate the actual YUV buffer */
  GST_BUFFER_DATA(buf) = image->AllocateAndGetNewBuffer(aSize);

  aImage = image;

#if GST_VERSION_MICRO >= 36
  /* create a GBoxed handle to hold the image */
  BufferData* data = new BufferData(image);

  /* store it in a GValue so we can put it in a GstStructure */
  GValue value = {0,};
  g_value_init(&value, buffer_data_get_type());
  g_value_take_boxed(&value, data);

  /* store the value in the structure */
  GstStructure* structure = gst_structure_new("moz-reader-data", nullptr);
  gst_structure_take_value(structure, "image", &value);

  /* and attach the structure to the buffer */
  gst_buffer_set_qdata(buf, g_quark_from_string("moz-reader-data"), structure);
#endif

  *aBuf = buf;
  return GST_FLOW_OK;
}
Ejemplo n.º 15
0
// Loads 'files' into 'container': decodes every file (via the caching
// overload), optionally runs each image through de-telecine, and adds the
// results with the given alpha mask. Files with the "xml.overmix" suffix
// are treated as saved sessions and loaded via ImageContainerSaver instead.
void ImageLoader::loadImages( QStringList files, ImageContainer& container, Deteleciner& detelecine, int alpha_mask, AProcessWatcher* watcher ){
	// NOTE(review): assumes the overload yields one entry per file, i.e.
	// cache.size() == files.count() — confirm, since files[i] is indexed
	// with the cache loop counter below.
	auto cache = loadImages( files, watcher );
	container.prepareAdds( files.count() );
	
	for( unsigned i=0; i<cache.size(); i++ ){ //TODO: Show process
		auto file = files[i];
		
		// Saved-session files bypass normal image handling entirely.
		if( QFileInfo( file ).completeSuffix() == "xml.overmix" )
			ImageContainerSaver::load( container, file );
		else{
			auto& img = cache[i];
			
			//De-telecine
			if( detelecine.isActive() ){
				img = detelecine.process( img );
				file = ""; //The result might be a combination of several files
			}
			// Skip invalid images — presumably frames the de-telecine step
			// has buffered and not yet emitted; confirm.
			if( !img.is_valid() )
				continue;
			
			container.addImage( std::move( img ), alpha_mask, -1, file );
		}
	}
}
Ejemplo n.º 16
0
// Plots the alignment position of every image, one line per frame, inside
// an embedded QCustomPlot with drag/zoom/select interactions enabled.
MovementGraph::MovementGraph( ImageContainer& images ) : QWidget(nullptr), images(images) {
	// Embed the plot so it fills this widget
	auto graph = new QCustomPlot( this );
	setLayout( new QHBoxLayout( this ) );
	layout()->addWidget( graph );
	layout()->setContentsMargins( 0, 0, 0, 0 );
	
	auto offsets = imagesMoves( images );
	
	// One plotted line per frame, one point per image in that frame
	for( auto frame : images.getFrames() ){
		Line frameLine;
		FrameContainer frameImages( images, frame );
		for( unsigned index=0; index<frameImages.count(); ++index )
			addPoint( frameLine, frameImages, index, offsets );
		frameLine.addToPlot( *graph, getColor( frame ) );
	}
	
	setLimit( *graph, images, offsets );
	graph->setInteractions( QCP::iRangeDrag | QCP::iRangeZoom | QCP::iSelectPlottables );
	
	resize( 640, 480 );
	show();
}