Example #1
void PNGImageDecoder::decode(bool onlySize)
{
    if (failed())
        return;

    if (!m_reader)
        m_reader = adoptPtr(new PNGImageReader(this));

    // If we couldn't decode the image but we've received all the data, decoding
    // has failed.
    if (!m_reader->decode(*m_data, onlySize) && isAllDataReceived())
        setFailed();
    // If we're done decoding the image, we don't need the PNGImageReader
    // anymore.  (If we failed, |m_reader| has already been cleared.)
    else if (isComplete())
        m_reader.clear();
}
Example #2
void BMPImageDecoder::decode(bool onlySize)
{
#if PLATFORM(CHROMIUM)
    TRACE_EVENT("BMPImageDecoder::decode", this, 0);
#endif
    if (failed())
        return;

    // If we couldn't decode the image but we've received all the data, decoding
    // has failed.
    if (!decodeHelper(onlySize) && isAllDataReceived())
        setFailed();
    // If we're done decoding the image, we don't need the BMPImageReader
    // anymore.  (If we failed, |m_reader| has already been cleared.)
    else if (!m_frameBufferCache.isEmpty() && (m_frameBufferCache.first().status() == ImageFrame::FrameComplete))
        m_reader.clear();
}
Example #3
bool ICOImageDecoder::processDirectory() {
  // Read directory.
  ASSERT(!m_decodedOffset);
  if (m_data->size() < sizeOfDirectory)
    return false;
  const uint16_t fileType = readUint16(2);
  m_dirEntriesCount = readUint16(4);
  m_decodedOffset = sizeOfDirectory;

  // See if this is an icon filetype we understand, and make sure we have at
  // least one entry in the directory.
  if (((fileType != ICON) && (fileType != CURSOR)) || (!m_dirEntriesCount))
    return setFailed();

  m_fileType = static_cast<FileType>(fileType);
  return true;
}
Example #4
void JPEGImageDecoder::decode(bool onlySize)
{
    if (failed())
        return;

    if (!m_reader)
        m_reader = adoptPtr(new JPEGImageReader(this));

    // If we couldn't decode the image but we've received all the data, decoding
    // has failed.
    if (!m_reader->decode(*m_data, onlySize) && isAllDataReceived())
        setFailed();
    // If we're done decoding the image, we don't need the JPEGImageReader
    // anymore.  (If we failed, |m_reader| has already been cleared.)
    else if (!m_frameBufferCache.isEmpty() && (m_frameBufferCache[0].status() == ImageFrame::FrameComplete))
        m_reader.clear();
}
Example #5
void PNGImageDecoder::decode(bool onlySize)
{
    if (failed())
        return;

    if (!m_reader)
        m_reader = adoptPtr(new PNGImageReader(this));

    // If we couldn't decode the image but have received all the data, decoding
    // has failed.
    if (!m_reader->decode(*m_data, onlySize) && isAllDataReceived())
        setFailed();

    // If decoding is done or failed, we don't need the PNGImageReader anymore.
    if (isComplete(this) || failed())
        m_reader.clear();
}
Example #6
void ICOImageDecoder::decode(size_t index, bool onlySize)
{
    if (failed())
        return;

    // If we couldn't decode the image but we've received all the data, decoding
    // has failed.
    if ((!decodeDirectory() || (!onlySize && !decodeAtIndex(index))) && isAllDataReceived())
        setFailed();
    // If we're done decoding this frame, we don't need the BMPImageReader or
    // PNGImageDecoder anymore.  (If we failed, these have already been
    // cleared.)
    else if ((m_frameBufferCache.size() > index) && (m_frameBufferCache[index].status() == ImageFrame::FrameComplete)) {
        m_bmpReaders[index] = nullptr;
        m_pngDecoders[index] = nullptr;
    }
}
Example #7
CLSSIS3820ScalerDarkCurrentMeasurementAction::CLSSIS3820ScalerDarkCurrentMeasurementAction(CLSSIS3820ScalerDarkCurrentMeasurementActionInfo *info, QObject *parent) :
    AMListAction3(info, AMListAction3::Sequential, parent)
{
	CLSSIS3820Scaler *scaler = CLSBeamline::clsBeamline()->scaler();
	double secondsDwell = scalerDarkCurrentMeasurementActionInfo()->dwellTime();

	connect( this, SIGNAL(failed()), this, SLOT(onActionFailed()) );

	if (scaler && scaler->isConnected() && secondsDwell > 0) {

		// pre-measurement settings.
		double oldDwell = scaler->dwellTime();

		// first turn off beam.
//		addSubAction(AMBeamline::bl()->createTurnOffBeamActions());

		// set the scaler's dwell time to new time.
		addSubAction(scaler->createDwellTimeAction3(secondsDwell));

		// initiate a scaler measurement and wait until it is complete.
		addSubAction(scaler->createStartAction3(true));
		addSubAction(scaler->createWaitForDwellFinishedAction(secondsDwell + 5.0));

		// notify attached and capable scaler channel detectors that the latest measurement was a dark current measurement.
		AMListAction3 *notifyChannelDetectors = new AMListAction3(new AMListActionInfo3("Set last measurement as dark current measurement", "Set last measurement as dark current measurement"));

		for (int i = 0; i < scaler->channels().count(); i++) {
			CLSSIS3820ScalerChannel *channel = scaler->channelAt(i);

			if (channel && channel->isEnabled() && channel->detector() && channel->detector()->canDoDarkCurrentCorrection()) {
				notifyChannelDetectors->addSubAction(channel->detector()->createSetLastMeasurementAsDarkCurrentAction());
			}
		}

		addSubAction(notifyChannelDetectors);

		// reset settings to pre-measurement conditions.
		addSubAction(scaler->createDwellTimeAction3(oldDwell));

	} else {
		AMErrorMon::alert(this, CLSSIS3820SCALERDARKCURRENTMEASUREMENTACTION_SCALER_NOT_VALID, "Failed to complete dark current measurement--scaler not valid.");
		setFailed();
	}

}
Example #8
ImageFrame* WEBPImageDecoder::frameBufferAtIndex(size_t index)
{
    if (index >= frameCount())
        return 0;

    ImageFrame& frame = m_frameBufferCache[index];
    if (frame.status() == ImageFrame::FrameComplete)
        return &frame;

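    // Walk the chain of required previous frames, collecting each frame that
    // still needs decoding, until we reach an already-complete frame or one
    // with no dependency (kNotFound).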
    Vector<size_t> framesToDecode;
    size_t frameToDecode = index;
    do {
        framesToDecode.append(frameToDecode);
        frameToDecode = m_frameBufferCache[frameToDecode].requiredPreviousFrameIndex();
    } while (frameToDecode != kNotFound && m_frameBufferCache[frameToDecode].status() != ImageFrame::FrameComplete);

    ASSERT(m_demux);
    for (size_t i = framesToDecode.size(); i > 0; --i) {
        size_t frameIndex = framesToDecode[i - 1];
        if ((m_formatFlags & ANIMATION_FLAG) && !initFrameBuffer(frameIndex))
            return 0;
        WebPIterator webpFrame;
        if (!WebPDemuxGetFrame(m_demux, frameIndex + 1, &webpFrame))
            return 0;
        PlatformInstrumentation::willDecodeImage("WEBP");
        decode(webpFrame.fragment.bytes, webpFrame.fragment.size, false, frameIndex);
        PlatformInstrumentation::didDecodeImage();
        WebPDemuxReleaseIterator(&webpFrame);

        if (failed())
            return 0;

        // We need more data to continue decoding.
        if (m_frameBufferCache[frameIndex].status() != ImageFrame::FrameComplete)
            break;
    }

    // It is also a fatal error if all data is received and we have decoded all
    // frames available but the file is truncated.
    if (index >= m_frameBufferCache.size() - 1 && isAllDataReceived() && m_demux && m_demuxState != WEBP_DEMUX_DONE)
        setFailed();

    frame.notifyBitmapIfPixelsChanged();
    return &frame;
}
Example #9
void VESPERSSetStringAction::onValueChanged()
{
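	// Compare the PV's current string value against the desired text and mark this action as succeeded or failed accordingly.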
	const AMProcessVariable *pv = ((AMPVControl *)(control_))->readPV();
	QString string = VESPERS::pvToString(pv);
	QString text = ((VESPERSSetStringActionInfo *)info())->text();

	if (string == text){

		AMErrorMon::debug(this, VESPERSSETSTRINGACTION_SUCCEEDED, QString("[SUCCEEDED] Set string.  Desired: %1 Result: %2.").arg(text).arg(string));
		setSucceeded();
	}

	else{

		AMErrorMon::debug(this, VESPERSSETSTRINGACTION_FAILED, QString("[FAILED] Set string.  Desired: %1 Result: %2.").arg(text).arg(string));
		setFailed();
	}
}
Example #10
// TODO: Grow m_frameBufferCache by more than one entry at a time instead of incrementing its size by one per iteration.
void ImageDecoderQt::forceLoadEverything()
{
    int imageCount = 0;

    do {
        m_frameBufferCache.resize(++imageCount);
    } while (internalHandleCurrentImage(imageCount - 1));

    // If we failed to decode the first image, we actually have no images and
    // need to set the failed bit. Otherwise, we want to forget about the last
    // (failed) attempt to decode an image.
    m_frameBufferCache.resize(imageCount - 1);
    for (size_t i = 0; i < m_frameBufferCache.size(); ++i)
        m_frameBufferCache[i].setPremultiplyAlpha(m_premultiplyAlpha);
    if (imageCount == 1)
        setFailed();
}
Example #11
void AMScanAction::onControllerInitialized()
{
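	// When the action is being skipped, cancel the controller and treat its cancellation as success; otherwise try to start it normally.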
	if (state() == AMAction3::Skipping){

		disconnect(controller_, SIGNAL(cancelled()), this, SLOT(onControllerCancelled()));
		connect(controller_, SIGNAL(cancelled()), this, SLOT(onControllerSucceeded()));
		controller_->cancel();
	}

	else if (!controller_->start()){

		AMErrorMon::alert(this, AMSCANACTION_CANT_START_CONTROLLER, "Could not start the scan controller.");
		setFailed();
	}

	else {
		setStarted();
	}
}
Example #12
void ImageDecoderQt::internalDecodeSize()
{
    ASSERT(m_reader);

    // If we get an empty QSize(), something failed.
    QSize size = m_reader->size();
    if (size.isEmpty()) {
        setFailed();
        return clearPointers();
    }

    setSize(size.width(), size.height());

    // We don't need the tables set by prepareScaleDataIfNecessary,
    // but their dimensions are used by ImageDecoder::scaledSize().
    prepareScaleDataIfNecessary();
    if (m_scaled)
        m_reader->setScaledSize(scaledSize());
}
Example #13
UString URPCObject::processMessage(URPCEnvelope& envelope, bool& bContainsFault)
{
   U_TRACE(0, "URPCObject::processMessage(%p,%p)", &envelope, &bContainsFault)

   U_INTERNAL_ASSERT_POINTER(URPCMethod::encoder)

   UString retval;

   // Iterate over the list of methods

   URPCMethod* method = find(envelope.getMethodName());

   if (method == 0)
      {
      // Return object not found error. This would be a Client fault

      setFailed();

      URPCMethod::pFault->setFaultReason(U_CONSTANT_TO_PARAM("The requested method does not exist on this server"));

      bContainsFault = true;
      retval         = URPCMethod::encoder->encodeFault(URPCMethod::pFault);
      }
   else
      {
      UString ns = envelope.getNsName();

      U_INTERNAL_DUMP("envelope.nsName = %V", ns.rep)

      // check the name of namespace qualified element information (gSOAP)

      if (ns.empty()) ns = method->getNamespaces();
      if (ns.empty()) ns = *UString::str_ns;

      bContainsFault = (method->execute(envelope) == false);
      retval         = URPCMethod::encoder->encodeMethodResponse(*method, ns);
      }

   U_RETURN_STRING(retval);
}
Example #14
bool ICOImageDecoder::processDirectoryEntries() {
  // Read directory entries.
  ASSERT(m_decodedOffset == sizeOfDirectory);
  if ((m_decodedOffset > m_data->size()) ||
      ((m_data->size() - m_decodedOffset) <
       (m_dirEntriesCount * sizeOfDirEntry)))
    return false;

  // Enlarge member vectors to hold all the entries.
  m_dirEntries.resize(m_dirEntriesCount);
  m_bmpReaders.resize(m_dirEntriesCount);
  m_pngDecoders.resize(m_dirEntriesCount);

  for (IconDirectoryEntries::iterator i(m_dirEntries.begin());
       i != m_dirEntries.end(); ++i)
    *i = readDirectoryEntry();  // Updates m_decodedOffset.

  // Make sure the specified image offsets are past the end of the directory
  // entries.
  for (IconDirectoryEntries::iterator i(m_dirEntries.begin());
       i != m_dirEntries.end(); ++i) {
    if (i->m_imageOffset < m_decodedOffset)
      return setFailed();
  }

  DEFINE_THREAD_SAFE_STATIC_LOCAL(
      blink::CustomCountHistogram, dimensionsLocationHistogram,
      new blink::CustomCountHistogram(
          "Blink.DecodedImage.EffectiveDimensionsLocation.ICO", 0, 50000, 50));
  dimensionsLocationHistogram.count(m_decodedOffset - 1);

  // Arrange frames in decreasing quality order.
  std::sort(m_dirEntries.begin(), m_dirEntries.end(), compareEntries);

  // The image size is the size of the largest entry.
  const IconDirectoryEntry& dirEntry = m_dirEntries.first();
  // Technically, this next call shouldn't be able to fail, since the width
  // and height here are each <= 256, and |m_frameSize| is empty.
  return setSize(dirEntry.m_size.width(), dirEntry.m_size.height());
}
Example #15
void EglWaylandBackend::init()
{
    if (!initRenderingContext()) {
        setFailed("Could not initialize rendering context");
        return;
    }

    initEGL();
    GLPlatform *glPlatform = GLPlatform::instance();
    glPlatform->detect(EglPlatformInterface);
    glPlatform->printResults();
    initGL(EglPlatformInterface);

    setSupportsBufferAge(false);

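    // Enable buffer-age support only when the EGL_EXT_buffer_age extension is
    // present and it has not been disabled via the KWIN_USE_BUFFER_AGE
    // environment variable.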
    if (hasGLExtension("EGL_EXT_buffer_age")) {
        const QByteArray useBufferAge = qgetenv("KWIN_USE_BUFFER_AGE");

        if (useBufferAge != "0")
            setSupportsBufferAge(true);
    }
}
Example #16
void AMDetectorReadAction::startImplementation(){
	// If you still don't have a detector, check the exposed detectors one last time.
	//if(!detector_)
	//	detector_ = AMBeamline::bl()->exposedDetectorByInfo(*(detectorReadInfo()->detectorInfo()));
	if(!detector_ && AMBeamlineSupport::beamlineDetectorAPI())
		detector_ = AMBeamlineSupport::beamlineDetectorAPI()->exposedDetectorByInfo(*(detectorReadInfo()->detectorInfo()));

	if(!detector_) {
		AMErrorMon::alert(this,
						  AMDETECTORREADACTION_NO_VALID_DETECTOR,
						  QString("There was an error reading the detector '%1', because the detector was not found. Please report this problem to the Acquaman developers.").arg(detectorReadInfo()->name()));
		setFailed();
		return;
	}

	if(detector_->readMethod() == AMDetectorDefinitions::WaitRead){
		// connect to detector initialization signals
		connect(detector_, SIGNAL(newValuesAvailable()), this, SLOT(onDetectorNewValuesAvailable()));
	}
	else
		internalSetSucceeded();
}
Example #17
void GIFImageDecoder::parse(GIFParseQuery query)
{
    if (failed())
        return;

    if (!m_reader) {
        m_reader = adoptPtr(new GIFImageReader(this));
        m_reader->setData(m_data);
    }

    if (!m_reader->parse(query)) {
        setFailed();
        return;
    }

    const size_t oldSize = m_frameBufferCache.size();
    m_frameBufferCache.resize(m_reader->imagesCount());

    for (size_t i = oldSize; i < m_reader->imagesCount(); ++i) {
        ImageFrame& buffer = m_frameBufferCache[i];
        const GIFFrameContext* frameContext = m_reader->frameContext(i);
        buffer.setPremultiplyAlpha(m_premultiplyAlpha);
        buffer.setRequiredPreviousFrameIndex(findRequiredPreviousFrame(i));
        buffer.setDuration(frameContext->delayTime);
        buffer.setDisposalMethod(frameContext->disposalMethod);

        // Initialize the frame rect in our buffer.
        IntRect frameRect(frameContext->xOffset, frameContext->yOffset, frameContext->width, frameContext->height);

        // Make sure the frameRect doesn't extend outside the buffer.
        if (frameRect.maxX() > size().width())
            frameRect.setWidth(size().width() - frameContext->xOffset);
        if (frameRect.maxY() > size().height())
            frameRect.setHeight(size().height() - frameContext->yOffset);

        buffer.setOriginalFrameRect(frameRect);
    }
}
Example #18
void ICOImageDecoder::decode(size_t index, bool onlySize) {
  if (failed())
    return;

  // Defensively clear the FastSharedBufferReader's cache, as another caller
  // may have called SharedBuffer::mergeSegmentsIntoBuffer().
  m_fastReader.clearCache();

  // If we couldn't decode the image but we've received all the data, decoding
  // has failed.
  if ((!decodeDirectory() || (!onlySize && !decodeAtIndex(index))) &&
      isAllDataReceived()) {
    setFailed();
    // If we're done decoding this frame, we don't need the BMPImageReader or
    // PNGImageDecoder anymore.  (If we failed, these have already been
    // cleared.)
  } else if ((m_frameBufferCache.size() > index) &&
             (m_frameBufferCache[index].getStatus() ==
              ImageFrame::FrameComplete)) {
    m_bmpReaders[index].reset();
    m_pngDecoders[index].reset();
  }
}
Example #19
void ICOImageDecoder::decode(size_t index, bool onlySize)
{
    if (failed())
        return;

    // If we couldn't decode the image but we've received all the data, decoding
    // has failed.
    if ((!decodeDirectory() || (!onlySize && !decodeAtIndex(index))) && isAllDataReceived())
        setFailed();
    // If we're done decoding this frame, we don't need the BMPImageReader or
    // PNGImageDecoder anymore.  (If we failed, these have already been
    // cleared.)
    else if ((m_frameBufferCache.size() > index) && m_frameBufferCache[index].isComplete()) {
        m_bmpReaders[index] = nullptr;
        m_pngDecoders[index] = nullptr;
    }

    if (m_frameBufferCache.isEmpty())
        m_frameBufferCache.resize(m_dirEntries.size());
    // CAUTION: We must not resize m_frameBufferCache again after this, as
    // decodeAtIndex() may give a BMPImageReader a pointer to one of the
    // entries.
}
Example #20
bool ICOImageDecoder::processDirectoryEntries()
{
    // Read directory entries.
    ASSERT(m_decodedOffset == sizeOfDirectory);
    if ((m_decodedOffset > m_data->size()) || ((m_data->size() - m_decodedOffset) < (m_dirEntries.size() * sizeOfDirEntry)))
        return false;
    for (IconDirectoryEntries::iterator i(m_dirEntries.begin()); i != m_dirEntries.end(); ++i)
        *i = readDirectoryEntry();  // Updates m_decodedOffset.

    // Make sure the specified image offsets are past the end of the directory
    // entries.
    for (IconDirectoryEntries::iterator i(m_dirEntries.begin()); i != m_dirEntries.end(); ++i) {
        if (i->m_imageOffset < m_decodedOffset)
            return setFailed();
    }

    // Arrange frames in decreasing quality order.
    std::sort(m_dirEntries.begin(), m_dirEntries.end(), compareEntries);

    // The image size is the size of the largest entry.
    const IconDirectoryEntry& dirEntry = m_dirEntries.first();
    setSize(dirEntry.m_size.width(), dirEntry.m_size.height());
    return true;
}
Example #21
void KJavaApplet::create( )
{
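    // Ask the applet context to create this applet; mark it as failed if creation does not succeed.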
    if (  !context->create( this ) )
        setFailed();
    d->reallyExists = true;
}
Example #22
bool WEBPImageDecoder::decode(bool onlySize)
{
    if (failed())
        return false;

    const uint8_t* dataBytes = reinterpret_cast<const uint8_t*>(m_data->data());
    const size_t dataSize = m_data->size();

    if (!ImageDecoder::isSizeAvailable()) {
        static const size_t imageHeaderSize = 30;
        if (dataSize < imageHeaderSize)
            return false;
        int width, height;
#ifdef QCMS_WEBP_COLOR_CORRECTION
        WebPData inputData = { dataBytes, dataSize };
        WebPDemuxState state;
        WebPDemuxer* demuxer = WebPDemuxPartial(&inputData, &state);
        if (!demuxer)
            return setFailed();

        width = WebPDemuxGetI(demuxer, WEBP_FF_CANVAS_WIDTH);
        height = WebPDemuxGetI(demuxer, WEBP_FF_CANVAS_HEIGHT);
        m_formatFlags = WebPDemuxGetI(demuxer, WEBP_FF_FORMAT_FLAGS);
        m_hasAlpha = !!(m_formatFlags & ALPHA_FLAG);

        WebPDemuxDelete(demuxer);
        if (state <= WEBP_DEMUX_PARSING_HEADER)
            return false;
#elif (WEBP_DECODER_ABI_VERSION >= 0x0163)
        WebPBitstreamFeatures features;
        if (WebPGetFeatures(dataBytes, dataSize, &features) != VP8_STATUS_OK)
            return setFailed();
        width = features.width;
        height = features.height;
        m_hasAlpha = features.has_alpha;
#else
        // Earlier versions won't be able to display WebP files with alpha.
        if (!WebPGetInfo(dataBytes, dataSize, &width, &height))
            return setFailed();
        m_hasAlpha = false;
#endif
        if (!setSize(width, height))
            return setFailed();
    }

    ASSERT(ImageDecoder::isSizeAvailable());
    if (onlySize)
        return true;

    ASSERT(!m_frameBufferCache.isEmpty());
    ImageFrame& buffer = m_frameBufferCache[0];
    ASSERT(buffer.status() != ImageFrame::FrameComplete);

    if (buffer.status() == ImageFrame::FrameEmpty) {
        if (!buffer.setSize(size().width(), size().height()))
            return setFailed();
        buffer.setStatus(ImageFrame::FramePartial);
        buffer.setHasAlpha(m_hasAlpha);
        buffer.setOriginalFrameRect(IntRect(IntPoint(), size()));
    }

    if (!m_decoder) {
        WEBP_CSP_MODE mode = outputMode(m_hasAlpha);
        if (!m_premultiplyAlpha)
            mode = outputMode(false);
        if ((m_formatFlags & ICCP_FLAG) && !ignoresGammaAndColorProfile())
            mode = MODE_RGBA; // Decode to RGBA for input to libqcms.
        int rowStride = size().width() * sizeof(ImageFrame::PixelData);
        uint8_t* output = reinterpret_cast<uint8_t*>(buffer.getAddr(0, 0));
        int outputSize = size().height() * rowStride;
        m_decoder = WebPINewRGB(mode, output, outputSize, rowStride);
        if (!m_decoder)
            return setFailed();
    }

    switch (WebPIUpdate(m_decoder, dataBytes, dataSize)) {
    case VP8_STATUS_OK:
        if ((m_formatFlags & ICCP_FLAG) && !ignoresGammaAndColorProfile()) 
            applyColorProfile(dataBytes, dataSize, buffer);
        buffer.setStatus(ImageFrame::FrameComplete);
        clear();
        return true;
    case VP8_STATUS_SUSPENDED:
        if ((m_formatFlags & ICCP_FLAG) && !ignoresGammaAndColorProfile()) 
            applyColorProfile(dataBytes, dataSize, buffer);
        return false;
    default:
        clear();                         
        return setFailed();
    }
}
Example #23
bool BMPImageReader::processRLEData()
{
    if (m_decodedOffset > m_data->size())
        return false;

    // RLE decoding is poorly specified.  Two main problems:
    // (1) Are EOL markers necessary?  What happens when we have too many
    //     pixels for one row?
    //     http://www.fileformat.info/format/bmp/egff.htm says extra pixels
    //     should wrap to the next line.  Real BMPs I've encountered seem to
    //     instead expect extra pixels to be ignored until the EOL marker is
    //     seen, although this has only happened in a few cases and I suspect
    //     those BMPs may be invalid.  So we only change lines on EOL (or Delta
    //     with dy > 0), and fail in most cases when pixels extend past the end
    //     of the line.
    // (2) When Delta, EOL, or EOF are seen, what happens to the "skipped"
    //     pixels?
    //     http://www.daubnet.com/formats/BMP.html says these should be filled
    //     with color 0.  However, the "do nothing" and "don't care" comments
    //     of other references suggest leaving these alone, i.e. letting them
    //     be transparent to the background behind the image.  This seems to
    //     match how MSPAINT treats BMPs, so we do that.  Note that when we
    //     actually skip pixels for a case like this, we need to note on the
    //     framebuffer that we have alpha.

    // Impossible to decode row-at-a-time, so just do things as a stream of
    // bytes.
    while (true) {
        // Every entry takes at least two bytes; bail if there isn't enough
        // data.
        if ((m_data->size() - m_decodedOffset) < 2)
            return false;

        // For every entry except EOF, we'd better not have reached the end of
        // the image.
        const uint8_t count = m_data->data()[m_decodedOffset];
        const uint8_t code = m_data->data()[m_decodedOffset + 1];
        if ((count || (code != 1)) && pastEndOfImage(0))
            return setFailed();

        // Decode.
        if (count == 0) {
            switch (code) {
            case 0:  // Magic token: EOL
                // Skip any remaining pixels in this row.
                if (m_coord.x() < m_parent->size().width())
                    m_buffer->setHasAlpha(true);
                moveBufferToNextRow();

                m_decodedOffset += 2;
                break;

            case 1:  // Magic token: EOF
                // Skip any remaining pixels in the image.
                if ((m_coord.x() < m_parent->size().width()) || (m_isTopDown ? (m_coord.y() < (m_parent->size().height() - 1)) : (m_coord.y() > 0)))
                    m_buffer->setHasAlpha(true);
                return true;

            case 2: {  // Magic token: Delta
                // The next two bytes specify dx and dy.  Bail if there isn't
                // enough data.
                if ((m_data->size() - m_decodedOffset) < 4)
                    return false;

                // Fail if this takes us past the end of the desired row or
                // past the end of the image.
                const uint8_t dx = m_data->data()[m_decodedOffset + 2];
                const uint8_t dy = m_data->data()[m_decodedOffset + 3];
                if (dx || dy)
                    m_buffer->setHasAlpha(true);
                if (((m_coord.x() + dx) > m_parent->size().width()) || pastEndOfImage(dy))
                    return setFailed();

                // Skip intervening pixels.
                m_coord.move(dx, m_isTopDown ? dy : -dy);

                m_decodedOffset += 4;
                break;
            }

            default:  // Absolute mode
                // |code| pixels specified as in BI_RGB, zero-padded at the end
                // to a multiple of 16 bits.
                // Because processNonRLEData() expects m_decodedOffset to
                // point to the beginning of the pixel data, bump it past
                // the escape bytes and then reset if decoding failed.
                m_decodedOffset += 2;
                if (!processNonRLEData(true, code)) {
                    m_decodedOffset -= 2;
                    return false;
                }
                break;
            }
        } else {  // Encoded mode
            // The following color data is repeated for |count| total pixels.
            // Strangely, some BMPs seem to specify excessively large counts
            // here; ignore pixels past the end of the row.
            const int endX = std::min(m_coord.x() + count, m_parent->size().width());

            if (m_infoHeader.biCompression == RLE24) {
                // Bail if there isn't enough data.
                if ((m_data->size() - m_decodedOffset) < 4)
                    return false;

                // One BGR triple that we copy |count| times.
                fillRGBA(endX, m_data->data()[m_decodedOffset + 3], m_data->data()[m_decodedOffset + 2], code, 0xff);
                m_decodedOffset += 4;
            } else {
                // RLE8 has one color index that gets repeated; RLE4 has two
                // color indexes in the upper and lower 4 bits of the byte,
                // which are alternated.
                size_t colorIndexes[2] = {code, code};
                if (m_infoHeader.biCompression == RLE4) {
                    colorIndexes[0] = (colorIndexes[0] >> 4) & 0xf;
                    colorIndexes[1] &= 0xf;
                }
                if ((colorIndexes[0] >= m_infoHeader.biClrUsed) || (colorIndexes[1] >= m_infoHeader.biClrUsed))
                    return setFailed();
                for (int which = 0; m_coord.x() < endX; ) {
                    setI(colorIndexes[which]);
                    which = !which;
                }

                m_decodedOffset += 2;
            }
        }
    }
}
Example #24
bool BMPImageReader::processBitmasks()
{
    // Create m_bitMasks[] values.
    if (m_infoHeader.biCompression != BITFIELDS) {
        // The format doesn't actually use bitmasks.  To simplify the decode
        // logic later, create bitmasks for the RGB data.  For Windows V4+,
        // this overwrites the masks we read from the header, which are
        // supposed to be ignored in non-BITFIELDS cases.
        // 16 bits:    MSB <-                     xRRRRRGG GGGBBBBB -> LSB
        // 24/32 bits: MSB <- [AAAAAAAA] RRRRRRRR GGGGGGGG BBBBBBBB -> LSB
        const int numBits = (m_infoHeader.biBitCount == 16) ? 5 : 8;
        for (int i = 0; i <= 2; ++i)
            m_bitMasks[i] = ((static_cast<uint32_t>(1) << (numBits * (3 - i))) - 1) ^ ((static_cast<uint32_t>(1) << (numBits * (2 - i))) - 1);

        // For Windows V4+ 32-bit RGB, don't overwrite the alpha mask from the
        // header (see note in readInfoHeader()).
        if (m_infoHeader.biBitCount < 32)
            m_bitMasks[3] = 0;
        else if (!isWindowsV4Plus())
            m_bitMasks[3] = static_cast<uint32_t>(0xff000000);
    } else if (!isWindowsV4Plus()) {
        // For Windows V4+ BITFIELDS mode bitmaps, this was already done when
        // we read the info header.

        // Fail if we don't have enough file space for the bitmasks.
        static const size_t SIZEOF_BITMASKS = 12;
        if (((m_headerOffset + m_infoHeader.biSize + SIZEOF_BITMASKS) < (m_headerOffset + m_infoHeader.biSize)) || (m_imgDataOffset && (m_imgDataOffset < (m_headerOffset + m_infoHeader.biSize + SIZEOF_BITMASKS))))
            return setFailed();

        // Read bitmasks.
        if ((m_data->size() - m_decodedOffset) < SIZEOF_BITMASKS)
            return false;
        m_bitMasks[0] = readUint32(0);
        m_bitMasks[1] = readUint32(4);
        m_bitMasks[2] = readUint32(8);
        // No alpha in anything other than Windows V4+.
        m_bitMasks[3] = 0;

        m_decodedOffset += SIZEOF_BITMASKS;
    }

    // We've now decoded all the non-image data we care about.  Skip anything
    // else before the actual raster data.
    if (m_imgDataOffset)
        m_decodedOffset = m_imgDataOffset;
    m_needToProcessBitmasks = false;

    // Check masks and set shift values.
    for (int i = 0; i < 4; ++i) {
        // Trim the mask to the allowed bit depth.  Some Windows V4+ BMPs
        // specify a bogus alpha channel in bits that don't exist in the pixel
        // data (for example, bits 25-31 in a 24-bit RGB format).
        if (m_infoHeader.biBitCount < 32)
            m_bitMasks[i] &= ((static_cast<uint32_t>(1) << m_infoHeader.biBitCount) - 1);

        // For empty masks (common on the alpha channel, especially after the
        // trimming above), quickly clear the shifts and continue, to avoid an
        // infinite loop in the counting code below.
        uint32_t tempMask = m_bitMasks[i];
        if (!tempMask) {
            m_bitShiftsRight[i] = m_bitShiftsLeft[i] = 0;
            continue;
        }

        // Make sure bitmask does not overlap any other bitmasks.
        for (int j = 0; j < i; ++j) {
            if (tempMask & m_bitMasks[j])
                return setFailed();
        }

        // Count offset into pixel data.
        for (m_bitShiftsRight[i] = 0; !(tempMask & 1); tempMask >>= 1)
            ++m_bitShiftsRight[i];

        // Count size of mask.
        for (m_bitShiftsLeft[i] = 8; tempMask & 1; tempMask >>= 1)
            --m_bitShiftsLeft[i];

        // Make sure bitmask is contiguous.
        if (tempMask)
            return setFailed();

        // Since RGBABuffer tops out at 8 bits per channel, adjust the shift
        // amounts to use the most significant 8 bits of the channel.
        if (m_bitShiftsLeft[i] < 0) {
            m_bitShiftsRight[i] -= m_bitShiftsLeft[i];
            m_bitShiftsLeft[i] = 0;
        }
    }

    return true;
}
Example #25
bool BMPImageReader::readInfoHeader()
{
    // Pre-initialize some fields that not all headers set.
    m_infoHeader.biCompression = RGB;
    m_infoHeader.biClrUsed = 0;

    if (m_isOS21x) {
        m_infoHeader.biWidth = readUint16(4);
        m_infoHeader.biHeight = readUint16(6);
        ASSERT(m_andMaskState == None);  // ICO is a Windows format, not OS/2!
        m_infoHeader.biBitCount = readUint16(10);
        return true;
    }

    m_infoHeader.biWidth = readUint32(4);
    m_infoHeader.biHeight = readUint32(8);
    if (m_andMaskState != None)
        m_infoHeader.biHeight /= 2;
    m_infoHeader.biBitCount = readUint16(14);

    // Read compression type, if present.
    if (m_infoHeader.biSize >= 20) {
        uint32_t biCompression = readUint32(16);

        // Detect OS/2 2.x-specific compression types.
        if ((biCompression == 3) && (m_infoHeader.biBitCount == 1)) {
            m_infoHeader.biCompression = HUFFMAN1D;
            m_isOS22x = true;
        } else if ((biCompression == 4) && (m_infoHeader.biBitCount == 24)) {
            m_infoHeader.biCompression = RLE24;
            m_isOS22x = true;
        } else if (biCompression > 5)
            return setFailed();  // Some type we don't understand.
        else
            m_infoHeader.biCompression = static_cast<CompressionType>(biCompression);
    }

    // Read colors used, if present.
    if (m_infoHeader.biSize >= 36)
        m_infoHeader.biClrUsed = readUint32(32);

    // Windows V4+ can safely read the four bitmasks from 40-56 bytes in, so do
    // that here.  If the bit depth is less than 16, these values will be
    // ignored by the image data decoders.  If the bit depth is at least 16 but
    // the compression format isn't BITFIELDS, these values will be ignored and
    // overwritten* in processBitmasks().
    // NOTE: We allow alpha here.  Microsoft doesn't really document this well,
    // but some BMPs appear to use it.
    //
    // For non-Windows V4+, m_bitMasks[] et al. will be initialized later
    // during processBitmasks().
    //
    // *Except the alpha channel.  Bizarrely, some RGB bitmaps expect decoders
    // to pay attention to the alpha mask here, so there's a special case in
    // processBitmasks() that doesn't always overwrite that value.
    if (isWindowsV4Plus()) {
        m_bitMasks[0] = readUint32(40);
        m_bitMasks[1] = readUint32(44);
        m_bitMasks[2] = readUint32(48);
        m_bitMasks[3] = readUint32(52);
    }

    // Detect top-down BMPs.
    if (m_infoHeader.biHeight < 0) {
        m_isTopDown = true;
        m_infoHeader.biHeight = -m_infoHeader.biHeight;
    }

    return true;
}
Example #26
bool WEBPImageDecoder::decode(bool onlySize)
{
    if (failed())
        return false;

#if defined(__LB_SHELL__)
    // We don't want progressive decoding.
    if (!isAllDataReceived())
        return false;
#endif

    const uint8_t* dataBytes = reinterpret_cast<const uint8_t*>(m_data->data());
    const size_t dataSize = m_data->size();

    if (!ImageDecoder::isSizeAvailable()) {
        static const size_t imageHeaderSize = 30;
        if (dataSize < imageHeaderSize)
            return false;
        int width, height;
#if (WEBP_DECODER_ABI_VERSION >= 0x0163)
        WebPBitstreamFeatures features;
        if (WebPGetFeatures(dataBytes, dataSize, &features) != VP8_STATUS_OK)
            return setFailed();
        width = features.width;
        height = features.height;
        m_hasAlpha = features.has_alpha;
#else
        // Earlier versions won't be able to display WebP files with alpha.
        if (!WebPGetInfo(dataBytes, dataSize, &width, &height))
            return setFailed();
        m_hasAlpha = false;
#endif
        if (!setSize(width, height))
            return setFailed();
    }

    ASSERT(ImageDecoder::isSizeAvailable());
    if (onlySize)
        return true;

    ASSERT(!m_frameBufferCache.isEmpty());
    ImageFrame& buffer = m_frameBufferCache[0];
    ASSERT(buffer.status() != ImageFrame::FrameComplete);

    if (buffer.status() == ImageFrame::FrameEmpty) {
        if (!buffer.setSize(size().width(), size().height()))
            return setFailed();
        buffer.setStatus(ImageFrame::FramePartial);
        buffer.setHasAlpha(m_hasAlpha);
        buffer.setOriginalFrameRect(IntRect(IntPoint(), size()));
    }

    if (!m_decoder) {
        int rowStride = size().width() * sizeof(ImageFrame::PixelData);
        uint8_t* output = reinterpret_cast<uint8_t*>(buffer.getAddr(0, 0));
        int outputSize = size().height() * rowStride;
        m_decoder = WebPINewRGB(outputMode(m_hasAlpha), output, outputSize, rowStride);
        if (!m_decoder)
            return setFailed();
    }

    switch (WebPIUpdate(m_decoder, dataBytes, dataSize)) {
    case VP8_STATUS_OK:
        buffer.setStatus(ImageFrame::FrameComplete);
        WebPIDelete(m_decoder);
        m_decoder = 0;
        return true;
    case VP8_STATUS_SUSPENDED:
        return false;
    default:
        WebPIDelete(m_decoder);
        m_decoder = 0;
        return setFailed();
    }
}
Example #27
    void RPIImageDecoder::decode(bool onlySize)
    {
        unsigned int width, height;

        if (failed())
            return;

        // make sure we have all the data before doing anything
        if (!isAllDataReceived())
            return;

        if (onlySize)
        {
            if (readSize(width, height))
            {
                setSize(width, height);
            }
            return;
        }
        else
        {
            readSize(width, height);

            clock_t start = clock();

            if (m_frameBufferCache.isEmpty())
            {
                log("decode : frameBufferCache is empty");
                setFailed();
                return;
            }

            ImageFrame& buffer = m_frameBufferCache[0];

            if (buffer.status() == ImageFrame::FrameEmpty)
            {
                if (!buffer.setSize(width, height))
                {
                    log("decode : could not define buffer size");
                    setFailed();
                    return;
                }

                // The buffer is transparent outside the decoded area while the image is
                // loading. The completed image will be marked fully opaque in jpegComplete().
                buffer.setHasAlpha(false);
            }

            // lock the mutex so that we only process once at a time
            pthread_mutex_lock(&decode_mutex);

            // setup decoder request information
            BRCMIMAGE_REQUEST_T* dec_request = getDecoderRequest();
            BRCMIMAGE_T *decoder = getDecoder();

            memset(dec_request, 0, sizeof(BRCMIMAGE_REQUEST_T));
            dec_request->input = (unsigned char*)m_data->data();
            dec_request->input_size = m_data->size();
            dec_request->output = (unsigned char*)buffer.getAddr(0, 0);
            dec_request->output_alloc_size = width * height * 4;
            dec_request->output_handle = 0;
            dec_request->pixel_format = PIXEL_FORMAT_RGBA;
            dec_request->buffer_width = 0;
            dec_request->buffer_height = 0;

            brcmimage_acquire(decoder);
            BRCMIMAGE_STATUS_T status = brcmimage_process(decoder, dec_request);

            if (status == BRCMIMAGE_SUCCESS)
            {
                clock_t copy = clock();

                unsigned char *ptr = (unsigned char *)buffer.getAddr(0, 0);
                for (unsigned int i = 0; i < dec_request->height * dec_request->width; i++)
                {
                    // we swap RGBA -> BGRA
                    unsigned char tmp = *ptr;
                    *ptr = ptr[2];
                    ptr[2] = tmp;
                    ptr += 4;
                }

                brcmimage_release(decoder);

                buffer.setPixelsChanged(true);
                buffer.setStatus(ImageFrame::FrameComplete);
                buffer.setHasAlpha(m_hasAlpha);

                clock_t end = clock();
                unsigned long millis = (end - start) * 1000 / CLOCKS_PER_SEC;
                unsigned long copymillis = (end - copy) * 1000 / CLOCKS_PER_SEC;

                log("decode : image (%d x %d)(Alpha=%d) decoded in %d ms (copy in %d ms), source size = %d bytes", width, height, m_hasAlpha, millis, copymillis, m_data->size());

            }
            else
            {
                log("decode : Decoding failed with status %d", status);
            }

            pthread_mutex_unlock(&decode_mutex);
        }
    }
Example #28
bool JPEGImageDecoder::outputScanlines()
{
    if (m_frameBufferCache.isEmpty())
        return false;

    // Initialize the framebuffer if needed.
    ImageFrame& buffer = m_frameBufferCache[0];
    if (buffer.status() == ImageFrame::FrameEmpty) {
        if (!buffer.setSize(scaledSize().width(), scaledSize().height()))
            return setFailed();
        buffer.setStatus(ImageFrame::FramePartial);
        buffer.setHasAlpha(false);
        buffer.setColorProfile(m_colorProfile);

        // For JPEGs, the frame always fills the entire image.
        buffer.setOriginalFrameRect(IntRect(IntPoint(), size()));
    }

    jpeg_decompress_struct* info = m_reader->info();

#if !ENABLE(IMAGE_DECODER_DOWN_SAMPLING) && defined(TURBO_JPEG_RGB_SWIZZLE)
    if (turboSwizzled(info->out_color_space)) {
         ASSERT(!m_scaled);
         while (info->output_scanline < info->output_height) {
             unsigned char* row = reinterpret_cast<unsigned char*>(buffer.getAddr(0, info->output_scanline));
             if (jpeg_read_scanlines(info, &row, 1) != 1)
                  return false;
         }
         return true;
     }
#endif

    JSAMPARRAY samples = m_reader->samples();

    while (info->output_scanline < info->output_height) {
        // jpeg_read_scanlines will increase the scanline counter, so we
        // save the scanline before calling it.
        int sourceY = info->output_scanline;
        /* Request one scanline.  Returns 0 or 1 scanlines. */
        if (jpeg_read_scanlines(info, samples, 1) != 1)
            return false;

        int destY = scaledY(sourceY);
        if (destY < 0)
            continue;
        int width = m_scaled ? m_scaledColumns.size() : info->output_width;
        for (int x = 0; x < width; ++x) {
            JSAMPLE* jsample = *samples + (m_scaled ? m_scaledColumns[x] : x) * ((info->out_color_space == JCS_RGB) ? 3 : 4);
            if (info->out_color_space == JCS_RGB)
                buffer.setRGBA(x, destY, jsample[0], jsample[1], jsample[2], 0xFF);
            else if (info->out_color_space == JCS_CMYK) {
                // Source is 'Inverted CMYK', output is RGB.
                // See: http://www.easyrgb.com/math.php?MATH=M12#text12
                // Or:  http://www.ilkeratalay.com/colorspacesfaq.php#rgb
                // From CMYK to CMY:
                // X =   X    * (1 -   K   ) +   K  [for X = C, M, or Y]
                // Thus, from Inverted CMYK to CMY is:
                // X = (1-iX) * (1 - (1-iK)) + (1-iK) => 1 - iX*iK
                // From CMY (0..1) to RGB (0..1):
                // R = 1 - C => 1 - (1 - iC*iK) => iC*iK  [G and B similar]
                unsigned k = jsample[3];
                buffer.setRGBA(x, destY, jsample[0] * k / 255, jsample[1] * k / 255, jsample[2] * k / 255, 0xFF);
            } else {
                ASSERT_NOT_REACHED();
                return setFailed();
            }
        }
    }

    return true;
}
Example #29
void AMDetectorTriggerAction::onAcquisitionFailed(){
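	// The acquisition failed: disconnect from the trigger source and the detector before marking this action as failed.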
	if(triggerSource_)
		disconnect(triggerSource_, 0, this, 0);
	disconnect(detector_, 0, this, 0);
	setFailed();
}
Example #30
bool GIFImageDecoder::initFrameBuffer(unsigned frameIndex)
{
    // Initialize the frame rect in our buffer.
    const GIFFrameContext* frameContext = m_reader->frameContext();
    IntRect frameRect(frameContext->xOffset, frameContext->yOffset, frameContext->width, frameContext->height);

    // Make sure the frameRect doesn't extend outside the buffer.
    if (frameRect.maxX() > size().width())
        frameRect.setWidth(size().width() - frameContext->xOffset);
    if (frameRect.maxY() > size().height())
        frameRect.setHeight(size().height() - frameContext->yOffset);

    ImageFrame* const buffer = &m_frameBufferCache[frameIndex];
    int left = upperBoundScaledX(frameRect.x());
    int right = lowerBoundScaledX(frameRect.maxX(), left);
    int top = upperBoundScaledY(frameRect.y());
    int bottom = lowerBoundScaledY(frameRect.maxY(), top);
    buffer->setOriginalFrameRect(IntRect(left, top, right - left, bottom - top));
    
    if (!frameIndex) {
        // This is the first frame, so we're not relying on any previous data.
        if (!buffer->setSize(scaledSize().width(), scaledSize().height()))
            return setFailed();
    } else {
        // The starting state for this frame depends on the previous frame's
        // disposal method.
        //
        // Frames that use the DisposeOverwritePrevious method are effectively
        // no-ops in terms of changing the starting state of a frame compared to
        // the starting state of the previous frame, so skip over them.  (If the
        // first frame specifies this method, it will get treated like
        // DisposeOverwriteBgcolor below and reset to a completely empty image.)
        const ImageFrame* prevBuffer = &m_frameBufferCache[--frameIndex];
        ImageFrame::FrameDisposalMethod prevMethod = prevBuffer->disposalMethod();
        while (frameIndex && (prevMethod == ImageFrame::DisposeOverwritePrevious)) {
            prevBuffer = &m_frameBufferCache[--frameIndex];
            prevMethod = prevBuffer->disposalMethod();
        }
        ASSERT(prevBuffer->status() == ImageFrame::FrameComplete);

        if ((prevMethod == ImageFrame::DisposeNotSpecified) || (prevMethod == ImageFrame::DisposeKeep)) {
            // Preserve the last frame as the starting state for this frame.
            if (!buffer->copyBitmapData(*prevBuffer))
                return setFailed();
        } else {
            // We want to clear the previous frame to transparent, without
            // affecting pixels in the image outside of the frame.
            const IntRect& prevRect = prevBuffer->originalFrameRect();
            const IntSize& bufferSize = scaledSize();
            if (!frameIndex || prevRect.contains(IntRect(IntPoint(), scaledSize()))) {
                // Clearing the first frame, or a frame the size of the whole
                // image, results in a completely empty image.
                if (!buffer->setSize(bufferSize.width(), bufferSize.height()))
                    return setFailed();
        } else {
            // Copy the whole previous buffer, then clear just its frame.
            if (!buffer->copyBitmapData(*prevBuffer))
                return setFailed();
            for (int y = prevRect.y(); y < prevRect.maxY(); ++y) {
                for (int x = prevRect.x(); x < prevRect.maxX(); ++x)
                    buffer->setRGBA(x, y, 0, 0, 0, 0);
            }
            if ((prevRect.width() > 0) && (prevRect.height() > 0))
                buffer->setHasAlpha(true);
        }
        }
    }

    // Update our status to be partially complete.
    buffer->setStatus(ImageFrame::FramePartial);

    // Reset the alpha pixel tracker for this frame.
    m_currentBufferSawAlpha = false;
    return true;
}