//****************************************************************************** void nsGIFDecoder2::BeginGIF() { if (mGIFOpen) return; mGIFOpen = true; PostSize(mGIFStruct.screen_width, mGIFStruct.screen_height); }
//****************************************************************************** void nsGIFDecoder2::BeginGIF() { if (mGIFOpen) return; mGIFOpen = PR_TRUE; PostSize(mGIFStruct.screen_width, mGIFStruct.screen_height); // If we're doing a size decode, we have what we came for if (IsSizeDecode()) return; }
void vtFence3d::AddFencepost(const FPoint3 &p1, int iMatIdx) { // create fencepost block vtMesh *pPostMesh = new vtMesh(osg::PrimitiveSet::TRIANGLE_FAN, VT_Normals | VT_TexCoords, 20); FPoint3 PostSize(m_Params.m_fPostWidth, m_Params.m_fPostHeight, m_Params.m_fPostDepth); pPostMesh->CreateOptimizedBlock(PostSize); // scoot over and upwards to put it above ground FMatrix4 t; t.Identity(); t.Translate(p1); pPostMesh->TransformVertices(t); m_pFenceGeom->AddMesh(pPostMesh, iMatIdx); }
// Parse the two-byte icon header (width, height), publish metadata, and
// set up the surface pipe for the pixel rows that follow.
LexerTransition<nsIconDecoder::State>
nsIconDecoder::ReadHeader(const char* aData)
{
  // The header is exactly two bytes: width then height, each 0-255.
  const uint8_t width  = uint8_t(aData[0]);
  const uint8_t height = uint8_t(aData[1]);

  // The input is 32bpp, so each pixel contributes 4 bytes to a row.
  mBytesPerRow = width * 4;

  // Publish the dimensions to the superclass.
  PostSize(width, height);

  // Icon data always carries an alpha channel.
  PostHasTransparency();

  // A metadata decode needs nothing beyond the size and transparency.
  if (IsMetadataDecode()) {
    return Transition::TerminateSuccess();
  }

  MOZ_ASSERT(!mImageData, "Already have a buffer allocated?");
  Maybe<SurfacePipe> pipe =
    SurfacePipeFactory::CreateSurfacePipe(this, 0, Size(), OutputSize(),
                                          FullFrame(), SurfaceFormat::B8G8R8A8,
                                          SurfacePipeFlags());
  if (!pipe) {
    return Transition::TerminateFailure();
  }

  mPipe = Move(*pipe);

  MOZ_ASSERT(mImageData, "Should have a buffer now");

  // Hand control to the row reader, one full row at a time.
  return Transition::To(State::ROW_OF_PIXELS, mBytesPerRow);
}
//******************************************************************************
// Incremental GIF parser. Consumes aCount bytes from aBuffer and advances the
// mGIFStruct.state machine one block at a time. When a block straddles two
// Write calls, the partial bytes are stashed in mGIFStruct.hold (or written
// directly into a colormap buffer) and completed on the next call.
void nsGIFDecoder2::WriteInternal(const char *aBuffer, uint32_t aCount, DecodeStrategy)
{
  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  // These variables changed names, and renaming would make a much bigger patch :(
  const uint8_t *buf = (const uint8_t *)aBuffer;
  uint32_t len = aCount;

  const uint8_t *q = buf;

  // Add what we have sofar to the block
  // If previous call to me left something in the hold first complete current block
  // Or if we are filling the colormaps, first complete the colormap
  uint8_t* p = (mGIFStruct.state == gif_global_colormap) ? (uint8_t*)mGIFStruct.global_colormap :
               (mGIFStruct.state == gif_image_colormap) ? (uint8_t*)mColormap :
               (mGIFStruct.bytes_in_hold) ? mGIFStruct.hold : nullptr;

  if (len == 0 && buf == nullptr) {
    // We've just gotten the frame we asked for. Time to use the data we
    // stashed away.
    len = mGIFStruct.bytes_in_hold;
    q = buf = p;
  } else if (p) {
    // Add what we have sofar to the block
    uint32_t l = std::min(len, mGIFStruct.bytes_to_consume);
    memcpy(p + mGIFStruct.bytes_in_hold, buf, l);

    if (l < mGIFStruct.bytes_to_consume) {
      // Not enough in 'buf' to complete current block, get more
      mGIFStruct.bytes_in_hold += l;
      mGIFStruct.bytes_to_consume -= l;
      return;
    }
    // Point 'q' to complete block in hold (or in colormap)
    q = p;
  }

  // Invariant:
  //    'q' is start of current to be processed block (hold, colormap or buf)
  //    'bytes_to_consume' is number of bytes to consume from 'buf'
  //    'buf' points to the bytes to be consumed from the input buffer
  //    'len' is number of bytes left in input buffer from position 'buf'.
  //    At entrance of the for loop will 'buf' will be moved 'bytes_to_consume'
  //    to point to next buffer, 'len' is adjusted accordingly.
  //    So that next round in for loop, q gets pointed to the next buffer.
  for (; len >= mGIFStruct.bytes_to_consume; q = buf, mGIFStruct.bytes_in_hold = 0) {
    // Eat the current block from the buffer, q keeps pointed at current block
    buf += mGIFStruct.bytes_to_consume;
    len -= mGIFStruct.bytes_to_consume;

    switch (mGIFStruct.state) {
    case gif_lzw:
      // Decode one LZW sub-block's worth of pixel data.
      if (!DoLzw(q)) {
        mGIFStruct.state = gif_error;
        break;
      }
      GETN(1, gif_sub_block);
      break;

    case gif_lzw_start:
    {
      // Make sure the transparent pixel is transparent in the colormap
      if (mGIFStruct.is_transparent) {
        // Save old value so we can restore it later
        if (mColormap == mGIFStruct.global_colormap)
          mOldColor = mColormap[mGIFStruct.tpixel];
        mColormap[mGIFStruct.tpixel] = 0;
      }

      /* Initialize LZW parser/decoder */
      mGIFStruct.datasize = *q;
      const int clear_code = ClearCode();
      // Reject code sizes that would overflow the LZW tables.
      if (mGIFStruct.datasize > MAX_LZW_BITS ||
          clear_code >= MAX_BITS) {
        mGIFStruct.state = gif_error;
        break;
      }

      mGIFStruct.avail = clear_code + 2;
      mGIFStruct.oldcode = -1;
      mGIFStruct.codesize = mGIFStruct.datasize + 1;
      mGIFStruct.codemask = (1 << mGIFStruct.codesize) - 1;
      mGIFStruct.datum = mGIFStruct.bits = 0;

      /* init the tables */
      for (int i = 0; i < clear_code; i++)
        mGIFStruct.suffix[i] = i;

      mGIFStruct.stackp = mGIFStruct.stack;

      GETN(1, gif_sub_block);
    }
    break;

    /* All GIF files begin with "GIF87a" or "GIF89a" */
    case gif_type:
      if (!strncmp((char*)q, "GIF89a", 6)) {
        mGIFStruct.version = 89;
      } else if (!strncmp((char*)q, "GIF87a", 6)) {
        mGIFStruct.version = 87;
      } else {
        mGIFStruct.state = gif_error;
        break;
      }
      GETN(7, gif_global_header);
      break;

    case gif_global_header:
      /* This is the height and width of the "screen" or
       * frame into which images are rendered.  The
       * individual images can be smaller than the
       * screen size and located with an origin anywhere
       * within the screen.
       */
      mGIFStruct.screen_width = GETINT16(q);
      mGIFStruct.screen_height = GETINT16(q + 2);
      mGIFStruct.global_colormap_depth = (q[4] & 0x07) + 1;

      if (IsSizeDecode()) {
        // A size decode only needs the logical screen dimensions.
        MOZ_ASSERT(!mGIFOpen, "Gif should not be open at this point");
        PostSize(mGIFStruct.screen_width, mGIFStruct.screen_height);
        return;
      }

      // screen_bgcolor is not used
      //mGIFStruct.screen_bgcolor = q[5];
      // q[6] = Pixel Aspect Ratio
      //   Not used
      //   float aspect = (float)((q[6] + 15) / 64.0);

      if (q[4] & 0x80) { /* global map */
        // Get the global colormap
        const uint32_t size = (3 << mGIFStruct.global_colormap_depth);
        if (len < size) {
          // Use 'hold' pattern to get the global colormap
          GETN(size, gif_global_colormap);
          break;
        }
        // Copy everything, go to colormap state to do CMS correction
        memcpy(mGIFStruct.global_colormap, buf, size);
        buf += size;
        len -= size;
        GETN(0, gif_global_colormap);
        break;
      }

      GETN(1, gif_image_start);
      break;

    case gif_global_colormap:
      // Everything is already copied into global_colormap
      // Convert into Cairo colors including CMS transformation
      ConvertColormap(mGIFStruct.global_colormap, 1 << mGIFStruct.global_colormap_depth);
      GETN(1, gif_image_start);
      break;

    case gif_image_start:
      switch (*q) {
        case GIF_TRAILER:
          mGIFStruct.state = gif_done;
          break;

        case GIF_EXTENSION_INTRODUCER:
          GETN(2, gif_extension);
          break;

        case GIF_IMAGE_SEPARATOR:
          GETN(9, gif_image_header);
          break;

        default:
          /* If we get anything other than GIF_IMAGE_SEPARATOR,
           * GIF_EXTENSION_INTRODUCER, or GIF_TRAILER, there is extraneous data
           * between blocks. The GIF87a spec tells us to keep reading
           * until we find an image separator, but GIF89a says such
           * a file is corrupt. We follow GIF89a and bail out. */
          if (mGIFStruct.images_decoded > 0) {
            /* The file is corrupt, but one or more images have
             * been decoded correctly. In this case, we proceed
             * as if the file were correctly terminated and set
             * the state to gif_done, so the GIF will display.
             */
            mGIFStruct.state = gif_done;
          } else {
            /* No images decoded, there is nothing to display. */
            mGIFStruct.state = gif_error;
          }
      }
      break;

    case gif_extension:
      mGIFStruct.bytes_to_consume = q[1];
      if (mGIFStruct.bytes_to_consume) {
        switch (*q) {
        case GIF_GRAPHIC_CONTROL_LABEL:
          // The GIF spec mandates that the GIFControlExtension header block length is 4 bytes,
          // and the parser for this block reads 4 bytes, so we must enforce that the buffer
          // contains at least this many bytes. If the GIF specifies a different length, we
          // allow that, so long as it's larger; the additional data will simply be ignored.
          mGIFStruct.state = gif_control_extension;
          mGIFStruct.bytes_to_consume = std::max(mGIFStruct.bytes_to_consume, 4u);
          break;

        // The GIF spec also specifies the lengths of the following two extensions' headers
        // (as 12 and 11 bytes, respectively). Because we ignore the plain text extension entirely
        // and sanity-check the actual length of the application extension header before reading it,
        // we allow GIFs to deviate from these values in either direction. This is important for
        // real-world compatibility, as GIFs in the wild exist with application extension headers
        // that are both shorter and longer than 11 bytes.
        case GIF_APPLICATION_EXTENSION_LABEL:
          mGIFStruct.state = gif_application_extension;
          break;

        case GIF_PLAIN_TEXT_LABEL:
          mGIFStruct.state = gif_skip_block;
          break;

        case GIF_COMMENT_LABEL:
          mGIFStruct.state = gif_consume_comment;
          break;

        default:
          mGIFStruct.state = gif_skip_block;
        }
      } else {
        GETN(1, gif_image_start);
      }
      break;

    case gif_consume_block:
      if (!*q)
        GETN(1, gif_image_start);
      else
        GETN(*q, gif_skip_block);
      break;

    case gif_skip_block:
      GETN(1, gif_consume_block);
      break;

    case gif_control_extension:
      mGIFStruct.is_transparent = *q & 0x1;
      mGIFStruct.tpixel = q[3];
      mGIFStruct.disposal_method = ((*q) >> 2) & 0x7;
      // Some specs say 3rd bit (value 4), other specs say value 3
      // Let's choose 3 (the more popular)
      if (mGIFStruct.disposal_method == 4)
        mGIFStruct.disposal_method = 3;
      mGIFStruct.delay_time = GETINT16(q + 1) * 10;
      GETN(1, gif_consume_block);
      break;

    case gif_comment_extension:
      if (*q)
        GETN(*q, gif_consume_comment);
      else
        GETN(1, gif_image_start);
      break;

    case gif_consume_comment:
      GETN(1, gif_comment_extension);
      break;

    case gif_application_extension:
      /* Check for netscape application extension */
      if (mGIFStruct.bytes_to_consume == 11 &&
          (!strncmp((char*)q, "NETSCAPE2.0", 11) ||
           !strncmp((char*)q, "ANIMEXTS1.0", 11)))
        GETN(1, gif_netscape_extension_block);
      else
        GETN(1, gif_consume_block);
      break;

    /* Netscape-specific GIF extension: animation looping */
    case gif_netscape_extension_block:
      if (*q)
        // We might need to consume 3 bytes in
        // gif_consume_netscape_extension, so make sure we have at least that.
        GETN(std::max(3, static_cast<int>(*q)), gif_consume_netscape_extension);
      else
        GETN(1, gif_image_start);
      break;

    /* Parse netscape-specific application extensions */
    case gif_consume_netscape_extension:
      switch (q[0] & 7) {
        case 1:
          /* Loop entire animation specified # of times.  Only read the
             loop count during the first iteration. */
          mGIFStruct.loop_count = GETINT16(q + 1);
          GETN(1, gif_netscape_extension_block);
          break;

        case 2:
          /* Wait for specified # of bytes to enter buffer */
          // Don't do this, this extension doesn't exist (isn't used at all)
          // and doesn't do anything, as our streaming/buffering takes care of it all...
          // See: http://semmix.pl/color/exgraf/eeg24.htm
          GETN(1, gif_netscape_extension_block);
          break;

        default:
          // 0,3-7 are yet to be defined netscape extension codes
          mGIFStruct.state = gif_error;
      }
      break;

    case gif_image_header:
    {
      /* Get image offsets, with respect to the screen origin */
      mGIFStruct.x_offset = GETINT16(q);
      mGIFStruct.y_offset = GETINT16(q + 2);

      /* Get image width and height. */
      mGIFStruct.width = GETINT16(q + 4);
      mGIFStruct.height = GETINT16(q + 6);

      if (!mGIFStruct.images_decoded) {
        /* Work around broken GIF files where the logical screen
         * size has weird width or height.  We assume that GIF87a
         * files don't contain animations.
         */
        if ((mGIFStruct.screen_height < mGIFStruct.height) ||
            (mGIFStruct.screen_width < mGIFStruct.width) ||
            (mGIFStruct.version == 87)) {
          mGIFStruct.screen_height = mGIFStruct.height;
          mGIFStruct.screen_width = mGIFStruct.width;
          mGIFStruct.x_offset = 0;
          mGIFStruct.y_offset = 0;
        }

        // Create the image container with the right size.
        BeginGIF();
        if (HasError()) {
          // Setting the size led to an error.
          mGIFStruct.state = gif_error;
          return;
        }

        // If we were doing a size decode, we're done
        if (IsSizeDecode())
          return;
      }

      /* Work around more broken GIF files that have zero image
         width or height */
      if (!mGIFStruct.height || !mGIFStruct.width) {
        mGIFStruct.height = mGIFStruct.screen_height;
        mGIFStruct.width = mGIFStruct.screen_width;
        if (!mGIFStruct.height || !mGIFStruct.width) {
          mGIFStruct.state = gif_error;
          break;
        }
      }

      /* Depth of colors is determined by colormap */
      /* (q[8] & 0x80) indicates local colormap */
      /* bits per pixel is (q[8]&0x07 + 1) when local colormap is set */
      uint32_t depth = mGIFStruct.global_colormap_depth;
      if (q[8] & 0x80)
        depth = (q[8] & 0x07) + 1;
      // Widen the depth until the transparent pixel index fits, capped at 8.
      uint32_t realDepth = depth;
      while (mGIFStruct.tpixel >= (1 << realDepth) && (realDepth < 8)) {
        realDepth++;
      }
      // Mask to limit the color values within the colormap
      mColorMask = 0xFF >> (8 - realDepth);
      BeginImageFrame(realDepth);

      if (NeedsNewFrame()) {
        // We now need a new frame from the decoder framework. We leave all our
        // data in the buffer as if it wasn't consumed, copy to our hold and return
        // to the decoder framework.
        uint32_t size = len + mGIFStruct.bytes_to_consume + mGIFStruct.bytes_in_hold;
        if (size) {
          if (SetHold(q, mGIFStruct.bytes_to_consume + mGIFStruct.bytes_in_hold,
                      buf, len)) {
            // Back into the decoder infrastructure so we can get called again.
            GETN(9, gif_image_header_continue);
            return;
          }
        }
        break;
      } else {
        // FALL THROUGH
      }
    }

    case gif_image_header_continue:
    {
      // While decoders can reuse frames, we unconditionally increment
      // mGIFStruct.images_decoded when we're done with a frame, so we both can
      // and need to zero out the colormap and image data after every new frame.
      memset(mImageData, 0, mImageDataLength);
      if (mColormap) {
        memset(mColormap, 0, mColormapSize);
      }

      if (!mGIFStruct.images_decoded) {
        // Send a onetime invalidation for the first frame if it has a y-axis offset.
        // Otherwise, the area may never be refreshed and the placeholder will remain
        // on the screen. (Bug 37589)
        if (mGIFStruct.y_offset > 0) {
          nsIntRect r(0, 0, mGIFStruct.screen_width, mGIFStruct.y_offset);
          PostInvalidation(r);
        }
      }

      if (q[8] & 0x40) {
        mGIFStruct.interlaced = true;
        mGIFStruct.ipass = 1;
      } else {
        mGIFStruct.interlaced = false;
        mGIFStruct.ipass = 0;
      }

      /* Only apply the Haeberli display hack on the first frame */
      mGIFStruct.progressive_display = (mGIFStruct.images_decoded == 0);

      /* Clear state from last image */
      mGIFStruct.irow = 0;
      mGIFStruct.rows_remaining = mGIFStruct.height;
      mGIFStruct.rowp = mImageData;

      /* Depth of colors is determined by colormap */
      /* (q[8] & 0x80) indicates local colormap */
      /* bits per pixel is (q[8]&0x07 + 1) when local colormap is set */
      uint32_t depth = mGIFStruct.global_colormap_depth;
      if (q[8] & 0x80)
        depth = (q[8] & 0x07) + 1;
      // Widen the depth until the transparent pixel index fits, capped at 8.
      uint32_t realDepth = depth;
      while (mGIFStruct.tpixel >= (1 << realDepth) && (realDepth < 8)) {
        realDepth++;
      }

      if (q[8] & 0x80) /* has a local colormap? */
      {
        mGIFStruct.local_colormap_size = 1 << depth;
        if (!mGIFStruct.images_decoded) {
          // First frame has local colormap, allocate space for it
          // as the image frame doesn't have its own palette
          mColormapSize = sizeof(uint32_t) << realDepth;
          if (!mGIFStruct.local_colormap) {
            mGIFStruct.local_colormap = (uint32_t*)moz_xmalloc(mColormapSize);
          }
          mColormap = mGIFStruct.local_colormap;
        }
        const uint32_t size = 3 << depth;
        if (mColormapSize > size) {
          // Clear the notfilled part of the colormap
          memset(((uint8_t*)mColormap) + size, 0, mColormapSize - size);
        }
        if (len < size) {
          // Use 'hold' pattern to get the image colormap
          GETN(size, gif_image_colormap);
          break;
        }
        // Copy everything, go to colormap state to do CMS correction
        memcpy(mColormap, buf, size);
        buf += size;
        len -= size;
        GETN(0, gif_image_colormap);
        break;
      } else {
        /* Switch back to the global palette */
        if (mGIFStruct.images_decoded) {
          // Copy global colormap into the palette of current frame
          memcpy(mColormap, mGIFStruct.global_colormap, mColormapSize);
        } else {
          mColormap = mGIFStruct.global_colormap;
        }
      }
      GETN(1, gif_lzw_start);
    }
    break;

    case gif_image_colormap:
      // Everything is already copied into local_colormap
      // Convert into Cairo colors including CMS transformation
      ConvertColormap(mColormap, mGIFStruct.local_colormap_size);
      GETN(1, gif_lzw_start);
      break;

    case gif_sub_block:
      mGIFStruct.count = *q;
      if (mGIFStruct.count) {
        /* Still working on the same image: Process next LZW data block */
        /* Make sure there are still rows left. If the GIF data */
        /* is corrupt, we may not get an explicit terminator.    */
        if (!mGIFStruct.rows_remaining) {
#ifdef DONT_TOLERATE_BROKEN_GIFS
          mGIFStruct.state = gif_error;
          break;
#else
          /* This is an illegal GIF, but we remain tolerant. */
          GETN(1, gif_sub_block);
#endif
          if (mGIFStruct.count == GIF_TRAILER) {
            /* Found a terminator anyway, so consider the image done */
            GETN(1, gif_done);
            break;
          }
        }
        GETN(mGIFStruct.count, gif_lzw);
      } else {
        /* See if there are any more images in this sequence. */
        EndImageFrame();
        GETN(1, gif_image_start);
      }
      break;

    case gif_done:
      MOZ_ASSERT(!IsSizeDecode(), "Size decodes shouldn't reach gif_done");
      FinishInternal();
      goto done;

    case gif_error:
      PostDataError();
      return;

    // We shouldn't ever get here.
    default:
      break;
    }
  }

  // if an error state is set but no data remains, code flow reaches here
  if (mGIFStruct.state == gif_error) {
    PostDataError();
    return;
  }

  // Copy the leftover into mGIFStruct.hold
  if (len) {
    // Add what we have sofar to the block
    if (mGIFStruct.state != gif_global_colormap && mGIFStruct.state != gif_image_colormap) {
      if (!SetHold(buf, len)) {
        PostDataError();
        return;
      }
    } else {
      // Colormap states write partial data straight into the target buffer.
      uint8_t* p = (mGIFStruct.state == gif_global_colormap) ? (uint8_t*)mGIFStruct.global_colormap :
                                                               (uint8_t*)mColormap;
      memcpy(p, buf, len);
      mGIFStruct.bytes_in_hold = len;
    }
    mGIFStruct.bytes_to_consume -= len;
  }

// We want to flush before returning if we're on the first frame
done:
  if (!mGIFStruct.images_decoded) {
    FlushImageData();
    mLastFlushedRow = mCurrentRow;
    mLastFlushedPass = mCurrentPass;
  }

  return;
}
// Incremental icon decoder: parses a two-byte (width, height) header and then
// copies raw 32bpp pixel bytes into mImageData, invalidating as it goes.
void
nsIconDecoder::WriteInternal(const char* aBuffer, uint32_t aCount, DecodeStrategy)
{
  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  // We put this here to avoid errors about crossing initialization with case
  // jumps on linux.
  uint32_t bytesToRead = 0;

  // Loop until the input data is gone
  while (aCount > 0) {
    switch (mState) {
      case iconStateStart:

        // Grab the width
        mWidth = (uint8_t)*aBuffer;

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateHaveHeight;
        break;

      case iconStateHaveHeight:

        // Grab the Height
        mHeight = (uint8_t)*aBuffer;

        // Post our size to the superclass
        PostSize(mWidth, mHeight);

        PostHasTransparency();

        if (HasError()) {
          // Setting the size led to an error.
          mState = iconStateFinished;
          return;
        }

        // If We're doing a size decode, we're done
        if (IsSizeDecode()) {
          mState = iconStateFinished;
          break;
        }

        if (!mImageData) {
          PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
          return;
        }

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateReadPixels;
        break;

      case iconStateReadPixels: {

        // How many bytes are we reading?
        bytesToRead = std::min(aCount, mImageDataLength - mPixBytesRead);

        // Copy the bytes
        memcpy(mImageData + mPixBytesRead, aBuffer, bytesToRead);

        // Performance isn't critical here, so our update rectangle is
        // always the full icon
        nsIntRect r(0, 0, mWidth, mHeight);

        // Invalidate
        PostInvalidation(r);

        // Book Keeping
        aBuffer += bytesToRead;
        aCount -= bytesToRead;
        mPixBytesRead += bytesToRead;

        // If we've got all the pixel bytes, we're finished
        if (mPixBytesRead == mImageDataLength) {
          PostFrameStop();
          PostDecodeDone();
          mState = iconStateFinished;
        }
        break;
      }

      case iconStateFinished:

        // Consume all excess data silently
        aCount = 0;

        break;
    }
  }
}
// Feed a chunk of JPEG data to libjpeg and advance the decoder state machine.
// libjpeg reports fatal errors via longjmp back into the setjmp below; the
// longjmp payload is an nsresult distinguishing corrupt data from other
// failures (e.g. OOM).
LexerTransition<nsJPEGDecoder::State>
nsJPEGDecoder::ReadJPEGData(const char* aData, size_t aLength)
{
  mSegment = reinterpret_cast<const JOCTET*>(aData);
  mSegmentLen = aLength;

  // Return here if there is a fatal error within libjpeg.
  nsresult error_code;
  // This cast to nsresult makes sense because setjmp() returns whatever we
  // passed to longjmp(), which was actually an nsresult.
  if ((error_code = static_cast<nsresult>(setjmp(mErr.setjmp_buffer))) != NS_OK) {
    if (error_code == NS_ERROR_FAILURE) {
      // Error due to corrupt data. Make sure that we don't feed any more data
      // to libjpeg-turbo.
      mState = JPEG_SINK_NON_JPEG_TRAILER;
      MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
              ("} (setjmp returned NS_ERROR_FAILURE)"));
    } else {
      // Error for another reason. (Possibly OOM.)
      PostDecoderError(error_code);
      mState = JPEG_ERROR;
      MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
              ("} (setjmp returned an error)"));
    }

    return Transition::TerminateFailure();
  }

  MOZ_LOG(sJPEGLog, LogLevel::Debug,
          ("[this=%p] nsJPEGDecoder::Write -- processing JPEG data\n", this));

  // NOTE: the cases below deliberately fall through (MOZ_FALLTHROUGH) so a
  // single call can carry the decode from header parsing all the way to done.
  switch (mState) {
    case JPEG_HEADER: {
      LOG_SCOPE((mozilla::LogModule*)sJPEGLog,
                "nsJPEGDecoder::Write -- entering JPEG_HEADER"
                " case");

      // Step 3: read file parameters with jpeg_read_header()
      if (jpeg_read_header(&mInfo, TRUE) == JPEG_SUSPENDED) {
        MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
                ("} (JPEG_SUSPENDED)"));
        return Transition::ContinueUnbuffered(State::JPEG_DATA); // I/O suspension
      }

      // If we have a sample size specified for -moz-sample-size, use it.
      if (mSampleSize > 0) {
        mInfo.scale_num = 1;
        mInfo.scale_denom = mSampleSize;
      }

      // Used to set up image size so arrays can be allocated
      jpeg_calc_output_dimensions(&mInfo);

      // Post our size to the superclass
      PostSize(mInfo.output_width, mInfo.output_height,
               ReadOrientationFromEXIF());
      if (HasError()) {
        // Setting the size led to an error.
        mState = JPEG_ERROR;
        return Transition::TerminateFailure();
      }

      // If we're doing a metadata decode, we're done.
      if (IsMetadataDecode()) {
        return Transition::TerminateSuccess();
      }

      // We're doing a full decode.
      if (mCMSMode != eCMSMode_Off &&
          (mInProfile = GetICCProfile(mInfo)) != nullptr) {
        uint32_t profileSpace = qcms_profile_get_color_space(mInProfile);
        bool mismatch = false;

#ifdef DEBUG_tor
        fprintf(stderr, "JPEG profileSpace: 0x%08X\n", profileSpace);
#endif
        // Match the embedded ICC profile's color space against the JPEG's;
        // on mismatch we skip color management rather than fail.
        switch (mInfo.jpeg_color_space) {
          case JCS_GRAYSCALE:
            if (profileSpace == icSigRgbData) {
              mInfo.out_color_space = JCS_RGB;
            } else if (profileSpace != icSigGrayData) {
              mismatch = true;
            }
            break;
          case JCS_RGB:
            if (profileSpace != icSigRgbData) {
              mismatch = true;
            }
            break;
          case JCS_YCbCr:
            if (profileSpace == icSigRgbData) {
              mInfo.out_color_space = JCS_RGB;
            } else {
              // qcms doesn't support ycbcr
              mismatch = true;
            }
            break;
          case JCS_CMYK:
          case JCS_YCCK:
            // qcms doesn't support cmyk
            mismatch = true;
            break;
          default:
            mState = JPEG_ERROR;
            PostDataError();
            MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
                    ("} (unknown colorpsace (1))"));
            return Transition::TerminateFailure();
        }

        if (!mismatch) {
          qcms_data_type type;
          switch (mInfo.out_color_space) {
            case JCS_GRAYSCALE:
              type = QCMS_DATA_GRAY_8;
              break;
            case JCS_RGB:
              type = QCMS_DATA_RGB_8;
              break;
            default:
              mState = JPEG_ERROR;
              PostDataError();
              MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
                      ("} (unknown colorpsace (2))"));
              return Transition::TerminateFailure();
          }
#if 0
          // We don't currently support CMYK profiles. The following
          // code dealt with lcms types. Add something like this
          // back when we gain support for CMYK.

          // Adobe Photoshop writes YCCK/CMYK files with inverted data
          if (mInfo.out_color_space == JCS_CMYK) {
            type |= FLAVOR_SH(mInfo.saw_Adobe_marker ? 1 : 0);
          }
#endif

          if (gfxPlatform::GetCMSOutputProfile()) {
            // Calculate rendering intent.
            int intent = gfxPlatform::GetRenderingIntent();
            if (intent == -1) {
              intent = qcms_profile_get_rendering_intent(mInProfile);
            }

            // Create the color management transform.
            mTransform = qcms_transform_create(mInProfile,
                                               type,
                                               gfxPlatform::GetCMSOutputProfile(),
                                               QCMS_DATA_RGB_8,
                                               (qcms_intent)intent);
          }
        } else {
#ifdef DEBUG_tor
          fprintf(stderr, "ICM profile colorspace mismatch\n");
#endif
        }
      }

      if (!mTransform) {
        switch (mInfo.jpeg_color_space) {
          case JCS_GRAYSCALE:
          case JCS_RGB:
          case JCS_YCbCr:
            // if we're not color managing we can decode directly to
            // MOZ_JCS_EXT_NATIVE_ENDIAN_XRGB
            if (mCMSMode != eCMSMode_All) {
              mInfo.out_color_space = MOZ_JCS_EXT_NATIVE_ENDIAN_XRGB;
              mInfo.out_color_components = 4;
            } else {
              mInfo.out_color_space = JCS_RGB;
            }
            break;
          case JCS_CMYK:
          case JCS_YCCK:
            // libjpeg can convert from YCCK to CMYK, but not to RGB
            mInfo.out_color_space = JCS_CMYK;
            break;
          default:
            mState = JPEG_ERROR;
            PostDataError();
            MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
                    ("} (unknown colorpsace (3))"));
            return Transition::TerminateFailure();
        }
      }

      // Don't allocate a giant and superfluous memory buffer
      // when not doing a progressive decode.
      mInfo.buffered_image = mDecodeStyle == PROGRESSIVE &&
                             jpeg_has_multiple_scans(&mInfo);

      MOZ_ASSERT(!mImageData, "Already have a buffer allocated?");
      nsIntSize targetSize = mDownscaler ? mDownscaler->TargetSize()
                                         : GetSize();
      nsresult rv = AllocateFrame(0, targetSize,
                                  nsIntRect(nsIntPoint(), targetSize),
                                  gfx::SurfaceFormat::B8G8R8A8);
      if (NS_FAILED(rv)) {
        mState = JPEG_ERROR;
        MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
                ("} (could not initialize image frame)"));
        return Transition::TerminateFailure();
      }

      MOZ_ASSERT(mImageData, "Should have a buffer now");

      if (mDownscaler) {
        nsresult rv = mDownscaler->BeginFrame(GetSize(), Nothing(),
                                              mImageData,
                                              /* aHasAlpha = */ false);
        if (NS_FAILED(rv)) {
          mState = JPEG_ERROR;
          return Transition::TerminateFailure();
        }
      }

      MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
              (" JPEGDecoderAccounting: nsJPEGDecoder::"
               "Write -- created image frame with %ux%u pixels",
               mInfo.output_width, mInfo.output_height));

      mState = JPEG_START_DECOMPRESS;
      MOZ_FALLTHROUGH; // to start decompressing.
    }

    case JPEG_START_DECOMPRESS: {
      LOG_SCOPE((mozilla::LogModule*)sJPEGLog,
                "nsJPEGDecoder::Write -- entering"
                " JPEG_START_DECOMPRESS case");
      // Step 4: set parameters for decompression

      // FIXME -- Should reset dct_method and dither mode
      // for final pass of progressive JPEG
      mInfo.dct_method = JDCT_ISLOW;
      mInfo.dither_mode = JDITHER_FS;
      mInfo.do_fancy_upsampling = TRUE;
      mInfo.enable_2pass_quant = FALSE;
      mInfo.do_block_smoothing = TRUE;

      // Step 5: Start decompressor
      if (jpeg_start_decompress(&mInfo) == FALSE) {
        MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
                ("} (I/O suspension after jpeg_start_decompress())"));
        return Transition::ContinueUnbuffered(State::JPEG_DATA); // I/O suspension
      }

      // If this is a progressive JPEG ...
      mState = mInfo.buffered_image ? JPEG_DECOMPRESS_PROGRESSIVE
                                    : JPEG_DECOMPRESS_SEQUENTIAL;
      MOZ_FALLTHROUGH; // to decompress sequential JPEG.
    }

    case JPEG_DECOMPRESS_SEQUENTIAL: {
      if (mState == JPEG_DECOMPRESS_SEQUENTIAL) {
        LOG_SCOPE((mozilla::LogModule*)sJPEGLog,
                  "nsJPEGDecoder::Write -- "
                  "JPEG_DECOMPRESS_SEQUENTIAL case");

        bool suspend;
        OutputScanlines(&suspend);

        if (suspend) {
          MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
                  ("} (I/O suspension after OutputScanlines() - SEQUENTIAL)"));
          return Transition::ContinueUnbuffered(State::JPEG_DATA); // I/O suspension
        }

        // If we've completed image output ...
        NS_ASSERTION(mInfo.output_scanline == mInfo.output_height,
                     "We didn't process all of the data!");
        mState = JPEG_DONE;
      }
      MOZ_FALLTHROUGH; // to decompress progressive JPEG.
    }

    case JPEG_DECOMPRESS_PROGRESSIVE: {
      if (mState == JPEG_DECOMPRESS_PROGRESSIVE) {
        LOG_SCOPE((mozilla::LogModule*)sJPEGLog,
                  "nsJPEGDecoder::Write -- JPEG_DECOMPRESS_PROGRESSIVE case");

        // Consume whatever input is available before producing output.
        int status;
        do {
          status = jpeg_consume_input(&mInfo);
        } while ((status != JPEG_SUSPENDED) &&
                 (status != JPEG_REACHED_EOI));

        for (;;) {
          if (mInfo.output_scanline == 0) {
            int scan = mInfo.input_scan_number;

            // if we haven't displayed anything yet (output_scan_number==0)
            // and we have enough data for a complete scan, force output
            // of the last full scan
            if ((mInfo.output_scan_number == 0) &&
                (scan > 1) &&
                (status != JPEG_REACHED_EOI))
              scan--;

            if (!jpeg_start_output(&mInfo, scan)) {
              MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
                      ("} (I/O suspension after jpeg_start_output() -"
                       " PROGRESSIVE)"));
              return Transition::ContinueUnbuffered(State::JPEG_DATA); // I/O suspension
            }
          }

          // 0xffffff is a sentinel meaning "suspended with no lines read";
          // reset it before attempting output again.
          if (mInfo.output_scanline == 0xffffff) {
            mInfo.output_scanline = 0;
          }

          bool suspend;
          OutputScanlines(&suspend);

          if (suspend) {
            if (mInfo.output_scanline == 0) {
              // didn't manage to read any lines - flag so we don't call
              // jpeg_start_output() multiple times for the same scan
              mInfo.output_scanline = 0xffffff;
            }
            MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
                    ("} (I/O suspension after OutputScanlines() - PROGRESSIVE)"));
            return Transition::ContinueUnbuffered(State::JPEG_DATA); // I/O suspension
          }

          if (mInfo.output_scanline == mInfo.output_height) {
            if (!jpeg_finish_output(&mInfo)) {
              MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
                      ("} (I/O suspension after jpeg_finish_output() -"
                       " PROGRESSIVE)"));
              return Transition::ContinueUnbuffered(State::JPEG_DATA); // I/O suspension
            }

            if (jpeg_input_complete(&mInfo) &&
                (mInfo.input_scan_number == mInfo.output_scan_number))
              break;

            mInfo.output_scanline = 0;
            if (mDownscaler) {
              mDownscaler->ResetForNextProgressivePass();
            }
          }
        }

        mState = JPEG_DONE;
      }
      MOZ_FALLTHROUGH; // to finish decompressing.
    }

    case JPEG_DONE: {
      LOG_SCOPE((mozilla::LogModule*)sJPEGLog,
                "nsJPEGDecoder::ProcessData -- entering"
                " JPEG_DONE case");

      // Step 7: Finish decompression

      if (jpeg_finish_decompress(&mInfo) == FALSE) {
        MOZ_LOG(sJPEGDecoderAccountingLog, LogLevel::Debug,
                ("} (I/O suspension after jpeg_finish_decompress() - DONE)"));
        return Transition::ContinueUnbuffered(State::JPEG_DATA); // I/O suspension
      }

      // Make sure we don't feed any more data to libjpeg-turbo.
      mState = JPEG_SINK_NON_JPEG_TRAILER;

      // We're done.
      return Transition::TerminateSuccess();
    }

    case JPEG_SINK_NON_JPEG_TRAILER:
      MOZ_LOG(sJPEGLog, LogLevel::Debug,
              ("[this=%p] nsJPEGDecoder::ProcessData -- entering"
               " JPEG_SINK_NON_JPEG_TRAILER case\n", this));

      MOZ_ASSERT_UNREACHABLE("Should stop getting data after entering state "
                             "JPEG_SINK_NON_JPEG_TRAILER");

      return Transition::TerminateSuccess();

    case JPEG_ERROR:
      MOZ_ASSERT_UNREACHABLE("Should stop getting data after entering state "
                             "JPEG_ERROR");

      return Transition::TerminateFailure();
  }

  MOZ_ASSERT_UNREACHABLE("Escaped the JPEG decoder state machine");
  return Transition::TerminateFailure();
}
void nsIconDecoder::WriteInternal(const char *aBuffer, PRUint32 aCount) { NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!"); // We put this here to avoid errors about crossing initialization with case // jumps on linux. PRUint32 bytesToRead = 0; nsresult rv; // Performance isn't critical here, so our update rectangle is // always the full icon nsIntRect r(0, 0, mWidth, mHeight); // Loop until the input data is gone while (aCount > 0) { switch (mState) { case iconStateStart: // Grab the width mWidth = (PRUint8)*aBuffer; // Book Keeping aBuffer++; aCount--; mState = iconStateHaveHeight; break; case iconStateHaveHeight: // Grab the Height mHeight = (PRUint8)*aBuffer; // Post our size to the superclass PostSize(mWidth, mHeight); if (HasError()) { // Setting the size led to an error. mState = iconStateFinished; return; } // If We're doing a size decode, we're done if (IsSizeDecode()) { mState = iconStateFinished; break; } // Add the frame and signal rv = mImage.EnsureFrame(0, 0, 0, mWidth, mHeight, gfxASurface::ImageFormatARGB32, &mImageData, &mPixBytesTotal); if (NS_FAILED(rv)) { PostDecoderError(rv); return; } // Tell the superclass we're starting a frame PostFrameStart(); // Book Keeping aBuffer++; aCount--; mState = iconStateReadPixels; break; case iconStateReadPixels: // How many bytes are we reading? bytesToRead = NS_MIN(aCount, mPixBytesTotal - mPixBytesRead); // Copy the bytes memcpy(mImageData + mPixBytesRead, aBuffer, bytesToRead); // Invalidate PostInvalidation(r); // Book Keeping aBuffer += bytesToRead; aCount -= bytesToRead; mPixBytesRead += bytesToRead; // If we've got all the pixel bytes, we're finished if (mPixBytesRead == mPixBytesTotal) { PostFrameStop(); PostDecodeDone(); mState = iconStateFinished; } break; case iconStateFinished: // Consume all excess data silently aCount = 0; break; } } }
// Incremental icon decoder: parses a two-byte (width, height) header and then
// copies raw ARGB32 pixel bytes into the frame, invalidating as it goes.
// Returns NS_OK on progress, an error nsresult on failure.
//
// FIX: the invalidation rect used to be built once, before the loop, from
// mWidth/mHeight — i.e. from whatever values those members held BEFORE the
// header bytes were parsed. When the whole icon arrived in a single call,
// PostInvalidation() was therefore given a rect with stale (pre-parse)
// dimensions. The rect is now constructed inside iconStateReadPixels, after
// the header has been decoded.
nsresult
nsIconDecoder::WriteInternal(const char *aBuffer, PRUint32 aCount)
{
  nsresult rv;

  // We put this here to avoid errors about crossing initialization with case
  // jumps on linux.
  PRUint32 bytesToRead = 0;

  // Loop until the input data is gone
  while (aCount > 0) {
    switch (mState) {
      case iconStateStart:

        // Grab the width
        mWidth = (PRUint8)*aBuffer;

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateHaveHeight;
        break;

      case iconStateHaveHeight:

        // Grab the Height
        mHeight = (PRUint8)*aBuffer;

        // Post our size to the superclass
        PostSize(mWidth, mHeight);

        // If We're doing a size decode, we're done
        if (IsSizeDecode()) {
          mState = iconStateFinished;
          break;
        }

        // Add the frame and signal
        rv = mImage->AppendFrame(0, 0, mWidth, mHeight,
                                 gfxASurface::ImageFormatARGB32,
                                 &mImageData, &mPixBytesTotal);
        if (NS_FAILED(rv)) {
          mState = iconStateError;
          return rv;
        }

        // Tell the superclass we're starting a frame
        PostFrameStart();

        // Book Keeping
        aBuffer++;
        aCount--;
        mState = iconStateReadPixels;
        break;

      case iconStateReadPixels: {

        // How many bytes are we reading?
        bytesToRead = PR_MIN(aCount, mPixBytesTotal - mPixBytesRead);

        // Copy the bytes
        memcpy(mImageData + mPixBytesRead, aBuffer, bytesToRead);

        // Performance isn't critical here, so our update rectangle is
        // always the full icon. Built here — after the header was parsed —
        // so mWidth/mHeight hold the real dimensions.
        nsIntRect r(0, 0, mWidth, mHeight);

        // Invalidate
        PostInvalidation(r);

        // Book Keeping
        aBuffer += bytesToRead;
        aCount -= bytesToRead;
        mPixBytesRead += bytesToRead;

        // If we've got all the pixel bytes, we're finished
        if (mPixBytesRead == mPixBytesTotal) {
          NotifyDone(/* aSuccess = */ PR_TRUE);
          mState = iconStateFinished;
        }
        break;
      }

      case iconStateFinished:

        // Consume all excess data silently
        aCount = 0;

        break;

      case iconStateError:
        return NS_IMAGELIB_ERROR_FAILURE;
        break;
    }
  }

  return NS_OK;
}
// Streams PNG data into the decoder. For size-only decodes the first
// BYTES_NEEDED_FOR_DIMENSIONS bytes are buffered locally and the IHDR
// width/height are extracted without involving libpng at all; for full
// decodes every chunk is handed straight to png_process_data, with libpng
// fatal errors routed back here through setjmp/longjmp.
void nsPNGDecoder::WriteInternal(const char *aBuffer, PRUint32 aCount)
{
  // We use gotos, so we need to declare variables here
  PRUint32 width = 0;
  PRUint32 height = 0;

  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  // If we only want width/height, we don't need to go through libpng
  if (IsSizeDecode()) {

    // Are we done?
    if (mHeaderBytesRead == BYTES_NEEDED_FOR_DIMENSIONS)
      return;

    // Read data into our header buffer
    PRUint32 bytesToRead = NS_MIN(aCount, BYTES_NEEDED_FOR_DIMENSIONS -
                                  mHeaderBytesRead);
    memcpy(mHeaderBuf + mHeaderBytesRead, aBuffer, bytesToRead);
    mHeaderBytesRead += bytesToRead;

    // If we're done now, verify the data and set up the container
    if (mHeaderBytesRead == BYTES_NEEDED_FOR_DIMENSIONS) {

      // Check that the signature bytes are right
      if (memcmp(mHeaderBuf, nsPNGDecoder::pngSignatureBytes,
                 sizeof(pngSignatureBytes))) {
        PostDataError();
        return;
      }

      // Grab the width and height, accounting for endianness (thanks libpng!)
      width = png_get_uint_32(mHeaderBuf + WIDTH_OFFSET);
      height = png_get_uint_32(mHeaderBuf + HEIGHT_OFFSET);

      // Too big?
      if ((width > MOZ_PNG_MAX_DIMENSION) || (height > MOZ_PNG_MAX_DIMENSION)) {
        PostDataError();
        return;
      }

      // Post our size to the superclass
      PostSize(width, height);
    }
  }

  // Otherwise, we're doing a standard decode
  else {

    // libpng uses setjmp/longjmp for error handling - set the buffer
    if (setjmp(png_jmpbuf(mPNG))) {

      // We might not really know what caused the error, but it makes more
      // sense to blame the data.
      if (!HasError())
        PostDataError();

      png_destroy_read_struct(&mPNG, &mInfo, NULL);
      return;
    }

    // Pass the data off to libpng
    png_process_data(mPNG, mInfo, (unsigned char *)aBuffer, aCount);
  }
}
// Streaming JPEG decode entry point. mState tracks progress through
// libjpeg's suspending-data-source protocol; each case in the switch below
// deliberately falls through to the next so a single call can advance
// through several stages when enough data is available. Fatal libjpeg
// errors longjmp back to the setjmp block below carrying an nsresult.
void nsJPEGDecoder::WriteInternal(const char *aBuffer, uint32_t aCount)
{
  mSegment = (const JOCTET *)aBuffer;
  mSegmentLen = aCount;

  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  /* Return here if there is a fatal error within libjpeg. */
  nsresult error_code;
  // This cast to nsresult makes sense because setjmp() returns whatever we
  // passed to longjmp(), which was actually an nsresult.
  if ((error_code = (nsresult)setjmp(mErr.setjmp_buffer)) != NS_OK) {
    if (error_code == NS_ERROR_FAILURE) {
      PostDataError();
      /* Error due to corrupt stream - return NS_OK and consume silently
         so that libpr0n doesn't throw away a partial image load */
      mState = JPEG_SINK_NON_JPEG_TRAILER;
      PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
             ("} (setjmp returned NS_ERROR_FAILURE)"));
      return;
    } else {
      /* Error due to reasons external to the stream (probably out of
         memory) - let libpr0n attempt to clean up, even though
         mozilla is seconds away from falling flat on its face. */
      PostDecoderError(error_code);
      mState = JPEG_ERROR;
      PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
             ("} (setjmp returned an error)"));
      return;
    }
  }

  PR_LOG(gJPEGlog, PR_LOG_DEBUG,
         ("[this=%p] nsJPEGDecoder::Write -- processing JPEG data\n", this));

  switch (mState) {
  case JPEG_HEADER:
  {
    LOG_SCOPE(gJPEGlog, "nsJPEGDecoder::Write -- entering JPEG_HEADER case");

    /* Step 3: read file parameters with jpeg_read_header() */
    if (jpeg_read_header(&mInfo, TRUE) == JPEG_SUSPENDED) {
      PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
             ("} (JPEG_SUSPENDED)"));
      return; /* I/O suspension */
    }

    // Post our size to the superclass
    PostSize(mInfo.image_width, mInfo.image_height);
    if (HasError()) {
      // Setting the size led to an error.
      mState = JPEG_ERROR;
      return;
    }

    /* If we're doing a size decode, we're done. */
    if (IsSizeDecode())
      return;

    /* We're doing a full decode. */
    // Pick an output colorspace and, when color management is enabled and an
    // embedded ICC profile exists, build a qcms transform for it.
    if (mCMSMode != eCMSMode_Off &&
        (mInProfile = GetICCProfile(mInfo)) != nullptr) {
      uint32_t profileSpace = qcms_profile_get_color_space(mInProfile);
      bool mismatch = false;

#ifdef DEBUG_tor
      fprintf(stderr, "JPEG profileSpace: 0x%08X\n", profileSpace);
#endif
      switch (mInfo.jpeg_color_space) {
      case JCS_GRAYSCALE:
        if (profileSpace == icSigRgbData)
          mInfo.out_color_space = JCS_RGB;
        else if (profileSpace != icSigGrayData)
          mismatch = true;
        break;
      case JCS_RGB:
        if (profileSpace != icSigRgbData)
          mismatch = true;
        break;
      case JCS_YCbCr:
        if (profileSpace == icSigRgbData)
          mInfo.out_color_space = JCS_RGB;
        else
          // qcms doesn't support ycbcr
          mismatch = true;
        break;
      case JCS_CMYK:
      case JCS_YCCK:
        // qcms doesn't support cmyk
        mismatch = true;
        break;
      default:
        mState = JPEG_ERROR;
        PostDataError();
        PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
               ("} (unknown colorpsace (1))"));
        return;
      }

      if (!mismatch) {
        qcms_data_type type;
        switch (mInfo.out_color_space) {
        case JCS_GRAYSCALE:
          type = QCMS_DATA_GRAY_8;
          break;
        case JCS_RGB:
          type = QCMS_DATA_RGB_8;
          break;
        default:
          mState = JPEG_ERROR;
          PostDataError();
          PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
                 ("} (unknown colorpsace (2))"));
          return;
        }
#if 0
        /* We don't currently support CMYK profiles. The following
         * code dealt with lcms types. Add something like this
         * back when we gain support for CMYK. */
        /* Adobe Photoshop writes YCCK/CMYK files with inverted data */
        if (mInfo.out_color_space == JCS_CMYK)
          type |= FLAVOR_SH(mInfo.saw_Adobe_marker ? 1 : 0);
#endif

        if (gfxPlatform::GetCMSOutputProfile()) {

          /* Calculate rendering intent. */
          int intent = gfxPlatform::GetRenderingIntent();
          if (intent == -1)
            intent = qcms_profile_get_rendering_intent(mInProfile);

          /* Create the color management transform. */
          mTransform = qcms_transform_create(mInProfile,
                                             type,
                                             gfxPlatform::GetCMSOutputProfile(),
                                             QCMS_DATA_RGB_8,
                                             (qcms_intent)intent);
        }
      } else {
#ifdef DEBUG_tor
        fprintf(stderr, "ICM profile colorspace mismatch\n");
#endif
      }
    }

    // No color transform: fall back to libjpeg's own colorspace conversion.
    if (!mTransform) {
      switch (mInfo.jpeg_color_space) {
      case JCS_GRAYSCALE:
      case JCS_RGB:
      case JCS_YCbCr:
        mInfo.out_color_space = JCS_RGB;
        break;
      case JCS_CMYK:
      case JCS_YCCK:
        /* libjpeg can convert from YCCK to CMYK, but not to RGB */
        mInfo.out_color_space = JCS_CMYK;
        break;
      default:
        mState = JPEG_ERROR;
        PostDataError();
        PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
               ("} (unknown colorpsace (3))"));
        return;
        break;
      }
    }

    /*
     * Don't allocate a giant and superfluous memory buffer
     * when the image is a sequential JPEG.
     */
    mInfo.buffered_image = jpeg_has_multiple_scans(&mInfo);

    /* Used to set up image size so arrays can be allocated */
    jpeg_calc_output_dimensions(&mInfo);

    uint32_t imagelength;
    if (NS_FAILED(mImage.EnsureFrame(0, 0, 0, mInfo.image_width,
                                     mInfo.image_height,
                                     gfxASurface::ImageFormatRGB24,
                                     &mImageData, &imagelength))) {
      mState = JPEG_ERROR;
      PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
      PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
             ("} (could not initialize image frame)"));
      return;
    }

    PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
           (" JPEGDecoderAccounting: nsJPEGDecoder::Write -- created image frame with %ux%u pixels",
            mInfo.image_width, mInfo.image_height));

    // Tell the superclass we're starting a frame
    PostFrameStart();

    mState = JPEG_START_DECOMPRESS;
  }
  // Deliberate fallthrough: keep decoding with whatever data remains.

  case JPEG_START_DECOMPRESS:
  {
    LOG_SCOPE(gJPEGlog, "nsJPEGDecoder::Write -- entering JPEG_START_DECOMPRESS case");
    /* Step 4: set parameters for decompression */

    /* FIXME -- Should reset dct_method and dither mode
     * for final pass of progressive JPEG */
    mInfo.dct_method = JDCT_ISLOW;
    mInfo.dither_mode = JDITHER_FS;
    mInfo.do_fancy_upsampling = TRUE;
    mInfo.enable_2pass_quant = FALSE;
    mInfo.do_block_smoothing = TRUE;

    /* Step 5: Start decompressor */
    if (jpeg_start_decompress(&mInfo) == FALSE) {
      PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
             ("} (I/O suspension after jpeg_start_decompress())"));
      return; /* I/O suspension */
    }

    /* Force to use our YCbCr to Packed RGB converter when possible */
    if (!mTransform && (mCMSMode != eCMSMode_All) &&
        mInfo.jpeg_color_space == JCS_YCbCr &&
        mInfo.out_color_space == JCS_RGB) {
      /* Special case for the most common case: transform from YCbCr direct into packed ARGB */
      mInfo.out_color_components = 4; /* Packed ARGB pixels are always 4 bytes...*/
      mInfo.cconvert->color_convert = ycc_rgb_convert_argb;
    }

    /* If this is a progressive JPEG ... */
    mState = mInfo.buffered_image ? JPEG_DECOMPRESS_PROGRESSIVE : JPEG_DECOMPRESS_SEQUENTIAL;
  }
  // Deliberate fallthrough into whichever decompress state was selected;
  // each case guards on mState so only the matching one runs.

  case JPEG_DECOMPRESS_SEQUENTIAL:
  {
    if (mState == JPEG_DECOMPRESS_SEQUENTIAL)
    {
      LOG_SCOPE(gJPEGlog, "nsJPEGDecoder::Write -- JPEG_DECOMPRESS_SEQUENTIAL case");

      bool suspend;
      OutputScanlines(&suspend);

      if (suspend) {
        PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
               ("} (I/O suspension after OutputScanlines() - SEQUENTIAL)"));
        return; /* I/O suspension */
      }

      /* If we've completed image output ... */
      NS_ASSERTION(mInfo.output_scanline == mInfo.output_height,
                   "We didn't process all of the data!");
      mState = JPEG_DONE;
    }
  }

  case JPEG_DECOMPRESS_PROGRESSIVE:
  {
    if (mState == JPEG_DECOMPRESS_PROGRESSIVE)
    {
      LOG_SCOPE(gJPEGlog, "nsJPEGDecoder::Write -- JPEG_DECOMPRESS_PROGRESSIVE case");

      // Consume all buffered input first, then emit the best available scan.
      int status;
      do {
        status = jpeg_consume_input(&mInfo);
      } while ((status != JPEG_SUSPENDED) &&
               (status != JPEG_REACHED_EOI));

      for (;;) {
        if (mInfo.output_scanline == 0) {
          int scan = mInfo.input_scan_number;

          /* if we haven't displayed anything yet (output_scan_number==0)
             and we have enough data for a complete scan, force output
             of the last full scan */
          if ((mInfo.output_scan_number == 0) &&
              (scan > 1) &&
              (status != JPEG_REACHED_EOI))
            scan--;

          if (!jpeg_start_output(&mInfo, scan)) {
            PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
                   ("} (I/O suspension after jpeg_start_output() - PROGRESSIVE)"));
            return; /* I/O suspension */
          }
        }

        // 0xffffff is a sentinel (set below) meaning "scan already started".
        if (mInfo.output_scanline == 0xffffff)
          mInfo.output_scanline = 0;

        bool suspend;
        OutputScanlines(&suspend);

        if (suspend) {
          if (mInfo.output_scanline == 0) {
            /* didn't manage to read any lines - flag so we don't call
               jpeg_start_output() multiple times for the same scan */
            mInfo.output_scanline = 0xffffff;
          }
          PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
                 ("} (I/O suspension after OutputScanlines() - PROGRESSIVE)"));
          return; /* I/O suspension */
        }

        if (mInfo.output_scanline == mInfo.output_height) {
          if (!jpeg_finish_output(&mInfo)) {
            PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
                   ("} (I/O suspension after jpeg_finish_output() - PROGRESSIVE)"));
            return; /* I/O suspension */
          }

          if (jpeg_input_complete(&mInfo) &&
              (mInfo.input_scan_number == mInfo.output_scan_number))
            break;

          mInfo.output_scanline = 0;
        }
      }

      mState = JPEG_DONE;
    }
  }

  case JPEG_DONE:
  {
    LOG_SCOPE(gJPEGlog, "nsJPEGDecoder::ProcessData -- entering JPEG_DONE case");

    /* Step 7: Finish decompression */

    if (jpeg_finish_decompress(&mInfo) == FALSE) {
      PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
             ("} (I/O suspension after jpeg_finish_decompress() - DONE)"));
      return; /* I/O suspension */
    }

    mState = JPEG_SINK_NON_JPEG_TRAILER;

    /* we're done dude */
    break;
  }

  case JPEG_SINK_NON_JPEG_TRAILER:
    PR_LOG(gJPEGlog, PR_LOG_DEBUG,
           ("[this=%p] nsJPEGDecoder::ProcessData -- entering JPEG_SINK_NON_JPEG_TRAILER case\n", this));

    break;

  case JPEG_ERROR:
    NS_ABORT_IF_FALSE(0, "Should always return immediately after error and not re-enter decoder");
  }

  PR_LOG(gJPEGDecoderAccountingLog, PR_LOG_DEBUG,
         ("} (end of function)"));
  return;
}
/**
 * Build all the 3D meshes for this fence: the posts, any post extensions,
 * and the connection geometry (wire / flat / thick / profile) between them.
 *
 * Fence points are projected from earth coordinates onto the terrain
 * surface, posts are distributed along each span at roughly
 * m_fPostSpacing intervals (with a small random jitter on interior posts),
 * and the highest ground height is cached in m_fMaxGroundY.
 *
 * \param pHeightField  Terrain surface used both to convert earth
 *        coordinates to world space and to drape each post on the ground.
 */
void vtFence3d::AddFenceMeshes(vtHeightField3d *pHeightField)
{
	// Trigger the creation of any materials we may need
	GetMatIndex("");

	uint i, j;
	uint numfencepts = m_pFencePts.GetSize();
	FLine3 p3;
	FPoint3 diff, fp;
	FPoint3 PostSize(m_Params.m_fPostWidth, m_Params.m_fPostHeight,
		m_Params.m_fPostDepth);

	// All culture (roads and buildings) can be draped on
	int iIncludeCulture = CE_ALL;

	// first, project the posts from earth to world
	m_Posts3d.SetSize(numfencepts);
	for (i = 0; i < numfencepts; i++)
		pHeightField->ConvertEarthToSurfacePoint(m_pFencePts[i], m_Posts3d[i],
			iIncludeCulture);

	// Find highest point
	m_fMaxGroundY = -1E8;
	for (i = 0; i < numfencepts; i++)
		if (m_Posts3d[i].y > m_fMaxGroundY)
			m_fMaxGroundY = m_Posts3d[i].y;

	if (m_Params.m_PostType != "none")
	{
		// has posts
		// determine where the fence posts go
		for (i = 0; i < numfencepts; i++)
		{
			if (i == numfencepts-1)
			{
				p3.Append(m_Posts3d[i]);
				continue;
			}
			// get start and end group points for this section
			FPoint3 wpos1 = m_Posts3d[i];
			FPoint3 wpos2 = m_Posts3d[i+1];

			// look at world distance (approximate meters, _not_ earth
			//  coordinates, which might be in e.g. feet or degrees)
			diff = wpos2 - wpos1;
			float distance = sqrt(diff.x*diff.x+diff.z*diff.z);
			uint segments = (uint) (distance / m_Params.m_fPostSpacing);
			if (segments < 1) segments = 1;
			FPoint3 diff_per_segment = diff / (float) segments;

			for (j = 0; j < segments; j++)
			{
				fp = wpos1 + (diff_per_segment * (float)j);

				if (i > 0 && i < numfencepts-1)
				{
					// randomly offset by up to 4% of fence spacing, for "realism"
					fp.x += random_offset(0.04f * m_Params.m_fPostSpacing);
					fp.z += random_offset(0.04f * m_Params.m_fPostSpacing);
				}
				// false: true elevation, true: include culture (structures and roads)
				pHeightField->FindAltitudeAtPoint(fp, fp.y, false, CE_ALL);
				p3.Append(fp);
			}
		}
		// generate the posts
		// Look first for post materials (type 3)
		int iMatIdx = GetMatIndex(m_Params.m_PostType, RGBf(), 3);
		// If that didn't work, look for any material by that name.
		// BUG FIX: this fallback used to declare a new, shadowing local
		// ('int iMatIdx = ...') inside the if, so its result was silently
		// discarded and the posts were built with index -1. Assign to the
		// existing variable instead.
		if (iMatIdx == -1)
			iMatIdx = GetMatIndex(m_Params.m_PostType);
		for (i = 0; i < p3.GetSize(); i++)
			AddFencepost(p3[i], iMatIdx);
	}
	else
	{
		// no post spacing to consider, so just use the input vertices
		p3.SetSize(numfencepts);
		for (i = 0; i < numfencepts; i++)
			p3[i] = m_Posts3d[i];
	}

	if (m_Params.m_PostExtension != "none")
		AddPostExtensions(p3);

	// if not enough points, no connections to create
	if (p3.GetSize() < 2)
		return;

	if (m_Params.m_iConnectType == 0)	// none
	{
		// nothing to do
	}
	else if (m_Params.m_iConnectType == 1)	// wire
	{
		AddWireMeshes(p3);
	}

	if (m_Params.m_ConnectMaterial == "none")
		return;

	if (m_Params.m_iConnectType == 2)	// simple
	{
		if (m_Params.m_fConnectWidth == 0.0f)
			AddFlatConnectionMesh(p3);
		else if (m_Params.m_fConnectWidth > 0.0f)
			AddThickConnectionMesh(p3);
	}
	else if (m_Params.m_iConnectType == 3)	// profile
	{
		AddProfileConnectionMesh(p3);
	}
}
// Feeds one chunk of WebP data to the incremental libwebp decoder, maps the
// VP8 status codes onto imagelib's error categories, then converts any
// newly decoded RGBA rows into the BGRA frame buffer and invalidates them.
void nsWEBPDecoder::WriteInternal(const char *aBuffer, uint32_t aCount)
{
  MOZ_ASSERT(!HasError(), "Shouldn't call WriteInternal after error!");

  const uint8_t* buf = (const uint8_t*)aBuffer;
  VP8StatusCode rv = WebPIAppend(mDecoder, buf, aCount);
  if (rv == VP8_STATUS_OUT_OF_MEMORY) {
    PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
    return;
  } else if (rv == VP8_STATUS_INVALID_PARAM ||
             rv == VP8_STATUS_BITSTREAM_ERROR) {
    PostDataError();
    return;
  } else if (rv == VP8_STATUS_UNSUPPORTED_FEATURE ||
             rv == VP8_STATUS_USER_ABORT) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  // Catch any remaining erroneous return value.
  // (VP8_STATUS_SUSPENDED just means "need more input" and is not an error.)
  if (rv != VP8_STATUS_OK && rv != VP8_STATUS_SUSPENDED) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  int lastLineRead = -1;
  int height = 0;
  int width = 0;
  int stride = 0;

  mData = WebPIDecGetRGB(mDecoder, &lastLineRead, &width, &height, &stride);

  // The only valid format for WebP decoding for both alpha and non-alpha
  // images is BGRA, where Opaque images have an A of 255.
  // Assume transparency for all images.
  // XXX: This could be compositor-optimized by doing a one-time check for
  // all-255 alpha pixels, but that might interfere with progressive
  // decoding. Probably not worth it?
  PostHasTransparency();

  // Nothing decoded yet - wait for more input.
  if (lastLineRead == -1 || !mData)
    return;

  if (width <= 0 || height <= 0) {
    PostDataError();
    return;
  }

  if (!HasSize())
    PostSize(width, height);

  if (IsSizeDecode())
    return;

  if (!mImageData) {
    PostDecoderError(NS_ERROR_FAILURE);
    return;
  }

  // Transfer from mData to mImageData
  if (lastLineRead > mLastLine) {
    for (int line = mLastLine; line < lastLineRead; line++) {
      for (int pix = 0; pix < width; pix++) {
        // RGBA -> BGRA
        uint32_t DataOffset = 4 * (line * width + pix);
        mImageData[DataOffset+0] = mData[DataOffset+2];
        mImageData[DataOffset+1] = mData[DataOffset+1];
        mImageData[DataOffset+2] = mData[DataOffset+0];
        mImageData[DataOffset+3] = mData[DataOffset+3];
      }
    }

    // Invalidate
    // NOTE(review): nsIntRect is (x, y, width, height); passing lastLineRead
    // as the height over-invalidates below the newly decoded rows (looks
    // like it should be lastLineRead - mLastLine). Harmless (extra repaint)
    // but worth confirming.
    nsIntRect r(0, mLastLine, width, lastLineRead);
    PostInvalidation(r);
  }

  mLastLine = lastLineRead;
  return;
}
// Streaming JPEG decode entry point (DecodeStrategy-aware variant). mState
// advances through libjpeg's suspending-data-source protocol; the switch
// cases deliberately fall through so one call can progress through multiple
// stages when enough data is buffered. Fatal libjpeg errors longjmp back
// into the setjmp block below carrying an nsresult.
void nsJPEGDecoder::WriteInternal(const char* aBuffer, uint32_t aCount, DecodeStrategy)
{
  mSegment = (const JOCTET*)aBuffer;
  mSegmentLen = aCount;

  NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!");

  // Return here if there is a fatal error within libjpeg.
  nsresult error_code;
  // This cast to nsresult makes sense because setjmp() returns whatever we
  // passed to longjmp(), which was actually an nsresult.
  if ((error_code = (nsresult)setjmp(mErr.setjmp_buffer)) != NS_OK) {
    if (error_code == NS_ERROR_FAILURE) {
      PostDataError();
      // Error due to corrupt stream - return NS_OK and consume silently
      // so that ImageLib doesn't throw away a partial image load
      mState = JPEG_SINK_NON_JPEG_TRAILER;
      PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
             ("} (setjmp returned NS_ERROR_FAILURE)"));
      return;
    } else {
      // Error due to reasons external to the stream (probably out of
      // memory) - let ImageLib attempt to clean up, even though
      // mozilla is seconds away from falling flat on its face.
      PostDecoderError(error_code);
      mState = JPEG_ERROR;
      PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
             ("} (setjmp returned an error)"));
      return;
    }
  }

  PR_LOG(GetJPEGLog(), PR_LOG_DEBUG,
         ("[this=%p] nsJPEGDecoder::Write -- processing JPEG data\n", this));

  switch (mState) {
    case JPEG_HEADER: {
      LOG_SCOPE(GetJPEGLog(), "nsJPEGDecoder::Write -- entering JPEG_HEADER"
                              " case");

      // Step 3: read file parameters with jpeg_read_header()
      if (jpeg_read_header(&mInfo, TRUE) == JPEG_SUSPENDED) {
        PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
               ("} (JPEG_SUSPENDED)"));
        return; // I/O suspension
      }

      // Downscale-during-decode: a requested sample size of N makes libjpeg
      // decode at 1/N of the image's natural size.
      int sampleSize = mImage.GetRequestedSampleSize();
      if (sampleSize > 0) {
        mInfo.scale_num = 1;
        mInfo.scale_denom = sampleSize;
      }

      // Used to set up image size so arrays can be allocated
      jpeg_calc_output_dimensions(&mInfo);

      // Post our size to the superclass
      PostSize(mInfo.output_width, mInfo.output_height,
               ReadOrientationFromEXIF());
      if (HasError()) {
        // Setting the size led to an error.
        mState = JPEG_ERROR;
        return;
      }

      // If we're doing a size decode, we're done.
      if (IsSizeDecode()) {
        return;
      }

      // We're doing a full decode.
      // Pick an output colorspace and, when color management is enabled and
      // an embedded ICC profile exists, build a qcms transform for it.
      if (mCMSMode != eCMSMode_Off &&
          (mInProfile = GetICCProfile(mInfo)) != nullptr) {
        uint32_t profileSpace = qcms_profile_get_color_space(mInProfile);
        bool mismatch = false;

#ifdef DEBUG_tor
        fprintf(stderr, "JPEG profileSpace: 0x%08X\n", profileSpace);
#endif
        switch (mInfo.jpeg_color_space) {
          case JCS_GRAYSCALE:
            if (profileSpace == icSigRgbData) {
              mInfo.out_color_space = JCS_RGB;
            } else if (profileSpace != icSigGrayData) {
              mismatch = true;
            }
            break;
          case JCS_RGB:
            if (profileSpace != icSigRgbData) {
              mismatch = true;
            }
            break;
          case JCS_YCbCr:
            if (profileSpace == icSigRgbData) {
              mInfo.out_color_space = JCS_RGB;
            } else {
              // qcms doesn't support ycbcr
              mismatch = true;
            }
            break;
          case JCS_CMYK:
          case JCS_YCCK:
            // qcms doesn't support cmyk
            mismatch = true;
            break;
          default:
            mState = JPEG_ERROR;
            PostDataError();
            PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
                   ("} (unknown colorpsace (1))"));
            return;
        }

        if (!mismatch) {
          qcms_data_type type;
          switch (mInfo.out_color_space) {
            case JCS_GRAYSCALE:
              type = QCMS_DATA_GRAY_8;
              break;
            case JCS_RGB:
              type = QCMS_DATA_RGB_8;
              break;
            default:
              mState = JPEG_ERROR;
              PostDataError();
              PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
                     ("} (unknown colorpsace (2))"));
              return;
          }
#if 0
          // We don't currently support CMYK profiles. The following
          // code dealt with lcms types. Add something like this
          // back when we gain support for CMYK.

          // Adobe Photoshop writes YCCK/CMYK files with inverted data
          if (mInfo.out_color_space == JCS_CMYK) {
            type |= FLAVOR_SH(mInfo.saw_Adobe_marker ? 1 : 0);
          }
#endif

          if (gfxPlatform::GetCMSOutputProfile()) {

            // Calculate rendering intent.
            int intent = gfxPlatform::GetRenderingIntent();
            if (intent == -1) {
              intent = qcms_profile_get_rendering_intent(mInProfile);
            }

            // Create the color management transform.
            mTransform = qcms_transform_create(mInProfile,
                                               type,
                                               gfxPlatform::GetCMSOutputProfile(),
                                               QCMS_DATA_RGB_8,
                                               (qcms_intent)intent);
          }
        } else {
#ifdef DEBUG_tor
          fprintf(stderr, "ICM profile colorspace mismatch\n");
#endif
        }
      }

      // No color transform: fall back to libjpeg's own conversion, decoding
      // straight to the platform's native-endian XRGB layout when possible.
      if (!mTransform) {
        switch (mInfo.jpeg_color_space) {
          case JCS_GRAYSCALE:
          case JCS_RGB:
          case JCS_YCbCr:
            // if we're not color managing we can decode directly to
            // MOZ_JCS_EXT_NATIVE_ENDIAN_XRGB
            if (mCMSMode != eCMSMode_All) {
              mInfo.out_color_space = MOZ_JCS_EXT_NATIVE_ENDIAN_XRGB;
              mInfo.out_color_components = 4;
            } else {
              mInfo.out_color_space = JCS_RGB;
            }
            break;
          case JCS_CMYK:
          case JCS_YCCK:
            // libjpeg can convert from YCCK to CMYK, but not to RGB
            mInfo.out_color_space = JCS_CMYK;
            break;
          default:
            mState = JPEG_ERROR;
            PostDataError();
            PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
                   ("} (unknown colorpsace (3))"));
            return;
            break;
        }
      }

      // Don't allocate a giant and superfluous memory buffer
      // when not doing a progressive decode.
      mInfo.buffered_image = mDecodeStyle == PROGRESSIVE &&
                             jpeg_has_multiple_scans(&mInfo);

      if (!mImageData) {
        mState = JPEG_ERROR;
        PostDecoderError(NS_ERROR_OUT_OF_MEMORY);
        PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
               ("} (could not initialize image frame)"));
        return;
      }

      PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
             (" JPEGDecoderAccounting: nsJPEGDecoder::"
              "Write -- created image frame with %ux%u pixels",
              mInfo.output_width, mInfo.output_height));

      mState = JPEG_START_DECOMPRESS;
    }
    // Deliberate fallthrough: keep decoding with whatever data remains.

    case JPEG_START_DECOMPRESS: {
      LOG_SCOPE(GetJPEGLog(), "nsJPEGDecoder::Write -- entering"
                              " JPEG_START_DECOMPRESS case");
      // Step 4: set parameters for decompression

      // FIXME -- Should reset dct_method and dither mode
      // for final pass of progressive JPEG
      mInfo.dct_method = JDCT_ISLOW;
      mInfo.dither_mode = JDITHER_FS;
      mInfo.do_fancy_upsampling = TRUE;
      mInfo.enable_2pass_quant = FALSE;
      mInfo.do_block_smoothing = TRUE;

      // Step 5: Start decompressor
      if (jpeg_start_decompress(&mInfo) == FALSE) {
        PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
               ("} (I/O suspension after jpeg_start_decompress())"));
        return; // I/O suspension
      }

      // If this is a progressive JPEG ...
      mState = mInfo.buffered_image ?
               JPEG_DECOMPRESS_PROGRESSIVE : JPEG_DECOMPRESS_SEQUENTIAL;
    }
    // Deliberate fallthrough; each decompress case guards on mState.

    case JPEG_DECOMPRESS_SEQUENTIAL: {
      if (mState == JPEG_DECOMPRESS_SEQUENTIAL) {
        LOG_SCOPE(GetJPEGLog(), "nsJPEGDecoder::Write -- "
                                "JPEG_DECOMPRESS_SEQUENTIAL case");

        bool suspend;
        OutputScanlines(&suspend);

        if (suspend) {
          PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
                 ("} (I/O suspension after OutputScanlines() - SEQUENTIAL)"));
          return; // I/O suspension
        }

        // If we've completed image output ...
        NS_ASSERTION(mInfo.output_scanline == mInfo.output_height,
                     "We didn't process all of the data!");
        mState = JPEG_DONE;
      }
    }

    case JPEG_DECOMPRESS_PROGRESSIVE: {
      if (mState == JPEG_DECOMPRESS_PROGRESSIVE) {
        LOG_SCOPE(GetJPEGLog(),
                  "nsJPEGDecoder::Write -- JPEG_DECOMPRESS_PROGRESSIVE case");

        // Consume all buffered input first, then emit the best available scan.
        int status;
        do {
          status = jpeg_consume_input(&mInfo);
        } while ((status != JPEG_SUSPENDED) &&
                 (status != JPEG_REACHED_EOI));

        for (;;) {
          if (mInfo.output_scanline == 0) {
            int scan = mInfo.input_scan_number;

            // if we haven't displayed anything yet (output_scan_number==0)
            // and we have enough data for a complete scan, force output
            // of the last full scan
            if ((mInfo.output_scan_number == 0) &&
                (scan > 1) &&
                (status != JPEG_REACHED_EOI))
              scan--;

            if (!jpeg_start_output(&mInfo, scan)) {
              PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
                     ("} (I/O suspension after jpeg_start_output() -"
                      " PROGRESSIVE)"));
              return; // I/O suspension
            }
          }

          // 0xffffff is a sentinel (set below) meaning "scan already started".
          if (mInfo.output_scanline == 0xffffff) {
            mInfo.output_scanline = 0;
          }

          bool suspend;
          OutputScanlines(&suspend);

          if (suspend) {
            if (mInfo.output_scanline == 0) {
              // didn't manage to read any lines - flag so we don't call
              // jpeg_start_output() multiple times for the same scan
              mInfo.output_scanline = 0xffffff;
            }
            PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
                   ("} (I/O suspension after OutputScanlines() - PROGRESSIVE)"));
            return; // I/O suspension
          }

          if (mInfo.output_scanline == mInfo.output_height) {
            if (!jpeg_finish_output(&mInfo)) {
              PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
                     ("} (I/O suspension after jpeg_finish_output() -"
                      " PROGRESSIVE)"));
              return; // I/O suspension
            }

            if (jpeg_input_complete(&mInfo) &&
                (mInfo.input_scan_number == mInfo.output_scan_number))
              break;

            mInfo.output_scanline = 0;
          }
        }

        mState = JPEG_DONE;
      }
    }

    case JPEG_DONE: {
      LOG_SCOPE(GetJPEGLog(), "nsJPEGDecoder::ProcessData -- entering"
                              " JPEG_DONE case");

      // Step 7: Finish decompression

      if (jpeg_finish_decompress(&mInfo) == FALSE) {
        PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
               ("} (I/O suspension after jpeg_finish_decompress() - DONE)"));
        return; // I/O suspension
      }

      mState = JPEG_SINK_NON_JPEG_TRAILER;

      // we're done dude
      break;
    }

    case JPEG_SINK_NON_JPEG_TRAILER:
      PR_LOG(GetJPEGLog(), PR_LOG_DEBUG,
             ("[this=%p] nsJPEGDecoder::ProcessData -- entering"
              " JPEG_SINK_NON_JPEG_TRAILER case\n", this));

      break;

    case JPEG_ERROR:
      NS_ABORT_IF_FALSE(0, "Should always return immediately after error and"
                           " not re-enter decoder");
  }

  PR_LOG(GetJPEGDecoderAccountingLog(), PR_LOG_DEBUG,
         ("} (end of function)"));
  return;
}
// Streams PNG data. For size-only decodes the signature is verified and the
// 8 IHDR size bytes are collected byte-by-byte (no libpng involvement); for
// full decodes each chunk goes straight to png_process_data, with libpng
// fatal errors caught via setjmp/longjmp.
void nsPNGDecoder::WriteInternal(const char* aBuffer, uint32_t aCount)
{
  MOZ_ASSERT(!HasError(), "Shouldn't call WriteInternal after error!");

  // If we only want width/height, we don't need to go through libpng
  if (IsSizeDecode()) {
    // Are we done?
    if (mHeaderBytesRead == BYTES_NEEDED_FOR_DIMENSIONS) {
      return;
    }

    // Scan the header for the width and height bytes
    uint32_t pos = 0;
    const uint8_t* bptr = (uint8_t*)aBuffer;

    while (pos < aCount && mHeaderBytesRead < BYTES_NEEDED_FOR_DIMENSIONS) {
      // Verify the signature bytes
      if (mHeaderBytesRead < sizeof(pngSignatureBytes)) {
        if (bptr[pos] != nsPNGDecoder::pngSignatureBytes[mHeaderBytesRead]) {
          PostDataError();
          return;
        }
      }

      // Get width and height bytes into the buffer
      if ((mHeaderBytesRead >= WIDTH_OFFSET) &&
          (mHeaderBytesRead < BYTES_NEEDED_FOR_DIMENSIONS)) {
        mSizeBytes[mHeaderBytesRead - WIDTH_OFFSET] = bptr[pos];
      }
      pos ++;
      mHeaderBytesRead ++;
    }

    // If we're done now, verify the data and set up the container
    if (mHeaderBytesRead == BYTES_NEEDED_FOR_DIMENSIONS) {
      // Grab the width and height, accounting for endianness (thanks libpng!)
      uint32_t width = png_get_uint_32(mSizeBytes);
      uint32_t height = png_get_uint_32(mSizeBytes + 4);

      // Too big?
      if ((width > MOZ_PNG_MAX_DIMENSION) || (height > MOZ_PNG_MAX_DIMENSION)) {
        PostDataError();
        return;
      }

      // Post our size to the superclass
      PostSize(width, height);
    }

  // Otherwise, we're doing a standard decode
  } else {
    // libpng uses setjmp/longjmp for error handling - set the buffer
    if (setjmp(png_jmpbuf(mPNG))) {
      // We might not really know what caused the error, but it makes more
      // sense to blame the data.
      if (!HasError()) {
        PostDataError();
      }

      png_destroy_read_struct(&mPNG, &mInfo, nullptr);
      return;
    }

    // Pass the data off to libpng
    png_process_data(mPNG, mInfo, (unsigned char*)aBuffer, aCount);
  }
}
void nsICODecoder::WriteInternal(const char* aBuffer, uint32_t aCount, DecodeStrategy aStrategy) { NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!"); if (!aCount) { if (mContainedDecoder) { WriteToContainedDecoder(aBuffer, aCount, aStrategy); } return; } while (aCount && (mPos < ICONCOUNTOFFSET)) { // Skip to the # of icons. if (mPos == 2) { // if the third byte is 1: This is an icon, 2: a cursor if ((*aBuffer != 1) && (*aBuffer != 2)) { PostDataError(); return; } mIsCursor = (*aBuffer == 2); } mPos++; aBuffer++; aCount--; } if (mPos == ICONCOUNTOFFSET && aCount >= 2) { mNumIcons = LittleEndian::readUint16(reinterpret_cast<const uint16_t*>(aBuffer)); aBuffer += 2; mPos += 2; aCount -= 2; } if (mNumIcons == 0) return; // Nothing to do. uint16_t colorDepth = 0; nsIntSize prefSize = mImage.GetRequestedResolution(); if (prefSize.width == 0 && prefSize.height == 0) { prefSize.SizeTo(PREFICONSIZE, PREFICONSIZE); } // A measure of the difference in size between the entry we've found // and the requested size. We will choose the smallest image that is // >= requested size (i.e. we assume it's better to downscale a larger // icon than to upscale a smaller one). 
int32_t diff = INT_MIN; // Loop through each entry's dir entry while (mCurrIcon < mNumIcons) { if (mPos >= DIRENTRYOFFSET + (mCurrIcon * sizeof(mDirEntryArray)) && mPos < DIRENTRYOFFSET + ((mCurrIcon + 1) * sizeof(mDirEntryArray))) { uint32_t toCopy = sizeof(mDirEntryArray) - (mPos - DIRENTRYOFFSET - mCurrIcon * sizeof(mDirEntryArray)); if (toCopy > aCount) { toCopy = aCount; } memcpy(mDirEntryArray + sizeof(mDirEntryArray) - toCopy, aBuffer, toCopy); mPos += toCopy; aCount -= toCopy; aBuffer += toCopy; } if (aCount == 0) return; // Need more data IconDirEntry e; if (mPos == (DIRENTRYOFFSET + ICODIRENTRYSIZE) + (mCurrIcon * sizeof(mDirEntryArray))) { mCurrIcon++; ProcessDirEntry(e); // We can't use GetRealWidth and GetRealHeight here because those operate // on mDirEntry, here we are going through each item in the directory. // Calculate the delta between this image's size and the desired size, // so we can see if it is better than our current-best option. // In the case of several equally-good images, we use the last one. int32_t delta = (e.mWidth == 0 ? 256 : e.mWidth) - prefSize.width + (e.mHeight == 0 ? 256 : e.mHeight) - prefSize.height; if (e.mBitCount >= colorDepth && ((diff < 0 && delta >= diff) || (delta >= 0 && delta <= diff))) { diff = delta; mImageOffset = e.mImageOffset; // ensure mImageOffset is >= size of the direntry headers (bug #245631) uint32_t minImageOffset = DIRENTRYOFFSET + mNumIcons * sizeof(mDirEntryArray); if (mImageOffset < minImageOffset) { PostDataError(); return; } colorDepth = e.mBitCount; memcpy(&mDirEntry, &e, sizeof(IconDirEntry)); } } } if (mPos < mImageOffset) { // Skip to (or at least towards) the desired image offset uint32_t toSkip = mImageOffset - mPos; if (toSkip > aCount) toSkip = aCount; mPos += toSkip; aBuffer += toSkip; aCount -= toSkip; } // If we are within the first PNGSIGNATURESIZE bytes of the image data, // then we have either a BMP or a PNG. 
We use the first PNGSIGNATURESIZE // bytes to determine which one we have. if (mCurrIcon == mNumIcons && mPos >= mImageOffset && mPos < mImageOffset + PNGSIGNATURESIZE) { uint32_t toCopy = PNGSIGNATURESIZE - (mPos - mImageOffset); if (toCopy > aCount) { toCopy = aCount; } memcpy(mSignature + (mPos - mImageOffset), aBuffer, toCopy); mPos += toCopy; aCount -= toCopy; aBuffer += toCopy; mIsPNG = !memcmp(mSignature, nsPNGDecoder::pngSignatureBytes, PNGSIGNATURESIZE); if (mIsPNG) { mContainedDecoder = new nsPNGDecoder(mImage); mContainedDecoder->SetObserver(mObserver); mContainedDecoder->SetSizeDecode(IsSizeDecode()); mContainedDecoder->InitSharedDecoder(mImageData, mImageDataLength, mColormap, mColormapSize, mCurrentFrame); if (!WriteToContainedDecoder(mSignature, PNGSIGNATURESIZE, aStrategy)) { return; } } } // If we have a PNG, let the PNG decoder do all of the rest of the work if (mIsPNG && mContainedDecoder && mPos >= mImageOffset + PNGSIGNATURESIZE) { if (!WriteToContainedDecoder(aBuffer, aCount, aStrategy)) { return; } if (!HasSize() && mContainedDecoder->HasSize()) { PostSize(mContainedDecoder->GetImageMetadata().GetWidth(), mContainedDecoder->GetImageMetadata().GetHeight()); } mPos += aCount; aBuffer += aCount; aCount = 0; // Raymond Chen says that 32bpp only are valid PNG ICOs // http://blogs.msdn.com/b/oldnewthing/archive/2010/10/22/10079192.aspx if (!IsSizeDecode() && !static_cast<nsPNGDecoder*>(mContainedDecoder.get())->IsValidICO()) { PostDataError(); } return; } // We've processed all of the icon dir entries and are within the // bitmap info size if (!mIsPNG && mCurrIcon == mNumIcons && mPos >= mImageOffset && mPos >= mImageOffset + PNGSIGNATURESIZE && mPos < mImageOffset + BITMAPINFOSIZE) { // As we were decoding, we did not know if we had a PNG signature or the // start of a bitmap information header. 
At this point we know we had // a bitmap information header and not a PNG signature, so fill the bitmap // information header with the data it should already have. memcpy(mBIHraw, mSignature, PNGSIGNATURESIZE); // We've found the icon. uint32_t toCopy = sizeof(mBIHraw) - (mPos - mImageOffset); if (toCopy > aCount) toCopy = aCount; memcpy(mBIHraw + (mPos - mImageOffset), aBuffer, toCopy); mPos += toCopy; aCount -= toCopy; aBuffer += toCopy; } // If we have a BMP inside the ICO and we have read the BIH header if (!mIsPNG && mPos == mImageOffset + BITMAPINFOSIZE) { // Make sure we have a sane value for the bitmap information header int32_t bihSize = ExtractBIHSizeFromBitmap(reinterpret_cast<int8_t*>(mBIHraw)); if (bihSize != BITMAPINFOSIZE) { PostDataError(); return; } // We are extracting the BPP from the BIH header as it should be trusted // over the one we have from the icon header mBPP = ExtractBPPFromBitmap(reinterpret_cast<int8_t*>(mBIHraw)); // Init the bitmap decoder which will do most of the work for us // It will do everything except the AND mask which isn't present in bitmaps // bmpDecoder is for local scope ease, it will be freed by mContainedDecoder nsBMPDecoder *bmpDecoder = new nsBMPDecoder(mImage); mContainedDecoder = bmpDecoder; bmpDecoder->SetUseAlphaData(true); mContainedDecoder->SetObserver(mObserver); mContainedDecoder->SetSizeDecode(IsSizeDecode()); mContainedDecoder->InitSharedDecoder(mImageData, mImageDataLength, mColormap, mColormapSize, mCurrentFrame); // The ICO format when containing a BMP does not include the 14 byte // bitmap file header. To use the code of the BMP decoder we need to // generate this header ourselves and feed it to the BMP decoder. 
int8_t bfhBuffer[BMPFILEHEADERSIZE]; if (!FillBitmapFileHeaderBuffer(bfhBuffer)) { PostDataError(); return; } if (!WriteToContainedDecoder((const char*)bfhBuffer, sizeof(bfhBuffer), aStrategy)) { return; } // Setup the cursor hot spot if one is present SetHotSpotIfCursor(); // Fix the ICO height from the BIH. // Fix the height on the BIH to be /2 so our BMP decoder will understand. if (!FixBitmapHeight(reinterpret_cast<int8_t*>(mBIHraw))) { PostDataError(); return; } // Fix the ICO width from the BIH. if (!FixBitmapWidth(reinterpret_cast<int8_t*>(mBIHraw))) { PostDataError(); return; } // Write out the BMP's bitmap info header if (!WriteToContainedDecoder(mBIHraw, sizeof(mBIHraw), aStrategy)) { return; } PostSize(mContainedDecoder->GetImageMetadata().GetWidth(), mContainedDecoder->GetImageMetadata().GetHeight()); // We have the size. If we're doing a size decode, we got what // we came for. if (IsSizeDecode()) return; // Sometimes the ICO BPP header field is not filled out // so we should trust the contained resource over our own // information. 
mBPP = bmpDecoder->GetBitsPerPixel(); // Check to make sure we have valid color settings uint16_t numColors = GetNumColors(); if (numColors == (uint16_t)-1) { PostDataError(); return; } } // If we have a BMP if (!mIsPNG && mContainedDecoder && mPos >= mImageOffset + BITMAPINFOSIZE) { uint16_t numColors = GetNumColors(); if (numColors == (uint16_t)-1) { PostDataError(); return; } // Feed the actual image data (not including headers) into the BMP decoder uint32_t bmpDataOffset = mDirEntry.mImageOffset + BITMAPINFOSIZE; uint32_t bmpDataEnd = mDirEntry.mImageOffset + BITMAPINFOSIZE + static_cast<nsBMPDecoder*>(mContainedDecoder.get())->GetCompressedImageSize() + 4 * numColors; // If we are feeding in the core image data, but we have not yet // reached the ICO's 'AND buffer mask' if (mPos >= bmpDataOffset && mPos < bmpDataEnd) { // Figure out how much data the BMP decoder wants uint32_t toFeed = bmpDataEnd - mPos; if (toFeed > aCount) { toFeed = aCount; } if (!WriteToContainedDecoder(aBuffer, toFeed, aStrategy)) { return; } mPos += toFeed; aCount -= toFeed; aBuffer += toFeed; } // If the bitmap is fully processed, treat any left over data as the ICO's // 'AND buffer mask' which appears after the bitmap resource. if (!mIsPNG && mPos >= bmpDataEnd) { // There may be an optional AND bit mask after the data. This is // only used if the alpha data is not already set. The alpha data // is used for 32bpp bitmaps as per the comment in ICODecoder.h // The alpha mask should be checked in all other cases. if (static_cast<nsBMPDecoder*>(mContainedDecoder.get())->GetBitsPerPixel() != 32 || !static_cast<nsBMPDecoder*>(mContainedDecoder.get())->HasAlphaData()) { uint32_t rowSize = ((GetRealWidth() + 31) / 32) * 4; // + 31 to round up if (mPos == bmpDataEnd) { mPos++; mRowBytes = 0; mCurLine = GetRealHeight(); mRow = (uint8_t*)moz_realloc(mRow, rowSize); if (!mRow) { PostDecoderError(NS_ERROR_OUT_OF_MEMORY); return; } } // Ensure memory has been allocated before decoding. 
NS_ABORT_IF_FALSE(mRow, "mRow is null"); if (!mRow) { PostDataError(); return; } while (mCurLine > 0 && aCount > 0) { uint32_t toCopy = std::min(rowSize - mRowBytes, aCount); if (toCopy) { memcpy(mRow + mRowBytes, aBuffer, toCopy); aCount -= toCopy; aBuffer += toCopy; mRowBytes += toCopy; } if (rowSize == mRowBytes) { mCurLine--; mRowBytes = 0; uint32_t* imageData = static_cast<nsBMPDecoder*>(mContainedDecoder.get())->GetImageData(); if (!imageData) { PostDataError(); return; } uint32_t* decoded = imageData + mCurLine * GetRealWidth(); uint32_t* decoded_end = decoded + GetRealWidth(); uint8_t* p = mRow, *p_end = mRow + rowSize; while (p < p_end) { uint8_t idx = *p++; for (uint8_t bit = 0x80; bit && decoded<decoded_end; bit >>= 1) { // Clear pixel completely for transparency. if (idx & bit) { *decoded = 0; } decoded++; } } } } }
void nsWEBPDecoder::WriteInternal(const char *aBuffer, uint32_t aCount, DecodeStrategy) { NS_ABORT_IF_FALSE(!HasError(), "Shouldn't call WriteInternal after error!"); const uint8_t* buf = (const uint8_t*)aBuffer; VP8StatusCode rv = WebPIAppend(mDecoder, buf, aCount); if (rv == VP8_STATUS_OUT_OF_MEMORY) { PostDecoderError(NS_ERROR_OUT_OF_MEMORY); return; } else if (rv == VP8_STATUS_INVALID_PARAM || rv == VP8_STATUS_BITSTREAM_ERROR) { PostDataError(); return; } else if (rv == VP8_STATUS_UNSUPPORTED_FEATURE || rv == VP8_STATUS_USER_ABORT) { PostDecoderError(NS_ERROR_FAILURE); return; } // Catch any remaining erroneous return value. if (rv != VP8_STATUS_OK && rv != VP8_STATUS_SUSPENDED) { PostDecoderError(NS_ERROR_FAILURE); return; } int lastLineRead = -1; int height = 0; int width = 0; int stride = 0; mData = WebPIDecGetRGB(mDecoder, &lastLineRead, &width, &height, &stride); if (lastLineRead == -1 || !mData) return; if (width <= 0 || height <= 0) { PostDataError(); return; } if (!HasSize()) PostSize(width, height); if (IsSizeDecode()) return; uint32_t imagelength; // First incremental Image data chunk. Special handling required. 
if (mLastLine == 0 && lastLineRead > 0) { imgFrame* aFrame; nsresult res = mImage.EnsureFrame(0, 0, 0, width, height, gfxASurface::ImageFormatARGB32, (uint8_t**)&mImageData, &imagelength, &aFrame); if (NS_FAILED(res) || !mImageData) { PostDecoderError(NS_ERROR_FAILURE); return; } } if (!mImageData) { PostDecoderError(NS_ERROR_FAILURE); return; } if (lastLineRead > mLastLine) { for (int line = mLastLine; line < lastLineRead; line++) { uint32_t *cptr32 = (uint32_t*)(mImageData + (line * width)); uint8_t *cptr8 = mData + (line * stride); for (int pix = 0; pix < width; pix++, cptr8 += 4) { // if((cptr8[3] != 0) && (cptr8[0] != 0) && (cptr8[1] != 0) && (cptr8[2] != 0)) *cptr32++ = gfxPackedPixel(cptr8[3], cptr8[0], cptr8[1], cptr8[2]); } } // Invalidate nsIntRect r(0, mLastLine, width, lastLineRead); PostInvalidation(r); } mLastLine = lastLineRead; return; }