void ImageDecoder::setData(SharedBuffer& data, bool allDataReceived)
{
    m_isAllDataReceived = allDataReceived;

#if PLATFORM(COCOA)
    // On Mac the NSData inside the SharedBuffer can be secretly appended to without the SharedBuffer's knowledge.
    // We use SharedBuffer's ability to wrap itself inside CFData to get around this, ensuring that ImageIO is
    // really looking at the SharedBuffer.
    CGImageSourceUpdateData(m_nativeDecoder.get(), data.createCFData().get(), allDataReceived);
#else
    // Create a CGDataProvider to wrap the SharedBuffer.
    data.ref();
    // We use the GetBytesAtPosition callback rather than the GetBytePointer one because SharedBuffer
    // does not provide a way to lock down the byte pointer and guarantee that it won't move, which
    // is a requirement for using the GetBytePointer callback.
    CGDataProviderDirectCallbacks providerCallbacks = { 0, 0, 0, sharedBufferGetBytesAtPosition, sharedBufferRelease };
    RetainPtr<CGDataProviderRef> dataProvider = adoptCF(CGDataProviderCreateDirect(&data, data.size(), &providerCallbacks));
    CGImageSourceUpdateDataProvider(m_nativeDecoder.get(), dataProvider.get(), allDataReceived);
#endif
}
status_t String16::replaceAll(char16_t replaceThis, char16_t withThis)
{
    const size_t N = size();
    const char16_t* str = string();
    char16_t* edit = NULL;
    for (size_t i = 0; i < N; i++) {
        if (str[i] == replaceThis) {
            if (!edit) {
                SharedBuffer* buf = SharedBuffer::bufferFromData(mString)->edit();
                if (!buf) {
                    return NO_MEMORY;
                }
                edit = (char16_t*)buf->data();
                mString = str = edit;
            }
            edit[i] = withThis;
        }
    }
    return NO_ERROR;
}
void DocumentLoader::substituteResourceDeliveryTimerFired(Timer<DocumentLoader>*)
{
    if (m_pendingSubstituteResources.isEmpty())
        return;
    ASSERT(m_frame && m_frame->page());
    if (m_frame->page()->defersLoading())
        return;

    SubstituteResourceMap copy;
    copy.swap(m_pendingSubstituteResources);

    SubstituteResourceMap::const_iterator end = copy.end();
    for (SubstituteResourceMap::const_iterator it = copy.begin(); it != end; ++it) {
        RefPtr<ResourceLoader> loader = it->key;
        SubstituteResource* resource = it->value.get();

        if (resource) {
            SharedBuffer* data = resource->data();

            loader->didReceiveResponse(resource->response());

            // Calling ResourceLoader::didReceiveResponse can end up cancelling the load,
            // so we need to check if the loader has reached its terminal state.
            if (loader->reachedTerminalState())
                return;

            loader->didReceiveData(data->data(), data->size(), data->size(), true);

            // Calling ResourceLoader::didReceiveData can end up cancelling the load,
            // so we need to check if the loader has reached its terminal state.
            if (loader->reachedTerminalState())
                return;

            loader->didFinishLoading(0);
        } else {
            // A null resource means that we should fail the load.
            // FIXME: Maybe we should use another error here - something like "not in cache".
            loader->didFail(loader->cannotShowURLError());
        }
    }
}
status_t String16::makeLower()
{
    const size_t N = size();
    const char16_t* str = string();
    char16_t* edit = NULL;
    for (size_t i = 0; i < N; i++) {
        const char16_t v = str[i];
        if (v >= 'A' && v <= 'Z') {
            if (!edit) {
                SharedBuffer* buf = SharedBuffer::bufferFromData(mString)->edit();
                if (!buf) {
                    return NO_MEMORY;
                }
                edit = (char16_t*)buf->data();
                mString = str = edit;
            }
            edit[i] = tolower((char)v);
        }
    }
    return NO_ERROR;
}
status_t String16::append(const char16_t* chrs, size_t otherLen)
{
    const size_t myLen = size();
    if (myLen == 0) {
        setTo(chrs, otherLen);
        return NO_ERROR;
    } else if (otherLen == 0) {
        return NO_ERROR;
    }

    SharedBuffer* buf = SharedBuffer::bufferFromData(mString)
        ->editResize((myLen + otherLen + 1) * sizeof(char16_t));
    if (buf) {
        char16_t* str = (char16_t*)buf->data();
        memcpy(str + myLen, chrs, otherLen * sizeof(char16_t));
        str[myLen + otherLen] = 0;
        mString = str;
        return NO_ERROR;
    }
    return NO_MEMORY;
}
ssize_t VectorImpl::setCapacity(size_t new_capacity)
{
    // The capacity must always be greater than or equal to the size
    // of this vector.
    if (new_capacity <= size()) {
        return capacity();
    }

    size_t new_allocation_size = 0;
    LOG_ALWAYS_FATAL_IF(!safe_mul(&new_allocation_size, new_capacity, mItemSize));
    SharedBuffer* sb = SharedBuffer::alloc(new_allocation_size);
    if (sb) {
        void* array = sb->data();
        _do_copy(array, mStorage, size());
        release_storage();
        mStorage = const_cast<void*>(array);
    } else {
        return NO_MEMORY;
    }
    return new_capacity;
}
size_t EOTStream::read(void* buffer, size_t count)
{
    size_t bytesToRead = count;
    if (m_inHeader) {
        size_t bytesFromHeader = min(m_eotHeader.size() - m_offset, count);
        memcpy(buffer, m_eotHeader.data() + m_offset, bytesFromHeader);
        m_offset += bytesFromHeader;
        bytesToRead -= bytesFromHeader;
        if (m_offset == m_eotHeader.size()) {
            m_inHeader = false;
            m_offset = 0;
        }
    }
    if (bytesToRead && !m_inHeader) {
        size_t bytesFromData = min(m_fontData->size() - m_offset, bytesToRead);
        memcpy(buffer, m_fontData->data() + m_offset, bytesFromData);
        m_offset += bytesFromData;
        bytesToRead -= bytesFromData;
    }
    return count - bytesToRead;
}
ENS_API_EXPORT OMX_ERRORTYPE NmfHostMpc_ProcessingComponent::freeBuffer(
    OMX_U32 nPortIndex,
    OMX_U32 nBufferIndex,
    OMX_BOOL bBufferAllocated,
    void *bufferAllocInfo,
    void *portPrivateInfo)
{
    OMX_ERRORTYPE error;
    SharedBuffer *sharedBuf = static_cast<SharedBuffer *>(portPrivateInfo);

    if (bBufferAllocated) {
        error = doBufferDeAllocation(nPortIndex, nBufferIndex, bufferAllocInfo);
        if (error != OMX_ErrorNone) return error;
    } else if (useBufferNeedsMemcpy()) {
        error = doBufferDeAllocation(nPortIndex, nBufferIndex, (void*)sharedBuf->getBufferAllocInfo());
        if (error != OMX_ErrorNone) return error;
    }

    delete sharedBuf;

    return OMX_ErrorNone;
}
static char* allocFromUTF32(const char32_t* in, size_t len)
{
    if (len == 0) {
        return getEmptyString();
    }

    const ssize_t bytes = utf32_to_utf8_length(in, len);
    if (bytes < 0) {
        return getEmptyString();
    }

    SharedBuffer* buf = SharedBuffer::alloc(bytes + 1);
    if (!buf) {
        return getEmptyString();
    }

    char* str = (char*) buf->data();
    utf32_to_utf8(in, len, str);

    return str;
}
void MHTMLArchive::generateMHTMLPart(
    const String& boundary,
    EncodingPolicy encodingPolicy,
    const SerializedResource& resource,
    SharedBuffer& outputBuffer)
{
    StringBuilder stringBuilder;
    stringBuilder.append("--" + boundary + "\r\n");
    stringBuilder.appendLiteral("Content-Type: ");
    stringBuilder.append(resource.mimeType);

    const char* contentEncoding = 0;
    if (encodingPolicy == UseBinaryEncoding)
        contentEncoding = binary;
    else if (MIMETypeRegistry::isSupportedJavaScriptMIMEType(resource.mimeType) || MIMETypeRegistry::isSupportedNonImageMIMEType(resource.mimeType))
        contentEncoding = quotedPrintable;
    else
        contentEncoding = base64;

    stringBuilder.appendLiteral("\r\nContent-Transfer-Encoding: ");
    stringBuilder.append(contentEncoding);
    stringBuilder.appendLiteral("\r\nContent-Location: ");
    stringBuilder.append(resource.url);
    stringBuilder.appendLiteral("\r\n\r\n");

    CString asciiString = stringBuilder.toString().utf8();
    outputBuffer.append(asciiString.data(), asciiString.length());

    if (!strcmp(contentEncoding, binary)) {
        const char* data;
        size_t position = 0;
        while (size_t length = resource.data->getSomeData(data, position)) {
            outputBuffer.append(data, length);
            position += length;
        }
    } else {
        // FIXME: ideally we would encode the content as a stream without having to fetch it all.
        const char* data = resource.data->data();
        size_t dataLength = resource.data->size();
        Vector<char> encodedData;
        if (!strcmp(contentEncoding, quotedPrintable)) {
            quotedPrintableEncode(data, dataLength, encodedData);
            outputBuffer.append(encodedData.data(), encodedData.size());
            outputBuffer.append("\r\n", 2);
        } else {
            ASSERT(!strcmp(contentEncoding, base64));
            // We are not specifying insertLFs = true below as it would cut the lines with LFs and MHTML requires CRLFs.
            base64Encode(data, dataLength, encodedData);
            const size_t maximumLineLength = 76;
            size_t index = 0;
            size_t encodedDataLength = encodedData.size();
            do {
                size_t lineLength = std::min(encodedDataLength - index, maximumLineLength);
                outputBuffer.append(encodedData.data() + index, lineLength);
                outputBuffer.append("\r\n", 2);
                index += maximumLineLength;
            } while (index < encodedDataLength);
        }
    }
}
ENS_API_EXPORT OMX_ERRORTYPE NmfMpc_ProcessingComponent::useBuffer(
    OMX_U32 nPortIndex,
    OMX_U32 nBufferIndex,
    OMX_BUFFERHEADERTYPE* pBufferHdr,
    void **portPrivateInfo)
{
    OMX_ERRORTYPE error;
    void *bufferAllocInfo = 0;
    OMX_U8 *pBuffer;

    if (nPortIndex >= mENSComponent.getPortCount() || mENSComponent.getPort(nPortIndex) == 0) {
        return OMX_ErrorBadPortIndex;
    }

    if (useBufferNeedsMemcpy()) {
        error = doBufferAllocation(nPortIndex, nBufferIndex, pBufferHdr->nAllocLen, &pBuffer, &bufferAllocInfo);
        if (error != OMX_ErrorNone) return error;
    } else {
        ENS_Port *port = mENSComponent.getPort(nPortIndex);
        bufferAllocInfo = port->getSharedChunk();
        pBuffer = pBufferHdr->pBuffer;
        OMX_ERRORTYPE error = ((MMHwBuffer *)bufferAllocInfo)->AddBufferInfo(nBufferIndex, (OMX_U32)pBufferHdr->pBuffer, pBufferHdr->nAllocLen);
        if (error != OMX_ErrorNone) return error;
    }

    OMX_U32 bufPhysicalAddr = getBufferPhysicalAddress(bufferAllocInfo, pBuffer, pBufferHdr->nAllocLen);
    OMX_U32 bufMpcAddress = getBufferMpcAddress(bufferAllocInfo);

    SharedBuffer *sharedBuf = new SharedBuffer(mENSComponent.getNMFDomainHandle(), pBufferHdr->nAllocLen, pBuffer, bufPhysicalAddr, bufMpcAddress, bufferAllocInfo, error);
    if (sharedBuf == 0) return OMX_ErrorInsufficientResources;
    if (error != OMX_ErrorNone) return error;

    sharedBuf->setOMXHeader(pBufferHdr);

    *portPrivateInfo = sharedBuf;

    return OMX_ErrorNone;
}
static char* allocFromUTF32(const char32_t* in, size_t len)
{
    if (len == 0) {
        return getEmptyString();
    }

    const ssize_t resultStrLen = utf32_to_utf8_length(in, len) + 1;
    if (resultStrLen < 1) {
        return getEmptyString();
    }

    SharedBuffer* buf = SharedBuffer::alloc(resultStrLen);
    ALOG_ASSERT(buf, "Unable to allocate shared buffer");
    if (!buf) {
        return getEmptyString();
    }

    char* resultStr = (char*) buf->data();
    utf32_to_utf8(in, len, resultStr, resultStrLen);
    return resultStr;
}
void DeferredImageDecoder::setData(SharedBuffer& data, bool allDataReceived)
{
    if (m_actualDecoder) {
        m_data = RefPtr<SharedBuffer>(data);
        m_lastDataSize = data.size();
        m_allDataReceived = allDataReceived;
        m_actualDecoder->setData(&data, allDataReceived);
        prepareLazyDecodedFrames();
    }
    if (m_frameGenerator)
        m_frameGenerator->setData(&data, allDataReceived);
}
void TextTrackLoader::processNewCueData(CachedResource* resource)
{
    ASSERT(m_cachedCueData == resource);

    if (m_state == Failed || !resource->data())
        return;

    SharedBuffer* buffer = resource->data();
    if (m_parseOffset == buffer->size())
        return;

    if (!m_cueParser)
        m_cueParser = WebVTTParser::create(this, m_scriptExecutionContext);

    const char* data;
    unsigned length;

    while ((length = buffer->getSomeData(data, m_parseOffset))) {
        m_cueParser->parseBytes(data, length);
        m_parseOffset += length;
    }
}
static unsigned copyFromSharedBuffer(char* buffer, unsigned bufferLength, const SharedBuffer& sharedBuffer, unsigned offset)
{
    unsigned bytesExtracted = 0;
    const char* moreData;
    while (unsigned moreDataLength = sharedBuffer.getSomeData(moreData, offset)) {
        unsigned bytesToCopy = std::min(bufferLength - bytesExtracted, moreDataLength);
        memcpy(buffer + bytesExtracted, moreData, bytesToCopy);
        bytesExtracted += bytesToCopy;
        if (bytesExtracted == bufferLength)
            break;
        offset += bytesToCopy;
    }
    return bytesExtracted;
}
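// Hypothetical usage sketch for copyFromSharedBuffer() above -- not part of the
// original source. It assumes a WebKit-style SharedBuffer with create() and
// append(const char*, unsigned); the names networkBytes/networkByteCount and the
// staging array are illustrative only.
static void copyFromSharedBufferUsageSketch(const char* networkBytes, unsigned networkByteCount)
{
    RefPtr<SharedBuffer> shared = SharedBuffer::create();
    shared->append(networkBytes, networkByteCount);

    // Drain the (possibly segmented) SharedBuffer into a contiguous staging
    // array; the helper walks the segments via getSomeData() internally.
    char staging[8192];
    unsigned copied = copyFromSharedBuffer(staging, sizeof(staging), *shared, 0);

    // `copied` is at most sizeof(staging); pass a non-zero offset on a later
    // call to continue from where this one stopped.
    (void)copied;
}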
SharedBuffer* SharedBuffer::editResize(size_t newSize) const
{
    if (onlyOwner()) {
        SharedBuffer* buf = const_cast<SharedBuffer*>(this);
        if (buf->mSize == newSize) return buf;
        // Don't overflow if the combined size of the new buffer / header is larger than
        // size_max.
        LOG_ALWAYS_FATAL_IF((newSize >= (SIZE_MAX - sizeof(SharedBuffer))),
                            "Invalid buffer size %zu", newSize);
        buf = (SharedBuffer*)realloc(buf, sizeof(SharedBuffer) + newSize);
        if (buf != NULL) {
            buf->mSize = newSize;
            return buf;
        }
    }
    SharedBuffer* sb = alloc(newSize);
    if (sb) {
        const size_t mySize = mSize;
        memcpy(sb->data(), data(), newSize < mySize ? newSize : mySize);
        release();
    }
    return sb;
}
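// Hypothetical usage sketch for editResize() above -- not part of the original
// source. It follows the same bufferFromData()/editResize() pattern that
// String16::append() uses earlier in this file set; the sizes and the fill
// value are illustrative only.
static char* sharedBufferGrowSketch()
{
    SharedBuffer* buf = SharedBuffer::alloc(16);
    if (!buf) return NULL;
    char* data = (char*)buf->data();
    memset(data, 'a', 16);

    // editResize() reallocates in place when this is the only owner and
    // otherwise allocates a copy, so the data pointer must be re-fetched
    // from the returned SharedBuffer after growing.
    SharedBuffer* grown = SharedBuffer::bufferFromData(data)->editResize(32);
    if (!grown) return NULL;
    return (char*)grown->data();
}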
static void writeImageToDataObject(ChromiumDataObject* dataObject, Element* element, const KURL& url)
{
    // Shove image data into a DataObject for use as a file
    CachedImage* cachedImage = getCachedImage(element);
    if (!cachedImage || !cachedImage->imageForRenderer(element->renderer()) || !cachedImage->isLoaded())
        return;

    SharedBuffer* imageBuffer = cachedImage->imageForRenderer(element->renderer())->data();
    if (!imageBuffer || !imageBuffer->size())
        return;

    dataObject->setFileContent(imageBuffer);

    // Determine the filename for the file contents of the image.
    String filename = cachedImage->response().suggestedFilename();
    if (filename.isEmpty())
        filename = url.lastPathComponent();
    if (filename.isEmpty())
        filename = element->getAttribute(altAttr);
    else {
        // Strip any existing extension. Assume that alt text is usually not a filename.
        int extensionIndex = filename.reverseFind('.');
        if (extensionIndex != -1)
            filename.truncate(extensionIndex);
    }

    String extension = MIMETypeRegistry::getPreferredExtensionForMIMEType(cachedImage->response().mimeType());
    extension = extension.isEmpty() ? emptyString() : "." + extension;

    ClipboardChromium::validateFilename(filename, extension);

    dataObject->setFileContentFilename(filename + extension);
    dataObject->setFileExtension(extension);
}
FontCustomPlatformData::FontCustomPlatformData(FT_Face freeTypeFace, SharedBuffer& buffer)
    : m_freeTypeFace(freeTypeFace)
    , m_fontFace(cairo_ft_font_face_create_for_ft_face(freeTypeFace, 0))
{
    // FIXME Should we be setting some hinting options here?

    buffer.ref(); // This is balanced by the buffer->deref() in releaseCustomFontData.
    static cairo_user_data_key_t bufferKey;
    cairo_font_face_set_user_data(m_fontFace, &bufferKey, &buffer,
        static_cast<cairo_destroy_func_t>(releaseCustomFontData));

    // Cairo doesn't do FreeType reference counting, so we need to ensure that when
    // this cairo_font_face_t is destroyed, it cleans up the FreeType face as well.
    static cairo_user_data_key_t freeTypeFaceKey;
    cairo_font_face_set_user_data(m_fontFace, &freeTypeFaceKey, freeTypeFace,
        reinterpret_cast<cairo_destroy_func_t>(FT_Done_Face));
}
bool decode(const SharedBuffer& data, bool sizeOnly)
{
    m_decodingSizeOnly = sizeOnly;

    // We need to do the setjmp here. Otherwise bad things will happen.
    if (setjmp(JMPBUF(m_png)))
        return m_decoder->setFailed();

    const char* segment;
    while (unsigned segmentLength = data.getSomeData(segment, m_readOffset)) {
        m_readOffset += segmentLength;
        m_currentBufferSize = m_readOffset;
        png_process_data(m_png, m_info, reinterpret_cast<png_bytep>(const_cast<char*>(segment)), segmentLength);
        if (sizeOnly ? m_decoder->isDecodedSizeAvailable() : m_decoder->frameIsCompleteAtIndex(0))
            return true;
    }

    return false;
}
void compressZlib(SharedBuffer<u8> data, std::ostream &os)
{
    z_stream z;
    const s32 bufsize = 16384;
    char output_buffer[bufsize];
    int status = 0;
    int ret;

    z.zalloc = Z_NULL;
    z.zfree = Z_NULL;
    z.opaque = Z_NULL;

    ret = deflateInit(&z, -1);
    if (ret != Z_OK)
        throw SerializationError("compressZlib: deflateInit failed");

    // Point zlib to our input buffer
    z.next_in = (Bytef*)&data[0];
    z.avail_in = data.getSize();
    // And get all output
    for (;;) {
        z.next_out = (Bytef*)output_buffer;
        z.avail_out = bufsize;

        status = deflate(&z, Z_FINISH);
        if (status == Z_NEED_DICT || status == Z_DATA_ERROR || status == Z_MEM_ERROR) {
            zerr(status);
            throw SerializationError("compressZlib: deflate failed");
        }
        int count = bufsize - z.avail_out;
        if (count)
            os.write(output_buffer, count);
        // Z_STREAM_END indicates zlib has produced all of its output
        if (status == Z_STREAM_END)
            break;
    }

    deflateEnd(&z);
}
void CachedRawResource::addDataBuffer(SharedBuffer& data)
{
    CachedResourceHandle<CachedRawResource> protect(this);
    ASSERT(dataBufferingPolicy() == BufferData);
    m_data = &data;

    unsigned incrementalDataLength;
    const char* incrementalData = calculateIncrementalDataChunk(&data, incrementalDataLength);
    setEncodedSize(data.size());
    notifyClientsDataWasReceived(incrementalData, incrementalDataLength);
    if (dataBufferingPolicy() == DoNotBufferData) {
        if (m_loader)
            m_loader->setDataBufferingPolicy(DoNotBufferData);
        clear();
        return;
    }

    CachedResource::addDataBuffer(data);
}
bool InspectorPageAgent::cachedResourceContent(Resource* cachedResource, String* result, bool* base64Encoded)
{
    bool hasZeroSize;
    bool prepared = prepareResourceBuffer(cachedResource, &hasZeroSize);
    if (!prepared)
        return false;

    *base64Encoded = !hasTextContent(cachedResource);
    if (*base64Encoded) {
        RefPtr<SharedBuffer> buffer = hasZeroSize ? SharedBuffer::create() : cachedResource->resourceBuffer();

        if (!buffer)
            return false;

        *result = base64Encode(buffer->data(), buffer->size());
        return true;
    }

    if (hasZeroSize) {
        *result = "";
        return true;
    }

    if (cachedResource) {
        switch (cachedResource->type()) {
        case Resource::CSSStyleSheet:
            *result = toCSSStyleSheetResource(cachedResource)->sheetText(false);
            return true;
        case Resource::Script:
            *result = toScriptResource(cachedResource)->script();
            return true;
        case Resource::MainResource:
            return false;
        case Resource::Raw: {
            SharedBuffer* buffer = cachedResource->resourceBuffer();
            if (!buffer)
                return false;
            OwnPtr<TextResourceDecoder> decoder = createXHRTextDecoder(cachedResource->response().mimeType(), cachedResource->response().textEncodingName());
            String content = decoder->decode(buffer->data(), buffer->size());
            *result = content + decoder->flush();
            return true;
        }
        default:
            SharedBuffer* buffer = cachedResource->resourceBuffer();
            return decodeBuffer(buffer ? buffer->data() : 0, buffer ? buffer->size() : 0, cachedResource->response().textEncodingName(), result);
        }
    }
    return false;
}
bool decode(const SharedBuffer& data, bool sizeOnly)
{
    m_decodingSizeOnly = sizeOnly;
    PNGImageDecoder* decoder = static_cast<PNGImageDecoder*>(png_get_progressive_ptr(m_png));

    // We need to do the setjmp here. Otherwise bad things will happen.
    if (setjmp(JMPBUF(m_png)))
        return decoder->setFailed();

    const char* segment;
    while (unsigned segmentLength = data.getSomeData(segment, m_readOffset)) {
        m_readOffset += segmentLength;
        m_currentBufferSize = m_readOffset;
        png_process_data(m_png, m_info, reinterpret_cast<png_bytep>(const_cast<char*>(segment)), segmentLength);
        // We explicitly specify the superclass isSizeAvailable() because we
        // merely want to check if we've managed to set the size, not
        // (recursively) trigger additional decoding if we haven't.
        if (sizeOnly ? decoder->ImageDecoder::isSizeAvailable() : decoder->isComplete())
            return true;
    }

    return false;
}
std::unique_ptr<FontCustomPlatformData> createFontCustomPlatformData(SharedBuffer& buffer)
{
    RetainPtr<CFDataRef> bufferData = buffer.createCFData();

#if CORETEXT_WEB_FONTS
    RetainPtr<CTFontDescriptorRef> fontDescriptor = adoptCF(CTFontManagerCreateFontDescriptorFromData(bufferData.get()));
    if (!fontDescriptor)
        return nullptr;

    return std::make_unique<FontCustomPlatformData>(fontDescriptor.get());
#else
    RetainPtr<CGDataProviderRef> dataProvider = adoptCF(CGDataProviderCreateWithCFData(bufferData.get()));

    RetainPtr<CGFontRef> cgFontRef = adoptCF(CGFontCreateWithDataProvider(dataProvider.get()));
    if (!cgFontRef)
        return nullptr;

    return std::make_unique<FontCustomPlatformData>(cgFontRef.get());
#endif
}
void MHTMLArchive::generateMHTMLHeader(
    const String& boundary,
    const String& title,
    const String& mimeType,
    SharedBuffer& outputBuffer)
{
    ASSERT(!boundary.isEmpty());
    ASSERT(!mimeType.isEmpty());

    DateComponents now;
    now.setMillisecondsSinceEpochForDateTime(currentTimeMS());
    // TODO(lukasza): Passing individual date/time components seems fragile.
    String dateString = makeRFC2822DateString(
        now.weekDay(), now.monthDay(), now.month(), now.fullYear(),
        now.hour(), now.minute(), now.second(), 0);

    StringBuilder stringBuilder;
    stringBuilder.appendLiteral("From: <Saved by Blink>\r\n");
    stringBuilder.appendLiteral("Subject: ");
    // We replace non ASCII characters with '?' characters to match IE's behavior.
    stringBuilder.append(replaceNonPrintableCharacters(title));
    stringBuilder.appendLiteral("\r\nDate: ");
    stringBuilder.append(dateString);
    stringBuilder.appendLiteral("\r\nMIME-Version: 1.0\r\n");
    stringBuilder.appendLiteral("Content-Type: multipart/related;\r\n");
    stringBuilder.appendLiteral("\ttype=\"");
    stringBuilder.append(mimeType);
    stringBuilder.appendLiteral("\";\r\n");
    stringBuilder.appendLiteral("\tboundary=\"");
    stringBuilder.append(boundary);
    stringBuilder.appendLiteral("\"\r\n\r\n");

    // We use utf8() below instead of ascii() as ascii() replaces CRLFs with ??
    // (we still only have put ASCII characters in it).
    ASSERT(stringBuilder.toString().containsOnlyASCII());
    CString asciiString = stringBuilder.toString().utf8();

    outputBuffer.append(asciiString.data(), asciiString.length());
}
void sharedBufferRelease(void* info)
{
    SharedBuffer* sharedBuffer = static_cast<SharedBuffer*>(info);
    sharedBuffer->deref();
}
bool decode(const SharedBuffer& data, bool onlySize)
{
    unsigned newByteCount = data.size() - m_bufferLength;
    unsigned readOffset = m_bufferLength - m_info.src->bytes_in_buffer;
    m_info.src->bytes_in_buffer += newByteCount;
    m_info.src->next_input_byte = (JOCTET*)(data.data()) + readOffset;

    // If we still have bytes to skip, try to skip those now.
    if (m_bytesToSkip)
        skipBytes(m_bytesToSkip);

    m_bufferLength = data.size();

    // We need to do the setjmp here. Otherwise bad things will happen
    if (setjmp(m_err.setjmp_buffer))
        return m_decoder->setFailed();

    switch (m_state) {
    case JPEG_HEADER:
        // Read file parameters with jpeg_read_header().
        if (jpeg_read_header(&m_info, true) == JPEG_SUSPENDED)
            return false; // I/O suspension.

        switch (m_info.jpeg_color_space) {
        case JCS_GRAYSCALE:
        case JCS_RGB:
        case JCS_YCbCr:
            // libjpeg can convert GRAYSCALE and YCbCr image pixels to RGB.
            m_info.out_color_space = rgbOutputColorSpace();
#if defined(TURBO_JPEG_RGB_SWIZZLE)
            if (m_info.saw_JFIF_marker)
                break;
            // FIXME: Swizzle decoding does not support Adobe transform=0
            // images (yet), so revert to using JSC_RGB in that case.
            if (m_info.saw_Adobe_marker && !m_info.Adobe_transform)
                m_info.out_color_space = JCS_RGB;
#endif
            break;
        case JCS_CMYK:
        case JCS_YCCK:
            // libjpeg can convert YCCK to CMYK, but neither to RGB, so we
            // manually convert CMKY to RGB.
            m_info.out_color_space = JCS_CMYK;
            break;
        default:
            return m_decoder->setFailed();
        }

        m_state = JPEG_START_DECOMPRESS;

        // We can fill in the size now that the header is available.
        if (!m_decoder->setSize(m_info.image_width, m_info.image_height))
            return false;

        // Calculate and set decoded size.
        m_info.scale_num = m_decoder->desiredScaleNumerator();
        m_info.scale_denom = scaleDenominator;
        jpeg_calc_output_dimensions(&m_info);
        m_decoder->setDecodedSize(m_info.output_width, m_info.output_height);

        m_decoder->setOrientation(readImageOrientation(info()));

#if USE(QCMSLIB)
        // Allow color management of the decoded RGBA pixels if possible.
        if (!m_decoder->ignoresGammaAndColorProfile()) {
            ColorProfile colorProfile;
            readColorProfile(info(), colorProfile);
            createColorTransform(colorProfile, colorSpaceHasAlpha(m_info.out_color_space));
#if defined(TURBO_JPEG_RGB_SWIZZLE)
            // Input RGBA data to qcms. Note: restored to BGRA on output.
            if (m_transform && m_info.out_color_space == JCS_EXT_BGRA)
                m_info.out_color_space = JCS_EXT_RGBA;
#endif
        }
#endif

        // Don't allocate a giant and superfluous memory buffer when the
        // image is a sequential JPEG.
        m_info.buffered_image = jpeg_has_multiple_scans(&m_info);

        if (onlySize) {
            // We can stop here. Reduce our buffer length and available data.
            m_bufferLength -= m_info.src->bytes_in_buffer;
            m_info.src->bytes_in_buffer = 0;
            return true;
        }
    // FALL THROUGH

    case JPEG_START_DECOMPRESS:
        // Set parameters for decompression.
        // FIXME -- Should reset dct_method and dither mode for final pass
        // of progressive JPEG.
        m_info.dct_method = dctMethod();
        m_info.dither_mode = ditherMode();
        m_info.do_fancy_upsampling = doFancyUpsampling();
        m_info.enable_2pass_quant = false;
        m_info.do_block_smoothing = true;

        // Make a one-row-high sample array that will go away when done with
        // image. Always make it big enough to hold an RGB row. Since this
        // uses the IJG memory manager, it must be allocated before the call
        // to jpeg_start_compress().
        // FIXME: note that some output color spaces do not need the samples
        // buffer. Remove this allocation for those color spaces.
        m_samples = (*m_info.mem->alloc_sarray)(reinterpret_cast<j_common_ptr>(&m_info), JPOOL_IMAGE, m_info.output_width * 4, 1);

        // Start decompressor.
        if (!jpeg_start_decompress(&m_info))
            return false; // I/O suspension.

        // If this is a progressive JPEG ...
        m_state = (m_info.buffered_image) ? JPEG_DECOMPRESS_PROGRESSIVE : JPEG_DECOMPRESS_SEQUENTIAL;
    // FALL THROUGH

    case JPEG_DECOMPRESS_SEQUENTIAL:
        if (m_state == JPEG_DECOMPRESS_SEQUENTIAL) {
            if (!m_decoder->outputScanlines())
                return false; // I/O suspension.

            // If we've completed image output...
            ASSERT(m_info.output_scanline == m_info.output_height);
            m_state = JPEG_DONE;
        }
    // FALL THROUGH

    case JPEG_DECOMPRESS_PROGRESSIVE:
        if (m_state == JPEG_DECOMPRESS_PROGRESSIVE) {
            int status;
            do {
                status = jpeg_consume_input(&m_info);
            } while ((status != JPEG_SUSPENDED) && (status != JPEG_REACHED_EOI));

            for (;;) {
                if (!m_info.output_scanline) {
                    int scan = m_info.input_scan_number;

                    // If we haven't displayed anything yet
                    // (output_scan_number == 0) and we have enough data for
                    // a complete scan, force output of the last full scan.
                    if (!m_info.output_scan_number && (scan > 1) && (status != JPEG_REACHED_EOI))
                        --scan;

                    if (!jpeg_start_output(&m_info, scan))
                        return false; // I/O suspension.
                }

                if (m_info.output_scanline == 0xffffff)
                    m_info.output_scanline = 0;

                // If outputScanlines() fails, it deletes |this|. Therefore,
                // copy the decoder pointer and use it to check for failure
                // to avoid member access in the failure case.
                JPEGImageDecoder* decoder = m_decoder;
                if (!decoder->outputScanlines()) {
                    if (decoder->failed()) // Careful; |this| is deleted.
                        return false;
                    if (!m_info.output_scanline)
                        // Didn't manage to read any lines - flag so we
                        // don't call jpeg_start_output() multiple times for
                        // the same scan.
                        m_info.output_scanline = 0xffffff;
                    return false; // I/O suspension.
                }

                if (m_info.output_scanline == m_info.output_height) {
                    if (!jpeg_finish_output(&m_info))
                        return false; // I/O suspension.

                    if (jpeg_input_complete(&m_info) && (m_info.input_scan_number == m_info.output_scan_number))
                        break;

                    m_info.output_scanline = 0;
                }
            }

            m_state = JPEG_DONE;
        }
    // FALL THROUGH

    case JPEG_DONE:
        // Finish decompression.
        return jpeg_finish_decompress(&m_info);

    case JPEG_ERROR:
        // We can get here if the constructor failed.
        return m_decoder->setFailed();
    }

    return true;
}
// ============================================================================
// main
// ============================================================================
int main(int argc, char* argv[])
{
    yat::Message * m = 0;

    YAT_LOG_STATIC("Instantiating Task...");
    Consumer * dt = new Consumer(kLO_WATER_MARK, kHI_WATER_MARK);

    YAT_LOG_STATIC("Starting Task...");
    try {
        dt->go(2000);
    } catch (const yat::Exception&) {
        YAT_LOG_STATIC("yat exception caught - could not start task. aborting...");
        dt->exit();
        return 0;
    } catch (...) {
        YAT_LOG_STATIC("unknown exception caught - could not start task. aborting...");
        dt->exit();
        return 0;
    }

    /*
    for (size_t i = 0; i < kNUM_MSGS; i++) {
        try {
            //- post msg to consumer
            dt->post(new yat::Message(kDUMMY_MSG), kPOST_MSG_TMO);
            //- simulate some time consuming activity
            yat::ThreadingUtilities::sleep(0, 100000);
        } catch (const std::bad_alloc&) {
            YAT_LOG_STATIC("std::bad_alloc except. caught - could not post msg#" << i);
        } catch (const yat::Exception&) {
            YAT_LOG_STATIC("tango except. caught - could not post msg#" << i);
        } catch (...) {
            YAT_LOG_STATIC("unknown except. caught - could not post msg#" << i);
        }
    }
    */

    yat::Buffer<double> data(kNUM_MSGS);
    for (size_t i = 0; i < kNUM_MSGS; i++)
        data[i] = 1. * i;
    data.force_length(kNUM_MSGS);

    for (size_t i = 0; i < kNUM_MSGS; i++) {
        try {
            SharedBuffer* sb = new SharedBuffer();
            sb->capacity(i + 1);
            sb->memcpy(data.base(), i + 1);
            std::cout << "SharedBuffer* sb.length = " << sb->length() << std::endl;
            dt->post(kDATA_MSG, sb->duplicate(), false);
            sb->release();
            //- simulate some time consuming activity
            yat::ThreadingUtilities::sleep(0, 100000);
        } catch (const std::bad_alloc&) {
            YAT_LOG_STATIC("std::bad_alloc except. caught - could not post msg#" << i);
        } catch (const yat::Exception&) {
            YAT_LOG_STATIC("tango except. caught - could not post msg#" << i);
        } catch (...) {
            YAT_LOG_STATIC("unknown except. caught - could not post msg#" << i);
        }
    }

    try {
        dt->exit();
    } catch (const yat::Exception&) {
        YAT_LOG_STATIC("tango except. caught - could not stop task. aborting...");
    } catch (...) {
        YAT_LOG_STATIC("unknown except. caught - could not stop task. aborting...");
        return 0;
    }

    return 0;
}
bool decode(const SharedBuffer& data, bool onlySize)
{
    m_decodingSizeOnly = onlySize;

    unsigned newByteCount = data.size() - m_bufferLength;
    unsigned readOffset = m_bufferLength - m_info.src->bytes_in_buffer;
    m_info.src->bytes_in_buffer += newByteCount;
    m_info.src->next_input_byte = (JOCTET*)(data.data()) + readOffset;

    // If we still have bytes to skip, try to skip those now.
    if (m_bytesToSkip)
        skipBytes(m_bytesToSkip);

    m_bufferLength = data.size();

    // We need to do the setjmp here. Otherwise bad things will happen
    if (setjmp(m_err.setjmp_buffer))
        return m_decoder->setFailed();

    switch (m_state) {
    case JPEG_HEADER:
        // Read file parameters with jpeg_read_header().
        if (jpeg_read_header(&m_info, true) == JPEG_SUSPENDED)
            return false; // I/O suspension.

        switch (m_info.jpeg_color_space) {
        case JCS_GRAYSCALE:
        case JCS_RGB:
        case JCS_YCbCr:
            // libjpeg can convert GRAYSCALE and YCbCr image pixels to RGB.
            m_info.out_color_space = rgbOutputColorSpace();
            break;
        case JCS_CMYK:
        case JCS_YCCK:
            // libjpeg can convert YCCK to CMYK, but neither to RGB, so we
            // manually convert CMKY to RGB.
            m_info.out_color_space = JCS_CMYK;
            break;
        default:
            return m_decoder->setFailed();
        }

        // Don't allocate a giant and superfluous memory buffer when the
        // image is a sequential JPEG.
        m_info.buffered_image = jpeg_has_multiple_scans(&m_info);

        // Used to set up image size so arrays can be allocated.
        jpeg_calc_output_dimensions(&m_info);

        // Make a one-row-high sample array that will go away when done with
        // image. Always make it big enough to hold an RGB row. Since this
        // uses the IJG memory manager, it must be allocated before the call
        // to jpeg_start_compress().
        m_samples = (*m_info.mem->alloc_sarray)((j_common_ptr) &m_info, JPOOL_IMAGE, m_info.output_width * 4, 1);

        m_state = JPEG_START_DECOMPRESS;

        // We can fill in the size now that the header is available.
        if (!m_decoder->setSize(m_info.image_width, m_info.image_height))
            return false;

        // Allow color management of the decoded RGBA pixels if possible.
        if (!m_decoder->ignoresGammaAndColorProfile()) {
            ColorProfile rgbInputDeviceColorProfile = readColorProfile(info());
            if (!rgbInputDeviceColorProfile.isEmpty())
                m_decoder->setColorProfile(rgbInputDeviceColorProfile);
        }

        if (m_decodingSizeOnly) {
            // We can stop here. Reduce our buffer length and available data.
            m_bufferLength -= m_info.src->bytes_in_buffer;
            m_info.src->bytes_in_buffer = 0;
            return true;
        }
    // FALL THROUGH

    case JPEG_START_DECOMPRESS:
        // Set parameters for decompression.
        // FIXME -- Should reset dct_method and dither mode for final pass
        // of progressive JPEG.
        m_info.dct_method = JDCT_ISLOW;
        m_info.dither_mode = JDITHER_FS;
        m_info.do_fancy_upsampling = true;
        m_info.enable_2pass_quant = false;
        m_info.do_block_smoothing = true;

        // Start decompressor.
        if (!jpeg_start_decompress(&m_info))
            return false; // I/O suspension.

        // If this is a progressive JPEG ...
        m_state = (m_info.buffered_image) ? JPEG_DECOMPRESS_PROGRESSIVE : JPEG_DECOMPRESS_SEQUENTIAL;
    // FALL THROUGH

    case JPEG_DECOMPRESS_SEQUENTIAL:
        if (m_state == JPEG_DECOMPRESS_SEQUENTIAL) {
            if (!m_decoder->outputScanlines())
                return false; // I/O suspension.

            // If we've completed image output...
            ASSERT(m_info.output_scanline == m_info.output_height);
            m_state = JPEG_DONE;
        }
    // FALL THROUGH

    case JPEG_DECOMPRESS_PROGRESSIVE:
        if (m_state == JPEG_DECOMPRESS_PROGRESSIVE) {
            int status;
            do {
                status = jpeg_consume_input(&m_info);
            } while ((status != JPEG_SUSPENDED) && (status != JPEG_REACHED_EOI));

            for (;;) {
                if (!m_info.output_scanline) {
                    int scan = m_info.input_scan_number;

                    // If we haven't displayed anything yet
                    // (output_scan_number == 0) and we have enough data for
                    // a complete scan, force output of the last full scan.
                    if (!m_info.output_scan_number && (scan > 1) && (status != JPEG_REACHED_EOI))
                        --scan;

                    if (!jpeg_start_output(&m_info, scan))
                        return false; // I/O suspension.
                }

                if (m_info.output_scanline == 0xffffff)
                    m_info.output_scanline = 0;

                if (!m_decoder->outputScanlines()) {
                    if (!m_info.output_scanline)
                        // Didn't manage to read any lines - flag so we
                        // don't call jpeg_start_output() multiple times for
                        // the same scan.
                        m_info.output_scanline = 0xffffff;
                    return false; // I/O suspension.
                }

                if (m_info.output_scanline == m_info.output_height) {
                    if (!jpeg_finish_output(&m_info))
                        return false; // I/O suspension.

                    if (jpeg_input_complete(&m_info) && (m_info.input_scan_number == m_info.output_scan_number))
                        break;

                    m_info.output_scanline = 0;
                }
            }

            m_state = JPEG_DONE;
        }
    // FALL THROUGH

    case JPEG_DONE:
        // Finish decompression.
        return jpeg_finish_decompress(&m_info);

    case JPEG_ERROR:
        // We can get here if the constructor failed.
        return m_decoder->setFailed();
    }

    return true;
}
RetainPtr<CFDictionaryRef> LegacyWebArchive::createPropertyListRepresentation(ArchiveResource* resource, MainResourceStatus isMainResource)
{
    if (!resource) {
        // The property list representation of a null/empty WebResource has the following 3 objects stored as nil.
        // FIXME: 0 is not serializable. Presumably we need to use kCFNull here instead for compatibility.
        // FIXME: But why do we need to support a resource of 0? Who relies on that?
        RetainPtr<CFMutableDictionaryRef> propertyList(AdoptCF, CFDictionaryCreateMutable(0, 3, 0, 0));
        CFDictionarySetValue(propertyList.get(), LegacyWebArchiveResourceDataKey, 0);
        CFDictionarySetValue(propertyList.get(), LegacyWebArchiveResourceURLKey, 0);
        CFDictionarySetValue(propertyList.get(), LegacyWebArchiveResourceMIMETypeKey, 0);
        return propertyList;
    }

    RetainPtr<CFMutableDictionaryRef> propertyList(AdoptCF, CFDictionaryCreateMutable(0, 6, 0, &kCFTypeDictionaryValueCallBacks));

    // Resource data can be empty, but must be represented by an empty CFDataRef
    SharedBuffer* data = resource->data();
    RetainPtr<CFDataRef> cfData;
    if (data)
        cfData.adoptCF(data->createCFData());
    else
        cfData.adoptCF(CFDataCreate(0, 0, 0));
    CFDictionarySetValue(propertyList.get(), LegacyWebArchiveResourceDataKey, cfData.get());

    // Resource URL cannot be null
    RetainPtr<CFStringRef> cfURL(AdoptCF, resource->url().string().createCFString());
    if (cfURL)
        CFDictionarySetValue(propertyList.get(), LegacyWebArchiveResourceURLKey, cfURL.get());
    else {
        LOG(Archives, "LegacyWebArchive - NULL resource URL is invalid - returning null property list");
        return 0;
    }

    // FrameName should be left out if empty for subresources, but always included for main resources
    const String& frameName(resource->frameName());
    if (!frameName.isEmpty() || isMainResource) {
        RetainPtr<CFStringRef> cfFrameName(AdoptCF, frameName.createCFString());
        CFDictionarySetValue(propertyList.get(), LegacyWebArchiveResourceFrameNameKey, cfFrameName.get());
    }

    // Set MIMEType, TextEncodingName, and ResourceResponse only if they actually exist
    const String& mimeType(resource->mimeType());
    if (!mimeType.isEmpty()) {
        RetainPtr<CFStringRef> cfMIMEType(AdoptCF, mimeType.createCFString());
        CFDictionarySetValue(propertyList.get(), LegacyWebArchiveResourceMIMETypeKey, cfMIMEType.get());
    }

    const String& textEncoding(resource->textEncoding());
    if (!textEncoding.isEmpty()) {
        RetainPtr<CFStringRef> cfTextEncoding(AdoptCF, textEncoding.createCFString());
        CFDictionarySetValue(propertyList.get(), LegacyWebArchiveResourceTextEncodingNameKey, cfTextEncoding.get());
    }

    // Don't include the resource response for the main resource
    if (!isMainResource) {
        RetainPtr<CFDataRef> resourceResponseData = createPropertyListRepresentation(resource->response());
        if (resourceResponseData)
            CFDictionarySetValue(propertyList.get(), LegacyWebArchiveResourceResponseKey, resourceResponseData.get());
    }

    return propertyList;
}