ImageData* ImageData::create(unsigned width, unsigned height, ExceptionState& exceptionState)
{
    if (!width || !height) {
        exceptionState.throwDOMException(IndexSizeError, String::format("The source %s is zero or not a number.", width ? "height" : "width"));
        return nullptr;
    }

    Checked<unsigned, RecordOverflow> dataSize = 4;
    dataSize *= width;
    dataSize *= height;
    if (dataSize.hasOverflowed() || static_cast<int>(width) < 0 || static_cast<int>(height) < 0) {
        exceptionState.throwDOMException(IndexSizeError, "The requested image size exceeds the supported range.");
        return nullptr;
    }

    RefPtr<DOMUint8ClampedArray> byteArray = DOMUint8ClampedArray::createOrNull(dataSize.unsafeGet());
    if (!byteArray) {
        exceptionState.throwDOMException(V8GeneralError, "Out of memory at ImageData creation");
        return nullptr;
    }

    return new ImageData(IntSize(width, height), byteArray.release());
}
JSValue JSStringJoiner::join(ExecState* exec)
{
    if (!m_isValid)
        return throwOutOfMemoryError(exec);

    if (!m_strings.size())
        return jsEmptyString(exec);

    Checked<size_t, RecordOverflow> separatorLength = m_separator.length();
    // FIXME: add special cases of joinStrings() for (separatorLength == 0) and (separatorLength == 1).
    ASSERT(m_strings.size() > 0);
    Checked<size_t, RecordOverflow> totalSeparatorsLength = separatorLength * (m_strings.size() - 1);
    Checked<size_t, RecordOverflow> outputStringSize = totalSeparatorsLength + m_accumulatedStringsLength;
    size_t finalSize;
    if (outputStringSize.safeGet(finalSize) == CheckedState::DidOverflow)
        return throwOutOfMemoryError(exec);

    if (!outputStringSize)
        return jsEmptyString(exec);

    RefPtr<StringImpl> outputStringImpl;
    if (m_is8Bits)
        outputStringImpl = joinStrings<LChar>(m_strings, m_separator, finalSize);
    else
        outputStringImpl = joinStrings<UChar>(m_strings, m_separator, finalSize);

    if (!outputStringImpl)
        return throwOutOfMemoryError(exec);

    return JSString::create(exec->vm(), outputStringImpl.release());
}
String Text::wholeText() const
{
    const Text* startText = earliestLogicallyAdjacentTextNode(this);
    const Text* endText = latestLogicallyAdjacentTextNode(this);

    Node* onePastEndText = endText->nextSibling();

    Checked<unsigned> resultLength = 0;
    for (const Node* n = startText; n != onePastEndText; n = n->nextSibling()) {
        if (!n->isTextNode())
            continue;
        const Text* t = static_cast<const Text*>(n);
        const String& data = t->data();
        resultLength += data.length();
    }
    StringBuilder result;
    result.reserveCapacity(resultLength.unsafeGet());
    for (const Node* n = startText; n != onePastEndText; n = n->nextSibling()) {
        if (!n->isTextNode())
            continue;
        const Text* t = static_cast<const Text*>(n);
        result.append(t->data());
    }
    ASSERT(result.length() == resultLength.unsafeGet());

    return result.toString();
}
TryMallocReturnValue tryFastCalloc(size_t numElements, size_t elementSize)
{
    Checked<size_t, RecordOverflow> checkedSize = elementSize;
    checkedSize *= numElements;
    if (checkedSize.hasOverflowed())
        return nullptr;
    return tryFastZeroedMalloc(checkedSize.unsafeGet());
}
void* fastCalloc(size_t numElements, size_t elementSize)
{
    Checked<size_t> checkedSize = elementSize;
    checkedSize *= numElements;
    void* result = fastZeroedMalloc(checkedSize.unsafeGet());
    if (!result)
        CRASH();
    return result;
}
PassRefPtr<ImageData> ImageData::create(const IntSize& size)
{
    Checked<int, RecordOverflow> dataSize = 4;
    dataSize *= size.width();
    dataSize *= size.height();
    if (dataSize.hasOverflowed())
        return 0;

    return adoptRef(new ImageData(size));
}
void* fastCalloc(size_t numElements, size_t elementSize)
{
    ASSERT_IS_WITHIN_LIMIT(numElements * elementSize);
    Checked<size_t> checkedSize = elementSize;
    checkedSize *= numElements;
    void* result = fastZeroedMalloc(checkedSize.unsafeGet());
    if (!result)
        CRASH();
    return result;
}
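The fastCalloc / tryFastCalloc variants above all route the element-count multiplication through Checked<size_t> so a wrapped product can never reach the allocator. A minimal standalone sketch of the same pattern, using the GCC/Clang overflow builtin instead of WTF's Checked<> (the name tryCheckedCalloc and the use of __builtin_mul_overflow are illustrative assumptions, not WebKit code):

#include <cstddef>
#include <cstdlib>

// Sketch only: mirrors tryFastCalloc() above, detecting numElements * elementSize
// overflow before any memory is requested.
void* tryCheckedCalloc(size_t numElements, size_t elementSize)
{
    size_t totalSize = 0;
    if (__builtin_mul_overflow(numElements, elementSize, &totalSize))
        return nullptr; // Product wrapped around size_t: fail instead of under-allocating.
    return std::calloc(1, totalSize); // calloc zero-fills, like tryFastZeroedMalloc().
}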
PassRefPtr<DataView> DataView::create(PassRefPtr<ArrayBuffer> buffer, unsigned byteOffset, unsigned byteLength)
{
    if (byteOffset > buffer->byteLength())
        return 0;
    Checked<uint32_t, RecordOverflow> checkedOffset(byteOffset);
    Checked<uint32_t, RecordOverflow> checkedLength(byteLength);
    Checked<uint32_t, RecordOverflow> checkedMax = checkedOffset + checkedLength;
    if (checkedMax.hasOverflowed() || checkedMax.unsafeGet() > buffer->byteLength())
        return 0;
    return adoptRef(new DataView(buffer, byteOffset, byteLength));
}
PassRefPtr<ImageData> ImageData::create(const IntSize& size, PassRefPtr<Uint8ClampedArray> byteArray)
{
    Checked<int, RecordOverflow> dataSize = 4;
    dataSize *= size.width();
    dataSize *= size.height();
    if (dataSize.hasOverflowed())
        return 0;

    if (dataSize.unsafeGet() < 0 || static_cast<unsigned>(dataSize.unsafeGet()) > byteArray->length())
        return 0;

    return adoptRef(new ImageData(size, byteArray));
}
ImageData* ImageData::create(const IntSize& size)
{
    Checked<int, RecordOverflow> dataSize = 4;
    dataSize *= size.width();
    dataSize *= size.height();
    if (dataSize.hasOverflowed() || dataSize.unsafeGet() < 0)
        return nullptr;

    RefPtr<DOMUint8ClampedArray> byteArray = DOMUint8ClampedArray::createOrNull(dataSize.unsafeGet());
    if (!byteArray)
        return nullptr;

    return new ImageData(size, byteArray.release());
}
static void* TryMmap(size_t size, size_t *actual_size, size_t alignment) {
  // Enforce page alignment
  if (pagesize == 0) pagesize = getpagesize();
  if (alignment < pagesize) alignment = pagesize;
  size = ((size + alignment - 1) / alignment) * alignment;

  // could theoretically return the "extra" bytes here, but this
  // is simple and correct.
  if (actual_size) *actual_size = size;

  // Ask for extra memory if alignment > pagesize
  size_t extra = 0;
  if (alignment > pagesize) {
    extra = alignment - pagesize;
  }
  Checked<size_t> mapSize = Checked<size_t>(size) + extra + 2 * pagesize;
  void* result = mmap(NULL, mapSize.unsafeGet(),
                      PROT_READ | PROT_WRITE,
                      MAP_PRIVATE|MAP_ANONYMOUS,
                      VM_TAG_FOR_TCMALLOC_MEMORY, 0);
  if (result == reinterpret_cast<void*>(MAP_FAILED)) {
    mmap_failure = true;
    return NULL;
  }
  mmap(result, pagesize, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON,
       VM_TAG_FOR_TCMALLOC_MEMORY, 0);
  mmap(static_cast<char*>(result) + (mapSize - pagesize).unsafeGet(), pagesize,
       PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANON,
       VM_TAG_FOR_TCMALLOC_MEMORY, 0);
  result = static_cast<char*>(result) + pagesize;

  // Adjust the return memory so it is aligned
  uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
  size_t adjust = 0;
  if ((ptr & (alignment - 1)) != 0) {
    adjust = alignment - (ptr & (alignment - 1));
  }

  // Return the unused memory to the system
  if (adjust > 0) {
    munmap(reinterpret_cast<void*>(ptr), adjust);
  }
  if (adjust < extra) {
    munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);
  }

  ptr += adjust;
  return reinterpret_cast<void*>(ptr);
}
static bool validateOffsetCount(unsigned offset, unsigned count, unsigned length, unsigned& realCount, ExceptionState& exceptionState)
{
    if (offset > length) {
        exceptionState.throwDOMException(IndexSizeError, "The offset " + String::number(offset) + " is greater than the node's length (" + String::number(length) + ").");
        return false;
    }
    Checked<unsigned, RecordOverflow> offsetCount = offset;
    offsetCount += count;
    if (offsetCount.hasOverflowed() || offset + count > length)
        realCount = length - offset;
    else
        realCount = count;
    return true;
}
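A small self-contained check of the clamping behaviour in validateOffsetCount() above, with made-up values; plain unsigned arithmetic stands in for Checked<unsigned, RecordOverflow>:

#include <cassert>
#include <climits>

int main()
{
    unsigned offset = 5, count = UINT_MAX, length = 10;

    // offset + count wraps around to 4, so a naive "offset + count > length" test
    // would not clamp; detecting the overflow forces realCount down to length - offset.
    bool overflowed = count > UINT_MAX - offset;
    unsigned realCount = (overflowed || offset + count > length) ? length - offset : count;

    assert(overflowed);
    assert(realCount == 5);
    return 0;
}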
PassRefPtr<ImageData> ImageData::create(unsigned sw, unsigned sh, ExceptionCode& ec)
{
    if (!sw || !sh) {
        ec = INDEX_SIZE_ERR;
        return nullptr;
    }

    Checked<int, RecordOverflow> dataSize = 4;
    dataSize *= sw;
    dataSize *= sh;
    if (dataSize.hasOverflowed()) {
        ec = TypeError;
        return nullptr;
    }

    IntSize size(sw, sh);
    RefPtr<ImageData> data = adoptRef(new ImageData(size));
    data->data()->zeroFill();
    return data.release();
}
PassRefPtrWillBeRawPtr<ImageData> ImageData::create(unsigned width, unsigned height, ExceptionState& exceptionState)
{
    if (!RuntimeEnabledFeatures::imageDataConstructorEnabled()) {
        exceptionState.throwTypeError("Illegal constructor");
        return nullptr;
    }
    if (!width || !height) {
        exceptionState.throwDOMException(IndexSizeError, String::format("The source %s is zero or not a number.", width ? "height" : "width"));
        return nullptr;
    }

    Checked<unsigned, RecordOverflow> dataSize = 4;
    dataSize *= width;
    dataSize *= height;
    if (dataSize.hasOverflowed()) {
        exceptionState.throwDOMException(IndexSizeError, "The requested image size exceeds the supported range.");
        return nullptr;
    }

    return adoptRefWillBeNoop(new ImageData(IntSize(width, height)));
}
bool ImageBuffer::getImageData(Multiply multiplied, const IntRect& rect, WTF::ArrayBufferContents& contents) const
{
    Checked<int, RecordOverflow> dataSize = 4;
    dataSize *= rect.width();
    dataSize *= rect.height();
    if (dataSize.hasOverflowed())
        return false;

    if (!isSurfaceValid()) {
        WTF::ArrayBufferContents result(rect.width() * rect.height(), 4, WTF::ArrayBufferContents::NotShared, WTF::ArrayBufferContents::ZeroInitialize);
        result.transfer(contents);
        return true;
    }

    ASSERT(canvas());
    RefPtr<SkImage> snapshot = m_surface->newImageSnapshot(PreferNoAcceleration);
    if (!snapshot)
        return false;

    const bool mayHaveStrayArea =
        m_surface->isAccelerated() // GPU readback may fail silently
        || rect.x() < 0
        || rect.y() < 0
        || rect.maxX() > m_surface->size().width()
        || rect.maxY() > m_surface->size().height();
    WTF::ArrayBufferContents result(
        rect.width() * rect.height(), 4,
        WTF::ArrayBufferContents::NotShared,
        mayHaveStrayArea ? WTF::ArrayBufferContents::ZeroInitialize : WTF::ArrayBufferContents::DontInitialize);

    SkAlphaType alphaType = (multiplied == Premultiplied) ? kPremul_SkAlphaType : kUnpremul_SkAlphaType;
    SkImageInfo info = SkImageInfo::Make(rect.width(), rect.height(), kRGBA_8888_SkColorType, alphaType);

    snapshot->readPixels(info, result.data(), 4 * rect.width(), rect.x(), rect.y());
    result.transfer(contents);
    return true;
}
PassRefPtr<Uint8ClampedArray> DrawingBuffer::paintRenderingResultsToImageData(int& width, int& height)
{
    if (m_attributes.premultipliedAlpha)
        return nullptr;

    width = size().width();
    height = size().height();

    Checked<int, RecordOverflow> dataSize = 4;
    dataSize *= width;
    dataSize *= height;
    if (dataSize.hasOverflowed())
        return nullptr;

    RefPtr<Uint8ClampedArray> pixels = Uint8ClampedArray::createUninitialized(width * height * 4);

    m_context->bindFramebuffer(GL_FRAMEBUFFER, framebuffer());
    readBackFramebuffer(pixels->data(), width, height, ReadbackRGBA, WebGLImageConversion::AlphaDoNothing);
    flipVertically(pixels->data(), width, height);

    return pixels.release();
}
void HTMLCanvasElement::updateExternallyAllocatedMemory() const
{
    int bufferCount = 0;
    if (m_imageBuffer)
        bufferCount++;
    if (is3D())
        bufferCount += 2;
    if (m_copiedImage)
        bufferCount++;
    if (m_presentedImage)
        bufferCount++;

    Checked<intptr_t, RecordOverflow> checkedExternallyAllocatedMemory = 4 * bufferCount;
    checkedExternallyAllocatedMemory *= width();
    checkedExternallyAllocatedMemory *= height();
    intptr_t externallyAllocatedMemory;
    if (checkedExternallyAllocatedMemory.safeGet(externallyAllocatedMemory) == CheckedState::DidOverflow)
        externallyAllocatedMemory = std::numeric_limits<intptr_t>::max();

    // Subtracting two intptr_t that are known to be positive will never underflow.
    v8::Isolate::GetCurrent()->AdjustAmountOfExternalAllocatedMemory(externallyAllocatedMemory - m_externallyAllocatedMemory);
    m_externallyAllocatedMemory = externallyAllocatedMemory;
}
GC3Denum GraphicsContext3D::computeImageSizeInBytes(GC3Denum format, GC3Denum type, GC3Dsizei width, GC3Dsizei height, GC3Dint alignment, unsigned int* imageSizeInBytes, unsigned int* paddingInBytes)
{
    ASSERT(imageSizeInBytes);
    ASSERT(alignment == 1 || alignment == 2 || alignment == 4 || alignment == 8);
    if (width < 0 || height < 0)
        return GraphicsContext3D::INVALID_VALUE;
    unsigned int bytesPerComponent, componentsPerPixel;
    if (!computeFormatAndTypeParameters(format, type, &bytesPerComponent, &componentsPerPixel))
        return GraphicsContext3D::INVALID_ENUM;
    if (!width || !height) {
        *imageSizeInBytes = 0;
        if (paddingInBytes)
            *paddingInBytes = 0;
        return GraphicsContext3D::NO_ERROR;
    }
    Checked<uint32_t, RecordOverflow> checkedValue = bytesPerComponent * componentsPerPixel;
    checkedValue *= width;
    if (checkedValue.hasOverflowed())
        return GraphicsContext3D::INVALID_VALUE;
    unsigned int validRowSize = checkedValue.unsafeGet();
    unsigned int padding = 0;
    unsigned int residual = validRowSize % alignment;
    if (residual) {
        padding = alignment - residual;
        checkedValue += padding;
    }
    // Last row needs no padding.
    checkedValue *= (height - 1);
    checkedValue += validRowSize;
    if (checkedValue.hasOverflowed())
        return GraphicsContext3D::INVALID_VALUE;
    *imageSizeInBytes = checkedValue.unsafeGet();
    if (paddingInBytes)
        *paddingInBytes = padding;
    return GraphicsContext3D::NO_ERROR;
}
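To make the row-padding arithmetic in computeImageSizeInBytes() concrete, here is a small worked example; the 5x3 GL_RGB / GL_UNSIGNED_BYTE upload with an unpack alignment of 4 is an illustrative assumption, not taken from the source:

#include <cassert>

int main()
{
    unsigned bytesPerComponent = 1, componentsPerPixel = 3; // GL_UNSIGNED_BYTE, GL_RGB
    unsigned width = 5, height = 3, alignment = 4;

    unsigned validRowSize = bytesPerComponent * componentsPerPixel * width;      // 15 bytes
    unsigned residual = validRowSize % alignment;                                // 3
    unsigned padding = residual ? alignment - residual : 0;                      // pad rows to 16 bytes
    unsigned imageSize = (validRowSize + padding) * (height - 1) + validRowSize; // last row needs no padding

    assert(padding == 1);
    assert(imageSize == 47); // 16 + 16 + 15
    return 0;
}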
ImageBuffer::ImageBuffer(const IntSize& size, ColorSpace imageColorSpace, RenderingMode renderingMode, bool& success)
    : m_data(size)
    , m_size(size)
    , m_accelerateRendering(renderingMode == Accelerated)
{
    success = false; // Make early return mean failure.
    if (size.width() <= 0 || size.height() <= 0)
        return;

    Checked<int, RecordOverflow> width = size.width();
    Checked<int, RecordOverflow> height = size.height();

    // Prevent integer overflows
    m_data.m_bytesPerRow = 4 * width;
    Checked<size_t, RecordOverflow> dataSize = height * m_data.m_bytesPerRow;
    if (dataSize.hasOverflowed())
        return;

#if USE(IOSURFACE_CANVAS_BACKING_STORE)
    if (width.unsafeGet() >= maxIOSurfaceDimension || height.unsafeGet() >= maxIOSurfaceDimension || (width * height).unsafeGet() < minIOSurfaceArea)
        m_accelerateRendering = false;
#else
    ASSERT(renderingMode == Unaccelerated);
#endif

    switch (imageColorSpace) {
    case ColorSpaceDeviceRGB:
        m_data.m_colorSpace = deviceRGBColorSpaceRef();
        break;
    case ColorSpaceSRGB:
        m_data.m_colorSpace = sRGBColorSpaceRef();
        break;
    case ColorSpaceLinearRGB:
        m_data.m_colorSpace = linearRGBColorSpaceRef();
        break;
    }

    RetainPtr<CGContextRef> cgContext;
    if (m_accelerateRendering) {
#if USE(IOSURFACE_CANVAS_BACKING_STORE)
        m_data.m_surface = createIOSurface(size);
        cgContext.adoptCF(wkIOSurfaceContextCreate(m_data.m_surface.get(), width.unsafeGet(), height.unsafeGet(), m_data.m_colorSpace));
#endif
        if (!cgContext)
            m_accelerateRendering = false; // If allocation fails, fall back to non-accelerated path.
    }

    if (!m_accelerateRendering) {
        if (!tryFastCalloc(height.unsafeGet(), m_data.m_bytesPerRow.unsafeGet()).getValue(m_data.m_data))
            return;
        ASSERT(!(reinterpret_cast<size_t>(m_data.m_data) & 2));

#if USE_ARGB32
        m_data.m_bitmapInfo = kCGImageAlphaPremultipliedFirst | kCGBitmapByteOrder32Host;
#else
        m_data.m_bitmapInfo = kCGImageAlphaPremultipliedLast;
#endif
        cgContext.adoptCF(CGBitmapContextCreate(m_data.m_data, width.unsafeGet(), height.unsafeGet(), 8, m_data.m_bytesPerRow.unsafeGet(), m_data.m_colorSpace, m_data.m_bitmapInfo));
        // Create a live image that wraps the data.
        m_data.m_dataProvider.adoptCF(CGDataProviderCreateWithData(0, m_data.m_data, dataSize.unsafeGet(), releaseImageData));
    }

    if (!cgContext)
        return;

    m_context = adoptPtr(new GraphicsContext(cgContext.get()));
    m_context->scale(FloatSize(1, -1));
    m_context->translate(0, -height.unsafeGet());
    success = true;
}
ImageBuffer::ImageBuffer(const IntSize& size, ColorSpace imageColorSpace, RenderingMode renderingMode, DeferralMode, bool& success)
    : m_data(size) // NOTE: The input here isn't important as ImageBufferDataCG's constructor just ignores it.
    , m_size(size)
{
    success = false; // Make early return mean failure.
    bool accelerateRendering = renderingMode == Accelerated;
    if (size.width() <= 0 || size.height() <= 0)
        return;

    Checked<int, RecordOverflow> width = size.width();
    Checked<int, RecordOverflow> height = size.height();

    // Prevent integer overflows
    m_data.m_bytesPerRow = 4 * width;
    Checked<size_t, RecordOverflow> numBytes = height * m_data.m_bytesPerRow;
    if (numBytes.hasOverflowed())
        return;

#if USE(IOSURFACE_CANVAS_BACKING_STORE)
    if (width.unsafeGet() >= maxIOSurfaceDimension || height.unsafeGet() >= maxIOSurfaceDimension || (width * height).unsafeGet() < minIOSurfaceArea)
        accelerateRendering = false;
#else
    ASSERT(renderingMode == Unaccelerated);
#endif

    switch (imageColorSpace) {
    case ColorSpaceDeviceRGB:
        m_data.m_colorSpace = deviceRGBColorSpaceRef();
        break;
    case ColorSpaceSRGB:
        m_data.m_colorSpace = sRGBColorSpaceRef();
        break;
    case ColorSpaceLinearRGB:
        m_data.m_colorSpace = linearRGBColorSpaceRef();
        break;
    }

    RetainPtr<CGContextRef> cgContext;
    if (accelerateRendering) {
#if USE(IOSURFACE_CANVAS_BACKING_STORE)
        m_data.m_surface = createIOSurface(size);
        cgContext.adoptCF(wkIOSurfaceContextCreate(m_data.m_surface.get(), width.unsafeGet(), height.unsafeGet(), m_data.m_colorSpace));
#endif
        if (!cgContext)
            accelerateRendering = false; // If allocation fails, fall back to non-accelerated path.
    }

    if (!accelerateRendering) {
        if (!tryFastCalloc(height.unsafeGet(), m_data.m_bytesPerRow.unsafeGet()).getValue(m_data.m_data))
            return;
        ASSERT(!(reinterpret_cast<size_t>(m_data.m_data) & 2));

        m_data.m_bitmapInfo = kCGImageAlphaPremultipliedLast;
        cgContext.adoptCF(CGBitmapContextCreate(m_data.m_data, width.unsafeGet(), height.unsafeGet(), 8, m_data.m_bytesPerRow.unsafeGet(), m_data.m_colorSpace, m_data.m_bitmapInfo));
        // Create a live image that wraps the data.
        m_data.m_dataProvider.adoptCF(CGDataProviderCreateWithData(0, m_data.m_data, numBytes.unsafeGet(), releaseImageData));
    }

    if (!cgContext)
        return;

    m_context = adoptPtr(new GraphicsContext(cgContext.get()));
    m_context->scale(FloatSize(1, -1));
    m_context->translate(0, -height.unsafeGet());
    m_context->setIsAcceleratedContext(accelerateRendering);
#if defined(BUILDING_ON_LION)
    m_data.m_lastFlushTime = currentTimeMS();
#endif
    success = true;
}
ImageBuffer::ImageBuffer(const FloatSize& size, float resolutionScale, ColorSpace imageColorSpace, RenderingMode renderingMode, bool& success)
    : m_logicalSize(size)
    , m_resolutionScale(resolutionScale)
{
    float scaledWidth = ceilf(resolutionScale * size.width());
    float scaledHeight = ceilf(resolutionScale * size.height());

    // FIXME: Should we automatically use a lower resolution?
    if (!FloatSize(scaledWidth, scaledHeight).isExpressibleAsIntSize())
        return;

    m_size = IntSize(scaledWidth, scaledHeight);
    m_data.backingStoreSize = m_size;

    success = false; // Make early return mean failure.
    bool accelerateRendering = renderingMode == Accelerated;
    if (m_size.width() <= 0 || m_size.height() <= 0)
        return;

#if USE(IOSURFACE_CANVAS_BACKING_STORE)
    Checked<int, RecordOverflow> width = m_size.width();
    Checked<int, RecordOverflow> height = m_size.height();
#endif

    // Prevent integer overflows
    m_data.bytesPerRow = 4 * Checked<unsigned, RecordOverflow>(m_data.backingStoreSize.width());
    Checked<size_t, RecordOverflow> numBytes = Checked<unsigned, RecordOverflow>(m_data.backingStoreSize.height()) * m_data.bytesPerRow;
    if (numBytes.hasOverflowed())
        return;

#if USE(IOSURFACE_CANVAS_BACKING_STORE)
    IntSize maxSize = IOSurface::maximumSize();
    if (width.unsafeGet() > maxSize.width() || height.unsafeGet() > maxSize.height())
        accelerateRendering = false;
#else
    ASSERT(renderingMode == Unaccelerated);
#endif

    m_data.colorSpace = cachedCGColorSpace(imageColorSpace);

    RetainPtr<CGContextRef> cgContext;
    if (accelerateRendering) {
#if USE(IOSURFACE_CANVAS_BACKING_STORE)
        FloatSize userBounds = scaleSizeToUserSpace(FloatSize(width.unsafeGet(), height.unsafeGet()), m_data.backingStoreSize, m_size);
        m_data.surface = IOSurface::create(m_data.backingStoreSize, IntSize(userBounds), imageColorSpace);
        cgContext = m_data.surface->ensurePlatformContext();
        if (cgContext)
            CGContextClearRect(cgContext.get(), FloatRect(FloatPoint(), userBounds));
#endif
        if (!cgContext)
            accelerateRendering = false; // If allocation fails, fall back to non-accelerated path.
    }

    if (!accelerateRendering) {
        if (!tryFastCalloc(m_data.backingStoreSize.height(), m_data.bytesPerRow.unsafeGet()).getValue(m_data.data))
            return;
        ASSERT(!(reinterpret_cast<intptr_t>(m_data.data) & 3));

#if USE_ARGB32
        m_data.bitmapInfo = kCGImageAlphaPremultipliedFirst | kCGBitmapByteOrder32Host;
#else
        m_data.bitmapInfo = kCGImageAlphaPremultipliedLast;
#endif
        cgContext = adoptCF(CGBitmapContextCreate(m_data.data, m_data.backingStoreSize.width(), m_data.backingStoreSize.height(), 8, m_data.bytesPerRow.unsafeGet(), m_data.colorSpace, m_data.bitmapInfo));
        // Create a live image that wraps the data.
        m_data.dataProvider = adoptCF(CGDataProviderCreateWithData(0, m_data.data, numBytes.unsafeGet(), releaseImageData));

        if (!cgContext)
            return;

        m_data.context = std::make_unique<GraphicsContext>(cgContext.get());
    }

    context().scale(FloatSize(1, -1));
    context().translate(0, -m_data.backingStoreSize.height());
    context().applyDeviceScaleFactor(m_resolutionScale);

    success = true;
}