// Maps a WebCore ColorSpace value onto the matching cached CG color space.
static inline CGColorSpaceRef cachedCGColorSpace(ColorSpace colorSpace)
{
    if (colorSpace == DeviceColorSpace)
        return deviceRGBColorSpaceRef();
    if (colorSpace == sRGBColorSpace)
        return sRGBColorSpaceRef();

    // Unknown enumerator: assert in debug, fall back to device RGB in release.
    ASSERT_NOT_REACHED();
    return deviceRGBColorSpaceRef();
}
// Returns a color equivalent to |sourceColor| expressed in the device RGB
// color space. The result is returned with a +1 retain count (caller adopts).
static CGColorRef createCGColorWithDeviceRGBA(CGColorRef sourceColor)
{
    // A null source, or one already in device RGB, is returned as-is.
    // (CGColorRetain is documented to be null-safe.)
    if (!sourceColor || CFEqual(CGColorGetColorSpace(sourceColor), deviceRGBColorSpaceRef()))
        return CGColorRetain(sourceColor);

    RetainPtr<CGColorTransformRef> colorTransform = adoptCF(CGColorTransformCreate(deviceRGBColorSpaceRef(), nullptr));
    // If the transform cannot be created, fall back to the untransformed color.
    if (!colorTransform)
        return CGColorRetain(sourceColor);

    // CGColorTransformConvertColor() returns a +1 retained object.
    return CGColorTransformConvertColor(colorTransform.get(), sourceColor, kCGRenderingIntentDefault);
}
// FIXME: Is it possible to merge getWindowsContext and createWindowsBitmap into a single API // suitable for all clients? void GraphicsContext::releaseWindowsContext(HDC hdc, const IntRect& dstRect, bool supportAlphaBlend, bool mayCreateBitmap) { bool createdBitmap = mayCreateBitmap && (!m_data->m_hdc || isInTransparencyLayer()); if (!createdBitmap) { m_data->restore(); return; } if (dstRect.isEmpty()) return; OwnPtr<HBITMAP> bitmap = adoptPtr(static_cast<HBITMAP>(GetCurrentObject(hdc, OBJ_BITMAP))); DIBPixelData pixelData(bitmap.get()); ASSERT(pixelData.bitsPerPixel() == 32); CGContextRef bitmapContext = CGBitmapContextCreate(pixelData.buffer(), pixelData.size().width(), pixelData.size().height(), 8, pixelData.bytesPerRow(), deviceRGBColorSpaceRef(), kCGBitmapByteOrder32Little | (supportAlphaBlend ? kCGImageAlphaPremultipliedFirst : kCGImageAlphaNoneSkipFirst)); CGImageRef image = CGBitmapContextCreateImage(bitmapContext); CGContextDrawImage(m_data->m_cgContext.get(), dstRect, image); // Delete all our junk. CGImageRelease(image); CGContextRelease(bitmapContext); ::DeleteDC(hdc); }
// Creates a CG bitmap context that draws directly into the DIB currently
// selected into |hdc|. Returns 0 if the bitmap is not 32bpp. The returned
// context follows the Create Rule: the caller must release it.
static CGContextRef CGContextWithHDC(HDC hdc, bool hasAlpha)
{
    HBITMAP bitmap = static_cast<HBITMAP>(GetCurrentObject(hdc, OBJ_BITMAP));

    DIBPixelData pixelData(bitmap);

    // FIXME: We can get here because we asked for a bitmap that is too big
    // when we have a tiled layer and we're compositing. In that case
    // bmBitsPixel will be 0. This seems to be benign, so for now we will
    // exit gracefully and look at it later:
    // https://bugs.webkit.org/show_bug.cgi?id=52041
    // ASSERT(bitmapBits.bitsPerPixel() == 32);
    if (pixelData.bitsPerPixel() != 32)
        return 0;

    CGBitmapInfo bitmapInfo = kCGBitmapByteOrder32Little | (hasAlpha ? kCGImageAlphaPremultipliedFirst : kCGImageAlphaNoneSkipFirst);
    CGContextRef context = CGBitmapContextCreate(pixelData.buffer(), pixelData.size().width(), pixelData.size().height(), 8,
        pixelData.bytesPerRow(), deviceRGBColorSpaceRef(), bitmapInfo);

    // Flip coords: CG's origin is bottom-left, GDI's is top-left.
    CGContextTranslateCTM(context, 0, pixelData.size().height());
    CGContextScaleCTM(context, 1, -1);

    // Put the HDC In advanced mode so it will honor affine transforms.
    SetGraphicsMode(hdc, GM_ADVANCED);

    return context;
}
// Lazily builds (and caches) the CGGradient for this gradient's stops.
CGGradientRef Gradient::platformGradient()
{
    if (m_gradient)
        return m_gradient;

    sortStopsIfNecessary();

    // Flatten the stops into the parallel component/location arrays that
    // CGGradientCreateWithColorComponents() expects.
    const int cReservedStops = 3;
    Vector<CGFloat, 4 * cReservedStops> colorComponents;
    Vector<CGFloat, cReservedStops> locations;
    size_t stopCount = m_stops.size();
    colorComponents.reserveInitialCapacity(stopCount * 4); // RGBA components per stop
    locations.reserveInitialCapacity(stopCount);

    for (size_t stopIndex = 0; stopIndex < stopCount; ++stopIndex) {
        colorComponents.uncheckedAppend(m_stops[stopIndex].red);
        colorComponents.uncheckedAppend(m_stops[stopIndex].green);
        colorComponents.uncheckedAppend(m_stops[stopIndex].blue);
        colorComponents.uncheckedAppend(m_stops[stopIndex].alpha);
        locations.uncheckedAppend(m_stops[stopIndex].stop);
    }

    m_gradient = CGGradientCreateWithColorComponents(deviceRGBColorSpaceRef(), colorComponents.data(), locations.data(), stopCount);

    return m_gradient;
}
// Draws this image into the 32bpp DIB |bmp|. If |size| is non-null, the
// decoded frame whose source size matches is drawn; otherwise the whole image
// is scaled into the bitmap. Returns false when no CG context could be
// created for the DIB's pixel memory.
bool BitmapImage::getHBITMAPOfSize(HBITMAP bmp, LPSIZE size)
{
    ASSERT(bmp);

    BITMAP bmpInfo;
    GetObject(bmp, sizeof(BITMAP), &bmpInfo);

    ASSERT(bmpInfo.bmBitsPixel == 32);

    CGContextRef cgContext = CGBitmapContextCreate(bmpInfo.bmBits, bmpInfo.bmWidth, bmpInfo.bmHeight,
        8, bmpInfo.bmWidthBytes, deviceRGBColorSpaceRef(), kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    // Creation fails when bmBits is null (e.g. |bmp| is not a DIB section) or
    // the geometry is invalid; previously a null context was wrapped and the
    // function still reported success.
    if (!cgContext)
        return false;

    GraphicsContext gc(cgContext);

    IntSize imageSize = BitmapImage::size();
    if (size)
        drawFrameMatchingSourceSize(&gc, FloatRect(0.0f, 0.0f, bmpInfo.bmWidth, bmpInfo.bmHeight), IntSize(*size), ColorSpaceDeviceRGB, CompositeCopy);
    else
        draw(&gc, FloatRect(0.0f, 0.0f, bmpInfo.bmWidth, bmpInfo.bmHeight), FloatRect(0.0f, 0.0f, imageSize.width(), imageSize.height()), ColorSpaceDeviceRGB, CompositeCopy);

    // Do cleanup
    CGContextRelease(cgContext);
    return true;
}
// Renders the given document rect of |frame| (with compositing layers
// flattened) into a newly allocated 32bpp DIB and returns it. The caller owns
// the returned HBITMAP. Returns 0 if the DIB cannot be allocated.
static HBITMAP imageFromRect(const Frame* frame, IntRect& ir)
{
    PaintBehavior oldPaintBehavior = frame->view()->paintBehavior();
    frame->view()->setPaintBehavior(oldPaintBehavior | PaintBehaviorFlattenCompositingLayers);

    void* bits;
    HDC hdc = CreateCompatibleDC(0);
    int w = ir.width();
    int h = ir.height();
    BitmapInfo bmp = BitmapInfo::create(IntSize(w, h));

    HBITMAP hbmp = CreateDIBSection(0, &bmp, DIB_RGB_COLORS, static_cast<void**>(&bits), 0, 0);
    // CreateDIBSection can fail (e.g. for huge rects); bail out rather than
    // hand uninitialized |bits| to CoreGraphics.
    if (!hbmp) {
        DeleteDC(hdc);
        frame->view()->setPaintBehavior(oldPaintBehavior);
        return 0;
    }

    HBITMAP hbmpOld = static_cast<HBITMAP>(SelectObject(hdc, hbmp));
    CGContextRef context = CGBitmapContextCreate(static_cast<void*>(bits), w, h,
        8, w * sizeof(RGBQUAD), deviceRGBColorSpaceRef(), kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst);
    if (!context) {
        // Return the (blank) bitmap rather than leak it; caller still owns it.
        SelectObject(hdc, hbmpOld);
        DeleteDC(hdc);
        frame->view()->setPaintBehavior(oldPaintBehavior);
        return hbmp;
    }

    GraphicsContext gc(context);

    drawRectIntoContext(ir, frame->view(), &gc);

    // Note: the original code called CGContextSaveGState() here without a
    // matching restore; the context is released immediately, so the save was
    // dropped as it had no observable effect.
    CGContextRelease(context);
    SelectObject(hdc, hbmpOld);
    DeleteDC(hdc);

    frame->view()->setPaintBehavior(oldPaintBehavior);

    return hbmp;
}
// Constructs an offscreen drawing buffer of |size| in the requested color
// space. On any failure |success| is left false and the object must be
// discarded.
ImageBuffer::ImageBuffer(const IntSize& size, ColorSpace imageColorSpace, RenderingMode renderingMode, bool& success)
    : m_data(size)
    , m_size(size)
    , m_accelerateRendering(renderingMode == Accelerated)
{
#if !USE(IOSURFACE_CANVAS_BACKING_STORE)
    ASSERT(renderingMode == Unaccelerated);
#endif
    success = false; // Make early return mean failure.
    if (size.width() < 0 || size.height() < 0)
        return;

    unsigned bytesPerRow = size.width();
    if (bytesPerRow > 0x3FFFFFFF) // Protect against overflow
        return;
    bytesPerRow *= 4;
    m_data.m_bytesPerRow = bytesPerRow;
    // NOTE(review): only the width is bounded above; height * bytesPerRow is
    // not itself overflow-checked here — confirm callers bound the height.
    size_t dataSize = size.height() * bytesPerRow;

    switch (imageColorSpace) {
    case ColorSpaceDeviceRGB:
        m_data.m_colorSpace = deviceRGBColorSpaceRef();
        break;
    case ColorSpaceSRGB:
        m_data.m_colorSpace = sRGBColorSpaceRef();
        break;
    case ColorSpaceLinearRGB:
        m_data.m_colorSpace = linearRGBColorSpaceRef();
        break;
    }

    RetainPtr<CGContextRef> cgContext;
    if (!m_accelerateRendering) {
        // Software path: zeroed malloc'd backing store, freed by releaseImageData.
        if (!tryFastCalloc(size.height(), bytesPerRow).getValue(m_data.m_data))
            return;
        ASSERT(!(reinterpret_cast<size_t>(m_data.m_data) & 2));

        m_data.m_bitmapInfo = kCGImageAlphaPremultipliedLast;
        cgContext.adoptCF(CGBitmapContextCreate(m_data.m_data, size.width(), size.height(), 8, bytesPerRow, m_data.m_colorSpace, m_data.m_bitmapInfo));
        // Create a live image that wraps the data.
        m_data.m_dataProvider.adoptCF(CGDataProviderCreateWithData(0, m_data.m_data, dataSize, releaseImageData));
    } else {
#if USE(IOSURFACE_CANVAS_BACKING_STORE)
        m_data.m_surface = createIOSurface(size);
        cgContext.adoptCF(wkIOSurfaceContextCreate(m_data.m_surface.get(), size.width(), size.height(), m_data.m_colorSpace));
#else
        m_accelerateRendering = false; // Force to false on older platforms
#endif
    }

    if (!cgContext)
        return;

    m_context.set(new GraphicsContext(cgContext.get()));
    // Flip to a top-left-origin coordinate system.
    m_context->scale(FloatSize(1, -1));
    m_context->translate(0, -size.height());
    success = true;
}
// Wraps the pixel memory of a 32bpp DIB section in a CG bitmap context.
// Follows the Create Rule: the caller owns the returned context.
static CGContextRef createCgContextFromBitmap(HBITMAP bitmap)
{
    BITMAP bitmapInfo;
    GetObject(bitmap, sizeof(bitmapInfo), &bitmapInfo);
    ASSERT(bitmapInfo.bmBitsPixel == 32);

    return CGBitmapContextCreate(bitmapInfo.bmBits, bitmapInfo.bmWidth, bitmapInfo.bmHeight, 8,
        bitmapInfo.bmWidthBytes, deviceRGBColorSpaceRef(),
        kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipFirst);
}
// Returns the shared linear-RGB color space (a process-lifetime singleton,
// intentionally never released).
CGColorSpaceRef linearRGBColorSpaceRef()
{
    // FIXME: Windows should be able to use kCGColorSpaceGenericRGBLinear, this is tracked by http://webkit.org/b/31363.
#if PLATFORM(WIN) || defined(BUILDING_ON_TIGER)
    // Fall back to device RGB where the named linear space is unavailable.
    return deviceRGBColorSpaceRef();
#else
    static CGColorSpaceRef linearRGBSpace = CGColorSpaceCreateWithName(kCGColorSpaceGenericRGBLinear);
    return linearRGBSpace;
#endif
}
// Builds a CG color space from an embedded ICC profile. Follows the Create
// Rule: the caller owns the returned color space.
static CGColorSpaceRef createColorSpace(const ColorProfile& colorProfile)
{
    RetainPtr<CFDataRef> data(AdoptCF, CFDataCreate(kCFAllocatorDefault, reinterpret_cast<const UInt8*>(colorProfile.data()), colorProfile.size()));
#ifndef TARGETING_LEOPARD
    return CGColorSpaceCreateWithICCProfile(data.get());
#else
    // Pre-Snow Leopard fallback: build a 3-component ICC-based space by hand,
    // using device RGB as the alternate space.
    RetainPtr<CGDataProviderRef> profileDataProvider(AdoptCF, CGDataProviderCreateWithCFData(data.get()));
    CGFloat ranges[] = {0.0, 255.0, 0.0, 255.0, 0.0, 255.0};
    return CGColorSpaceCreateICCBased(3, ranges, profileDataProvider.get(), deviceRGBColorSpaceRef());
#endif
}
// Encodes an ImageData as a data: URL of the given MIME type. Returns the
// empty data URL ("data:,") on any allocation failure.
String ImageDataToDataURL(const ImageData& source, const String& mimeType, const double* quality)
{
    ASSERT(MIMETypeRegistry::isSupportedImageMIMETypeForEncoding(mimeType));

    RetainPtr<CFStringRef> uti = utiFromMIMEType(mimeType);
    ASSERT(uti);

    CGImageAlphaInfo dataAlphaInfo = kCGImageAlphaLast;
    unsigned char* data = source.data()->data();
    Vector<uint8_t> premultipliedData;

    if (CFEqual(uti.get(), jpegUTI())) {
        // JPEGs don't have an alpha channel, so we have to manually composite on top of black.
        size_t size = 4 * source.width() * source.height();
        if (!premultipliedData.tryReserveCapacity(size))
            return "data:,";

        premultipliedData.resize(size);
        unsigned char *buffer = premultipliedData.data();
        for (size_t i = 0; i < size; i += 4) {
            unsigned alpha = data[i + 3];
            if (alpha != 255) {
                // Premultiply RGB by alpha (composite over black).
                buffer[i + 0] = data[i + 0] * alpha / 255;
                buffer[i + 1] = data[i + 1] * alpha / 255;
                buffer[i + 2] = data[i + 2] * alpha / 255;
            } else {
                buffer[i + 0] = data[i + 0];
                buffer[i + 1] = data[i + 1];
                buffer[i + 2] = data[i + 2];
            }
            // NOTE(review): buffer[i + 3] is never written; kCGImageAlphaNoneSkipLast
            // below means the encoder ignores it, but confirm Vector::resize()
            // leaves no uninitialized bytes observable elsewhere.
        }

        dataAlphaInfo = kCGImageAlphaNoneSkipLast; // Ignore the alpha channel.
        data = premultipliedData.data();
    }

    RetainPtr<CGDataProviderRef> dataProvider;
    dataProvider = adoptCF(CGDataProviderCreateWithData(0, data, 4 * source.width() * source.height(), 0));
    if (!dataProvider)
        return "data:,";

    RetainPtr<CGImageRef> image;
    image = adoptCF(CGImageCreate(source.width(), source.height(), 8, 32, 4 * source.width(),
        deviceRGBColorSpaceRef(), kCGBitmapByteOrderDefault | dataAlphaInfo,
        dataProvider.get(), 0, false, kCGRenderingIntentDefault));

    return CGImageToDataURL(image.get(), mimeType, quality);
}
// Encodes an ImageData as a data: URL of the given MIME type. Returns the
// empty data URL ("data:,") when the data provider cannot be created.
String ImageDataToDataURL(const ImageData& source, const String& mimeType, const double* quality)
{
    ASSERT(MIMETypeRegistry::isSupportedImageMIMETypeForEncoding(mimeType));

    RetainPtr<CFStringRef> uti = utiFromMIMEType(mimeType);
    ASSERT(uti);

    unsigned char* data = source.data()->data();
    Vector<uint8_t> dataVector;
    if (CFEqual(uti.get(), jpegUTI())) {
        // JPEGs don't have an alpha channel, so we have to manually composite on top of black.
        dataVector.resize(4 * source.width() * source.height());
        unsigned char *out = dataVector.data();
        int pixelCount = source.width() * source.height();
        for (int pixel = 0; pixel < pixelCount; ++pixel) {
            const unsigned char* src = data + 4 * pixel;
            unsigned char* dst = out + 4 * pixel;
            // Multiply color data by alpha, and set alpha to 255.
            int alpha = src[3];
            if (alpha == 255) {
                dst[0] = src[0];
                dst[1] = src[1];
                dst[2] = src[2];
            } else {
                dst[0] = src[0] * alpha / 255;
                dst[1] = src[1] * alpha / 255;
                dst[2] = src[2] * alpha / 255;
            }
            dst[3] = 255;
        }
        data = out;
    }

    RetainPtr<CGDataProviderRef> dataProvider;
    dataProvider.adoptCF(CGDataProviderCreateWithData(0, data, 4 * source.width() * source.height(), 0));
    if (!dataProvider)
        return "data:,";

    RetainPtr<CGImageRef> image;
    image.adoptCF(CGImageCreate(source.width(), source.height(), 8, 32, 4 * source.width(),
        deviceRGBColorSpaceRef(), kCGBitmapByteOrderDefault | kCGImageAlphaLast,
        dataProvider.get(), 0, false, kCGRenderingIntentDefault));

    return CGImageToDataURL(image.get(), mimeType, quality);
}
// Lazily builds (and caches) the CGShading for this gradient. The shading
// function maps the parameter t in [0, 1] to RGBA components in [0, 1].
CGShadingRef Gradient::platformGradient()
{
    if (m_gradient)
        return m_gradient;

    const CGFloat intervalRanges[2] = { 0, 1 };
    const CGFloat colorComponentRanges[4 * 2] = { 0, 1, 0, 1, 0, 1, 0, 1 };
    const CGFunctionCallbacks gradientCallbacks = { 0, gradientCallback, 0 };
    RetainPtr<CGFunctionRef> colorFunction(AdoptCF, CGFunctionCreate(this, 1, intervalRanges, 4, colorComponentRanges, &gradientCallbacks));

    CGColorSpaceRef deviceColorSpace = deviceRGBColorSpaceRef();

    // The trailing (true, true) arguments extend the shading past both ends.
    m_gradient = m_radial
        ? CGShadingCreateRadial(deviceColorSpace, m_p0, m_r0, m_p1, m_r1, colorFunction.get(), true, true)
        : CGShadingCreateAxial(deviceColorSpace, m_p0, m_p1, colorFunction.get(), true, true);

    return m_gradient;
}
// Returns |originalImage| retagged into the requested color space. Images
// that already carry a non-device color space are returned untouched.
static RetainPtr<CGImageRef> imageWithColorSpace(CGImageRef originalImage, ColorSpace colorSpace)
{
    CGColorSpaceRef originalColorSpace = CGImageGetColorSpace(originalImage);

    // If the image already has a (non-device) color space, we don't want to
    // override it, so return.
    if (!originalColorSpace || !CFEqual(originalColorSpace, deviceRGBColorSpaceRef()))
        return originalImage;

    if (colorSpace == sRGBColorSpace)
        return RetainPtr<CGImageRef>(AdoptCF, CGImageCreateCopyWithColorSpace(originalImage, sRGBColorSpaceRef()));
    if (colorSpace == DeviceColorSpace)
        return originalImage;

    ASSERT_NOT_REACHED();
    return originalImage;
}
// Serializes the buffer's contents as a data: URL of the requested MIME type.
// Returns "data:," on failure.
String ImageBuffer::toDataURL(const String& mimeType, const double* quality, CoordinateSystem) const
{
    ASSERT(MIMETypeRegistry::isSupportedImageMIMETypeForEncoding(mimeType));

    // Accelerated (IOSurface) buffers must be flushed before reading pixels.
    if (context().isAcceleratedContext())
        flushContext();

    RetainPtr<CFStringRef> uti = utiFromMIMEType(mimeType);
    ASSERT(uti);

    RefPtr<Uint8ClampedArray> premultipliedData;
    RetainPtr<CGImageRef> image;

    if (CFEqual(uti.get(), jpegUTI())) {
        // JPEGs don't have an alpha channel, so we have to manually composite on top of black.
        premultipliedData = getPremultipliedImageData(IntRect(IntPoint(0, 0), logicalSize()));
        if (!premultipliedData)
            return "data:,";

        RetainPtr<CGDataProviderRef> dataProvider;
        dataProvider = adoptCF(CGDataProviderCreateWithData(0, premultipliedData->data(), 4 * logicalSize().width() * logicalSize().height(), 0));
        if (!dataProvider)
            return "data:,";

        // The alpha byte is present in memory but ignored by the encoder
        // (kCGImageAlphaNoneSkipLast).
        image = adoptCF(CGImageCreate(logicalSize().width(), logicalSize().height(), 8, 32, 4 * logicalSize().width(),
            deviceRGBColorSpaceRef(), kCGBitmapByteOrderDefault | kCGImageAlphaNoneSkipLast,
            dataProvider.get(), 0, false, kCGRenderingIntentDefault));
    } else if (m_resolutionScale == 1) {
        image = copyNativeImage(CopyBackingStore);
        image = createCroppedImageIfNecessary(image.get(), internalSize());
    } else {
        // Scaled buffer: redraw at logical size into a fresh bitmap context
        // so the encoded image is 1x.
        image = copyNativeImage(DontCopyBackingStore);
        RetainPtr<CGContextRef> context = adoptCF(CGBitmapContextCreate(0, logicalSize().width(), logicalSize().height(), 8, 4 * logicalSize().width(), deviceRGBColorSpaceRef(), kCGImageAlphaPremultipliedLast));
        CGContextSetBlendMode(context.get(), kCGBlendModeCopy);
        CGContextClipToRect(context.get(), CGRectMake(0, 0, logicalSize().width(), logicalSize().height()));
        FloatSize imageSizeInUserSpace = scaleSizeToUserSpace(logicalSize(), m_data.backingStoreSize, internalSize());
        CGContextDrawImage(context.get(), CGRectMake(0, 0, imageSizeInUserSpace.width(), imageSizeInUserSpace.height()), image.get());
        image = adoptCF(CGBitmapContextCreateImage(context.get()));
    }

    return CGImageToDataURL(image.get(), mimeType, quality);
}
// Creates a BitmapImage backed by a snapshot of a 32bpp DIB section.
// Returns 0 for non-DIB or non-32bpp bitmaps.
PassRefPtr<BitmapImage> BitmapImage::create(HBITMAP hBitmap)
{
    DIBSECTION dibSection;
    if (!GetObject(hBitmap, sizeof(DIBSECTION), &dibSection))
        return 0;

    ASSERT(dibSection.dsBm.bmBitsPixel == 32);
    if (dibSection.dsBm.bmBitsPixel != 32)
        return 0;

    ASSERT(dibSection.dsBm.bmBits);
    if (!dibSection.dsBm.bmBits)
        return 0;

    // Wrap the DIB's pixel memory in a CG bitmap context, then snapshot it.
    RetainPtr<CGContextRef> bitmapContext(AdoptCF, CGBitmapContextCreate(dibSection.dsBm.bmBits,
        dibSection.dsBm.bmWidth, dibSection.dsBm.bmHeight, 8, dibSection.dsBm.bmWidthBytes,
        deviceRGBColorSpaceRef(), kCGBitmapByteOrder32Little | kCGImageAlphaPremultipliedFirst));

    // The BitmapImage constructor takes ownership of this +1 image reference.
    CGImageRef cgImage = CGBitmapContextCreateImage(bitmapContext.get());
    return adoptRef(new BitmapImage(cgImage));
}
static void resolveColorSpace(const SkBitmap& bitmap, CGColorSpaceRef colorSpace) { int width = bitmap.width(); int height = bitmap.height(); CGImageRef srcImage = SkCreateCGImageRefWithColorspace(bitmap, colorSpace); SkAutoLockPixels lock(bitmap); void* pixels = bitmap.getPixels(); RetainPtr<CGContextRef> cgBitmap(AdoptCF, CGBitmapContextCreate(pixels, width, height, 8, width * 4, deviceRGBColorSpaceRef(), kCGBitmapByteOrder32Host | kCGImageAlphaPremultipliedFirst)); if (!cgBitmap) return; CGContextSetBlendMode(cgBitmap.get(), kCGBlendModeCopy); CGRect bounds = { {0, 0}, {width, height} }; CGContextDrawImage(cgBitmap.get(), bounds, srcImage); }
// Returns a copy of the buffer's contents as an Image. When the buffer has a
// resolution scale and scaling is requested, the backing store is redrawn at
// logical size so the returned image is 1x.
RefPtr<Image> ImageBuffer::copyImage(BackingStoreCopy copyBehavior, ScaleBehavior scaleBehavior) const
{
    RetainPtr<CGImageRef> image;
    if (m_resolutionScale == 1 || scaleBehavior == Unscaled) {
        image = copyNativeImage(copyBehavior);
        image = createCroppedImageIfNecessary(image.get(), internalSize());
    } else {
        image = copyNativeImage(DontCopyBackingStore);
        // Downscale into a fresh bitmap context at logical size.
        RetainPtr<CGContextRef> context = adoptCF(CGBitmapContextCreate(0, logicalSize().width(), logicalSize().height(), 8, 4 * logicalSize().width(), deviceRGBColorSpaceRef(), kCGImageAlphaPremultipliedLast));
        CGContextSetBlendMode(context.get(), kCGBlendModeCopy);
        CGContextClipToRect(context.get(), FloatRect(FloatPoint::zero(), logicalSize()));
        FloatSize imageSizeInUserSpace = scaleSizeToUserSpace(logicalSize(), m_data.backingStoreSize, internalSize());
        CGContextDrawImage(context.get(), FloatRect(FloatPoint::zero(), imageSizeInUserSpace), image.get());
        image = adoptCF(CGBitmapContextCreateImage(context.get()));
    }

    if (!image)
        return nullptr;

    return BitmapImage::create(image.get());
}
void BitmapImage::checkForSolidColor() { m_checkedForSolidColor = true; if (frameCount() > 1) { m_isSolidColor = false; return; } CGImageRef image = frameAtIndex(0); // Currently we only check for solid color in the important special case of a 1x1 image. if (image && CGImageGetWidth(image) == 1 && CGImageGetHeight(image) == 1) { unsigned char pixel[4]; // RGBA RetainPtr<CGContextRef> bmap(AdoptCF, CGBitmapContextCreate(pixel, 1, 1, 8, sizeof(pixel), deviceRGBColorSpaceRef(), kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big)); if (!bmap) return; GraphicsContext(bmap.get()).setCompositeOperation(CompositeCopy); CGRect dst = { {0, 0}, {1, 1} }; CGContextDrawImage(bmap.get(), dst, image); if (pixel[3] == 0) m_solidColor = Color(0, 0, 0, 0); else m_solidColor = Color(pixel[0] * 255 / pixel[3], pixel[1] * 255 / pixel[3], pixel[2] * 255 / pixel[3], pixel[3]); m_isSolidColor = true; } }
// Constructs an offscreen drawing buffer with overflow-checked allocation
// sizes. On any failure |success| stays false and the object must be
// discarded. Falls back from IOSurface to a software backing store when
// acceleration is unavailable or fails.
ImageBuffer::ImageBuffer(const IntSize& size, ColorSpace imageColorSpace, RenderingMode renderingMode, bool& success)
    : m_data(size)
    , m_size(size)
    , m_accelerateRendering(renderingMode == Accelerated)
{
    success = false; // Make early return mean failure.
    if (size.width() <= 0 || size.height() <= 0)
        return;

    // Prevent integer overflows
    Checked<int, RecordOverflow> width = size.width();
    Checked<int, RecordOverflow> height = size.height();
    m_data.m_bytesPerRow = 4 * width;
    Checked<size_t, RecordOverflow> dataSize = height * m_data.m_bytesPerRow;
    if (dataSize.hasOverflowed())
        return;

#if USE(IOSURFACE_CANVAS_BACKING_STORE)
    // IOSurfaces only pay off inside a size window; otherwise stay in software.
    if (width.unsafeGet() >= maxIOSurfaceDimension || height.unsafeGet() >= maxIOSurfaceDimension
        || (width * height).unsafeGet() < minIOSurfaceArea)
        m_accelerateRendering = false;
#else
    ASSERT(renderingMode == Unaccelerated);
#endif

    switch (imageColorSpace) {
    case ColorSpaceDeviceRGB:
        m_data.m_colorSpace = deviceRGBColorSpaceRef();
        break;
    case ColorSpaceSRGB:
        m_data.m_colorSpace = sRGBColorSpaceRef();
        break;
    case ColorSpaceLinearRGB:
        m_data.m_colorSpace = linearRGBColorSpaceRef();
        break;
    }

    RetainPtr<CGContextRef> cgContext;
    if (m_accelerateRendering) {
#if USE(IOSURFACE_CANVAS_BACKING_STORE)
        m_data.m_surface = createIOSurface(size);
        cgContext.adoptCF(wkIOSurfaceContextCreate(m_data.m_surface.get(), width.unsafeGet(), height.unsafeGet(), m_data.m_colorSpace));
#endif
        if (!cgContext)
            m_accelerateRendering = false; // If allocation fails, fall back to non-accelerated path.
    }

    if (!m_accelerateRendering) {
        // Software path: zeroed malloc'd backing store, freed by releaseImageData.
        if (!tryFastCalloc(height.unsafeGet(), m_data.m_bytesPerRow.unsafeGet()).getValue(m_data.m_data))
            return;
        ASSERT(!(reinterpret_cast<size_t>(m_data.m_data) & 2));

#if USE_ARGB32
        m_data.m_bitmapInfo = kCGImageAlphaPremultipliedFirst | kCGBitmapByteOrder32Host;
#else
        m_data.m_bitmapInfo = kCGImageAlphaPremultipliedLast;
#endif
        cgContext.adoptCF(CGBitmapContextCreate(m_data.m_data, width.unsafeGet(), height.unsafeGet(), 8, m_data.m_bytesPerRow.unsafeGet(), m_data.m_colorSpace, m_data.m_bitmapInfo));
        // Create a live image that wraps the data.
        m_data.m_dataProvider.adoptCF(CGDataProviderCreateWithData(0, m_data.m_data, dataSize.unsafeGet(), releaseImageData));
    }

    if (!cgContext)
        return;

    m_context = adoptPtr(new GraphicsContext(cgContext.get()));
    // Flip to a top-left-origin coordinate system.
    m_context->scale(FloatSize(1, -1));
    m_context->translate(0, -height.unsafeGet());
    success = true;
}
// Draws a WindowsBitmap (a DIB wrapper) into this context at |point|.
void GraphicsContext::drawWindowsBitmap(WindowsBitmap* image, const IntPoint& point)
{
    // FIXME: Creating CFData is non-optimal, but needed to avoid crashing when printing. Ideally we should
    // make a custom CGDataProvider that controls the WindowsBitmap lifetime. see <rdar://6394455>
    RetainPtr<CFDataRef> imageData = adoptCF(CFDataCreate(kCFAllocatorDefault, image->buffer(), image->bufferLength()));
    RetainPtr<CGDataProviderRef> dataProvider = adoptCF(CGDataProviderCreateWithCFData(imageData.get()));
    // kCGImageAlphaFirst = non-premultiplied alpha, alpha byte first; the
    // trailing |true| enables interpolation when the image is scaled.
    RetainPtr<CGImageRef> cgImage = adoptCF(CGImageCreate(image->size().width(), image->size().height(), 8, 32, image->bytesPerRow(),
        deviceRGBColorSpaceRef(), kCGBitmapByteOrder32Little | kCGImageAlphaFirst,
        dataProvider.get(), 0, true, kCGRenderingIntentDefault));
    CGContextDrawImage(m_data->m_cgContext.get(), CGRectMake(point.x(), point.y(), image->size().width(), image->size().height()), cgImage.get());
}
// Determines whether this image is a single solid color and caches the result
// in m_solidColor / m_isSolidColor. Only single-frame 1x1 images qualify.
void BitmapImage::checkForSolidColor()
{
    m_checkedForSolidColor = true;
    if (frameCount() > 1) {
        m_isSolidColor = false;
        return;
    }

#if !PLATFORM(IOS)
    CGImageRef image = frameAtIndex(0);
#else
    // Note, checkForSolidColor() may be called from frameAtIndex(). On iOS frameAtIndex() gets passed a scaleHint
    // argument which it uses to tell CG to create a scaled down image. Since we don't know the scaleHint here, if
    // we call frameAtIndex() again, we would pass it the default scale of 1 and would end up recreating the image.
    // So we do a quick check and call frameAtIndex(0) only if we haven't yet created an image.
    CGImageRef image = nullptr;
    if (m_frames.size())
        image = m_frames[0].m_frame;

    if (!image)
        image = frameAtIndex(0);
#endif

    // Currently we only check for solid color in the important special case of a 1x1 image.
    if (image && CGImageGetWidth(image) == 1 && CGImageGetHeight(image) == 1) {
        unsigned char pixel[4]; // RGBA
        RetainPtr<CGContextRef> bitmapContext = adoptCF(CGBitmapContextCreate(pixel, 1, 1, 8, sizeof(pixel),
            deviceRGBColorSpaceRef(), kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big));
        if (!bitmapContext)
            return;
        // Copy (not blend) the single pixel into our scratch buffer.
        GraphicsContext(bitmapContext.get()).setCompositeOperation(CompositeCopy);
        CGRect destinationRect = CGRectMake(0, 0, 1, 1);
        CGContextDrawImage(bitmapContext.get(), destinationRect, image);
        // Un-premultiply to recover the source color.
        if (!pixel[3])
            m_solidColor = Color(0, 0, 0, 0);
        else
            m_solidColor = Color(pixel[0] * 255 / pixel[3], pixel[1] * 255 / pixel[3], pixel[2] * 255 / pixel[3], pixel[3]);
        m_isSolidColor = true;
    }
}
// Determines whether this image is a single solid color. Avoids forcing a
// frame decode unless the image is already known to be 1x1.
void BitmapImage::checkForSolidColor()
{
    m_checkedForSolidColor = true;
    m_isSolidColor = false;

    if (frameCount() > 1)
        return;

    if (!haveFrameAtIndex(0)) {
        // Frame not decoded yet: ask the decoder for the size first, so we
        // only pay for a decode when the image really is 1x1.
        IntSize size = m_source.frameSizeAtIndex(0, 0);
        if (size.width() != 1 || size.height() != 1)
            return;

        if (!ensureFrameIsCached(0))
            return;
    }

    CGImageRef image = nullptr;
    if (m_frames.size())
        image = m_frames[0].m_frame;

    if (!image)
        return;

    // Currently we only check for solid color in the important special case of a 1x1 image.
    if (CGImageGetWidth(image) == 1 && CGImageGetHeight(image) == 1) {
        unsigned char pixel[4]; // RGBA
        RetainPtr<CGContextRef> bitmapContext = adoptCF(CGBitmapContextCreate(pixel, 1, 1, 8, sizeof(pixel),
            deviceRGBColorSpaceRef(), kCGImageAlphaPremultipliedLast | kCGBitmapByteOrder32Big));
        if (!bitmapContext)
            return;
        // Copy (not blend) the single pixel into our scratch buffer.
        GraphicsContext(bitmapContext.get()).setCompositeOperation(CompositeCopy);
        CGRect destinationRect = CGRectMake(0, 0, 1, 1);
        CGContextDrawImage(bitmapContext.get(), destinationRect, image);
        // Un-premultiply to recover the source color.
        if (!pixel[3])
            m_solidColor = Color(0, 0, 0, 0);
        else
            m_solidColor = Color(pixel[0] * 255 / pixel[3], pixel[1] * 255 / pixel[3], pixel[2] * 255 / pixel[3], pixel[3]);
        m_isSolidColor = true;
    }
}
// Returns a copy of the buffer's contents as an Image. When the buffer has a
// resolution scale, the backing store is redrawn at logical size first so the
// returned image is 1x.
PassRefPtr<Image> ImageBuffer::copyImage(BackingStoreCopy copyBehavior) const
{
    RetainPtr<CGImageRef> image;
    if (m_resolutionScale == 1)
        image = copyNativeImage(copyBehavior);
    else {
        image.adoptCF(copyNativeImage(DontCopyBackingStore));
        // Downscale into a fresh bitmap context at logical size.
        RetainPtr<CGContextRef> context(AdoptCF, CGBitmapContextCreate(0, logicalSize().width(), logicalSize().height(), 8, 4 * logicalSize().width(), deviceRGBColorSpaceRef(), kCGImageAlphaPremultipliedLast));
        CGContextSetBlendMode(context.get(), kCGBlendModeCopy);
        CGContextDrawImage(context.get(), CGRectMake(0, 0, logicalSize().width(), logicalSize().height()), image.get());
        // CGBitmapContextCreateImage() returns a +1 (Create Rule) reference;
        // adopt it — plain RetainPtr assignment would retain again and leak
        // one CGImage per call.
        image.adoptCF(CGBitmapContextCreateImage(context.get()));
    }

    if (!image)
        return 0;

    return BitmapImage::create(image.get());
}
// Constructs an offscreen drawing buffer with overflow-checked allocation
// sizes. On any failure |success| stays false and the object must be
// discarded. Falls back from IOSurface to a software backing store when
// acceleration is unavailable or fails.
ImageBuffer::ImageBuffer(const IntSize& size, ColorSpace imageColorSpace, RenderingMode renderingMode, DeferralMode, bool& success)
    : m_data(size) // NOTE: The input here isn't important as ImageBufferDataCG's constructor just ignores it.
    , m_size(size)
{
    success = false; // Make early return mean failure.
    bool accelerateRendering = renderingMode == Accelerated;
    if (size.width() <= 0 || size.height() <= 0)
        return;

    // Prevent integer overflows
    Checked<int, RecordOverflow> width = size.width();
    Checked<int, RecordOverflow> height = size.height();
    m_data.m_bytesPerRow = 4 * width;
    Checked<size_t, RecordOverflow> numBytes = height * m_data.m_bytesPerRow;
    if (numBytes.hasOverflowed())
        return;

#if USE(IOSURFACE_CANVAS_BACKING_STORE)
    // IOSurfaces only pay off inside a size window; otherwise stay in software.
    if (width.unsafeGet() >= maxIOSurfaceDimension || height.unsafeGet() >= maxIOSurfaceDimension
        || (width * height).unsafeGet() < minIOSurfaceArea)
        accelerateRendering = false;
#else
    ASSERT(renderingMode == Unaccelerated);
#endif

    switch (imageColorSpace) {
    case ColorSpaceDeviceRGB:
        m_data.m_colorSpace = deviceRGBColorSpaceRef();
        break;
    case ColorSpaceSRGB:
        m_data.m_colorSpace = sRGBColorSpaceRef();
        break;
    case ColorSpaceLinearRGB:
        m_data.m_colorSpace = linearRGBColorSpaceRef();
        break;
    }

    RetainPtr<CGContextRef> cgContext;
    if (accelerateRendering) {
#if USE(IOSURFACE_CANVAS_BACKING_STORE)
        m_data.m_surface = createIOSurface(size);
        cgContext.adoptCF(wkIOSurfaceContextCreate(m_data.m_surface.get(), width.unsafeGet(), height.unsafeGet(), m_data.m_colorSpace));
#endif
        if (!cgContext)
            accelerateRendering = false; // If allocation fails, fall back to non-accelerated path.
    }

    if (!accelerateRendering) {
        // Software path: zeroed malloc'd backing store, freed by releaseImageData.
        if (!tryFastCalloc(height.unsafeGet(), m_data.m_bytesPerRow.unsafeGet()).getValue(m_data.m_data))
            return;
        ASSERT(!(reinterpret_cast<size_t>(m_data.m_data) & 2));

        m_data.m_bitmapInfo = kCGImageAlphaPremultipliedLast;
        cgContext.adoptCF(CGBitmapContextCreate(m_data.m_data, width.unsafeGet(), height.unsafeGet(), 8, m_data.m_bytesPerRow.unsafeGet(), m_data.m_colorSpace, m_data.m_bitmapInfo));
        // Create a live image that wraps the data.
        m_data.m_dataProvider.adoptCF(CGDataProviderCreateWithData(0, m_data.m_data, numBytes.unsafeGet(), releaseImageData));
    }

    if (!cgContext)
        return;

    m_context = adoptPtr(new GraphicsContext(cgContext.get()));
    // Flip to a top-left-origin coordinate system.
    m_context->scale(FloatSize(1, -1));
    m_context->translate(0, -height.unsafeGet());
    m_context->setIsAcceleratedContext(accelerateRendering);
#if defined(BUILDING_ON_LION)
    // Used to rate-limit explicit flushes of the accelerated backing store.
    m_data.m_lastFlushTime = currentTimeMS();
#endif

    success = true;
}
// Allocates a DIB section of the given size and, when |targetRef| is non-null,
// also creates a CG bitmap context drawing into its pixels (stored through
// |targetRef|; caller releases). Returns 0 on allocation failure.
HBITMAP allocImage(HDC dc, IntSize size, CGContextRef *targetRef)
{
    BitmapInfo bmpInfo = BitmapInfo::create(size);

    LPVOID bits;
    HBITMAP hbmp = CreateDIBSection(dc, &bmpInfo, DIB_RGB_COLORS, &bits, 0, 0);
    // CreateDIBSection can fail (e.g. for huge sizes); previously |bits| was
    // passed to CGBitmapContextCreate uninitialized in that case.
    if (!hbmp)
        return 0;

    if (!targetRef)
        return hbmp;

    CGContextRef bitmapContext = CGBitmapContextCreate(bits, bmpInfo.bmiHeader.biWidth, bmpInfo.bmiHeader.biHeight, 8,
        bmpInfo.bmiHeader.biWidth * 4, deviceRGBColorSpaceRef(),
        kCGBitmapByteOrder32Little | kCGImageAlphaNoneSkipFirst);
    if (!bitmapContext) {
        DeleteObject(hbmp);
        return 0;
    }

    *targetRef = bitmapContext;
    return hbmp;
}