static void TypefaceStyle_test(skiatest::Reporter* reporter,
                               uint16_t weight, uint16_t width, SkData* data)
{
    sk_sp<SkData> dataCopy;
    SkData* dataToUse = data;
    if (!dataToUse->unique()) {
        dataCopy = SkData::MakeWithCopy(data->data(), data->size());
        dataToUse = dataCopy.get();
    }
    SkSFNTHeader* sfntHeader = static_cast<SkSFNTHeader*>(dataToUse->writable_data());

    SkSFNTHeader::TableDirectoryEntry* tableEntry =
        SkTAfter<SkSFNTHeader::TableDirectoryEntry>(sfntHeader);
    SkSFNTHeader::TableDirectoryEntry* os2TableEntry = nullptr;
    int numTables = SkEndian_SwapBE16(sfntHeader->numTables);
    for (int tableEntryIndex = 0; tableEntryIndex < numTables; ++tableEntryIndex) {
        if (SkOTTableOS2::TAG == tableEntry[tableEntryIndex].tag) {
            os2TableEntry = tableEntry + tableEntryIndex;
            break;
        }
    }
    SkASSERT_RELEASE(os2TableEntry);

    size_t os2TableOffset = SkEndian_SwapBE32(os2TableEntry->offset);
    SkOTTableOS2_V0* os2Table = SkTAddOffset<SkOTTableOS2_V0>(sfntHeader, os2TableOffset);
    os2Table->usWeightClass.value = SkEndian_SwapBE16(weight);
    using WidthType = SkOTTableOS2_V0::WidthClass::Value;
    os2Table->usWidthClass.value = static_cast<WidthType>(SkEndian_SwapBE16(width));

    sk_sp<SkTypeface> newTypeface(SkTypeface::MakeFromStream(new SkMemoryStream(dataToUse)));
    SkASSERT_RELEASE(newTypeface);

    SkFontStyle newStyle = newTypeface->fontStyle();

    //printf("%d, %f\n", weight, (newStyle.weight() - (float)0x7FFF) / (float)0x7FFF);
    //printf("%d, %f\n", width , (newStyle.width()  - (float)0x7F)   / (float)0x7F);
    //printf("%d, %d\n", weight, newStyle.weight());
    //printf("%d, %d\n", width , newStyle.width());

    // Some back-ends (CG, GDI, DW) support OS/2 version A which uses 0 - 10 (but all differently).
    REPORTER_ASSERT(reporter,
                    newStyle.weight() == weight ||
                    (weight <=   10 && newStyle.weight() == 100 * weight) ||
                    (weight ==    4 && newStyle.weight() == 350) ||  // GDI weirdness
                    (weight ==    5 && newStyle.weight() == 400) ||  // GDI weirdness
                    (weight ==    0 && newStyle.weight() ==   1) ||  // DW weirdness
                    (weight == 1000 && newStyle.weight() == 999)     // DW weirdness
    );

    // Some back-ends (GDI) don't support width, ensure these always report 'medium'.
    REPORTER_ASSERT(reporter,
                    newStyle.width() == width ||
                    newStyle.width() == 5);
}
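// A hedged driver sketch for TypefaceStyle_test(): the skiatest DEF_TEST macro is
// real, but the resource helper GetResourceAsStream() and the font path are
// assumptions for illustration, not taken from this section.
DEF_TEST(TypefaceStyle, reporter) {
    std::unique_ptr<SkStreamAsset> stream(GetResourceAsStream("fonts/Em.ttf"));
    if (!stream) {
        return;  // resource not available; nothing to test
    }
    sk_sp<SkData> data(SkData::MakeFromStream(stream.get(), stream->getLength()));

    // Sweep OS/2 usWeightClass at a fixed (medium) width, then sweep usWidthClass
    // (1-9) at a fixed (normal, 400) weight.
    for (int weight = 0; weight <= 1000; weight += 50) {
        TypefaceStyle_test(reporter, weight, 5, data.get());
    }
    for (int width = 1; width <= 9; ++width) {
        TypefaceStyle_test(reporter, 400, width, data.get());
    }
}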
bool GrVkMemory::AllocAndBindBufferMemory(const GrVkGpu* gpu,
                                          VkBuffer buffer,
                                          GrVkBuffer::Type type,
                                          bool dynamic,
                                          GrVkAlloc* alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetBufferMemoryRequirements(device, buffer, &memReqs));

    VkMemoryPropertyFlags desiredMemProps = dynamic ? VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                      VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                                                      VK_MEMORY_PROPERTY_HOST_CACHED_BIT
                                                    : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
    uint32_t typeIndex = 0;
    if (!get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                     memReqs.memoryTypeBits,
                                     desiredMemProps,
                                     &typeIndex)) {
        // this memory type should always be available
        SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                                     memReqs.memoryTypeBits,
                                                     VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                     VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                                     &typeIndex));
    }

    GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type));

    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, alloc)) {
        SkDebugf("Failed to alloc buffer\n");
        return false;
    }

    // Bind Memory to device
    VkResult err = GR_VK_CALL(iface, BindBufferMemory(device, buffer,
                                                      alloc->fMemory, alloc->fOffset));
    if (err) {
        SkASSERT_RELEASE(heap->free(*alloc));
        return false;
    }

    return true;
}
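// A hedged usage sketch (not from this section): callers are expected to pair a
// successful AllocAndBindBufferMemory() with FreeBufferMemory() (shown later in
// this section) once the VkBuffer is done being used. The helper name and the
// buffer type chosen here are illustrative.
static bool upload_with_paired_free(const GrVkGpu* gpu, VkBuffer buffer) {
    GrVkAlloc alloc;
    if (!GrVkMemory::AllocAndBindBufferMemory(gpu, buffer, GrVkBuffer::kVertex_Type,
                                              /*dynamic=*/true, &alloc)) {
        return false;
    }
    // ... map/fill the host-visible memory, issue draws, etc. ...
    GrVkMemory::FreeBufferMemory(gpu, GrVkBuffer::kVertex_Type, alloc);
    return true;
}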
static SkCachedData* make_data(size_t size, SkDiscardableMemoryPool* pool) {
    if (pool) {
        SkDiscardableMemory* dm = pool->create(size);
        // the pool "can" return null, but it shouldn't in these controlled conditions
        SkASSERT_RELEASE(dm);
        return new SkCachedData(size, dm);
    } else {
        return new SkCachedData(sk_malloc_throw(size), size);
    }
}
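// A minimal usage sketch for make_data(), assuming a discardable pool factory along
// the lines of SkDiscardableMemoryPool::Create(); the exact factory name/signature
// and the byte budget are assumptions. SkCachedData carries its own ref count,
// hence the explicit unref() calls.
static void make_data_example() {
    sk_sp<SkDiscardableMemoryPool> pool(SkDiscardableMemoryPool::Create(64 * 1024));
    SkCachedData* pooled = make_data(128, pool.get());  // discardable-backed
    SkCachedData* heaped = make_data(128, nullptr);     // malloc-backed
    pooled->unref();
    heaped->unref();
}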
sk_sp<SkSpecialImage> SkSpecialImage::MakeDeferredFromGpu(GrContext* context,
                                                          const SkIRect& subset,
                                                          uint32_t uniqueID,
                                                          sk_sp<GrTextureProxy> proxy,
                                                          sk_sp<SkColorSpace> colorSpace,
                                                          const SkSurfaceProps* props,
                                                          SkAlphaType at) {
    if (!context || context->contextPriv().abandoned() || !proxy) {
        return nullptr;
    }
    SkASSERT_RELEASE(rect_fits(subset, proxy->width(), proxy->height()));
    return sk_make_sp<SkSpecialImage_Gpu>(context, subset, uniqueID, std::move(proxy), at,
                                          std::move(colorSpace), props);
}
/*
 *  Modulo internal errors, this should always succeed *if* the matrix is downscaling
 *  (in this case, we have the inverse, so it succeeds if fInvMatrix is upscaling)
 */
bool SkDefaultBitmapControllerState::processMediumRequest(const SkBitmapProvider& provider) {
    SkASSERT(fQuality <= kMedium_SkFilterQuality);
    if (fQuality != kMedium_SkFilterQuality) {
        return false;
    }

    // Our default return state is to downgrade the request to Low, w/ or w/o setting fBitmap
    // to a valid bitmap.
    fQuality = kLow_SkFilterQuality;

    SkSize invScaleSize;
    if (!fInvMatrix.decomposeScale(&invScaleSize, nullptr)) {
        return false;
    }

    SkDestinationSurfaceColorMode colorMode = provider.dstColorSpace()
        ? SkDestinationSurfaceColorMode::kGammaAndColorSpaceAware
        : SkDestinationSurfaceColorMode::kLegacy;
    if (invScaleSize.width() > SK_Scalar1 || invScaleSize.height() > SK_Scalar1) {
        fCurrMip.reset(SkMipMapCache::FindAndRef(provider.makeCacheDesc(), colorMode));
        if (nullptr == fCurrMip.get()) {
            SkBitmap orig;
            if (!provider.asBitmap(&orig)) {
                return false;
            }
            fCurrMip.reset(SkMipMapCache::AddAndRef(orig, colorMode));
            if (nullptr == fCurrMip.get()) {
                return false;
            }
        }
        // diagnostic for a crasher...
        SkASSERT_RELEASE(fCurrMip->data());

        const SkSize scale = SkSize::Make(SkScalarInvert(invScaleSize.width()),
                                          SkScalarInvert(invScaleSize.height()));
        SkMipMap::Level level;
        if (fCurrMip->extractLevel(scale, &level)) {
            const SkSize& invScaleFixup = level.fScale;
            fInvMatrix.postScale(invScaleFixup.width(), invScaleFixup.height());

            // todo: if we could wrap the fCurrMip in a pixelref, then we could just install
            // that here, and not need to explicitly track it ourselves.
            return fResultBitmap.installPixels(level.fPixmap);
        } else {
            // failed to extract, so release the mipmap
            fCurrMip.reset(nullptr);
        }
    }

    return false;
}
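// Worked example of the mip fixup above (values are illustrative, assuming
// extractLevel() returns the exact half-resolution level): if fInvMatrix scales by
// 2x in each axis (drawing at half size), decomposeScale() yields
// invScaleSize = (2, 2), so the requested scale is (0.5, 0.5). The chosen level
// then reports level.fScale = (0.5, 0.5), and postScale(0.5, 0.5) remaps fInvMatrix
// so destination coordinates land 1:1 in the smaller level rather than the
// full-size base.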
/** localeNameLength must include the null terminator. */
SkFontMgr_DirectWrite(IDWriteFactory* factory, IDWriteFontCollection* fontCollection,
                      WCHAR* localeName, int localeNameLength)
    : fFactory(SkRefComPtr(factory))
    , fFontCollection(SkRefComPtr(fontCollection))
    , fLocaleName(localeNameLength)
{
#if SK_HAS_DWRITE_2_H
    if (!SUCCEEDED(fFactory->QueryInterface(&fFactory2))) {
        // IUnknown::QueryInterface states that if it fails, punk will be set to nullptr.
        // http://blogs.msdn.com/b/oldnewthing/archive/2004/03/26/96777.aspx
        SkASSERT_RELEASE(nullptr == fFactory2.get());
    }
#endif
    memcpy(fLocaleName.get(), localeName, localeNameLength * sizeof(WCHAR));
}
/** localeNameLength must include the null terminator. */
SkFontMgr_DirectWrite(IDWriteFactory* factory, IDWriteFontCollection* fontCollection,
                      IDWriteFontFallback* fallback, WCHAR* localeName, int localeNameLength)
    : fFactory(SkRefComPtr(factory))
    , fFontFallback(SkSafeRefComPtr(fallback))
    , fFontCollection(SkRefComPtr(fontCollection))
    , fLocaleName(localeNameLength)
{
    if (!SUCCEEDED(fFactory->QueryInterface(&fFactory2))) {
        // IUnknown::QueryInterface states that if it fails, punk will be set to nullptr.
        // http://blogs.msdn.com/b/oldnewthing/archive/2004/03/26/96777.aspx
        SkASSERT_RELEASE(nullptr == fFactory2.get());
    }
    if (fFontFallback.get()) {
        // factory must be provided if fallback is non-null, else the fallback will not be used.
        SkASSERT(fFactory2.get());
    }
    memcpy(fLocaleName.get(), localeName, localeNameLength * sizeof(WCHAR));
}
// Exercise the public API of SkSpecialSurface (e.g., getCanvas, makeImageSnapshot)
static void test_surface(const sk_sp<SkSpecialSurface>& surf,
                         skiatest::Reporter* reporter,
                         int offset) {
    const SkIRect surfSubset = TestingSpecialSurfaceAccess::Subset(surf.get());
    REPORTER_ASSERT(reporter, offset == surfSubset.fLeft);
    REPORTER_ASSERT(reporter, offset == surfSubset.fTop);
    REPORTER_ASSERT(reporter, kSmallerSize == surfSubset.width());
    REPORTER_ASSERT(reporter, kSmallerSize == surfSubset.height());

    SkCanvas* canvas = surf->getCanvas();
    SkASSERT_RELEASE(canvas);

    canvas->clear(SK_ColorRED);

    sk_sp<SkSpecialImage> img(surf->makeImageSnapshot());
    REPORTER_ASSERT(reporter, img);

    const SkIRect imgSubset = img->subset();
    REPORTER_ASSERT(reporter, surfSubset == imgSubset);

    // the canvas was invalidated by the makeImageSnapshot call
    REPORTER_ASSERT(reporter, !surf->getCanvas());
}
// Release-asserts that two blobs are structurally identical, field by field.
// Colors and big-glyph translations are deliberately excluded (see comments below).
void GrAtlasTextBlob::AssertEqual(const GrAtlasTextBlob& l, const GrAtlasTextBlob& r) {
    SkASSERT_RELEASE(l.fSize == r.fSize);
    SkASSERT_RELEASE(l.fPool == r.fPool);

    SkASSERT_RELEASE(l.fBlurRec.fSigma == r.fBlurRec.fSigma);
    SkASSERT_RELEASE(l.fBlurRec.fStyle == r.fBlurRec.fStyle);
    SkASSERT_RELEASE(l.fBlurRec.fQuality == r.fBlurRec.fQuality);

    SkASSERT_RELEASE(l.fStrokeInfo.fFrameWidth == r.fStrokeInfo.fFrameWidth);
    SkASSERT_RELEASE(l.fStrokeInfo.fMiterLimit == r.fStrokeInfo.fMiterLimit);
    SkASSERT_RELEASE(l.fStrokeInfo.fJoin == r.fStrokeInfo.fJoin);

    SkASSERT_RELEASE(l.fBigGlyphs.count() == r.fBigGlyphs.count());
    for (int i = 0; i < l.fBigGlyphs.count(); i++) {
        const BigGlyph& lBigGlyph = l.fBigGlyphs[i];
        const BigGlyph& rBigGlyph = r.fBigGlyphs[i];

        SkASSERT_RELEASE(lBigGlyph.fPath == rBigGlyph.fPath);
        // We can't assert that these have the same translations
    }

    SkASSERT_RELEASE(l.fKey == r.fKey);
    //SkASSERT_RELEASE(l.fPaintColor == r.fPaintColor); // Colors might not actually be identical
    SkASSERT_RELEASE(l.fMaxMinScale == r.fMaxMinScale);
    SkASSERT_RELEASE(l.fMinMaxScale == r.fMinMaxScale);
    SkASSERT_RELEASE(l.fTextType == r.fTextType);

    SkASSERT_RELEASE(l.fRunCount == r.fRunCount);
    for (int i = 0; i < l.fRunCount; i++) {
        const Run& lRun = l.fRuns[i];
        const Run& rRun = r.fRuns[i];

        if (lRun.fTypeface.get()) {
            SkASSERT_RELEASE(rRun.fTypeface.get());
            SkASSERT_RELEASE(SkTypeface::Equal(lRun.fTypeface, rRun.fTypeface));
        } else {
            SkASSERT_RELEASE(!rRun.fTypeface.get());
        }

        SkASSERT_RELEASE(lRun.fDescriptor.getDesc());
        SkASSERT_RELEASE(rRun.fDescriptor.getDesc());
        SkASSERT_RELEASE(*lRun.fDescriptor.getDesc() == *rRun.fDescriptor.getDesc());

        if (lRun.fOverrideDescriptor.get()) {
            SkASSERT_RELEASE(lRun.fOverrideDescriptor->getDesc());
            SkASSERT_RELEASE(rRun.fOverrideDescriptor.get() &&
                             rRun.fOverrideDescriptor->getDesc());
            SkASSERT_RELEASE(*lRun.fOverrideDescriptor->getDesc() ==
                             *rRun.fOverrideDescriptor->getDesc());
        } else {
            SkASSERT_RELEASE(!rRun.fOverrideDescriptor.get());
        }

        // color can be changed
        //SkASSERT(lRun.fColor == rRun.fColor);
        SkASSERT_RELEASE(lRun.fInitialized == rRun.fInitialized);
        SkASSERT_RELEASE(lRun.fDrawAsPaths == rRun.fDrawAsPaths);

        SkASSERT_RELEASE(lRun.fSubRunInfo.count() == rRun.fSubRunInfo.count());
        for (int j = 0; j < lRun.fSubRunInfo.count(); j++) {
            const Run::SubRunInfo& lSubRun = lRun.fSubRunInfo[j];
            const Run::SubRunInfo& rSubRun = rRun.fSubRunInfo[j];

            // TODO we can do this check, but we have to apply the VM to the old vertex bounds
            //SkASSERT_RELEASE(lSubRun.vertexBounds() == rSubRun.vertexBounds());

            if (lSubRun.strike()) {
                SkASSERT_RELEASE(rSubRun.strike());
                SkASSERT_RELEASE(GrBatchTextStrike::GetKey(*lSubRun.strike()) ==
                                 GrBatchTextStrike::GetKey(*rSubRun.strike()));
            } else {
                SkASSERT_RELEASE(!rSubRun.strike());
            }

            SkASSERT_RELEASE(lSubRun.vertexStartIndex() == rSubRun.vertexStartIndex());
            SkASSERT_RELEASE(lSubRun.vertexEndIndex() == rSubRun.vertexEndIndex());
            SkASSERT_RELEASE(lSubRun.glyphStartIndex() == rSubRun.glyphStartIndex());
            SkASSERT_RELEASE(lSubRun.glyphEndIndex() == rSubRun.glyphEndIndex());
            SkASSERT_RELEASE(lSubRun.maskFormat() == rSubRun.maskFormat());
            SkASSERT_RELEASE(lSubRun.drawAsDistanceFields() == rSubRun.drawAsDistanceFields());
            SkASSERT_RELEASE(lSubRun.hasUseLCDText() == rSubRun.hasUseLCDText());
        }
    }
}
// Basic test of the SkSpecialImage public API (e.g., isTextureBacked, getROPixels & draw)
static void test_image(const sk_sp<SkSpecialImage>& img, skiatest::Reporter* reporter,
                       GrContext* context, bool peekTextureSucceeds,
                       int offset, int size) {
    const SkIRect subset = img->subset();
    REPORTER_ASSERT(reporter, offset == subset.left());
    REPORTER_ASSERT(reporter, offset == subset.top());
    REPORTER_ASSERT(reporter, kSmallerSize == subset.width());
    REPORTER_ASSERT(reporter, kSmallerSize == subset.height());

    //--------------
    // Test that isTextureBacked reports the correct backing type
    REPORTER_ASSERT(reporter, peekTextureSucceeds == img->isTextureBacked());

#if SK_SUPPORT_GPU
    //--------------
    // Test asTextureRef - as long as there is a context this should succeed
    if (context) {
        sk_sp<GrTexture> texture(img->asTextureRef(context));
        REPORTER_ASSERT(reporter, texture);
    }
#endif

    //--------------
    // Test getROPixels - this should always succeed regardless of backing store
    SkBitmap bitmap;
    REPORTER_ASSERT(reporter, img->getROPixels(&bitmap));
    if (context) {
        REPORTER_ASSERT(reporter, kSmallerSize == bitmap.width());
        REPORTER_ASSERT(reporter, kSmallerSize == bitmap.height());
    } else {
        REPORTER_ASSERT(reporter, size == bitmap.width());
        REPORTER_ASSERT(reporter, size == bitmap.height());
    }

    //--------------
    // Test that draw restricts itself to the subset
    SkImageInfo info = SkImageInfo::MakeN32(kFullSize, kFullSize, kOpaque_SkAlphaType);

    sk_sp<SkSpecialSurface> surf(img->makeSurface(info));

    SkCanvas* canvas = surf->getCanvas();

    canvas->clear(SK_ColorBLUE);
    img->draw(canvas, SkIntToScalar(kPad), SkIntToScalar(kPad), nullptr);

    SkBitmap bm;
    bm.allocN32Pixels(kFullSize, kFullSize, true);

    bool result = canvas->readPixels(bm.info(), bm.getPixels(), bm.rowBytes(), 0, 0);
    SkASSERT_RELEASE(result);

    // Only the center (red) portion should've been drawn into the canvas
    REPORTER_ASSERT(reporter, SK_ColorBLUE == bm.getColor(kPad-1, kPad-1));
    REPORTER_ASSERT(reporter, SK_ColorRED  == bm.getColor(kPad, kPad));
    REPORTER_ASSERT(reporter, SK_ColorRED  == bm.getColor(kSmallerSize+kPad-1,
                                                          kSmallerSize+kPad-1));
    REPORTER_ASSERT(reporter, SK_ColorBLUE == bm.getColor(kSmallerSize+kPad,
                                                          kSmallerSize+kPad));

    //--------------
    // Test that makeTightSubset & makeTightSurface return appropriately sized objects
    // of the correct backing type
    SkIRect newSubset = SkIRect::MakeWH(subset.width(), subset.height());
    {
        sk_sp<SkImage> tightImg(img->makeTightSubset(newSubset));

        REPORTER_ASSERT(reporter, tightImg->width() == subset.width());
        REPORTER_ASSERT(reporter, tightImg->height() == subset.height());
        REPORTER_ASSERT(reporter, peekTextureSucceeds == !!tightImg->getTexture());
        SkPixmap tmpPixmap;
        REPORTER_ASSERT(reporter, peekTextureSucceeds != !!tightImg->peekPixels(&tmpPixmap));
    }
    {
        SkImageInfo info = SkImageInfo::MakeN32(subset.width(), subset.height(),
                                                kPremul_SkAlphaType);
        sk_sp<SkSurface> tightSurf(img->makeTightSurface(info));

        REPORTER_ASSERT(reporter, tightSurf->width() == subset.width());
        REPORTER_ASSERT(reporter, tightSurf->height() == subset.height());
        REPORTER_ASSERT(reporter, peekTextureSucceeds ==
                        !!tightSurf->getTextureHandle(SkSurface::kDiscardWrite_BackendHandleAccess));
        SkPixmap tmpPixmap;
        REPORTER_ASSERT(reporter, peekTextureSucceeds != !!tightSurf->peekPixels(&tmpPixmap));
    }
}
void GrVkMemory::FreeBufferMemory(const GrVkGpu* gpu, GrVkBuffer::Type type,
                                  const GrVkAlloc& alloc) {
    GrVkHeap* heap = gpu->getHeap(buffer_type_to_heap(type));
    SkASSERT_RELEASE(heap->free(alloc));
}
bool GrVkMemory::AllocAndBindImageMemory(const GrVkGpu* gpu,
                                         VkImage image,
                                         bool linearTiling,
                                         GrVkAlloc* alloc) {
    const GrVkInterface* iface = gpu->vkInterface();
    VkDevice device = gpu->device();

    VkMemoryRequirements memReqs;
    GR_VK_CALL(iface, GetImageMemoryRequirements(device, image, &memReqs));

    uint32_t typeIndex = 0;
    GrVkHeap* heap;
    if (linearTiling) {
        VkMemoryPropertyFlags desiredMemProps = VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                VK_MEMORY_PROPERTY_HOST_COHERENT_BIT |
                                                VK_MEMORY_PROPERTY_HOST_CACHED_BIT;
        if (!get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                         memReqs.memoryTypeBits,
                                         desiredMemProps,
                                         &typeIndex)) {
            // this memory type should always be available
            SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                                         memReqs.memoryTypeBits,
                                                         VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
                                                         VK_MEMORY_PROPERTY_HOST_COHERENT_BIT,
                                                         &typeIndex));
        }
        heap = gpu->getHeap(GrVkGpu::kLinearImage_Heap);
    } else {
        // this memory type should always be available
        SkASSERT_RELEASE(get_valid_memory_type_index(gpu->physicalDeviceMemoryProperties(),
                                                     memReqs.memoryTypeBits,
                                                     VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT,
                                                     &typeIndex));
        if (memReqs.size <= kMaxSmallImageSize) {
            heap = gpu->getHeap(GrVkGpu::kSmallOptimalImage_Heap);
        } else {
            heap = gpu->getHeap(GrVkGpu::kOptimalImage_Heap);
        }
    }

    if (!heap->alloc(memReqs.size, memReqs.alignment, typeIndex, alloc)) {
        SkDebugf("Failed to alloc image\n");
        return false;
    }

    // Bind Memory to device
    VkResult err = GR_VK_CALL(iface, BindImageMemory(device, image,
                                                     alloc->fMemory, alloc->fOffset));
    if (err) {
        SkASSERT_RELEASE(heap->free(*alloc));
        return false;
    }

    gTotalImageMemory += alloc->fSize;

    VkDeviceSize pageAlignedSize = align_size(alloc->fSize, kMinVulkanPageSize);
    gTotalImageMemoryFullPage += pageAlignedSize;

    return true;
}
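// The page-aligned accounting above depends on a rounding helper. A minimal sketch
// of what align_size() is assumed to do: round `size` up to the next multiple of a
// power-of-two `alignment` (the assertion documents that precondition).
static VkDeviceSize align_size(VkDeviceSize size, VkDeviceSize alignment) {
    SkASSERT(SkIsPow2(alignment));
    return (size + alignment - 1) & ~(alignment - 1);
}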