// This routine will word-align ShadowStart and ShadowEnd prior to scanning. static u32 countAndClearShadowValues(u32 BitIdx, uptr ShadowStart, uptr ShadowEnd) { u32 WorkingSetSize = 0; u32 ByteValue = 0x1 << BitIdx; u32 WordValue = ByteValue | ByteValue << 8 | ByteValue << 16 | ByteValue << 24; // Get word aligned start. ShadowStart = RoundDownTo(ShadowStart, sizeof(u32)); bool Accum = getFlags()->record_snapshots && BitIdx < MaxAccumBitIdx; for (u32 *Ptr = (u32 *)ShadowStart; Ptr < (u32 *)ShadowEnd; ++Ptr) { if ((*Ptr & WordValue) != 0) { byte *BytePtr = (byte *)Ptr; for (u32 j = 0; j < sizeof(u32); ++j) { if (BytePtr[j] & ByteValue) { ++WorkingSetSize; if (Accum) { // Accumulate to the lower-frequency bit to the left. BytePtr[j] |= (ByteValue << 1); } } } // Clear this bit from every shadow byte. *Ptr &= ~WordValue; } } return WorkingSetSize; }
// Frees a (possibly tagged) heap pointer: validates the tag, checks the tail
// magic for short-granule overwrites, clears metadata, optionally fills and
// retags the freed region, then returns the chunk to the allocator.
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  HWASAN_FREE_HOOK(tagged_ptr);

  // The pointer's tag must match the memory's tag, or this is an invalid
  // (e.g. double or wild) free.
  if (!PointerAndMemoryTagsMatch(tagged_ptr))
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));

  void *untagged_ptr = UntagPtr(tagged_ptr);
  // Round down to the chunk start: for right-aligned allocations the user
  // pointer may sit inside the block rather than at its beginning.
  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  // Capture size and contexts before the metadata is cleared below.
  uptr orig_size = meta->requested_size;
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->alloc_context_id;

  // Check tail magic: the slack between the requested size and the tagged
  // (granule-rounded) size was filled with tail_magic at allocation time; a
  // mismatch means the app wrote past the end of the buffer.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && !right_align_mode && orig_size) {
    uptr tail_size = tagged_size - orig_size;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    if (tail_size && internal_memcmp(tail_beg, tail_magic, tail_size))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_size, tail_magic);
  }

  meta->requested_size = 0;
  meta->alloc_context_id = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  // Optionally scribble over (a prefix of) the freed region to help catch
  // use-after-free of the data itself.
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  // Retag the freed memory so stale tagged pointers fault on their next use.
  // Falls back to kFallbackFreeTag when there is no current thread to supply
  // a random tag.
  if (flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     t ? t->GenerateRandomTag() : kFallbackFreeTag);

  if (t) {
    // Fast path: use the current thread's allocator cache, and record the
    // free in its ring of recent heap allocations (for UAF reports).
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
                free_context_id, static_cast<u32>(orig_size)});
  } else {
    // No current thread (e.g. during teardown): fall back to the shared
    // cache under a lock.
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}
// Returns the requested size of the allocation containing tagged_ptr, or 0
// when the (untagged) pointer does not address the start of a chunk.
static uptr AllocationSize(const void *tagged_ptr) {
  const void *ptr = UntagPtr(tagged_ptr);
  if (!ptr)
    return 0;
  const void *block_beg = allocator.GetBlockBegin(ptr);
  Metadata *meta = (Metadata *)allocator.GetMetaData(ptr);
  // Right-aligned chunks place the user pointer inside the block at the
  // previous kShadowAlignment boundary; otherwise the user pointer must be
  // the block start itself.
  const void *expected_beg =
      meta->right_aligned
          ? reinterpret_cast<void *>(
                RoundDownTo(reinterpret_cast<uptr>(ptr), kShadowAlignment))
          : ptr;
  if (block_beg != expected_beg)
    return 0;
  return meta->requested_size;
}
// If this is a shadow fault, we handle it here; otherwise, we pass it to the // app to handle it just as the app would do without our tool in place. static void handleMemoryFault(int SigNum, __sanitizer_siginfo *Info, void *Ctx) { if (SigNum == SIGSEGV) { // We rely on si_addr being filled in (thus we do not support old kernels). siginfo_t *SigInfo = (siginfo_t *)Info; uptr Addr = (uptr)SigInfo->si_addr; if (isShadowMem(Addr)) { VPrintf(3, "Shadow fault @%p\n", Addr); uptr PageSize = GetPageSizeCached(); int Res = internal_mprotect((void *)RoundDownTo(Addr, PageSize), PageSize, PROT_READ|PROT_WRITE); CHECK(Res == 0); } else if (AppSigAct.sigaction) { // FIXME: For simplicity we ignore app options including its signal stack // (we just use ours) and all the delivery flags. AppSigAct.sigaction(SigNum, Info, Ctx); } else { // Crash instead of spinning with infinite faults. reinstateDefaultHandler(SigNum); } } else UNREACHABLE("signal not registered"); }