void registerMemoryFaultHandler() {
  // We do not use an alternate signal stack, as doing so would require
  // setting it up for each app thread.
  // FIXME: This could result in problems with emulating the app's signal
  // handling if the app relies on an alternate stack for SIGSEGV.

  // We require that SIGSEGV is not blocked. We use a sigprocmask
  // interceptor to ensure that in the future. Here we ensure it for
  // the current thread. We assume there are no other threads at this
  // point during initialization, or that at least they do not block
  // SIGSEGV.
  __sanitizer_sigset_t SigSet;
  internal_sigemptyset(&SigSet);
  internal_sigprocmask(SIG_BLOCK, &SigSet, nullptr);

  __sanitizer_sigaction SigAct;
  internal_memset(&SigAct, 0, sizeof(SigAct));
  SigAct.sigaction = handleMemoryFault;
  // We want to handle nested signals because we need to handle a
  // shadow fault in an app signal handler.
  SigAct.sa_flags = SA_SIGINFO | SA_NODEFER;
  int Res = internal_sigaction(SIGSEGV, &SigAct, &AppSigAct);
  CHECK(Res == 0);

  VPrintf(1, "Registered SIGSEGV handler\n");
}
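
// Illustration only: a minimal, self-contained sketch of the same
// register-and-chain pattern using plain POSIX APIs. All names here
// (demoFaultHandler, AppSigActDemo, demoRegister) are hypothetical and not
// part of the runtime above; the sketch shows why the previous sigaction is
// saved: faults the tool does not own get forwarded to the handler the app
// had installed.
#include <signal.h>
#include <stdlib.h>

static struct sigaction AppSigActDemo;

static void demoFaultHandler(int SigNum, siginfo_t *Info, void *Ctx) {
  // A real tool would first try to service faults in its own shadow
  // memory here. Everything else is chained to the app's handler.
  if (AppSigActDemo.sa_flags & SA_SIGINFO)
    AppSigActDemo.sa_sigaction(SigNum, Info, Ctx);
  else if (AppSigActDemo.sa_handler != SIG_IGN &&
           AppSigActDemo.sa_handler != SIG_DFL)
    AppSigActDemo.sa_handler(SigNum);
  else
    _Exit(1);  // Nothing to chain to; die rather than loop on the fault.
}

static void demoRegister() {
  struct sigaction SigAct = {};
  SigAct.sa_sigaction = demoFaultHandler;
  // SA_NODEFER keeps SIGSEGV deliverable inside the handler itself, so a
  // shadow fault raised while handling an app signal is still caught.
  SigAct.sa_flags = SA_SIGINFO | SA_NODEFER;
  sigaction(SIGSEGV, &SigAct, &AppSigActDemo);
}
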
static void reinstateDefaultHandler(int SigNum) {
  __sanitizer_sigaction SigAct;
  internal_memset(&SigAct, 0, sizeof(SigAct));
  SigAct.sigaction = (decltype(SigAct.sigaction))SIG_DFL;
  int Res = internal_sigaction(SigNum, &SigAct, nullptr);
  CHECK(Res == 0);
  VPrintf(1, "Restored default handler for signal %d\n", SigNum);
}
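
// Typical use of the reinstate-default pattern (a sketch; whether the
// runtime does exactly this is an assumption): once a fault is deemed
// unrecoverable, restore SIG_DFL and re-raise the signal so the process
// dies with the default disposition (e.g. producing a core dump):
//
//   reinstateDefaultHandler(SIGSEGV);
//   raise(SIGSEGV);
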
static void HwasanDeallocate(StackTrace *stack, void *tagged_ptr) {
  CHECK(tagged_ptr);
  HWASAN_FREE_HOOK(tagged_ptr);

  if (!PointerAndMemoryTagsMatch(tagged_ptr))
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr));

  void *untagged_ptr = UntagPtr(tagged_ptr);
  void *aligned_ptr = reinterpret_cast<void *>(
      RoundDownTo(reinterpret_cast<uptr>(untagged_ptr), kShadowAlignment));
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(aligned_ptr));
  uptr orig_size = meta->requested_size;
  u32 free_context_id = StackDepotPut(*stack);
  u32 alloc_context_id = meta->alloc_context_id;

  // Check tail magic.
  uptr tagged_size = TaggedSize(orig_size);
  if (flags()->free_checks_tail_magic && !right_align_mode && orig_size) {
    uptr tail_size = tagged_size - orig_size;
    CHECK_LT(tail_size, kShadowAlignment);
    void *tail_beg = reinterpret_cast<void *>(
        reinterpret_cast<uptr>(aligned_ptr) + orig_size);
    if (tail_size && internal_memcmp(tail_beg, tail_magic, tail_size))
      ReportTailOverwritten(stack, reinterpret_cast<uptr>(tagged_ptr),
                            orig_size, tail_size, tail_magic);
  }

  meta->requested_size = 0;
  meta->alloc_context_id = 0;
  // This memory will not be reused by anyone else, so we are free to keep it
  // poisoned.
  Thread *t = GetCurrentThread();
  if (flags()->max_free_fill_size > 0) {
    uptr fill_size =
        Min(TaggedSize(orig_size), (uptr)flags()->max_free_fill_size);
    internal_memset(aligned_ptr, flags()->free_fill_byte, fill_size);
  }
  if (flags()->tag_in_free && malloc_bisect(stack, 0) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled))
    TagMemoryAligned(reinterpret_cast<uptr>(aligned_ptr), TaggedSize(orig_size),
                     t ? t->GenerateRandomTag() : kFallbackFreeTag);
  if (t) {
    allocator.Deallocate(t->allocator_cache(), aligned_ptr);
    if (auto *ha = t->heap_allocations())
      ha->push({reinterpret_cast<uptr>(tagged_ptr), alloc_context_id,
                free_context_id, static_cast<u32>(orig_size)});
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocator.Deallocate(cache, aligned_ptr);
  }
}
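
// Illustration only: a stand-alone sketch of the tail-magic scheme checked
// above, with hypothetical names and a fixed 16-byte granule standing in
// for kShadowAlignment and the runtime's tail_magic buffer. Rounding a
// request up to a granule leaves a tail; it is filled with a magic pattern
// at allocation time and verified at free time, so a small overflow past
// the requested size is caught even though it never leaves the granule.
#include <cassert>
#include <cstring>

static const unsigned kGranule = 16;
static const unsigned char kTailMagicDemo[kGranule - 1] = {
    0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac,
    0xac, 0xac, 0xac, 0xac, 0xac, 0xac, 0xac};

// Allocation time: pad [orig_size, rounded_size) with the magic pattern.
static void writeTailMagic(unsigned char *buf, unsigned orig_size,
                           unsigned rounded_size) {
  std::memcpy(buf + orig_size, kTailMagicDemo, rounded_size - orig_size);
}

// Free time: true iff the tail is still intact.
static bool tailIntact(const unsigned char *buf, unsigned orig_size,
                       unsigned rounded_size) {
  return std::memcmp(buf + orig_size, kTailMagicDemo,
                     rounded_size - orig_size) == 0;
}

int main() {
  unsigned char buf[kGranule] = {};
  writeTailMagic(buf, /*orig_size=*/13, /*rounded_size=*/kGranule);
  assert(tailIntact(buf, 13, kGranule));
  buf[13] = 0;  // Simulated off-by-one write past the requested 13 bytes.
  assert(!tailIntact(buf, 13, kGranule));
  return 0;
}
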
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  if (orig_size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
  }

  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = static_cast<u32>(orig_size);
  meta->alloc_context_id = StackDepotPut(*stack);
  meta->right_aligned = false;
  if (zeroise) {
    internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
  if (!right_align_mode)
    internal_memcpy(reinterpret_cast<u8 *>(allocated) + orig_size, tail_magic,
                    size - orig_size);

  void *user_ptr = allocated;
  // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
  // false. When tag_in_malloc is false and tag_in_free is true, malloc needs
  // to retag with tag 0.
  if ((flags()->tag_in_malloc || flags()->tag_in_free) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    tag_t tag = flags()->tag_in_malloc && malloc_bisect(stack, orig_size)
                    ? (t ? t->GenerateRandomTag() : kFallbackAllocTag)
                    : 0;
    user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, tag);
  }

  if ((orig_size % kShadowAlignment) && (alignment <= kShadowAlignment) &&
      right_align_mode) {
    uptr as_uptr = reinterpret_cast<uptr>(user_ptr);
    if (right_align_mode == kRightAlignAlways ||
        GetTagFromPointer(as_uptr) & 1) {  // Use a tag bit as a random bit.
      user_ptr = reinterpret_cast<void *>(AlignRight(as_uptr, orig_size));
      meta->right_aligned = 1;
    }
  }

  HWASAN_MALLOC_HOOK(user_ptr, size);
  return user_ptr;
}
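
// Illustration only: how the tag produced by GenerateRandomTag travels in
// the unused top byte of an AArch64 pointer (the hardware TBI feature
// hwasan relies on). tagPointer/untagPointer/tagOfPointer are hypothetical
// stand-ins for the runtime's TagMemoryAligned / UntagPtr /
// GetTagFromPointer; the real TagMemoryAligned additionally writes the tag
// into shadow, one byte per kShadowAlignment-sized granule, which is what
// PointerAndMemoryTagsMatch later compares against. Assumes a 64-bit
// target.
#include <cstdint>

typedef uint8_t tag_t;
static const unsigned kTagShiftDemo = 56;  // Bits 56..63 are ignored by TBI.

static uintptr_t tagPointer(uintptr_t p, tag_t tag) {
  return (p & ~(UINT64_C(0xFF) << kTagShiftDemo)) |
         (static_cast<uintptr_t>(tag) << kTagShiftDemo);
}

static tag_t tagOfPointer(uintptr_t p) {
  return static_cast<tag_t>(p >> kTagShiftDemo);
}

static uintptr_t untagPointer(uintptr_t p) {
  return p & ~(UINT64_C(0xFF) << kTagShiftDemo);
}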