bool processWorkingSetSigaction(int SigNum, const void *ActVoid,
                                void *OldActVoid) {
  VPrintf(2, "%s: %d\n", __FUNCTION__, SigNum);
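  // SIGSEGV is handled by the tool itself: remember the application's handler
  // below and skip the real sigaction call; all other signals pass through.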
  if (SigNum == SIGSEGV) {
    const struct sigaction *Act = (const struct sigaction *) ActVoid;
    struct sigaction *OldAct = (struct sigaction *) OldActVoid;
    if (OldAct)
      internal_memcpy(OldAct, &AppSigAct, sizeof(*OldAct));
    if (Act)
      internal_memcpy(&AppSigAct, Act, sizeof(AppSigAct));
    return false; // Skip real call.
  }
  return true;
}
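// Reallocate by allocating a fresh (tagged) chunk, copying over the smaller of
// the old requested size and the new size, and then freeing the old chunk.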
static void *HwasanReallocate(StackTrace *stack, void *tagged_ptr_old,
                              uptr new_size, uptr alignment) {
  if (!PointerAndMemoryTagsMatch(tagged_ptr_old))
    ReportInvalidFree(stack, reinterpret_cast<uptr>(tagged_ptr_old));

  void *tagged_ptr_new =
      HwasanAllocate(stack, new_size, alignment, false /*zeroise*/);
  if (tagged_ptr_old && tagged_ptr_new) {
    void *untagged_ptr_old = UntagPtr(tagged_ptr_old);
    Metadata *meta =
        reinterpret_cast<Metadata *>(allocator.GetMetaData(untagged_ptr_old));
    internal_memcpy(UntagPtr(tagged_ptr_new), untagged_ptr_old,
                    Min(new_size, static_cast<uptr>(meta->requested_size)));
    HwasanDeallocate(stack, tagged_ptr_old);
  }
  return tagged_ptr_new;
}
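// Allocate a chunk for orig_size bytes: round the size up to whole shadow
// granules, optionally zero- or pattern-fill it, write the tail magic, and
// return a user pointer that may carry a freshly generated tag.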
static void *HwasanAllocate(StackTrace *stack, uptr orig_size, uptr alignment,
                            bool zeroise) {
  if (orig_size > kMaxAllowedMallocSize) {
    if (AllocatorMayReturnNull()) {
      Report("WARNING: HWAddressSanitizer failed to allocate 0x%zx bytes\n",
             orig_size);
      return nullptr;
    }
    ReportAllocationSizeTooBig(orig_size, kMaxAllowedMallocSize, stack);
  }

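  // Round up to the granule size and allocate from the current thread's cache,
  // or from the global fallback cache (under a lock) if there is no Thread.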
  alignment = Max(alignment, kShadowAlignment);
  uptr size = TaggedSize(orig_size);
  Thread *t = GetCurrentThread();
  void *allocated;
  if (t) {
    allocated = allocator.Allocate(t->allocator_cache(), size, alignment);
  } else {
    SpinMutexLock l(&fallback_mutex);
    AllocatorCache *cache = &fallback_allocator_cache;
    allocated = allocator.Allocate(cache, size, alignment);
  }
  if (UNLIKELY(!allocated)) {
    SetAllocatorOutOfMemory();
    if (AllocatorMayReturnNull())
      return nullptr;
    ReportOutOfMemory(size, stack);
  }
  Metadata *meta =
      reinterpret_cast<Metadata *>(allocator.GetMetaData(allocated));
  meta->requested_size = static_cast<u32>(orig_size);
  meta->alloc_context_id = StackDepotPut(*stack);
  meta->right_aligned = false;
  if (zeroise) {
    internal_memset(allocated, 0, size);
  } else if (flags()->max_malloc_fill_size > 0) {
    uptr fill_size = Min(size, (uptr)flags()->max_malloc_fill_size);
    internal_memset(allocated, flags()->malloc_fill_byte, fill_size);
  }
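  // For left-aligned chunks, fill the tail padding between orig_size and the
  // granule-rounded size with tail_magic so later checks can detect writes
  // past the requested end.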
  if (!right_align_mode)
    internal_memcpy(reinterpret_cast<u8 *>(allocated) + orig_size, tail_magic,
                    size - orig_size);

  void *user_ptr = allocated;
  // Tagging can only be skipped when both tag_in_malloc and tag_in_free are
  // false. When tag_in_malloc = false and tag_in_free = true, malloc needs to
  // retag to 0.
  if ((flags()->tag_in_malloc || flags()->tag_in_free) &&
      atomic_load_relaxed(&hwasan_allocator_tagging_enabled)) {
    tag_t tag = flags()->tag_in_malloc && malloc_bisect(stack, orig_size)
                    ? (t ? t->GenerateRandomTag() : kFallbackAllocTag)
                    : 0;
    user_ptr = (void *)TagMemoryAligned((uptr)user_ptr, size, tag);
  }

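  // Right-aligned mode: when the request does not fill the last granule and
  // the alignment permits it, shift the user pointer so the allocation ends at
  // the granule boundary and record that in the chunk metadata.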
  if ((orig_size % kShadowAlignment) && (alignment <= kShadowAlignment) &&
      right_align_mode) {
    uptr as_uptr = reinterpret_cast<uptr>(user_ptr);
    if (right_align_mode == kRightAlignAlways ||
        GetTagFromPointer(as_uptr) & 1) {  // Use a tag bit as a random bit.
      user_ptr = reinterpret_cast<void *>(AlignRight(as_uptr, orig_size));
      meta->right_aligned = true;
    }
  }

  HWASAN_MALLOC_HOOK(user_ptr, size);
  return user_ptr;
}