bool debug_initialize(const MallocDispatch* malloc_dispatch, int* malloc_zygote_child) {
  if (malloc_zygote_child == nullptr) {
    return false;
  }

  InitAtfork();

  g_malloc_zygote_child = malloc_zygote_child;

  g_dispatch = malloc_dispatch;

  if (!DebugDisableInitialize()) {
    return false;
  }

  // Create the global debug state; if it fails to initialize, undo the debug
  // disable setup before bailing out.
  DebugData* debug = new DebugData();
  if (!debug->Initialize()) {
    delete debug;
    DebugDisableFinalize();
    return false;
  }
  g_debug = debug;

  // Always enable the backtrace code since we will use it in a number
  // of different error cases.
  backtrace_startup();

  return true;
}
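
// The functions below read header->tag, header->usable_size, header->orig_pointer,
// back_header->num_frames and back_header->frames[]. A minimal layout sketch
// consistent with those accesses is shown here for orientation; it is an
// assumption for illustration, not the actual definitions from the debug headers.
struct Header {
  uint32_t tag;         // set to DEBUG_FREE_TAG once the allocation is freed
  void* orig_pointer;   // pointer returned by the wrapped allocator
  size_t usable_size;   // usable size of the user portion of the allocation
} __attribute__((packed));

struct BacktraceHeader {
  size_t num_frames;    // number of valid entries in frames[]
  uintptr_t frames[0];  // return addresses captured by backtrace_get()
} __attribute__((packed));
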
void FreeTrackData::VerifyAndFree(DebugData& debug, const Header* header,
                                  const void* pointer) {
  ScopedDisableDebugCalls disable;

  // The tag is expected to be DEBUG_FREE_TAG once the allocation has been freed,
  // so any other value means the header was overwritten after the free.
  if (header->tag != DEBUG_FREE_TAG) {
    error_log(LOG_DIVIDER);
    error_log("+++ ALLOCATION %p HAS CORRUPTED HEADER TAG 0x%x AFTER FREE", pointer, header->tag);
    error_log(LOG_DIVIDER);
  } else {
    const uint8_t* memory = reinterpret_cast<const uint8_t*>(pointer);
    // Verify at most fill_on_free_bytes bytes of the allocation.
    size_t bytes = header->usable_size;
    bytes = (bytes < debug.config().fill_on_free_bytes) ? bytes : debug.config().fill_on_free_bytes;
    // Compare the freed memory against the expected fill pattern in
    // cmp_mem_-sized chunks.
    while (bytes > 0) {
      size_t bytes_to_cmp = (bytes < cmp_mem_.size()) ? bytes : cmp_mem_.size();
      if (memcmp(memory, cmp_mem_.data(), bytes_to_cmp) != 0) {
        LogFreeError(debug, header, reinterpret_cast<const uint8_t*>(pointer));
        break;
      }
      bytes -= bytes_to_cmp;
      memory = &memory[bytes_to_cmp];
    }
  }

  // Release the backtrace recorded at free time, if any, then hand the original
  // allocation back to the real allocator.
  auto back_iter = backtraces_.find(header);
  if (back_iter != backtraces_.end()) {
    g_dispatch->free(reinterpret_cast<void*>(back_iter->second));
    backtraces_.erase(back_iter);
  }

  g_dispatch->free(header->orig_pointer);
}
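
// VerifyAndFree() compares freed memory against cmp_mem_ in chunks, and
// LogFreeError() flags bytes that differ from config().fill_free_value, which
// suggests cmp_mem_ is simply a buffer pre-filled with that value. A minimal
// constructor sketch under that assumption follows; the buffer size and the
// free_track_backtrace_num_frames option name are guesses, not the real code.
FreeTrackData::FreeTrackData(const Config& config)
    : backtrace_num_frames_(config.free_track_backtrace_num_frames) {
  cmp_mem_.resize(4096);
  memset(cmp_mem_.data(), config.fill_free_value, cmp_mem_.size());
}
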
void FreeTrackData::VerifyAll(DebugData& debug) {
  // Make sure the stl calls below don't call the debug_XXX functions.
  ScopedDisableDebugCalls disable;

  // Verify and release every allocation that is still being tracked.
  for (const auto& header : list_) {
    VerifyAndFree(debug, header, debug.GetPointer(header));
  }
  list_.clear();
}

void FreeTrackData::Add(DebugData& debug, const Header* header) {
  // Make sure the stl calls below don't call the debug_XXX functions.
  ScopedDisableDebugCalls disable;

  pthread_mutex_lock(&mutex_);
  // If the list is already at capacity, verify and release the oldest tracked
  // allocation to make room for the new entry.
  if (list_.size() == debug.config().free_track_allocations) {
    const Header* old_header = list_.back();
    VerifyAndFree(debug, old_header, debug.GetPointer(old_header));
    list_.pop_back();
  }

  // If backtrace collection is enabled, record where this free happened so a
  // later use-after-free report can show it.
  if (backtrace_num_frames_ > 0) {
    BacktraceHeader* back_header = reinterpret_cast<BacktraceHeader*>(
      g_dispatch->malloc(sizeof(BacktraceHeader) + backtrace_num_frames_ * sizeof(uintptr_t)));
    if (back_header) {
      back_header->num_frames = backtrace_get(&back_header->frames[0], backtrace_num_frames_);
      backtraces_[header] = back_header;
    }
  }
  list_.push_front(header);

  pthread_mutex_unlock(&mutex_);
}
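
// Hypothetical caller sketch: this is not the actual free path, but it shows the
// hand-off Add() expects. The freeing code presumably tags the header, poisons up
// to fill_on_free_bytes of the user data with fill_free_value, and then defers
// the real free to VerifyAndFree() by handing the header to Add(). The TrackFree
// name and the free_track member are assumptions for illustration.
static void TrackFree(DebugData& debug, Header* header, void* pointer) {
  header->tag = DEBUG_FREE_TAG;  // mark the allocation as freed
  size_t bytes = header->usable_size;
  if (bytes > debug.config().fill_on_free_bytes) {
    bytes = debug.config().fill_on_free_bytes;
  }
  memset(pointer, debug.config().fill_free_value, bytes);  // poison the user data
  debug.free_track->Add(debug, header);  // defer the real free until eviction
}
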
void FreeTrackData::LogFreeError(DebugData& debug, const Header* header,
                                 const uint8_t* pointer) {
  ScopedDisableDebugCalls disable;

  error_log(LOG_DIVIDER);
  error_log("+++ ALLOCATION %p USED AFTER FREE", pointer);
  uint8_t fill_free_value = debug.config().fill_free_value;
  // Log every byte that no longer matches the free fill value.
  for (size_t i = 0; i < header->usable_size; i++) {
    if (pointer[i] != fill_free_value) {
      error_log("  pointer[%zu] = 0x%02x (expected 0x%02x)", i, pointer[i], fill_free_value);
    }
  }
  auto back_iter = backtraces_.find(header);
  if (back_iter != backtraces_.end()) {
    const BacktraceHeader* back_header = back_iter->second;
    error_log("Backtrace at time of free:");
    backtrace_log(&back_header->frames[0], back_header->num_frames);
  }
  error_log(LOG_DIVIDER);
}
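
// Illustrative example of the report LogFreeError() produces (addresses, offsets
// and the 0xef fill value are made up; the divider lines depend on LOG_DIVIDER):
//
//   +++ ALLOCATION 0x71b8c000 USED AFTER FREE
//     pointer[17] = 0x41 (expected 0xef)
//     pointer[18] = 0x42 (expected 0xef)
//   Backtrace at time of free:
//     <frames logged by backtrace_log()>
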
void RearGuardData::LogFailure(DebugData& debug, const Header* header) {
  GuardData::LogFailure(header, debug.GetPointer(header), debug.GetRearGuard(header));
}

bool RearGuardData::Valid(DebugData& debug, const Header* header) {
  return GuardData::Valid(debug.GetRearGuard(header));
}

bool FrontGuardData::Valid(DebugData& debug, const Header* header) {
  return GuardData::Valid(debug.GetFrontGuard(header));
}
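
// The guard checks above delegate to GuardData::Valid() with just a pointer to
// the guard bytes, which suggests the base class compares those bytes against a
// pre-filled pattern, much like the cmp_mem_ check in VerifyAndFree(). A minimal
// sketch under that assumption (not the actual GuardData implementation):
bool GuardData::Valid(void* data) {
  return memcmp(data, cmp_mem_.data(), cmp_mem_.size()) == 0;
}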