Example 1
void* mmap64(void* addr, size_t size, int prot, int flags, int fd, off64_t offset) {
  if (offset < 0 || (offset & ((1UL << MMAP2_SHIFT)-1)) != 0) {
    errno = EINVAL;
    return MAP_FAILED;
  }

  // prevent allocations large enough for `end - start` to overflow
  size_t rounded = BIONIC_ALIGN(size, PAGE_SIZE);
  if (rounded < size || rounded > PTRDIFF_MAX) {
    errno = ENOMEM;
    return MAP_FAILED;
  }

  bool is_private_anonymous =
      (flags & (MAP_PRIVATE | MAP_ANONYMOUS)) == (MAP_PRIVATE | MAP_ANONYMOUS);
  void* result = __mmap2(addr, size, prot, flags, fd, offset >> MMAP2_SHIFT);

  if (result != MAP_FAILED && kernel_has_MADV_MERGEABLE && is_private_anonymous) {
    ErrnoRestorer errno_restorer;
    int rc = madvise(result, size, MADV_MERGEABLE);
    if (rc == -1 && errno == EINVAL) {
      kernel_has_MADV_MERGEABLE = false;
    }
  }

  return result;
}
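
All of these examples use BIONIC_ALIGN to round a size up to a multiple of a power-of-two alignment and then check `rounded < size` to catch wrap-around. A minimal standalone sketch of that idiom (the macro below mirrors bionic's bionic_macros.h definition, but treat it as illustrative):

#include <cstddef>
#include <cstdio>

// Round `value` up to the next multiple of `alignment` (alignment must be a
// power of two). This mirrors bionic's BIONIC_ALIGN macro.
#define BIONIC_ALIGN(value, alignment) \
  (((value) + (alignment) - 1) & ~((alignment) - 1))

int main() {
  const size_t kPageSize = 4096;
  printf("%zu\n", BIONIC_ALIGN(size_t{1}, kPageSize));     // 4096
  printf("%zu\n", BIONIC_ALIGN(size_t{4096}, kPageSize));  // 4096
  // Near SIZE_MAX the addition wraps, so the result comes out smaller than the
  // input; that is exactly what the `rounded < size` checks above detect.
  size_t huge = static_cast<size_t>(-1) - 10;
  size_t rounded = BIONIC_ALIGN(huge, kPageSize);
  printf("wrapped: %d\n", rounded < huge);                 // 1
  return 0;
}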
Example 2
static int __allocate_thread(pthread_attr_t* attr, pthread_internal_t** threadp, void** child_stack) {
  size_t mmap_size;
  uint8_t* stack_top;

  if (attr->stack_base == NULL) {
    // The caller didn't provide a stack, so allocate one.
    // Make sure the stack size and guard size are multiples of PAGE_SIZE.
    mmap_size = BIONIC_ALIGN(attr->stack_size + sizeof(pthread_internal_t), PAGE_SIZE);
    attr->guard_size = BIONIC_ALIGN(attr->guard_size, PAGE_SIZE);
    attr->stack_base = __create_thread_mapped_space(mmap_size, attr->guard_size);
    if (attr->stack_base == NULL) {
      return EAGAIN;
    }
    stack_top = reinterpret_cast<uint8_t*>(attr->stack_base) + mmap_size;
  } else {
    // Remember the mmap size is zero and we don't need to free it.
    mmap_size = 0;
    stack_top = reinterpret_cast<uint8_t*>(attr->stack_base) + attr->stack_size;
  }

  // Mapped space (or user-allocated stack) is used for:
  //   pthread_internal_t
  //   thread stack (including guard page)

  // To safely access the pthread_internal_t and thread stack, we need to find a 16-byte aligned boundary.
  stack_top = reinterpret_cast<uint8_t*>(
                (reinterpret_cast<uintptr_t>(stack_top) - sizeof(pthread_internal_t)) & ~0xf);

  pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(stack_top);
  if (mmap_size == 0) {
    // If thread was not allocated by mmap(), it may not have been cleared to zero.
    // So assume the worst and zero it.
    memset(thread, 0, sizeof(pthread_internal_t));
  }
  attr->stack_size = stack_top - reinterpret_cast<uint8_t*>(attr->stack_base);

  thread->mmap_size = mmap_size;
  thread->attr = *attr;
  __init_tls(thread);

  *threadp = thread;
  *child_stack = stack_top;
  return 0;
}
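
The `& ~0xf` step above is the only subtle arithmetic here: it reserves room for pthread_internal_t at the top of the mapping and rounds the result down to a 16-byte boundary, so both the struct and the initial stack pointer are aligned. A self-contained sketch of the same calculation (names are illustrative, not bionic's):

#include <cassert>
#include <cstddef>
#include <cstdint>

// Reserve `header_size` bytes at the top of a region, then round down so that
// both the header and the stack pointer growing down from it are 16-byte aligned.
static uint8_t* reserve_aligned_header(uint8_t* region_top, size_t header_size) {
  uintptr_t top = reinterpret_cast<uintptr_t>(region_top);
  return reinterpret_cast<uint8_t*>((top - header_size) & ~uintptr_t{0xf});
}

int main() {
  alignas(16) static uint8_t region[4096];
  uint8_t* p = reserve_aligned_header(region + sizeof(region), 37);  // odd-sized header on purpose
  assert(reinterpret_cast<uintptr_t>(p) % 16 == 0);   // aligned
  assert(p + 37 <= region + sizeof(region));          // header still fits inside the region
  return 0;
}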
Example 3
void* prop_area::allocate_obj(const size_t size, uint_least32_t* const off) {
  const size_t aligned = BIONIC_ALIGN(size, sizeof(uint_least32_t));
  if (bytes_used_ + aligned > pa_data_size) {
    return nullptr;
  }

  *off = bytes_used_;
  bytes_used_ += aligned;
  return data_ + *off;
}
Example 4
static void *allocate_obj(const size_t size, uint32_t *const off)
{
    prop_area *pa = __system_property_area__;
    const size_t aligned = BIONIC_ALIGN(size, sizeof(uint32_t));
    if (pa->bytes_used + aligned > pa_data_size) {
        return NULL;
    }

    *off = pa->bytes_used;
    pa->bytes_used += aligned;
    return pa->data + *off;
}
extern "C" void* chk_pvalloc(size_t bytes) {
  if (DebugCallsDisabled()) {
    return g_malloc_dispatch->pvalloc(bytes);
  }

  size_t pagesize = getpagesize();
  size_t size = BIONIC_ALIGN(bytes, pagesize);
  if (size < bytes) { // Overflow
    return NULL;
  }
  return chk_memalign(pagesize, size);
}
Example 6
void* debug_pvalloc(size_t bytes) {
  if (DebugCallsDisabled()) {
    return g_dispatch->pvalloc(bytes);
  }

  size_t pagesize = getpagesize();
  size_t size = BIONIC_ALIGN(bytes, pagesize);
  if (size < bytes) {
    // Overflow
    errno = ENOMEM;
    return nullptr;
  }
  return debug_memalign(pagesize, size);
}
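
Both wrappers above implement the same obsolete pvalloc() contract: round the request up to a whole number of pages and delegate to memalign(), failing if the round-up overflows. A hedged, plain-libc sketch of that pattern (not the bionic debug hooks themselves):

#include <cerrno>
#include <cstddef>
#include <malloc.h>   // memalign() (non-standard, but available on Linux/bionic)
#include <unistd.h>

// pvalloc-like helper: page-aligned allocation whose size is a whole number of pages.
static void* pvalloc_like(size_t bytes) {
  size_t pagesize = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  size_t size = (bytes + pagesize - 1) & ~(pagesize - 1);  // same as BIONIC_ALIGN(bytes, pagesize)
  if (size < bytes) {  // the round-up wrapped around
    errno = ENOMEM;
    return nullptr;
  }
  return memalign(pagesize, size);
}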
Example 7
void* mremap(void* old_address, size_t old_size, size_t new_size, int flags, ...) {
  // prevent allocations large enough for `end - start` to overflow
  size_t rounded = BIONIC_ALIGN(new_size, PAGE_SIZE);
  if (rounded < new_size || rounded > PTRDIFF_MAX) {
    errno = ENOMEM;
    return MAP_FAILED;
  }

  void* new_address = nullptr;
  // The optional argument is only valid if the MREMAP_FIXED flag is set,
  // so we assume it's not present otherwise.
  if ((flags & MREMAP_FIXED) != 0) {
    va_list ap;
    va_start(ap, flags);
    new_address = va_arg(ap, void*);
    va_end(ap);
  }
  return __mremap(old_address, old_size, new_size, flags, new_address);
}
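
For context, a minimal example of how a caller might use the variadic form (standard mremap(2) semantics; the fixed-address variant is the only case where the extra argument is read):

#define _GNU_SOURCE 1  // for MREMAP_MAYMOVE/MREMAP_FIXED on glibc; not needed on bionic
#include <sys/mman.h>
#include <cstddef>

// Grow an existing anonymous mapping, allowing the kernel to relocate it if it
// cannot be extended in place. Returns MAP_FAILED on error, like mmap().
static void* grow_mapping(void* old_addr, size_t old_size, size_t new_size) {
  return mremap(old_addr, old_size, new_size, MREMAP_MAYMOVE);
}

// Move a mapping to an explicit address: only here is the fifth argument passed,
// matching the MREMAP_FIXED check in the implementation above.
static void* move_mapping(void* old_addr, size_t size, void* desired_addr) {
  return mremap(old_addr, size, size, MREMAP_MAYMOVE | MREMAP_FIXED, desired_addr);
}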
Example 8
// This function is designed to be called once. A second call will not
// reset all variables.
bool Config::SetFromProperties() {
  char property_str[PROP_VALUE_MAX];
  memset(property_str, 0, sizeof(property_str));
  if (!__system_property_get("libc.debug.malloc.options", property_str)) {
    return false;
  }

  // Initialize a few default values.
  fill_alloc_value = DEFAULT_FILL_ALLOC_VALUE;
  fill_free_value = DEFAULT_FILL_FREE_VALUE;
  front_guard_value = DEFAULT_FRONT_GUARD_VALUE;
  rear_guard_value = DEFAULT_REAR_GUARD_VALUE;
  backtrace_signal = SIGRTMAX - 19;
  record_allocs_signal = SIGRTMAX - 18;
  free_track_backtrace_num_frames = 0;
  record_allocs_file.clear();

  // Parse the options, which have the format:
  //   option_name or option_name=XX

  // Supported options:
  const OptionSizeT option_guard(
      "guard", DEFAULT_GUARD_BYTES, 1, MAX_GUARD_BYTES, 0, nullptr, true);
  // Enable front guard. Value is the size of the guard.
  const OptionSizeT option_front_guard(
      "front_guard", DEFAULT_GUARD_BYTES, 1, MAX_GUARD_BYTES, FRONT_GUARD,
      &this->front_guard_bytes, true);
  // Enable end guard. Value is the size of the guard.
  const OptionSizeT option_rear_guard(
      "rear_guard", DEFAULT_GUARD_BYTES, 1, MAX_GUARD_BYTES, REAR_GUARD, &this->rear_guard_bytes,
      true);

  // Enable logging the backtrace on allocation. Value is the total
  // number of frames to log.
  const OptionSizeT option_backtrace(
      "backtrace", DEFAULT_BACKTRACE_FRAMES, 1, MAX_BACKTRACE_FRAMES, BACKTRACE | TRACK_ALLOCS,
      &this->backtrace_frames, false, &this->backtrace_enabled);
  // Enable gathering backtrace values on a signal.
  const OptionSizeT option_backtrace_enable_on_signal(
      "backtrace_enable_on_signal", DEFAULT_BACKTRACE_FRAMES, 1, MAX_BACKTRACE_FRAMES,
      BACKTRACE | TRACK_ALLOCS, &this->backtrace_frames, false, &this->backtrace_enable_on_signal);

  const OptionSizeT option_fill("fill", SIZE_MAX, 1, SIZE_MAX, 0, nullptr, true);
  // Fill the allocation with an arbitrary pattern on allocation.
  // Value is the number of bytes of the allocation to fill
  // (default entire allocation).
  const OptionSizeT option_fill_on_alloc(
      "fill_on_alloc", SIZE_MAX, 1, SIZE_MAX, FILL_ON_ALLOC, &this->fill_on_alloc_bytes, true);
  // Fill the allocation with an arbitrary pattern on free.
  // Value is the number of bytes of the allocation to fill
  // (default entire allocation).
  const OptionSizeT option_fill_on_free(
      "fill_on_free", SIZE_MAX, 1, SIZE_MAX, FILL_ON_FREE, &this->fill_on_free_bytes, true);

  // Expand the size of every alloc by this number of bytes. Value is
  // the total number of bytes to expand every allocation by.
  const OptionSizeT option_expand_alloc(
      "expand_alloc", DEFAULT_EXPAND_BYTES, 1, MAX_EXPAND_BYTES, EXPAND_ALLOC,
      &this->expand_alloc_bytes);

  // Keep track of the freed allocations and verify at a later date
  // that they have not been used. Turning this on also turns on
  // fill on free.
  const OptionSizeT option_free_track(
      "free_track", DEFAULT_FREE_TRACK_ALLOCATIONS, 1, MAX_FREE_TRACK_ALLOCATIONS,
      FREE_TRACK | FILL_ON_FREE, &this->free_track_allocations);
  // Number of backtrace frames to keep when free_track is enabled. If this
  // value is set to zero, no backtrace will be kept.
  const OptionSizeT option_free_track_backtrace_num_frames(
      "free_track_backtrace_num_frames", DEFAULT_BACKTRACE_FRAMES, 0, MAX_BACKTRACE_FRAMES, 0,
      &this->free_track_backtrace_num_frames);

  // Enable printing leaked allocations.
  const Option option_leak_track("leak_track", LEAK_TRACK | TRACK_ALLOCS);

  const OptionSizeT option_record_allocs(
      "record_allocs", DEFAULT_RECORD_ALLOCS, 1, MAX_RECORD_ALLOCS, RECORD_ALLOCS,
      &this->record_allocs_num_entries);
  const OptionString option_record_allocs_file(
      "record_allocs_file", 0, DEFAULT_RECORD_ALLOCS_FILE, &this->record_allocs_file);

  const Option* option_list[] = {
    &option_guard, &option_front_guard, &option_rear_guard,
    &option_backtrace, &option_backtrace_enable_on_signal,
    &option_fill, &option_fill_on_alloc, &option_fill_on_free,
    &option_expand_alloc,
    &option_free_track, &option_free_track_backtrace_num_frames,
    &option_leak_track,
    &option_record_allocs, &option_record_allocs_file,
  };

  // Set defaults for all of the options.
  for (size_t i = 0; i < sizeof(option_list)/sizeof(Option*); i++) {
    option_list[i]->SetDefault();
  }

  // Process each property name we can find.
  PropertyParser parser(property_str);
  bool valid = true;
  std::string property;
  std::string value;
  while (valid && parser.Get(&property, &value)) {
    bool found = false;
    for (size_t i = 0; i < sizeof(option_list)/sizeof(Option*); i++) {
      if (property == option_list[i]->name) {
        if (option_list[i]->option == 0 && option_list[i]->combo_option) {
          const std::string* option_name = &option_list[i]->name;
          i++;
          for (; i < sizeof(option_list)/sizeof(Option*) && option_list[i]->combo_option; i++) {
            if (!option_list[i]->ParseValue(*option_name, value)) {
              valid = false;
              break;
            }
            if (option_list[i]->config) {
              *option_list[i]->config = true;
            }
            options |= option_list[i]->option;
          }
          if (!valid) {
            break;
          }
        } else {
          if (!option_list[i]->ParseValue(option_list[i]->name, value)) {
            valid = false;
            break;
          }
          if (option_list[i]->config) {
            *option_list[i]->config = true;
          }
          options |= option_list[i]->option;
        }
        found = true;
        break;
      }
    }
    if (valid && !found) {
      error_log("%s: unknown option %s", getprogname(), property.c_str());
      valid = false;
      break;
    }
  }

  valid = valid && parser.Done();

  if (valid) {
    // It's necessary to align the front guard to MINIMUM_ALIGNMENT_BYTES to
    // make sure that the header is aligned properly.
    if (options & FRONT_GUARD) {
      front_guard_bytes = BIONIC_ALIGN(front_guard_bytes, MINIMUM_ALIGNMENT_BYTES);
    }
  } else {
    parser.LogUsage();
  }

  return valid;
}
Example 9
int pthread_create(pthread_t* thread_out, pthread_attr_t const* attr,
                   void* (*start_routine)(void*), void* arg) {
  ErrnoRestorer errno_restorer;

  // Inform the rest of the C library that at least one thread was created.
  __isthreaded = 1;

  pthread_internal_t* thread = reinterpret_cast<pthread_internal_t*>(calloc(sizeof(*thread), 1));
  if (thread == NULL) {
    __libc_format_log(ANDROID_LOG_WARN, "libc", "pthread_create failed: couldn't allocate thread");
    return EAGAIN;
  }

  if (attr == NULL) {
    pthread_attr_init(&thread->attr);
  } else {
    thread->attr = *attr;
    attr = NULL; // Prevent misuse below.
  }

  // Make sure the stack size and guard size are multiples of PAGE_SIZE.
  thread->attr.stack_size = BIONIC_ALIGN(thread->attr.stack_size, PAGE_SIZE);
  thread->attr.guard_size = BIONIC_ALIGN(thread->attr.guard_size, PAGE_SIZE);

  if (thread->attr.stack_base == NULL) {
    // The caller didn't provide a stack, so allocate one.
    thread->attr.stack_base = __create_thread_stack(thread);
    if (thread->attr.stack_base == NULL) {
      free(thread);
      return EAGAIN;
    }
  } else {
    // The caller did provide a stack, so remember we're not supposed to free it.
    thread->attr.flags |= PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK;
  }

  // Make room for the TLS area.
  // The child stack is the same address, just growing in the opposite direction.
  // At offsets >= 0, we have the TLS slots.
  // At offsets < 0, we have the child stack.
  thread->tls = reinterpret_cast<void**>(reinterpret_cast<uint8_t*>(thread->attr.stack_base) +
                                         thread->attr.stack_size - BIONIC_TLS_SLOTS * sizeof(void*));
  void* child_stack = thread->tls;
  __init_tls(thread);

  // Create a mutex for the thread in TLS to wait on once it starts so we can keep
  // it from doing anything until after we notify the debugger about it.
  //
  // This also provides the memory barrier we need to ensure that all
  // memory accesses previously performed by this thread are visible to
  // the new thread.
  pthread_mutex_init(&thread->startup_handshake_mutex, NULL);
  pthread_mutex_lock(&thread->startup_handshake_mutex);

  thread->start_routine = start_routine;
  thread->start_routine_arg = arg;

  thread->set_cached_pid(getpid());

  int flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND | CLONE_THREAD | CLONE_SYSVSEM |
      CLONE_SETTLS | CLONE_PARENT_SETTID | CLONE_CHILD_CLEARTID;
  void* tls = thread->tls;
#if defined(__i386__)
  // On x86 (but not x86-64), CLONE_SETTLS takes a pointer to a struct user_desc rather than
  // a pointer to the TLS itself.
  user_desc tls_descriptor;
  __init_user_desc(&tls_descriptor, false, tls);
  tls = &tls_descriptor;
#endif
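  // Note that CLONE_PARENT_SETTID and CLONE_CHILD_CLEARTID both point at
  // thread->tid: the kernel writes the new tid there for the parent, and clears
  // it (waking any futex waiters) when the child exits.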
  int rc = clone(__pthread_start, child_stack, flags, thread, &(thread->tid), tls, &(thread->tid));
  if (rc == -1) {
    int clone_errno = errno;
    // We don't have to unlock the mutex at all because clone(2) failed so there's no child waiting to
    // be unblocked, but we're about to unmap the memory the mutex is stored in, so this serves as a
    // reminder that you can't rewrite this function to use a ScopedPthreadMutexLocker.
    pthread_mutex_unlock(&thread->startup_handshake_mutex);
    if ((thread->attr.flags & PTHREAD_ATTR_FLAG_USER_ALLOCATED_STACK) == 0) {
      munmap(thread->attr.stack_base, thread->attr.stack_size);
    }
    free(thread);
    __libc_format_log(ANDROID_LOG_WARN, "libc", "pthread_create failed: clone failed: %s", strerror(clone_errno));
    return clone_errno;
  }

  int init_errno = __init_thread(thread, true);
  if (init_errno != 0) {
    // Mark the thread detached and replace its start_routine with a no-op.
    // Letting the thread run is the easiest way to clean up its resources.
    thread->attr.flags |= PTHREAD_ATTR_FLAG_DETACHED;
    thread->start_routine = __do_nothing;
    pthread_mutex_unlock(&thread->startup_handshake_mutex);
    return init_errno;
  }

  // Publish the pthread_t and unlock the mutex to let the new thread start running.
  *thread_out = reinterpret_cast<pthread_t>(thread);
  pthread_mutex_unlock(&thread->startup_handshake_mutex);

  return 0;
}
Example 10
// This function is designed to be called once. A second call will not
// reset all variables.
bool Config::SetFromProperties() {
  char property_str[PROP_VALUE_MAX];
  memset(property_str, 0, sizeof(property_str));
  if (!__system_property_get("libc.debug.malloc.options", property_str)) {
    return false;
  }

  // Initialize a few default values.
  fill_alloc_value = PropertyParser::DEFAULT_FILL_ALLOC_VALUE;
  fill_free_value = PropertyParser::DEFAULT_FILL_FREE_VALUE;
  front_guard_value = PropertyParser::DEFAULT_FRONT_GUARD_VALUE;
  rear_guard_value = PropertyParser::DEFAULT_REAR_GUARD_VALUE;
  backtrace_signal = SIGRTMIN + 10;

  // Parse the options, which have the format:
  //   option_name or option_name=XX

  // Supported features:
  const Feature features[] = {
    Feature("guard", 32, 0, nullptr, nullptr, true),
    // Enable front guard. Value is the size of the guard.
    Feature("front_guard", 32, FRONT_GUARD, &this->front_guard_bytes, nullptr, true),
    // Enable end guard. Value is the size of the guard.
    Feature("rear_guard", 32, REAR_GUARD, &this->rear_guard_bytes, nullptr, true),

    // Enable logging the backtrace on allocation. Value is the total
    // number of frames to log.
    Feature("backtrace", 16, BACKTRACE | TRACK_ALLOCS, &this->backtrace_frames,
            &this->backtrace_enabled, false),
    // Enable gathering backtrace values on a signal.
    Feature("backtrace_enable_on_signal", 16, BACKTRACE | TRACK_ALLOCS, &this->backtrace_frames,
            &this->backtrace_enable_on_signal, false),

    Feature("fill", SIZE_MAX, 0, nullptr, nullptr, true),
    // Fill the allocation with an arbitrary pattern on allocation.
    // Value is the number of bytes of the allocation to fill
    // (default entire allocation).
    Feature("fill_on_alloc", SIZE_MAX, FILL_ON_ALLOC, &this->fill_on_alloc_bytes,
            nullptr, true),
    // Fill the allocation with an arbitrary pattern on free.
    // Value is the number of bytes of the allocation to fill
    // (default entire allocation).
    Feature("fill_on_free", SIZE_MAX, FILL_ON_FREE, &this->fill_on_free_bytes, nullptr, true),

    // Expand the size of every alloc by this number of bytes. Value is
    // the total number of bytes to expand every allocation by.
    Feature("expand_alloc", 16, EXPAND_ALLOC, &this->expand_alloc_bytes, nullptr, false),

    // Keep track of the freed allocations and verify at a later date
    // that they have not been used. Turning this on also turns on
    // fill on free.
    Feature("free_track", 100, FREE_TRACK | FILL_ON_FREE, &this->free_track_allocations,
            nullptr, false),

    // Enable printing leaked allocations.
    Feature("leak_track", 0, LEAK_TRACK | TRACK_ALLOCS, nullptr, nullptr, false),
  };

  // Process each property name we can find.
  std::string property;
  size_t value;
  bool value_set;
  PropertyParser parser(property_str);
  bool valid = true;
  while (valid && parser.Get(&property, &value, &value_set)) {
    bool found = false;
    for (size_t i = 0; i < sizeof(features)/sizeof(Feature); i++) {
      if (property == features[i].name) {
        if (features[i].option == 0 && features[i].combo_option) {
          i++;
          for (; i < sizeof(features)/sizeof(Feature) && features[i].combo_option; i++) {
            if (!SetFeature(features[i], value, value_set)) {
              valid = false;
              break;
            }
            options |= features[i].option;
          }
          if (!valid) {
            break;
          }
        } else {
          if (!SetFeature(features[i], value, value_set)) {
            valid = false;
            break;
          }
          options |= features[i].option;
        }
        found = true;
        break;
      }
    }
    if (valid && !found) {
      error_log("%s: unknown option %s", getprogname(), property.c_str());
      valid = false;
      break;
    }
  }

  valid = valid && parser.Done();

  if (valid) {
    // It's necessary to align the front guard to sizeof(uintptr_t) to
    // make sure that the header is aligned properly.
    if (options & FRONT_GUARD) {
      front_guard_bytes = BIONIC_ALIGN(front_guard_bytes, sizeof(uintptr_t));
    }

    // This situation can occur if the free_track option is specified and
    // the fill_on_free option is not. In this case, indicate the whole
    // allocation should be filled.
    if ((options & FILL_ON_FREE) && fill_on_free_bytes == 0) {
      fill_on_free_bytes = SIZE_MAX;
    }
  } else {
    parser.LogUsage();
  }

  return valid;
}