Example #1
const void* ReadOnlyArena::allocate(const void* data, size_t dataLen) {
  always_assert(dataLen <= m_chunkSize);
  guard g(m_mutex);

  // Round up to the minimal alignment.
  auto alignedLen =
    (dataLen + (kMinimalAlignment - 1)) & ~(kMinimalAlignment - 1);

  if (m_frontier + alignedLen > m_end) {
    grow();
  }
  always_assert(m_frontier + alignedLen <= m_end);

  auto const ret = m_frontier;
  assert((uintptr_t(ret) & (kMinimalAlignment - 1)) == 0);

  m_frontier += alignedLen;

  // The freshly carved-out bytes live on read-only pages: temporarily make
  // the pages covering [pageAddr, m_frontier) writable, copy the payload
  // in, then restore read-only protection.
  auto pageAddr = reinterpret_cast<unsigned char*>(
    uintptr_t(ret) & ~(s_pageSize - 1)
  );
  checked_mprotect(pageAddr, m_frontier - pageAddr, PROT_WRITE|PROT_READ);
  auto const ucData = static_cast<const unsigned char*>(data);
  std::copy(ucData, ucData + dataLen, ret);
  checked_mprotect(pageAddr, m_frontier - pageAddr, PROT_READ);

  return ret;
}
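
Every example in this listing relies on checked_mprotect, whose definition is not shown. A minimal stand-in, assuming it is simply a wrapper that aborts if mprotect(2) fails (the real helpers in these codebases may differ):

#include <sys/mman.h>
#include <cstdio>
#include <cstdlib>

// Hypothetical stand-in for checked_mprotect: change page protection and
// abort loudly on failure instead of letting the error go unnoticed.
static void checked_mprotect(void* addr, size_t len, int prot) {
  if (mprotect(addr, len, prot) != 0) {
    std::perror("mprotect");
    std::abort();
  }
}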
Example #2
void coro_spawn(Worker * me, Worker * c, coro_func f, size_t ssize) {
  CHECK(c != NULL) << "Must provide a valid Worker";
  c->running = 0;
  c->suspended = 0;
  c->idle = 0;

  // allocate stack plus a guard page at each end
  c->base = Grappa::impl::locale_shared_memory.allocate_aligned( ssize+4096*2, 4096 );
  CHECK_NOTNULL( c->base );
  c->ssize = ssize;

  // set stack pointer to the top of the usable region (stacks grow down),
  // staggered by current_stack_offset to spread stacks across cache sets
  c->stack = (char*) c->base + ssize + 4096 - current_stack_offset;

  // try to make sure we don't stuff all our stacks at the same cache index
  const int num_offsets = 128;
  const int cache_line_size = 64;
  current_stack_offset += FLAGS_stack_offset;
  current_stack_offset &= ((cache_line_size * num_offsets) - 1);
  
  c->tracking_prev = NULL;
  c->tracking_next = NULL;

#ifdef ENABLE_VALGRIND
  c->valgrind_stack_id = VALGRIND_STACK_REGISTER( (char *) c->base + 4096, c->stack );
#endif

  // clear stack
  memset(c->base, 0, ssize);

#ifdef GUARD_PAGES_ON_STACK
  // arm guard page
  checked_mprotect( c->base, 4096, PROT_NONE );
  checked_mprotect( (char*)c->base + ssize + 4096, 4096, PROT_NONE );
#endif

  // set up coroutine to be able to run next time we're switched in
  makestack(&me->stack, &c->stack, f, c);
  
  insert_coro( c ); // insert into debugging list of coros

#ifdef CORO_PROTECT_UNUSED_STACK
  // disable writes to stack until we're switched in again.
  checked_mprotect( (void*)((intptr_t)c->base + 4096), ssize, PROT_READ );
  checked_mprotect( (void*)(c), 4096, PROT_READ );
#endif

  total_coros++;
}
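
The allocation above lays out a guard page, then the ssize-byte stack, then a second guard page, and staggers each stack top so that coroutine stacks don't all land on the same cache set. A small standalone sketch of the staggering arithmetic, assuming FLAGS_stack_offset is 64 (its real value is not shown here):

#include <cstdio>

int main() {
  const int num_offsets = 128;
  const int cache_line_size = 64;
  const int stack_offset = 64;  // assumed stand-in for FLAGS_stack_offset
  int current_stack_offset = 0;
  for (int i = 0; i < 4; ++i) {
    current_stack_offset += stack_offset;
    // Wrap within cache_line_size * num_offsets = 8192 bytes.
    current_stack_offset &= (cache_line_size * num_offsets) - 1;
    std::printf("coro %d: top of stack staggered by %d bytes\n",
                i, current_stack_offset);
  }
  return 0;
}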
Example #3
void destroy_coro(Worker * c) {
  total_coros--;
#ifdef ENABLE_VALGRIND
  if( c->valgrind_stack_id != -1 ) {
    VALGRIND_STACK_DEREGISTER( c->valgrind_stack_id );
  }
#endif
  if( c->base != NULL ) {
    // disarm guard page
    checked_mprotect( c->base, 4096, PROT_READ | PROT_WRITE );
    checked_mprotect( (char*)c->base + c->ssize + 4096, 4096, PROT_READ | PROT_WRITE );
#ifdef CORO_PROTECT_UNUSED_STACK
    // enable writes to stack so we can deallocate
    checked_mprotect( (void*)((intptr_t)c->base + 4096), c->ssize, PROT_READ | PROT_WRITE );
    checked_mprotect( (void*)(c), 4096, PROT_READ | PROT_WRITE );
#endif
    remove_coro(c); // remove from debugging list of coros
    Grappa::impl::locale_shared_memory.deallocate(c->base);
  }
}
Example #4
// Pre: the mutex is already held, or no other thread can access this
// object yet (i.e. we're in the ctor).
void ReadOnlyArena::grow() {
  void* vp;
  if (auto err = posix_memalign(&vp, s_pageSize, m_chunkSize)) {
    folly::throwSystemError(err, "failed to posix_memalign in "
      "ReadOnlyArena");
  }
  checked_mprotect(vp, m_chunkSize, PROT_READ);

  auto uc = static_cast<unsigned char*>(vp);
  m_chunks.push_back(uc);
  m_frontier = uc;
  m_end = uc + m_chunkSize;
}
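
Note that posix_memalign reports failure through its return value rather than through errno, which is why the error code is passed straight to folly::throwSystemError. A folly-free equivalent of the same check might look like this (a sketch, not the project's code):

#include <cstdlib>
#include <system_error>

void* allocate_page_aligned(size_t pageSize, size_t chunkSize) {
  void* vp;
  if (int err = posix_memalign(&vp, pageSize, chunkSize)) {
    // posix_memalign returns the error code directly; errno is untouched.
    throw std::system_error(err, std::generic_category(),
                            "posix_memalign failed");
  }
  return vp;
}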
Example #5
ReadOnlyArena::~ReadOnlyArena() {
  for (auto& chunk : m_chunks) {
    checked_mprotect(chunk, m_chunkSize, PROT_READ|PROT_WRITE);
    std::free(chunk);
  }
}
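
Taken together, the arena hands out pointers into pages that stay PROT_READ except during the brief copy inside allocate. A hypothetical usage sketch; the ReadOnlyArena constructor is not shown in these examples, so its signature is assumed:

// Assumes a constructor taking the chunk size; not shown above.
ReadOnlyArena arena(1 << 20);  // 1 MiB chunks
static const char msg[] = "immutable data";
auto p = arena.allocate(msg, sizeof msg);
// Reads through p are fine; writing through it would fault (SIGSEGV),
// because the backing pages are PROT_READ outside of allocate().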