extern "C" void __libc_init_main_thread_final() {
  bionic_tcb* temp_tcb = __get_bionic_tcb();
  bionic_tls* temp_tls = &__get_bionic_tls();

  // Allocate the main thread's static TLS. (This mapping doesn't include a
  // stack.)
  ThreadMapping mapping = __allocate_thread_mapping(0, PTHREAD_GUARD_SIZE);
  if (mapping.mmap_base == nullptr) {
    async_safe_fatal("failed to mmap main thread static TLS: %s", strerror(errno));
  }

  const StaticTlsLayout& layout = __libc_shared_globals()->static_tls_layout;
  auto new_tcb = reinterpret_cast<bionic_tcb*>(mapping.static_tls + layout.offset_bionic_tcb());
  auto new_tls = reinterpret_cast<bionic_tls*>(mapping.static_tls + layout.offset_bionic_tls());

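  // Initialize the static TLS area, then migrate the bootstrap TCB and TLS
  // contents into their final locations inside it.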
  __init_static_tls(mapping.static_tls);
  new_tcb->copy_from_bootstrap(temp_tcb);
  new_tls->copy_from_bootstrap(temp_tls);
  __init_tcb(new_tcb, &main_thread);
  __init_bionic_tls_ptrs(new_tcb, new_tls);

  main_thread.mmap_base = mapping.mmap_base;
  main_thread.mmap_size = mapping.mmap_size;

  __set_tls(&new_tcb->tls_slot(0));

  __free_temp_bionic_tls(temp_tls);
}
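
The handoff pattern above generalizes: bring the thread up on a temporary TCB, build the real one, copy state across, then switch the thread pointer. A minimal C sketch of that idea, where every name is a hypothetical stand-in rather than bionic's real API:

#include <stdint.h>

/* Hypothetical sketch of a bootstrap-then-relocate TCB handoff. */
typedef struct tcb {
  uintptr_t slots[8];                   /* slot 0 is the thread-pointer target */
} tcb_t;

/* Assumed arch hook, e.g. a write to TPIDR_EL0 or the %fs base. */
extern void set_thread_pointer(void *p);

static void switch_to_final_tcb(tcb_t *bootstrap, tcb_t *final_tcb) {
  *final_tcb = *bootstrap;                  /* copy_from_bootstrap, in miniature */
  set_thread_pointer(&final_tcb->slots[0]); /* like __set_tls(&tls_slot(0)) */
  /* From here on the bootstrap TCB is unused and can be freed. */
}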
Example 2
static int threading_init() {
  static thread_t dummy_t = {
    .id = 0,
    .prev = NULL, .next = NULL,
    .scheduler_next = NULL,
    .semaphore_next = NULL,
    .stack = 0,
    .request_kill = 0,
    .state = 0,
    .priority = 0,
    .auto_free = 0
  };

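  /* Create the slab cache backing all thread_t allocations; dummy_t
     presumably serves as the template new objects are initialised from. */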
  int r = slab_cache_create(&thread_cache, &kernel_vmspace, sizeof(thread_t), (void*)&dummy_t);
  assert(r == 0 && "slab_cache_create failed!");

  thread_t *t = (thread_t*)slab_cache_alloc(&thread_cache);
  t->stack = (uintptr_t)__builtin_frame_address(0) & ~(THREAD_STACK_SZ-1);

  *tls_slot(TLS_SLOT_TCB, t->stack) = (uintptr_t)t;
  *tls_slot(TLS_SLOT_CANARY, t->stack) = CANARY_VAL;

  assert(*tls_slot(TLS_SLOT_TCB, t->stack) == (uintptr_t)t);

  assert(*tls_slot(TLS_SLOT_CANARY, t->stack) == CANARY_VAL);

  thread_list_head = t;

  register_debugger_handler("threads", "List all thread states", &inspect_threads);

  return 0;
}

static prereq_t p[] = { {"kmalloc",NULL}, {"scheduler",NULL}, {NULL,NULL} };
static module_t x run_on_startup = {
  .name = "threading",
  .required = p,
  .load_after = NULL,
  .init = &threading_init,
  .fini = NULL
};
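
run_on_startup is used above but never defined in this listing. A plausible definition, offered as an assumption rather than this kernel's actual macro, is a GCC section attribute that collects every module_t into one linker section the boot code can walk:

/* Hypothetical sketch: placing each module_t in a section named "modules"
   makes the linker emit __start_modules/__stop_modules symbols bounding it.
   The walker below is likewise an assumption. */
#define run_on_startup __attribute__((section("modules"), used))

extern module_t __start_modules[], __stop_modules[];

static void run_startup_modules(void) {
  for (module_t *m = __start_modules; m != __stop_modules; ++m) {
    /* A real loader would order modules by their required[] lists first. */
    if (m->init)
      m->init();
  }
}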
Example 3
void thread_yield() {
  thread_t *t = thread_current();
  assert(*tls_slot(TLS_SLOT_CANARY, t->stack) == CANARY_VAL);

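  /* Save this thread's context. When the scheduler later longjmps to
     t->jmpbuf, setjmp returns non-zero and we fall out of the if, resuming
     in our caller. */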
  if (setjmp(t->jmpbuf) == 0) {
    if (t->request_kill)
      t->state = THREAD_DEAD;
    else
      scheduler_ready(t);
    yield();
  }
}
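
yield() itself is not shown. Under this setjmp/longjmp scheme its other half plausibly pops the run queue and longjmps into the chosen thread's saved context; scheduler_next() and THREAD_RUN are assumed names, not necessarily this kernel's:

#include <setjmp.h>

/* Hypothetical sketch of yield(): resume whichever thread the scheduler
   picks next. */
static void yield(void) {
  thread_t *next = scheduler_next();    /* assumed run-queue pop */
  next->state = THREAD_RUN;             /* assumed state constant */
  /* Control reappears wherever `next` last called setjmp: in thread_yield()
     (setjmp returns 1) or in thread_spawn()'s else branch for a new thread. */
  longjmp(next->jmpbuf, 1);
}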
Example 4
thread_t *thread_spawn(void (*fn)(void*), void *p, uint8_t auto_free) {
  thread_t *t = (thread_t*)slab_cache_alloc(&thread_cache);

  memset(t, 0, sizeof(thread_t));

  t->auto_free = auto_free;
  t->stack = alloc_stack_and_tls();
 
  spinlock_acquire(&thread_list_lock);
  t->next = thread_list_head;
  t->next->prev = t;
  thread_list_head = t;
  spinlock_release(&thread_list_lock);
 
  /* TLS slot zero always contains the thread object. */
  *tls_slot(TLS_SLOT_TCB, t->stack) = (uintptr_t)t;

  /* Store the function and argument temporarily in TLS */
  *tls_slot(1, t->stack) = (uintptr_t)fn;
  *tls_slot(2, t->stack) = (uintptr_t)p;

  /* In the last valid TLS slot, store a canary. */
  *tls_slot(TLS_SLOT_CANARY, t->stack) = CANARY_VAL;

  if (setjmp(t->jmpbuf) == 0) {
    jmp_buf_set_stack(t->jmpbuf, t->stack + THREAD_STACK_SZ);

    scheduler_ready(t);

    return t;
  } else {
    /* Tail-call the trampoline, which is declared noinline to force a fresh
       stack frame: we are now running on the new thread's stack, so the
       previous frame is invalid. */
    trampoline();
  }
}
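
trampoline() is called above but not shown. Consistent with thread_spawn(), it must run as the first frame on the new stack and recover the entry point and argument from TLS slots 1 and 2; the teardown at the end is an assumption:

/* Hypothetical sketch of trampoline(). noinline forces a fresh stack frame,
   since we arrive here via longjmp and the spawning thread owns the caller's
   frame. */
static void __attribute__((noinline)) trampoline(void) {
  void (*fn)(void*) = (void (*)(void*))*thread_tls_slot(1);
  void *p = (void*)*thread_tls_slot(2);

  fn(p);

  /* The entry point returned: ask to be reaped and reschedule. Exact
     teardown (e.g. honouring auto_free) is assumed. */
  thread_current()->request_kill = 1;
  thread_yield();                       /* never returns for a dead thread */
}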
Example 5
uintptr_t *thread_tls_slot(unsigned idx) {
  /* __builtin_frame_address(0) is a GCC/Clang builtin that returns a pointer
     into the current stack frame, which necessarily lies within this
     thread's stack. */
  return tls_slot(idx, (uintptr_t)__builtin_frame_address(0));
}
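
tls_slot() is the remaining missing piece. Because each stack is THREAD_STACK_SZ-sized and aligned (threading_init() recovers a stack base with exactly this mask), any in-stack address can be rounded down to the base, where the slots can live as a small uintptr_t array. A sketch under that layout assumption:

#include <stdint.h>

/* Hypothetical sketch: TLS slots as a uintptr_t array at the base of each
   THREAD_STACK_SZ-aligned stack. */
static inline uintptr_t *tls_slot(unsigned idx, uintptr_t stack_addr) {
  uintptr_t base = stack_addr & ~(uintptr_t)(THREAD_STACK_SZ - 1);
  return (uintptr_t*)base + idx;
}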