Example #1
ACPI_THREAD_ID AcpiOsGetThreadId(void) {
	PRINTD("AcpiOsGetThreadId() called");

	return thread_self()->id;
}
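For reference, POSIX builds of ACPICA usually implement this OSL hook by casting the pthreads handle; a minimal sketch, assuming a host where pthread_t converts cleanly to ACPI_THREAD_ID:

#include <pthread.h>
#include "acpi.h"               /* ACPICA public header: ACPI_THREAD_ID */

/* Sketch of a pthreads-based OS services layer; the cast assumes
   pthread_t is an integral or pointer-sized type on this host. */
ACPI_THREAD_ID AcpiOsGetThreadId(void)
{
    return (ACPI_THREAD_ID) pthread_self();
}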
Example #2
void* pmalloc(size_t size)
{
    thread_t* thread = thread_self();
    return numa_alloc_onnode(size, thread->virtual_node->nvram_node->node_id);
}
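Memory returned by numa_alloc_onnode() is mmap-backed, so it must be released with numa_free() and the original size rather than free(); a minimal counterpart sketch (pfree() is a hypothetical name mirroring pmalloc() above):

#include <stddef.h>
#include <numa.h>   /* libnuma: numa_alloc_onnode(), numa_free(); link with -lnuma */

/* Hypothetical release wrapper for pmalloc(): libnuma needs the size
   at free time because the allocation came from mmap. */
void pfree(void *ptr, size_t size)
{
    numa_free(ptr, size);
}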
Example #3
int sem_timedwait(sem_t *sem, const struct timespec *abstime)
{
  pthread_descr self = thread_self();
  pthread_extricate_if extr;
  int already_canceled = 0;
  int spurious_wakeup_count;

  __pthread_lock(&sem->__sem_lock, self);
  if (sem->__sem_value > 0) {
    --sem->__sem_value;
    __pthread_unlock(&sem->__sem_lock);
    return 0;
  }

  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000) {
    /* The standard requires that if the function would block and the
       time value is illegal, the function returns with an error.  */
    __pthread_unlock(&sem->__sem_lock);
    __set_errno (EINVAL);
    return -1;
  }

  /* Set up extrication interface */
  extr.pu_object = sem;
  extr.pu_extricate_func = new_sem_extricate_func;

  /* Register extrication interface */
  THREAD_SETMEM(self, p_sem_avail, 0);
  __pthread_set_own_extricate_if(self, &extr);
  /* Enqueue only if not already cancelled. */
  if (!(THREAD_GETMEM(self, p_canceled)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
    enqueue(&sem->__sem_waiting, self);
  else
    already_canceled = 1;
  __pthread_unlock(&sem->__sem_lock);

  if (already_canceled) {
    __pthread_set_own_extricate_if(self, 0);
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  }

  spurious_wakeup_count = 0;
  while (1)
    {
      if (timedsuspend(self, abstime) == 0) {
	int was_on_queue;

	/* __pthread_lock will queue back any spurious restarts that
	   may happen to it. */

	__pthread_lock(&sem->__sem_lock, self);
	was_on_queue = remove_from_queue(&sem->__sem_waiting, self);
	__pthread_unlock(&sem->__sem_lock);

	if (was_on_queue) {
	  __pthread_set_own_extricate_if(self, 0);
	  __set_errno (ETIMEDOUT);
	  return -1;
	}

	/* Eat the outstanding restart() from the signaller */
	suspend(self);
      }

      if (THREAD_GETMEM(self, p_sem_avail) == 0
	  && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
	      || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
	{
	  /* Count resumes that don't belong to us. */
	  spurious_wakeup_count++;
	  continue;
	}
      break;
    }

  __pthread_set_own_extricate_if(self, 0);

  /* Terminate only if the wakeup came from cancellation. */
  /* Otherwise ignore cancellation because we got the semaphore. */

  if (THREAD_GETMEM(self, p_woken_by_cancel)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
    THREAD_SETMEM(self, p_woken_by_cancel, 0);
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  }
  /* We got the semaphore */
  return 0;
}
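The tv_nsec range check above means callers must hand in a normalized absolute deadline; a minimal sketch of building one against CLOCK_REALTIME, the clock sem_timedwait() deadlines are conventionally measured on:

#include <semaphore.h>
#include <time.h>

/* Wait on sem for up to rel_ms milliseconds, keeping tv_nsec inside
   [0, 1e9) so the EINVAL path above is never taken. */
int sem_wait_ms(sem_t *sem, long rel_ms)
{
    struct timespec ts;
    clock_gettime(CLOCK_REALTIME, &ts);
    ts.tv_sec  += rel_ms / 1000;
    ts.tv_nsec += (rel_ms % 1000) * 1000000L;
    if (ts.tv_nsec >= 1000000000L) {
        ts.tv_sec  += 1;
        ts.tv_nsec -= 1000000000L;
    }
    return sem_timedwait(sem, &ts);
}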
Example #4
int
__pthread_rwlock_unlock (pthread_rwlock_t *rwlock)
{
  pthread_descr torestart;
  pthread_descr th;

  __pthread_lock (&rwlock->__rw_lock, NULL);
  if (rwlock->__rw_writer != NULL)
    {
      /* Unlocking a write lock.  */
      if (rwlock->__rw_writer != thread_self ())
	{
	  __pthread_unlock (&rwlock->__rw_lock);
	  return EPERM;
	}
      rwlock->__rw_writer = NULL;

      if ((rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_READER_NP
	   && !queue_is_empty(&rwlock->__rw_read_waiting))
	  || (th = dequeue(&rwlock->__rw_write_waiting)) == NULL)
	{
	  /* Restart all waiting readers.  */
	  torestart = rwlock->__rw_read_waiting;
	  rwlock->__rw_read_waiting = NULL;
	  __pthread_unlock (&rwlock->__rw_lock);
	  while ((th = dequeue (&torestart)) != NULL)
	    restart (th);
	}
      else
	{
	  /* Restart one waiting writer.  */
	  __pthread_unlock (&rwlock->__rw_lock);
	  restart (th);
	}
    }
  else
    {
      /* Unlocking a read lock.  */
      if (rwlock->__rw_readers == 0)
	{
	  __pthread_unlock (&rwlock->__rw_lock);
	  return EPERM;
	}

      --rwlock->__rw_readers;
      if (rwlock->__rw_readers == 0)
	/* Restart one waiting writer, if any.  */
	th = dequeue (&rwlock->__rw_write_waiting);
      else
	th = NULL;

      __pthread_unlock (&rwlock->__rw_lock);
      if (th != NULL)
	restart (th);

      /* Recursive lock fixup */

      if (rwlock->__rw_kind == PTHREAD_RWLOCK_PREFER_WRITER_NP)
	{
	  pthread_descr self = thread_self();
	  pthread_readlock_info *victim = rwlock_remove_from_list(self, rwlock);

	  if (victim != NULL)
	    {
	      if (victim->pr_lock_count == 0)
		{
		  victim->pr_next = THREAD_GETMEM (self, p_readlock_free);
		  THREAD_SETMEM (self, p_readlock_free, victim);
		}
	    }
	  else
	    {
	      int val = THREAD_GETMEM (self, p_untracked_readlock_count);
	      if (val > 0)
		THREAD_SETMEM (self, p_untracked_readlock_count, val - 1);
	    }
	}
    }

  return 0;
}
Example #5
pthread_t pthread_self(void)
{
  pthread_descr self = thread_self();
  return THREAD_GETMEM(self, p_tid);
}
Example #6
int pthread_join(pthread_t thread_id, void ** thread_return)
{
  volatile pthread_descr self = thread_self();
  struct pthread_request request;
  pthread_handle handle = thread_handle(thread_id);
  pthread_descr th;
  pthread_extricate_if extr;
  int already_canceled = 0;
  PDEBUG("\n");

  /* Set up extrication interface */
  extr.pu_object = handle;
  extr.pu_extricate_func = join_extricate_func;

  __pthread_lock(&handle->h_lock, self);
  if (invalid_handle(handle, thread_id)) {
    __pthread_unlock(&handle->h_lock);
    return ESRCH;
  }
  th = handle->h_descr;
  if (th == self) {
    __pthread_unlock(&handle->h_lock);
    return EDEADLK;
  }
  /* If detached or already joined, error */
  if (th->p_detached || th->p_joining != NULL) {
    __pthread_unlock(&handle->h_lock);
    return EINVAL;
  }
  /* If not terminated yet, suspend ourselves. */
  if (! th->p_terminated) {
    /* Register extrication interface */
    __pthread_set_own_extricate_if(self, &extr);
    if (!(THREAD_GETMEM(self, p_canceled)
	&& THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
      th->p_joining = self;
    else
      already_canceled = 1;
    __pthread_unlock(&handle->h_lock);

    if (already_canceled) {
      __pthread_set_own_extricate_if(self, 0);
      __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
    }

  PDEBUG("before suspend\n");
    suspend(self);
  PDEBUG("after suspend\n");
    /* Deregister extrication interface */
    __pthread_set_own_extricate_if(self, 0);

    /* This is a cancellation point */
    if (THREAD_GETMEM(self, p_woken_by_cancel)
	&& THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
      THREAD_SETMEM(self, p_woken_by_cancel, 0);
      __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
    }
    __pthread_lock(&handle->h_lock, self);
  }
  /* Get return value */
  if (thread_return != NULL) *thread_return = th->p_retval;
  __pthread_unlock(&handle->h_lock);
  /* Send notification to thread manager */
  if (__pthread_manager_request >= 0) {
    request.req_thread = self;
    request.req_kind = REQ_FREE;
    request.req_args.free.thread_id = thread_id;
    TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
		(char *) &request, sizeof(request)));
  }
  return 0;
}
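The ESRCH, EDEADLK and EINVAL paths above encode the usual discipline: join a joinable thread exactly once, from some other thread. A minimal caller sketch against the standard pthreads API:

#include <pthread.h>
#include <stdio.h>

static void *worker(void *arg)
{
    return (void *)(long)(*(int *)arg + 1);
}

int main(void)
{
    pthread_t th;
    int arg = 41;
    void *ret;

    pthread_create(&th, NULL, worker, &arg);
    if (pthread_join(th, &ret) == 0)   /* a second join would fail (EINVAL/ESRCH) */
        printf("worker returned %ld\n", (long) ret);
    return 0;
}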
Example #7
void **
__pthread_internal_tsd_address (int key)
{
  pthread_descr self = thread_self();
  return &self->p_libc_specific[key];
}
Example #8
int __old_sem_wait(old_sem_t * sem)
{
    long oldstatus, newstatus;
    volatile pthread_descr self = thread_self();
    pthread_descr * th;
    pthread_extricate_if extr;

    /* Set up extrication interface */
    extr.pu_object = 0;
    extr.pu_extricate_func = old_sem_extricate_func;

    while (1) {
	/* Register extrication interface */
	__pthread_set_own_extricate_if(self, &extr); 
	do {
            oldstatus = sem->sem_status;
            if ((oldstatus & 1) && (oldstatus != 1))
		newstatus = oldstatus - 2;
            else {
		newstatus = (long) self;
		self->p_nextwaiting = (pthread_descr) oldstatus;
	    }
	}
	while (! sem_compare_and_swap(sem, oldstatus, newstatus));
	if (newstatus & 1) {
	    /* We got the semaphore. */
	  __pthread_set_own_extricate_if(self, 0); 
	    return 0;
	}
	/* Wait for sem_post or cancellation */
	suspend(self);
	__pthread_set_own_extricate_if(self, 0); 

	/* This is a cancellation point */
	if (self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE) {
	    /* Remove ourselves from the waiting list if we're still on it */
	    /* First check if we're at the head of the list. */
            do {
		oldstatus = sem->sem_status;
		if (oldstatus != (long) self) break;
		newstatus = (long) self->p_nextwaiting;
	    }
            while (! sem_compare_and_swap(sem, oldstatus, newstatus));
            /* Now, check if we're somewhere in the list.
	       There's a race condition with sem_post here, but it does not matter:
	       the net result is that at the time pthread_exit is called,
	       self is no longer reachable from sem->sem_status. */
            if (oldstatus != (long) self && (oldstatus & 1) == 0) {
		for (th = &(((pthread_descr) oldstatus)->p_nextwaiting);
		     *th != NULL && *th != (pthread_descr) 1;
		     th = &((*th)->p_nextwaiting)) {
		    if (*th == self) {
			*th = self->p_nextwaiting;
			break;
		    }
		}
	    }
            pthread_exit(PTHREAD_CANCELED);
	}
    }
}
Example #9
int dyn_lwp_self()
{
   return thread_self();
}
Example #10
int pthread_getschedparam(pthread_t thread, int *policy,
                          struct sched_param *param)
{
    pthread_handle handle = thread_handle(thread);
    int pid, pol;

    __pthread_lock(&handle->h_lock, NULL);
    if (invalid_handle(handle, thread)) {
        __pthread_unlock(&handle->h_lock);
        return ESRCH;
    }
    pid = handle->h_descr->p_pid;
    __pthread_unlock(&handle->h_lock);
    pol = sched_getscheduler(pid);
    if (pol == -1) return errno;
    if (sched_getparam(pid, param) == -1) return errno;
    *policy = pol;
    return 0;
}
libpthread_hidden_def(pthread_getschedparam)

/* Process-wide exit() request */

static void pthread_onexit_process(int retcode, void *arg attribute_unused)
{
    struct pthread_request request;
    pthread_descr self = thread_self();

    if (__pthread_manager_request >= 0) {
        request.req_thread = self;
        request.req_kind = REQ_PROCESS_EXIT;
        request.req_args.exit.code = retcode;
        TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
                                        (char *) &request, sizeof(request)));
        suspend(self);
        /* Main thread should accumulate times for thread manager and its
           children, so that timings for main thread account for all threads. */
        if (self == __pthread_main_thread) {
            waitpid(__pthread_manager_thread.p_pid, NULL, __WCLONE);
            /* Since all threads have been asynchronously terminated
             * (possibly holding locks), free cannot be used any more.  */
            __pthread_manager_thread_bos = __pthread_manager_thread_tos = NULL;
        }
    }
}
Example #11
int __pthread_initialize_manager(void)
{
    int manager_pipe[2];
    int pid;
    int report_events;
    struct pthread_request request;

    *__libc_multiple_threads_ptr = 1;

    /* If basic initialization not done yet (e.g. we're called from a
       constructor run before our constructor), do it now */
    if (__pthread_initial_thread_bos == NULL) pthread_initialize();
    /* Setup stack for thread manager */
    __pthread_manager_thread_bos = malloc(THREAD_MANAGER_STACK_SIZE);
    if (__pthread_manager_thread_bos == NULL) return -1;
    __pthread_manager_thread_tos =
        __pthread_manager_thread_bos + THREAD_MANAGER_STACK_SIZE;

    /* On non-MMU systems we make sure that the initial thread bounds don't overlap
     * with the manager stack frame */
    NOMMU_INITIAL_THREAD_BOUNDS(__pthread_manager_thread_tos,__pthread_manager_thread_bos);
    PDEBUG("manager stack: size=%d, bos=%p, tos=%p\n", THREAD_MANAGER_STACK_SIZE,
           __pthread_manager_thread_bos, __pthread_manager_thread_tos);
#if 0
    PDEBUG("initial stack: estimate bos=%p, tos=%p\n",
           __pthread_initial_thread_bos, __pthread_initial_thread_tos);
#endif

    /* Setup pipe to communicate with thread manager */
    if (pipe(manager_pipe) == -1) {
        free(__pthread_manager_thread_bos);
        return -1;
    }
    /* Start the thread manager */
    pid = 0;
#ifdef USE_TLS
    if (__linuxthreads_initial_report_events != 0)
        THREAD_SETMEM (((pthread_descr) NULL), p_report_events,
                       __linuxthreads_initial_report_events);
    report_events = THREAD_GETMEM (((pthread_descr) NULL), p_report_events);
#else
    if (__linuxthreads_initial_report_events != 0)
        __pthread_initial_thread.p_report_events
            = __linuxthreads_initial_report_events;
    report_events = __pthread_initial_thread.p_report_events;
#endif
    if (__builtin_expect (report_events, 0))
    {
        /* It's a bit more complicated.  We have to report the creation of
        the manager thread.  */
        int idx = __td_eventword (TD_CREATE);
        uint32_t mask = __td_eventmask (TD_CREATE);

        if ((mask & (__pthread_threads_events.event_bits[idx]
                     | __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx]))
                != 0)
        {

            __pthread_lock(__pthread_manager_thread.p_lock, NULL);

#ifdef __ia64__
            pid = __clone2(__pthread_manager_event,
                           (void **) __pthread_manager_thread_tos,
                           THREAD_MANAGER_STACK_SIZE,
                           CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
                           (void *)(long)manager_pipe[0]);
#else
            pid = clone(__pthread_manager_event,
                        (void **) __pthread_manager_thread_tos,
                        CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
                        (void *)(long)manager_pipe[0]);
#endif

            if (pid != -1)
            {
                /* Now fill in the information about the new thread in
                   the newly created thread's data structure.  We cannot let
                   the new thread do this since we don't know whether it was
                   already scheduled when we send the event.  */
                __pthread_manager_thread.p_eventbuf.eventdata =
                    &__pthread_manager_thread;
                __pthread_manager_thread.p_eventbuf.eventnum = TD_CREATE;
                __pthread_last_event = &__pthread_manager_thread;
                __pthread_manager_thread.p_tid = 2* PTHREAD_THREADS_MAX + 1;
                __pthread_manager_thread.p_pid = pid;

                /* Now call the function which signals the event.  */
                __linuxthreads_create_event ();
            }
            /* Now restart the thread.  */
            __pthread_unlock(__pthread_manager_thread.p_lock);
        }
    }

    if (pid == 0) {
#ifdef __ia64__
        pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_tos,
                       THREAD_MANAGER_STACK_SIZE,
                       CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
                       (void *)(long)manager_pipe[0]);
#else
        pid = clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
                    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
                    (void *)(long)manager_pipe[0]);
#endif
    }
    if (pid == -1) {
        free(__pthread_manager_thread_bos);
        __libc_close(manager_pipe[0]);
        __libc_close(manager_pipe[1]);
        return -1;
    }
    __pthread_manager_request = manager_pipe[1]; /* writing end */
    __pthread_manager_reader = manager_pipe[0]; /* reading end */
    __pthread_manager_thread.p_tid = 2* PTHREAD_THREADS_MAX + 1;
    __pthread_manager_thread.p_pid = pid;

    /* Make gdb aware of new thread manager */
    if (__pthread_threads_debug && __pthread_sig_debug > 0)
    {
        raise(__pthread_sig_debug);
        /* We suspend ourself and gdb will wake us up when it is
        ready to handle us. */
        __pthread_wait_for_restart_signal(thread_self());
    }
    /* Synchronize debugging of the thread manager */
    PDEBUG("send REQ_DEBUG to manager thread\n");
    request.req_kind = REQ_DEBUG;
    TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
                                    (char *) &request, sizeof(request)));
    return 0;
}
Example #12
void* f(void* arg)
{
   printf("in thread\n");

   return thread_self();
}
Example #13
void thread_iter(int dram_refs, int nvm_refs, int interleave_dram, int interleave_nvm) {
	long it_n;
	unsigned long time_dram, time_nvm, total_time_dram_ns, total_time_nvm_ns;
	uint64_t seed;
	uint64_t j;
	chain_t *C_dram[MAX_NUM_CHAINS];
	chain_t *C_nvm[MAX_NUM_CHAINS];
	int missing_dram_refs, missing_nvm_refs;
	int dram_stalls, nvm_stalls;
	struct timespec task_time_start, task_time_end;
	unsigned long task_time_diff_ns;
#ifndef NDEBUG
	pid_t tid = (pid_t) syscall(SYS_gettid);
#endif

	assert(NELEMS < UINT64_MAX);

    for (j=0; j < NCHAINS; j++) {
        seed = SEED_IN + j*j;
        C_dram[j] = alloc_chain(seed, NELEMS, 64LLU, 0, 0);
        C_nvm[j] = alloc_chain(seed, NELEMS, 64LLU, 0, 1);
        __asm__("");
    }

    bind_cpu(thread_self());

    // cache must be trashed after bind_cpu() call
    trash_cache(NELEMS);

    total_time_dram_ns = 0;
    total_time_nvm_ns = 0;

    missing_dram_refs = dram_refs;
    missing_nvm_refs = nvm_refs;

#ifndef NDEBUG
    printf("DRAM accesses to be made: %ld\n", dram_refs);
    printf("NVM accesses to be made: %ld\n", nvm_refs);
#endif

    //delay_cycles(8000000000);
    //printf("STARTING MEASURES\n");

    clock_gettime(CLOCK_MONOTONIC, &task_time_start);

    for (it_n = 0; (missing_dram_refs > 0) || (missing_nvm_refs > 0); ++it_n) {
    	__asm__("");

    	// calculate the number of memory accesses to be made on each memory type
    	if (missing_dram_refs > interleave_dram) {
    		missing_dram_refs -= interleave_dram;
    		dram_stalls = interleave_dram;
    	} else {
    		dram_stalls = missing_dram_refs;
    		missing_dram_refs = 0;
    	}

    	if (missing_nvm_refs > interleave_nvm) {
			missing_nvm_refs -= interleave_nvm;
			nvm_stalls = interleave_nvm;
		} else {
			nvm_stalls = missing_nvm_refs;
			missing_nvm_refs = 0;
		}

    	time_dram = 0;
    	time_nvm = 0;

    	// do memory accesses interleaved by dividing the number of accesses in smaller amount
    	// as configured by user
        force_ldm_stalls((chain_t **)&C_dram, 64LLU, 8, dram_stalls, NELEMS, it_n, &time_dram);
        force_ldm_stalls((chain_t **)&C_nvm, 64LLU, 8, nvm_stalls, NELEMS, it_n, &time_nvm);

        total_time_dram_ns += time_dram;
        total_time_nvm_ns += time_nvm;
#ifndef NDEBUG
        printf("%ld DRAM accesses took: %ld ns\n", dram_stalls, time_dram);
        printf("%ld NVM accesses took: %ld ns\n", nvm_stalls, time_nvm);
#endif
    }

    clock_gettime(CLOCK_MONOTONIC, &task_time_end);
    task_time_diff_ns = ((task_time_end.tv_sec * 1000000000) + task_time_end.tv_nsec) -
                        ((task_time_start.tv_sec * 1000000000) + task_time_start.tv_nsec);

    // the memory latency is the total time divided by the number of accesses for each memory type
    if (dram_refs > 0)
        total_time_dram_ns /= dram_refs;
    else
        total_time_dram_ns = 0;
    if (nvm_refs > 0)
        total_time_nvm_ns /= nvm_refs;
    else
        total_time_nvm_ns = 0;

    printf("DRAM latency: %ld ns\n", total_time_dram_ns);
    printf("NVM latency: %ld ns\n", total_time_nvm_ns);
    printf("Measure time: %.3lf ms\n", (double)task_time_diff_ns/1000000.0);
    
    printf("Expected time: %.3ld ms\n", ((total_time_dram_ns * dram_refs) + (total_time_nvm_ns * nvm_refs)) / 1000000);

    for (j=0; j < NCHAINS; j++) {
        free(C_dram[j]);
        free(C_nvm[j]);
    }
}
Example #14
	static unsigned long self()
	{
		return thread_self();
	}
Example #15
void traceback_dump(void) {
	traceback_dump_thread(thread_self());
}
/**
 * \brief initializes the XOMP worker library
 *
 * \param wid   Xomp worker id
 *
 * \returns SYS_ERR_OK on success
 *          errval on failure
 */
errval_t xomp_worker_init(xomp_wid_t wid)
{
    errval_t err;

    worker_id = wid;

    XWI_DEBUG("initializing worker {%016lx} iref:%u\n", worker_id, svc_iref);

#if XOMP_BENCH_WORKER_EN
    bench_init();
#endif

    struct capref frame = {
        .cnode = cnode_root,
        .slot = ROOTCN_SLOT_ARGCN
    };

    struct frame_identity id;
    err = invoke_frame_identify(frame, &id);
    if (err_is_fail(err)) {
        return err_push(err, XOMP_ERR_INVALID_MSG_FRAME);
    }

    size_t frame_size = 0;

    if (svc_iref) {
        frame_size = XOMP_TLS_SIZE;
    } else {
        frame_size = XOMP_FRAME_SIZE;
        err = spawn_symval_cache_init(0);
        if (err_is_fail(err)) {
            return err;
        }
    }

    if ((1UL << id.bits) < XOMP_TLS_SIZE) {
        return XOMP_ERR_INVALID_MSG_FRAME;
    }

    msgframe = frame;

    err = vspace_map_one_frame(&msgbuf, frame_size, frame, NULL, NULL);
    if (err_is_fail(err)) {
        return err_push(err, XOMP_ERR_WORKER_INIT_FAILED);
    }
    if (svc_iref) {
        tls = msgbuf;
    } else {
        tls = ((uint8_t *) msgbuf) + XOMP_MSG_FRAME_SIZE;
    }

    XWI_DEBUG("messaging frame mapped: [%016lx] @ [%016lx]\n", id.base,
              (lvaddr_t )msgbuf);

    struct bomp_thread_local_data *tlsinfo = malloc(sizeof(*tlsinfo));
    tlsinfo->thr = thread_self();
    tlsinfo->work = (struct bomp_work *) tls;
    tlsinfo->work->data = tlsinfo->work + 1;
    g_bomp_state->backend.set_tls(tlsinfo);

#ifdef __k1om__
    if (worker_id & XOMP_WID_GATEWAY_FLAG) {
        err = xomp_gateway_init();
    } else {
        if (!svc_iref) {
            err = xomp_gateway_bind_svc();
        } else {
            err = SYS_ERR_OK;
        }
    }
    if (err_is_fail(err)) {
        return err;
    }
#endif

#ifdef __k1om__
    if (!svc_iref) {
        err = xeon_phi_client_init(disp_xeon_phi_id());
        if (err_is_fail(err)) {
            return err_push(err, XOMP_ERR_WORKER_INIT_FAILED);
        }

        xeon_phi_client_set_callbacks(&callbacks);
    }
#endif

    struct waitset *ws = get_default_waitset();

// XXX: disabling DMA on the host as there is no replication used at this moment
#if XOMP_WORKER_ENABLE_DMA && defined(__k1om__)
    /* XXX: use lib numa */

#ifndef __k1om__
    uint8_t numanode = 0;
    if (disp_get_core_id() > 20) {
        numanode = 1;
    }

    err = dma_manager_wait_for_driver(dma_device_type, numanode);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "could not wait for the DMA driver");
    }
#endif
    char svc_name[30];
#ifdef __k1om__
    snprintf(svc_name, 30, "%s", XEON_PHI_DMA_SERVICE_NAME);
#else
    snprintf(svc_name, 30, "%s.%u", IOAT_DMA_SERVICE_NAME, numanode);
#endif

    struct dma_client_info dma_info = {
        .type = DMA_CLIENT_INFO_TYPE_NAME,
        .device_type = dma_device_type,
        .args.name = svc_name
    };
    err = dma_client_device_init(&dma_info, &dma_dev);
    if (err_is_fail(err)) {
        USER_PANIC_ERR(err, "DMA device initialization");
    }
#endif

    if (svc_iref) {
        err = xomp_bind(svc_iref, master_bind_cb, NULL, ws,
                        IDC_EXPORT_FLAGS_DEFAULT);
    } else {
        struct xomp_frameinfo fi = {
            .sendbase = id.base,
            .inbuf = ((uint8_t *) msgbuf) + XOMP_MSG_CHAN_SIZE,
            .inbufsize = XOMP_MSG_CHAN_SIZE,
            .outbuf = ((uint8_t *) msgbuf),
            .outbufsize = XOMP_MSG_CHAN_SIZE
        };
        err = xomp_connect(&fi, master_bind_cb, NULL, ws,
                           IDC_EXPORT_FLAGS_DEFAULT);
    }

    if (err_is_fail(err)) {
        /* TODO: Clean up */
        return err_push(err, XOMP_ERR_WORKER_INIT_FAILED);
    }

    XWI_DEBUG("Waiting until bound to master...\n");

    while (!is_bound) {
        messages_wait_and_handle_next();
    }

    if (xbinding == NULL) {
        return XOMP_ERR_WORKER_INIT_FAILED;
    }

    return SYS_ERR_OK;
}
Example #17
static void logs(void) {
  int thread_number = thread_self()->td_tid;
  for (int i = 0; i < 20; i++)
    klog("Message number %d of thread %d.", i, thread_number);
}
Example #18
int do_fork(void) {
  thread_t *td = thread_self();

  /* Cannot fork non-user threads. */
  assert(td->td_proc);

  thread_t *newtd = thread_create(td->td_name, NULL, NULL);

  /* Clone the thread. Since we don't use fork-oriented thread_t layout, we copy
     all necessary fields one-by one for clarity. The new thread is already on
     the all_thread list, has name and tid set. Many fields don't require setup
     as they will be prepared by sched_add. */

  assert(td->td_idnest == 0);
  newtd->td_idnest = 0;

  /* Copy user context. */
  newtd->td_uctx = td->td_uctx;
  newtd->td_uctx_fpu = td->td_uctx_fpu;
  exc_frame_set_retval(&newtd->td_uctx, 0);

  /* New thread does not need the exception frame just yet. */
  newtd->td_kframe = NULL;
  newtd->td_onfault = 0;

  /* The new thread already has a new kernel stack allocated. There is no need
     to copy its contents, it will be discarded anyway. We just prepare the
     thread's kernel context to a fresh one so that it will continue execution
     starting from user_exc_leave (which serves as fork_trampoline). */
  ctx_init(newtd, (void (*)(void *))user_exc_leave, NULL);

  newtd->td_sleepqueue = sleepq_alloc();
  newtd->td_wchan = NULL;
  newtd->td_waitpt = NULL;

  newtd->td_prio = td->td_prio;

  /* Now, prepare a new process. */
  assert(td->td_proc);
  proc_t *proc = proc_create();
  proc->p_parent = td->td_proc;
  TAILQ_INSERT_TAIL(&td->td_proc->p_children, proc, p_child);
  proc_populate(proc, newtd);

  /* Clone the entire process memory space. */
  proc->p_uspace = vm_map_clone(td->td_proc->p_uspace);

  /* Find copied brk segment. */
  proc->p_sbrk = vm_map_find_entry(proc->p_uspace, SBRK_START);

  /* Copy the parent descriptor table. */
  /* TODO: Optionally share the descriptor table between processes. */
  proc->p_fdtable = fdtab_copy(td->td_proc->p_fdtable);

  /* Copy signal handler dispatch rules. */
  memcpy(proc->p_sigactions, td->td_proc->p_sigactions,
         sizeof(proc->p_sigactions));

  sched_add(newtd);

  return proc->p_pid;
}
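The exc_frame_set_retval(&newtd->td_uctx, 0) call above is what produces fork()'s familiar two-return contract: the child resumes through user_exc_leave seeing 0, while the parent receives proc->p_pid. Seen from user space, this is the standard idiom:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
    pid_t pid = fork();
    if (pid == 0)
        printf("child: fork() returned 0\n");         /* retval patched in the child frame */
    else
        printf("parent: fork() returned %d\n", pid);  /* the new process's pid */
    return 0;
}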
Example #19
void __pthread_do_exit(void *retval, char *currentframe)
{
  pthread_descr self = thread_self();
  pthread_descr joining;
  struct pthread_request request;
  PDEBUG("self=%p, pid=%d\n", self, self->p_pid);

  /* obey POSIX behavior and prevent cancellation functions from
   * being called more than once.
   * http://sourceware.org/ml/libc-ports/2006-10/msg00043.html
   */
  THREAD_SETMEM(self, p_cancelstate, PTHREAD_CANCEL_DISABLE);
  THREAD_SETMEM(self, p_canceltype, PTHREAD_CANCEL_DEFERRED);

  /* Call cleanup functions and destroy the thread-specific data */
  __pthread_perform_cleanup(currentframe);
  __pthread_destroy_specifics();
  /* Store return value */
  __pthread_lock(THREAD_GETMEM(self, p_lock), self);
  THREAD_SETMEM(self, p_retval, retval);
  /* See whether we have to signal the death.  */
  if (THREAD_GETMEM(self, p_report_events))
    {
      /* See whether TD_DEATH is in any of the mask.  */
      int idx = __td_eventword (TD_DEATH);
      uint32_t mask = __td_eventmask (TD_DEATH);

      if ((mask & (__pthread_threads_events.event_bits[idx]
		   | THREAD_GETMEM_NC(self,
				   p_eventbuf.eventmask).event_bits[idx]))
	  != 0)
	{
	  /* Yep, we have to signal the death.  */
	  THREAD_SETMEM(self, p_eventbuf.eventnum, TD_DEATH);
	  THREAD_SETMEM(self, p_eventbuf.eventdata, self);
	  __pthread_last_event = self;

	  /* Now call the function to signal the event.  */
	  __linuxthreads_death_event();
	}
    }
  /* Say that we've terminated */
  THREAD_SETMEM(self, p_terminated, 1);
  /* See if someone is joining on us */
  joining = THREAD_GETMEM(self, p_joining);
  PDEBUG("joining = %p, pid=%d\n", joining, joining->p_pid);
  __pthread_unlock(THREAD_GETMEM(self, p_lock));
  /* Restart joining thread if any */
  if (joining != NULL) restart(joining);
  /* If this is the initial thread, block until all threads have terminated.
     If another thread calls exit, we'll be terminated from our signal
     handler. */
  if (self == __pthread_main_thread && __pthread_manager_request >= 0) {
    request.req_thread = self;
    request.req_kind = REQ_MAIN_THREAD_EXIT;
    TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
		(char *)&request, sizeof(request)));
    suspend(self);
    /* Main thread flushes stdio streams and runs atexit functions.
     * It also calls a handler within LinuxThreads which sends a process exit
     * request to the thread manager. */
    exit(0);
  }
  /* Exit the process (but don't flush stdio streams, and don't run
     atexit functions). */
  _exit(0);
}
Example #20
void thread1(void* info) 
{
  status(1,5);
  thread_kill(thread_self());
  fprintf(stderr, "** error: killed thread is executing\n");
}
Example #21
int
__pthread_rwlock_timedrdlock (pthread_rwlock_t *rwlock,
			      const struct timespec *abstime)
{
  pthread_descr self = NULL;
  pthread_readlock_info *existing;
  int out_of_mem, have_lock_already;
  pthread_extricate_if extr;

  if (abstime->tv_nsec < 0 || abstime->tv_nsec >= 1000000000)
    return EINVAL;

  have_lock_already = rwlock_have_already(&self, rwlock,
					  &existing, &out_of_mem);

  if (self == NULL)
    self = thread_self ();

  /* Set up extrication interface */
  extr.pu_object = rwlock;
  extr.pu_extricate_func = rwlock_rd_extricate_func;

  /* Register extrication interface */
  __pthread_set_own_extricate_if (self, &extr);

  for (;;)
    {
      __pthread_lock (&rwlock->__rw_lock, self);

      if (rwlock_can_rdlock(rwlock, have_lock_already))
	break;

      enqueue (&rwlock->__rw_read_waiting, self);
      __pthread_unlock (&rwlock->__rw_lock);
      /* This is not a cancellation point */
      if (timedsuspend (self, abstime) == 0)
	{
	  int was_on_queue;

	  __pthread_lock (&rwlock->__rw_lock, self);
	  was_on_queue = remove_from_queue (&rwlock->__rw_read_waiting, self);
	  __pthread_unlock (&rwlock->__rw_lock);

	  if (was_on_queue)
	    {
	      __pthread_set_own_extricate_if (self, 0);
	      return ETIMEDOUT;
	    }

	  /* Eat the outstanding restart() from the signaller */
	  suspend (self);
	}
    }

  __pthread_set_own_extricate_if (self, 0);

  ++rwlock->__rw_readers;
  __pthread_unlock (&rwlock->__rw_lock);

  if (have_lock_already || out_of_mem)
    {
      if (existing != NULL)
	++existing->pr_lock_count;
      else
	++self->p_untracked_readlock_count;
    }

  return 0;
}
Example #22
int
main(int argc, char *argv[])
{
	int err;
	thread_t self, th;

	printf("Thread test program\n");

	self = thread_self();

	/*
	 * Create new thread
	 */
	printf("Start test thread\n");
	th = thread_run(test_thread, stack+1024);

	/*
	 * Wait 1 sec
	 */
	timer_sleep(1000, 0);

	/*
	 * Suspend test thread
	 */
	printf("\nSuspend test thread\n");
	err = thread_suspend(th);

	/*
	 * Wait 2 sec
	 */
	timer_sleep(2000, 0);

	/*
	 * Resume test thread
	 */
	printf("\nResume test thread\n");
	err = thread_resume(th);

	/*
	 * Wait 100 msec
	 */
	timer_sleep(100, 0);

	/*
	 * Suspend test thread
	 */
	thread_suspend(th);

	/*
	 * Wait 2 sec
	 */
	timer_sleep(2000, 0);

	/*
	 * Resume test thread
	 */
	thread_resume(th);

	/*
	 * We can check that this thread runs 10 times as often as the test thread.
	 */
	for (;;)
		putchar('!');

	return 0;
}
Example #23
int __pthread_initialize_manager(void)
{
  int manager_pipe[2];
  int pid;
  struct pthread_request request;

#ifndef HAVE_Z_NODELETE
  if (__builtin_expect (&__dso_handle != NULL, 1))
    __cxa_atexit ((void (*) (void *)) pthread_atexit_retcode, NULL,
		  __dso_handle);
#endif

  if (__pthread_max_stacksize == 0)
    __pthread_init_max_stacksize ();
  /* If basic initialization not done yet (e.g. we're called from a
     constructor run before our constructor), do it now */
  if (__pthread_initial_thread_bos == NULL) pthread_initialize();
  /* Setup stack for thread manager */
  __pthread_manager_thread_bos = malloc(THREAD_MANAGER_STACK_SIZE);
  if (__pthread_manager_thread_bos == NULL) return -1;
  __pthread_manager_thread_tos =
    __pthread_manager_thread_bos + THREAD_MANAGER_STACK_SIZE;
  /* Setup pipe to communicate with thread manager */
  if (__libc_pipe(manager_pipe) == -1) {
    free(__pthread_manager_thread_bos);
    return -1;
  }
  /* Start the thread manager */
  pid = 0;
  if (__builtin_expect (__pthread_initial_thread.p_report_events, 0))
    {
      /* It's a bit more complicated.  We have to report the creation of
	 the manager thread.  */
      int idx = __td_eventword (TD_CREATE);
      uint32_t mask = __td_eventmask (TD_CREATE);

      if ((mask & (__pthread_threads_events.event_bits[idx]
		   | __pthread_initial_thread.p_eventbuf.eventmask.event_bits[idx]))
	  != 0)
	{
	  __pthread_lock(__pthread_manager_thread.p_lock, NULL);

#ifdef NEED_SEPARATE_REGISTER_STACK
	  pid = __clone2(__pthread_manager_event,
			 (void **) __pthread_manager_thread_bos,
			 THREAD_MANAGER_STACK_SIZE,
			 CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
			 (void *)(long)manager_pipe[0]);
#elif _STACK_GROWS_UP
	  pid = __clone(__pthread_manager_event,
			(void **) __pthread_manager_thread_bos,
			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
			(void *)(long)manager_pipe[0]);
#else
	  pid = __clone(__pthread_manager_event,
			(void **) __pthread_manager_thread_tos,
			CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
			(void *)(long)manager_pipe[0]);
#endif

	  if (pid != -1)
	    {
	      /* Now fill in the information about the new thread in
	         the newly created thread's data structure.  We cannot let
	         the new thread do this since we don't know whether it was
	         already scheduled when we send the event.  */
	      __pthread_manager_thread.p_eventbuf.eventdata =
		&__pthread_manager_thread;
	      __pthread_manager_thread.p_eventbuf.eventnum = TD_CREATE;
	      __pthread_last_event = &__pthread_manager_thread;
	      __pthread_manager_thread.p_tid = 2* PTHREAD_THREADS_MAX + 1;
	      __pthread_manager_thread.p_pid = pid;

	      /* Now call the function which signals the event.  */
	      __linuxthreads_create_event ();
	    }

	  /* Now restart the thread.  */
	  __pthread_unlock(__pthread_manager_thread.p_lock);
	}
    }

  if (__builtin_expect (pid, 0) == 0)
    {
#ifdef NEED_SEPARATE_REGISTER_STACK
      pid = __clone2(__pthread_manager, (void **) __pthread_manager_thread_bos,
		     THREAD_MANAGER_STACK_SIZE,
		     CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
		     (void *)(long)manager_pipe[0]);
#elif _STACK_GROWS_UP
      pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_bos,
		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
		    (void *)(long)manager_pipe[0]);
#else
      pid = __clone(__pthread_manager, (void **) __pthread_manager_thread_tos,
		    CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SIGHAND,
		    (void *)(long)manager_pipe[0]);
#endif
    }
  if (__builtin_expect (pid, 0) == -1) {
    free(__pthread_manager_thread_bos);
    __libc_close(manager_pipe[0]);
    __libc_close(manager_pipe[1]);
    return -1;
  }
  __pthread_manager_request = manager_pipe[1]; /* writing end */
  __pthread_manager_reader = manager_pipe[0]; /* reading end */
  __pthread_manager_thread.p_tid = 2* PTHREAD_THREADS_MAX + 1;
  __pthread_manager_thread.p_pid = pid;
  /* Make gdb aware of new thread manager */
  if (__builtin_expect (__pthread_threads_debug, 0) && __pthread_sig_debug > 0)
    {
      raise(__pthread_sig_debug);
      /* We suspend ourself and gdb will wake us up when it is
	 ready to handle us. */
      __pthread_wait_for_restart_signal(thread_self());
    }
  /* Synchronize debugging of the thread manager */
  request.req_kind = REQ_DEBUG;
  TEMP_FAILURE_RETRY(__libc_write(__pthread_manager_request,
				  (char *) &request, sizeof(request)));
  return 0;
}
Example #24
void thread_mutex_lock_c(mutex_t *mutex, int line, char *file)
{
#ifdef DEBUG_MUTEXES
    thread_type *th = thread_self();

    if (!th) LOG_WARN("No mt record for %u in lock [%s:%d]", thread_self(), file, line);

    LOG_DEBUG5("Locking %p (%s) on line %d in file %s by thread %d", mutex, mutex->name, line, file, th ? th->thread_id : -1);

# ifdef CHECK_MUTEXES
    /* Just a little sanity checking to make sure that we're locking
    ** mutexes correctly
    */

    if (th) {
        int locks = 0;
        avl_node *node;
        mutex_t *tmutex;

        _mutex_lock(&_mutextree_mutex);

        node = avl_get_first (_mutextree);
        
        while (node) {
            tmutex = (mutex_t *)node->key;

            if (tmutex->mutex_id == mutex->mutex_id) {
                if (tmutex->thread_id == th->thread_id) { 
                    /* Deadlock, same thread can't lock the same mutex twice */
                    LOG_ERROR7("DEADLOCK AVOIDED (%d == %d) on mutex [%s] in file %s line %d by thread %d [%s]", 
                         tmutex->thread_id, th->thread_id, mutex->name ? mutex->name : "undefined", file, line, th->thread_id, th->name);

                    _mutex_unlock(&_mutextree_mutex);
                    return;
                }
            } else if (tmutex->thread_id == th->thread_id) { 
                /* Mutex locked by this thread (not this mutex) */
                locks++;
            }

            node = avl_get_next(node);
        }

        if (locks > 0) { 
            /* Has already got a mutex locked */
            if (_multi_mutex.thread_id != th->thread_id) {
                /* Tries to lock two mutexes, but has not got the double mutex, norty boy! */
                LOG_WARN("(%d != %d) Thread %d [%s] tries to lock a second mutex [%s] in file %s line %d, without locking double mutex!",
                     _multi_mutex.thread_id, th->thread_id, th->thread_id, th->name, mutex->name ? mutex->name : "undefined", file, line);
            }
        }
        
        _mutex_unlock(&_mutextree_mutex);
    }
# endif /* CHECK_MUTEXES */
    
    _mutex_lock(mutex);
    
    _mutex_lock(&_mutextree_mutex);

    LOG_DEBUG2("Locked %p by thread %d", mutex, th ? th->thread_id : -1);
    mutex->line = line;
    if (th) {
        mutex->thread_id = th->thread_id;
    }

    _mutex_unlock(&_mutextree_mutex);
#else
    _mutex_lock(mutex);
#endif /* DEBUG_MUTEXES */
}
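The line/file parameters imply that call sites reach this wrapper (and its unlock counterpart in Example #26) through macros that splice in the location; a plausible sketch of that glue, with hypothetical macro names modeled on the _c suffix convention:

/* Hypothetical call-site macros; a project using the *_c convention
   typically defines something equivalent in its threading header. */
#define thread_mutex_lock(m)   thread_mutex_lock_c((m), __LINE__, __FILE__)
#define thread_mutex_unlock(m) thread_mutex_unlock_c((m), __LINE__, __FILE__)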
Example #25
static int mtx_try_lock(mtx_t *mtx) {
  return atomic_cmp_exchange(&mtx->mtx_state, MTX_UNOWNED,
                             (uint32_t)thread_self());
}
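A portable C11 rendering of the same try-lock idea: compare-and-swap the owner word from "unowned" to the caller's id. The types and names below are stand-ins; the sketch only assumes the kernel's atomic_cmp_exchange behaves like the C11 primitive.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define MTX_UNOWNED 0u   /* assumption: 0 encodes "no owner" */

typedef struct {
    _Atomic uint32_t mtx_state;   /* 0, or the owning thread's id */
} c11_mtx_t;

/* Succeeds only if the mutex was unowned; on success mtx_state now
   holds the caller's thread id. */
static bool c11_mtx_try_lock(c11_mtx_t *mtx, uint32_t self_id)
{
    uint32_t expected = MTX_UNOWNED;
    return atomic_compare_exchange_strong(&mtx->mtx_state, &expected, self_id);
}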
Example #26
void thread_mutex_unlock_c(mutex_t *mutex, int line, char *file)
{
#ifdef DEBUG_MUTEXES
    thread_type *th = thread_self();

    if (!th) {
        LOG_ERROR3("No record for %u in unlock [%s:%d]", thread_self(), file, line);
    }

    LOG_DEBUG5("Unlocking %p (%s) on line %d in file %s by thread %d", mutex, mutex->name, line, file, th ? th->thread_id : -1);

    mutex->line = line;

# ifdef CHECK_MUTEXES
    if (th) {
        int locks = 0;
        avl_node *node;
        mutex_t *tmutex;

        _mutex_lock(&_mutextree_mutex);

        node = avl_get_first(_mutextree);

        while (node) {
            tmutex = (mutex_t *)node->key;

            if (tmutex->mutex_id == mutex->mutex_id) {
                if (tmutex->thread_id != th->thread_id) {
                    LOG_ERROR7("ILLEGAL UNLOCK (%d != %d) on mutex [%s] in file %s line %d by thread %d [%s]", tmutex->thread_id, th->thread_id, 
                         mutex->name ? mutex->name : "undefined", file, line, th->thread_id, th->name);
                    _mutex_unlock(&_mutextree_mutex);
                    return;
                }
            } else if (tmutex->thread_id == th->thread_id) {
                locks++;
            }

            node = avl_get_next (node);
        }

        if ((locks > 0) && (_multi_mutex.thread_id != th->thread_id)) {
            /* Don't have double mutex, has more than this mutex left */
        
            LOG_WARN("(%d != %d) Thread %d [%s] tries to unlock a mutex [%s] in file %s line %d, without owning double mutex!",
                 _multi_mutex.thread_id, th->thread_id, th->thread_id, th->name, mutex->name ? mutex->name : "undefined", file, line);
        }

        _mutex_unlock(&_mutextree_mutex);
    }
# endif  /* CHECK_MUTEXES */

    _mutex_unlock(mutex);

    _mutex_lock(&_mutextree_mutex);

    LOG_DEBUG2("Unlocked %p by thread %d", mutex, th ? th->thread_id : -1);
    mutex->line = -1;
    if (th && mutex->thread_id == th->thread_id) {
        mutex->thread_id = MUTEX_STATE_NOTLOCKED;
    }

    _mutex_unlock(&_mutextree_mutex);
#else
    _mutex_unlock(mutex);
#endif /* DEBUG_MUTEXES */
}
Example #27
/*
 * Main routine for exec service.
 */
int
main(int argc, char *argv[])
{
    const struct msg_map *map;
    struct msg *msg;
    object_t obj;
    int error;

    sys_log("Starting exec server\n");

    /* Boost thread priority. */
    thread_setpri(thread_self(), PRI_EXEC);

    /*
     * Set capability for us
     */
    bind_cap("/boot/exec", task_self());

    /*
     * Setup exception handler.
     */
    exception_setup(exception_handler);

    /*
     * Initialize exec loaders.
     */
    exec_init();

    /*
     * Create an object to expose our service.
     */
    error = object_create("!exec", &obj);
    if (error)
        sys_panic("fail to create object");

    msg = malloc(MAX_EXECMSG);
    ASSERT(msg);

    /*
     * Message loop
     */
    for (;;) {
        /*
         * Wait for an incoming request.
         */
        error = msg_receive(obj, msg, MAX_EXECMSG);
        if (error)
            continue;

        error = EINVAL;
        map = &execmsg_map[0];
        while (map->code != 0) {
            if (map->code == msg->hdr.code) {
                error = (*map->func)(msg);
                break;
            }
            map++;
        }
#ifdef DEBUG_EXEC
        if (error)
            DPRINTF(("exec: msg error=%d code=%x\n",
                     error, msg->hdr.code));
#endif
        /*
         * Reply to the client.
         *
         * Note: If EXEC_EXECVE request is handled successfully,
         * the receiver task has been terminated here. But, we
         * have to call msg_reply() even in such case to reset
         * our IPC state.
         */
        msg->hdr.status = error;
        error = msg_reply(obj, msg, MAX_EXECMSG);
    }
}
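The dispatch loop walks a {code, handler} table terminated by a zero code; a hedged sketch of what execmsg_map plausibly looks like (the message name and handler below are assumptions taken from the EXEC_EXECVE comment):

struct msg_map {
    int code;                     /* message code; 0 terminates the table */
    int (*func)(struct msg *);    /* handler, returns an errno-style code */
};

static int exec_execve(struct msg *msg);   /* hypothetical handler */

static const struct msg_map execmsg_map[] = {
    { EXEC_EXECVE, exec_execve },
    { 0, NULL },
};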
Example #28
/*ARGSUSED*/
static void
main_switcher(void *cookie, char *argp, size_t arg_size, door_desc_t *desc,
    uint_t n_desc)
{
	repository_door_request_t *request;
	repository_door_response_t reply;
	door_desc_t reply_desc;

	thread_info_t *ti = thread_self();

	int send_desc = 0;
	int fd;

	thread_newstate(ti, TI_MAIN_DOOR_CALL);
	ti->ti_main_door_request = (void *)argp;

	assert(cookie == REPOSITORY_DOOR_COOKIE);

	reply.rdr_status = INVALID_RESULT;

	if (argp == DOOR_UNREF_DATA) {
		backend_fini();

		exit(CONFIGD_EXIT_LOST_MAIN_DOOR);
	}

	/*
	 * No file descriptors allowed
	 */
	assert(n_desc == 0);

	/*
	 * first, we just check the version
	 */
	if (arg_size < offsetofend(repository_door_request_t, rdr_version)) {
		reply.rdr_status = REPOSITORY_DOOR_FAIL_BAD_REQUEST;
		goto fail;
	}

	/* LINTED alignment */
	request = (repository_door_request_t *)argp;
	ti->ti_main_door_request = request;

	if (request->rdr_version != REPOSITORY_DOOR_VERSION) {
		reply.rdr_status = REPOSITORY_DOOR_FAIL_VERSION_MISMATCH;
		goto fail;
	}

	/*
	 * Now, check that the argument is of the minimum required size
	 */
	if (arg_size < offsetofend(repository_door_request_t, rdr_request)) {
		reply.rdr_status = REPOSITORY_DOOR_FAIL_BAD_REQUEST;
		goto fail;
	}

	if (door_ucred(&ti->ti_ucred) != 0) {
		reply.rdr_status = REPOSITORY_DOOR_FAIL_PERMISSION_DENIED;
		goto fail;
	}

	switch (request->rdr_request) {
	case REPOSITORY_DOOR_REQUEST_CONNECT:
		fd = -1;
		reply.rdr_status = create_connection(ti->ti_ucred, request,
		    arg_size, &fd);
		if (reply.rdr_status != REPOSITORY_DOOR_SUCCESS) {
			assert(fd == -1);
			goto fail;
		}
		assert(fd != -1);
		reply_desc.d_attributes = DOOR_DESCRIPTOR | DOOR_RELEASE;
		reply_desc.d_data.d_desc.d_descriptor = fd;
		send_desc = 1;
		break;

	default:
		reply.rdr_status = REPOSITORY_DOOR_FAIL_BAD_REQUEST;
		goto fail;
	}

fail:
	assert(reply.rdr_status != INVALID_RESULT);

	thread_newstate(ti, TI_DOOR_RETURN);
	ti->ti_main_door_request = NULL;

	(void) door_return((char *)&reply, sizeof (reply),
	    &reply_desc, (send_desc)? 1:0);
	(void) door_return(NULL, 0, NULL, 0);
}
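main_switcher() is a door server procedure; a minimal sketch of how such a procedure is typically published with the Solaris doors API (path handling is simplified; DOOR_UNREF is what makes the DOOR_UNREF_DATA delivery above possible):

#include <door.h>
#include <stropts.h>   /* fattach()/fdetach() */
#include <stdio.h>

/* Hedged sketch: create the door and attach it to an existing file so
   clients can door_call() it. The cookie must match the assert above. */
static int setup_door(const char *path)
{
    int fd = door_create(main_switcher, REPOSITORY_DOOR_COOKIE, DOOR_UNREF);
    if (fd < 0)
        return -1;
    (void) fdetach(path);          /* clear any stale attachment */
    if (fattach(fd, path) < 0) {
        perror("fattach");
        return -1;
    }
    return fd;
}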
Example #29
int sem_wait(sem_t * sem)
{
  __volatile__ pthread_descr self = thread_self();
  pthread_extricate_if extr;
  int already_canceled = 0;
  int spurious_wakeup_count;

  /* Set up extrication interface */
  extr.pu_object = sem;
  extr.pu_extricate_func = new_sem_extricate_func;

  __pthread_lock(&sem->__sem_lock, self);
  if (sem->__sem_value > 0) {
    sem->__sem_value--;
    __pthread_unlock(&sem->__sem_lock);
    return 0;
  }
  /* Register extrication interface */
  THREAD_SETMEM(self, p_sem_avail, 0);
  __pthread_set_own_extricate_if(self, &extr);
  /* Enqueue only if not already cancelled. */
  if (!(THREAD_GETMEM(self, p_canceled)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE))
    enqueue(&sem->__sem_waiting, self);
  else
    already_canceled = 1;
  __pthread_unlock(&sem->__sem_lock);

  if (already_canceled) {
    __pthread_set_own_extricate_if(self, 0);
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  }

  /* Wait for sem_post or cancellation, or fall through if already canceled */
  spurious_wakeup_count = 0;
  while (1)
    {
      suspend(self);
      if (THREAD_GETMEM(self, p_sem_avail) == 0
	  && (THREAD_GETMEM(self, p_woken_by_cancel) == 0
	      || THREAD_GETMEM(self, p_cancelstate) != PTHREAD_CANCEL_ENABLE))
	{
	  /* Count resumes that don't belong to us. */
	  spurious_wakeup_count++;
	  continue;
	}
      break;
    }
  __pthread_set_own_extricate_if(self, 0);

  /* Terminate only if the wakeup came from cancellation. */
  /* Otherwise ignore cancellation because we got the semaphore. */

  if (THREAD_GETMEM(self, p_woken_by_cancel)
      && THREAD_GETMEM(self, p_cancelstate) == PTHREAD_CANCEL_ENABLE) {
    THREAD_SETMEM(self, p_woken_by_cancel, 0);
    __pthread_do_exit(PTHREAD_CANCELED, CURRENT_STACK_FRAME);
  }
  /* We got the semaphore */
  return 0;
}
Example #30
int set_thread_affinity(int cpuid) {
  unsigned long mask = 0xffffffff;
  unsigned int len = sizeof(mask);

#ifdef _WIN32
  HANDLE hThread;
#endif	
  
#ifdef _WIN32
  SET_MASK(cpuid)
  hThread = GetCurrentThread();
  if (SetThreadAffinityMask(hThread, mask) == 0) {
    return -1;
  }
#elif  CMK_HAS_PTHREAD_SETAFFINITY
#ifdef CPU_ALLOC
 if ( cpuid >= CPU_SETSIZE ) {
  cpu_set_t *cpusetp;
  pthread_t thread;
  size_t size;
  int num_cpus;
  num_cpus = cpuid + 1;
  cpusetp = CPU_ALLOC(num_cpus);
  if (cpusetp == NULL) {
    perror("set_thread_affinity CPU_ALLOC");
    return -1;
  }
  size = CPU_ALLOC_SIZE(num_cpus);
  thread = pthread_self();
  CPU_ZERO_S(size, cpusetp);
  CPU_SET_S(cpuid, size, cpusetp);
  if ((errno = pthread_setaffinity_np(thread, size, cpusetp))) {
    perror("pthread_setaffinity dynamically allocated");
    CPU_FREE(cpusetp);
    return -1;
  }
  CPU_FREE(cpusetp);
 } else
#endif
 {
  int s, j;
  cpu_set_t cpuset;
  pthread_t thread;

  thread = pthread_self();

  CPU_ZERO(&cpuset);
  CPU_SET(cpuid, &cpuset);

  if ((errno = pthread_setaffinity_np(thread, sizeof(cpu_set_t), &cpuset))) {
    perror("pthread_setaffinity");
    return -1;
  }
 }
#elif CMK_HAS_BINDPROCESSOR
  if (bindprocessor(BINDTHREAD, thread_self(), cpuid) != 0)
    return -1;
#else
  return set_cpu_affinity(cpuid);
#endif

  return 0;
}
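A typical caller pins itself right after startup; a minimal usage sketch, assuming the function above is linked in and CPU ids are numbered densely from 0:

#include <stdio.h>

int main(void)
{
    /* Pin the calling thread to CPU 0 and report the outcome. */
    if (set_thread_affinity(0) != 0) {
        fprintf(stderr, "could not pin thread to CPU 0\n");
        return 1;
    }
    printf("pinned to CPU 0\n");
    return 0;
}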