Example #1
int
Ndb_GetRUsage(ndb_rusage* dst)
{
  int res = -1;
#ifdef _WIN32
  FILETIME create_time;
  FILETIME exit_time;
  FILETIME kernel_time;
  FILETIME user_time;

  dst->ru_minflt = 0;
  dst->ru_majflt = 0;
  dst->ru_nvcsw = 0;
  dst->ru_nivcsw = 0;

  /**
   * GetThreadTimes values are updated once per timer interval, so they
   * can't be used for microsecond measurements, but they are good enough
   * for tracking CPU usage on a per-second basis.
   */
  bool ret = GetThreadTimes( GetCurrentThread(),
                             &create_time,
                             &exit_time,
                             &kernel_time,
                             &user_time);
  if (ret)
  {
    /* Successful return */
    res = 0;

    Uint64 tmp = user_time.dwHighDateTime;
    tmp <<= 32;
    tmp += user_time.dwLowDateTime;
    /**
     * Time is reported in microseconds, but Windows reports it in
     * 100 ns intervals, so the Windows counter must be divided by 10.
     */
    dst->ru_utime = tmp / 10;

    tmp = kernel_time.dwHighDateTime;
    tmp <<= 32;
    tmp += kernel_time.dwLowDateTime;
    dst->ru_stime = tmp / 10;
  }
  else
  {
    res = -1;
  }
#elif defined(HAVE_MAC_OS_X_THREAD_INFO)
  mach_port_t thread_port;
  kern_return_t ret_code;
  mach_msg_type_number_t basic_info_count;
  thread_basic_info_data_t basic_info;

  /**
   * mach_thread_self allocates memory so it needs to be
   * released immediately since we don't want to burden
   * the code with keeping track of this value.
   */
  thread_port = mach_thread_self();
  if (thread_port != MACH_PORT_NULL)
  {
    basic_info_count = THREAD_BASIC_INFO_COUNT; /* in/out: buffer size */
    ret_code = thread_info(thread_port,
                           THREAD_BASIC_INFO,
                           (thread_info_t) &basic_info,
                           &basic_info_count);
  
    mach_port_deallocate(our_mach_task, thread_port); /* our_mach_task: file-scope task port */

    if (ret_code == KERN_SUCCESS)
    {
      dst->ru_minflt = 0;
      dst->ru_majflt = 0;
      dst->ru_nvcsw = 0;
      dst->ru_nivcsw = 0;

      Uint64 tmp;
      tmp = basic_info.user_time.seconds * 1000000;
      tmp += basic_info.user_time.microseconds;
      dst->ru_utime = tmp;

      tmp = basic_info.system_time.seconds * 1000000;
      tmp += basic_info.system_time.microseconds;
      dst->ru_stime = tmp;

      res = 0;
    }
    else
    {
      res = -1;
    }
  }
  else
  {
    res = -2; /* Report -2 to distinguish error cases for debugging. */
  }
#else
#ifdef HAVE_GETRUSAGE
  struct rusage tmp;
#ifdef RUSAGE_THREAD
  res = getrusage(RUSAGE_THREAD, &tmp);
#elif defined RUSAGE_LWP
  res = getrusage(RUSAGE_LWP, &tmp);
#endif

  if (res == 0)
  {
    dst->ru_utime = micros(tmp.ru_utime);
    dst->ru_stime = micros(tmp.ru_stime);
    dst->ru_minflt = tmp.ru_minflt;
    dst->ru_majflt = tmp.ru_majflt;
    dst->ru_nvcsw = tmp.ru_nvcsw;
    dst->ru_nivcsw = tmp.ru_nivcsw;
  }
#endif
#endif

  if (res != 0)
  {
    bzero(dst, sizeof(* dst));
  }
  return res;
}
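
Both time fields come back in microseconds regardless of platform, so callers can consume them uniformly. A minimal, hypothetical driver (assuming only the ndb_rusage fields used above) might look like:

#include <stdio.h>

/* Hypothetical usage sketch: print the calling thread's CPU usage.
   Assumes the ndb_rusage layout used by Ndb_GetRUsage() above. */
static void report_thread_cpu(void)
{
  ndb_rusage usage;
  if (Ndb_GetRUsage(&usage) == 0) {
    printf("user: %llu us, system: %llu us\n",
           (unsigned long long) usage.ru_utime,
           (unsigned long long) usage.ru_stime);
  } else {
    printf("per-thread CPU usage not available on this platform\n");
  }
}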
Example #2
int
main (int argc, char **argv, char **envp)
{
  mach_port_t boot;
  error_t err;
  mach_port_t pset, psetcntl;
  void *genport;
  process_t startup_port;
  struct argp argp = { 0, 0, 0, "Hurd process server" };

  argp_parse (&argp, argc, argv, 0, 0, 0);

  initialize_version_info ();

  err = task_get_bootstrap_port (mach_task_self (), &boot);
  assert_perror (err);
  if (boot == MACH_PORT_NULL)
    error (2, 0, "proc server can only be run by init during boot");

  proc_bucket = ports_create_bucket ();
  proc_class = ports_create_class (0, 0);
  generic_port_class = ports_create_class (0, 0);
  exc_class = ports_create_class (exc_clean, 0);
  ports_create_port (generic_port_class, proc_bucket,
		     sizeof (struct port_info), &genport);
  generic_port = ports_get_right (genport);

  /* Create the initial proc object for init (PID 1).  */
  startup_proc = create_startup_proc ();

  /* Create our own proc object (we are PID 0).  */
  self_proc = allocate_proc (mach_task_self ());
  assert (self_proc);

  complete_proc (self_proc, 0);

  startup_port = ports_get_send_right (startup_proc);
  err = startup_procinit (boot, startup_port, &startup_proc->p_task,
			  &authserver, &master_host_port, &master_device_port);
  assert_perror (err);
  mach_port_deallocate (mach_task_self (), startup_port);

  mach_port_mod_refs (mach_task_self (), authserver, MACH_PORT_RIGHT_SEND, 1);
  _hurd_port_set (&_hurd_ports[INIT_PORT_AUTH], authserver);
  mach_port_deallocate (mach_task_self (), boot);

  proc_death_notify (startup_proc);
  add_proc_to_hash (startup_proc); /* Now that we have the task port.  */

  /* Set our own argv and envp locations.  */
  self_proc->p_argv = (vm_address_t) argv;
  self_proc->p_envp = (vm_address_t) envp;

  /* Give ourselves good scheduling performance, because we are so
     important. */
  err = thread_get_assignment (mach_thread_self (), &pset);
  assert_perror (err);
  err = host_processor_set_priv (master_host_port, pset, &psetcntl);
  assert_perror (err);
  err = thread_max_priority (mach_thread_self (), psetcntl, 0);
  assert_perror (err);
  err = task_priority (mach_task_self (), 2, 1);
  assert_perror (err);

  mach_port_deallocate (mach_task_self (), pset);
  mach_port_deallocate (mach_task_self (), psetcntl);

  {
    /* Get our stderr set up to print on the console, in case we have
       to panic or something.  */
    mach_port_t cons;
    error_t err;
    err = device_open (master_device_port, D_READ|D_WRITE, "console", &cons);
    assert_perror (err);
    stdin = mach_open_devstream (cons, "r");
    stdout = stderr = mach_open_devstream (cons, "w");
    mach_port_deallocate (mach_task_self (), cons);
  }

  while (1)
    ports_manage_port_operations_multithread (proc_bucket,
					      message_demuxer,
					      0, 0, 0);
}
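Example #3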
/*  wqthread note: The kernel may create or destroy pthreads in the 
    wqthread pool at any time with no userspace interaction, 
    and wqthread_start may be entered at any time with no userspace 
    interaction.
    To handle this in valgrind, we create and destroy a valgrind 
    thread for every work item.
*/
void wqthread_hijack(Addr self, Addr kport, Addr stackaddr, Addr workitem, 
                     Int reuse, Addr sp)
{
   ThreadState *tst;
   VexGuestAMD64State *vex;
   Addr stack;
   SizeT stacksize;
   vki_sigset_t blockall;

   /* When we enter here we hold no lock (!), so we better acquire it
      pronto.  Why do we hold no lock?  Because (presumably) the only
      way to get here is as a result of a SfMayBlock syscall
      "workq_ops(WQOPS_THREAD_RETURN)", which will have dropped the
      lock.  At least that's clear for the 'reuse' case.  The
      non-reuse case?  Dunno, perhaps it's a new thread the kernel
      pulled out of a hat.  In any case we still need to take a
      lock. */
   VG_(acquire_BigLock_LL)("wqthread_hijack");

   if (0) VG_(printf)(
             "wqthread_hijack: self %#lx, kport %#lx, "
	     "stackaddr %#lx, workitem %#lx, reuse/flags %x, sp %#lx\n", 
	     self, kport, stackaddr, workitem, reuse, sp);

   /* Start the thread with all signals blocked.  VG_(scheduler) will
      set the mask correctly when we finally get there. */
   VG_(sigfillset)(&blockall);
   VG_(sigprocmask)(VKI_SIG_SETMASK, &blockall, NULL);

   /* For 10.7 and earlier, |reuse| appeared to be used as a simple
      boolean.  In 10.8 and later its name changed to |flags| and has
      various other bits OR-d into it too, so it's necessary to fish
      out just the relevant parts.  Hence: */
#  if DARWIN_VERS <= DARWIN_10_7
   Bool is_reuse = reuse != 0;
#  elif DARWIN_VERS > DARWIN_10_7
   Bool is_reuse = (reuse & 0x20000 /* == WQ_FLAG_THREAD_REUSE */) != 0;
#  else
#    error "Unsupported Darwin version"
#  endif

   if (is_reuse) {

     /* For whatever reason, tst->os_state.pthread appears to have a
        constant offset of 96 on 10.7, but zero on 10.6 and 10.5.  No
        idea why. */
#      if DARWIN_VERS <= DARWIN_10_6
       UWord magic_delta = 0;
#      elif DARWIN_VERS == DARWIN_10_7 || DARWIN_VERS == DARWIN_10_8
       UWord magic_delta = 0x60;
#      elif DARWIN_VERS == DARWIN_10_9 || DARWIN_VERS == DARWIN_10_10
       UWord magic_delta = 0xE0;
#      elif DARWIN_VERS == DARWIN_10_11
       UWord magic_delta = 0x100;
#      else
#        error "magic_delta: to be computed on new OS version"
         // magic_delta = tst->os_state.pthread - self
#      endif

       // This thread already exists; we're merely re-entering 
       // after leaving via workq_ops(WQOPS_THREAD_RETURN). 
       // Don't allocate any V thread resources.
       // Do reset thread registers.
       ThreadId tid = VG_(lwpid_to_vgtid)(kport);
       vg_assert(VG_(is_valid_tid)(tid));
       vg_assert(mach_thread_self() == kport);

       tst = VG_(get_ThreadState)(tid);

       if (0) VG_(printf)("wqthread_hijack reuse %s: tid %d, tst %p, "
                          "tst->os_state.pthread %#lx, self %#lx\n",
                          tst->os_state.pthread == self ? "SAME" : "DIFF",
                          tid, tst, tst->os_state.pthread, self);

       vex = &tst->arch.vex;
       vg_assert(tst->os_state.pthread - magic_delta == self);
   }
   else {
       // This is a new thread.
       tst = VG_(get_ThreadState)(VG_(alloc_ThreadState)());        
       vex = &tst->arch.vex;
       allocstack(tst->tid);
       LibVEX_GuestAMD64_initialise(vex);
   }
       
   // Set thread's registers
   // Do this FIRST because some code below tries to collect a backtrace, 
   // which requires valid register data.
   vex->guest_RIP = wqthread_starter;
   vex->guest_RDI = self;
   vex->guest_RSI = kport;
   vex->guest_RDX = stackaddr;
   vex->guest_RCX = workitem;
   vex->guest_R8  = reuse;
   vex->guest_R9  = 0;
   vex->guest_RSP = sp;

   stacksize = 512*1024;  // wq stacks are always DEFAULT_STACK_SIZE
   stack = VG_PGROUNDUP(sp) - stacksize;

   if (is_reuse) {
      // Continue V's thread back in the scheduler. 
      // The client thread is of course in another location entirely.

      /* Drop the lock before going into
         ML_(wqthread_continue_NORETURN).  The latter will immediately
         attempt to reacquire it in non-LL mode, which is a bit
         wasteful but I don't think is harmful.  A better solution
         would be to not drop the lock but instead "upgrade" it from a
         LL lock to a full lock, but that's too much like hard work
         right now. */
      VG_(release_BigLock_LL)("wqthread_hijack(1)");
      ML_(wqthread_continue_NORETURN)(tst->tid);
   } 
   else {
      // Record thread's stack and Mach port and pthread struct
      tst->os_state.pthread = self;
      tst->os_state.lwpid = kport;
      record_named_port(tst->tid, kport, MACH_PORT_RIGHT_SEND, "wqthread-%p");
      
      // kernel allocated stack - needs mapping
      tst->client_stack_highest_byte = stack+stacksize-1;
      tst->client_stack_szB = stacksize;

      // GrP fixme scheduler lock?!
      
      // pthread structure
      ML_(notify_core_and_tool_of_mmap)(
            stack+stacksize, pthread_structsize, 
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // stack contents
      // GrP fixme uninitialized!
      ML_(notify_core_and_tool_of_mmap)(
            stack, stacksize, 
            VKI_PROT_READ|VKI_PROT_WRITE, VKI_MAP_PRIVATE, -1, 0);
      // guard page
      // GrP fixme ban_mem_stack!
      ML_(notify_core_and_tool_of_mmap)(
            stack-VKI_PAGE_SIZE, VKI_PAGE_SIZE,
            0, VKI_MAP_PRIVATE, -1, 0);

      ML_(sync_mappings)("after", "wqthread_hijack", 0);

      // Go!
      /* Same comments as the 'release' in the then-clause.
         start_thread_NORETURN calls run_thread_NORETURN calls
         thread_wrapper which acquires the lock before continuing.
         Let's hope nothing non-thread-local happens until that point.

         DDD: I think this is plain wrong .. if we get to
         thread_wrapper not holding the lock, and someone has recycled
         this thread slot in the meantime, we're hosed.  Is that
         possible, though? */
      VG_(release_BigLock_LL)("wqthread_hijack(2)");
      call_on_new_stack_0_1(tst->os_state.valgrind_stack_init_SP, 0, 
                            start_thread_NORETURN, (Word)tst);
   }

   /*NOTREACHED*/
   vg_assert(0);
}
Ejemplo n.º 4
0
int
mono_sgen_thread_handshake (int signum)
{
	task_t task = current_task ();
	thread_port_t cur_thread = mach_thread_self ();
	thread_act_array_t thread_list;
	mach_msg_type_number_t num_threads;
	mach_msg_type_number_t num_state;
	thread_state_t state;
	kern_return_t ret;
	ucontext_t ctx;
	mcontext_t mctx;
	pthread_t exception_thread = mono_gc_get_mach_exception_thread ();

	SgenThreadInfo *info;
	gpointer regs [ARCH_NUM_REGS];
	gpointer stack_start;

	int count, i;

	mono_mach_get_threads (&thread_list, &num_threads);

	for (i = 0, count = 0; i < num_threads; i++) {
		thread_port_t t = thread_list [i];
		pthread_t pt = pthread_from_mach_thread_np (t);
		if (t != cur_thread && pt != exception_thread && !mono_sgen_is_worker_thread (pt)) {
			if (signum == suspend_signal_num) {
				ret = thread_suspend (t);
				if (ret != KERN_SUCCESS) {
					mach_port_deallocate (task, t);
					continue;
				}

				state = (thread_state_t) alloca (mono_mach_arch_get_thread_state_size ());
				ret = mono_mach_arch_get_thread_state (t, state, &num_state);
				if (ret != KERN_SUCCESS) {
					mach_port_deallocate (task, t);
					continue;
				}


				info = mono_sgen_thread_info_lookup (pt);

				/* Ensure that the runtime is aware of this thread */
				if (info != NULL) {
					mctx = (mcontext_t) alloca (mono_mach_arch_get_mcontext_size ());
					mono_mach_arch_thread_state_to_mcontext (state, mctx);
					ctx.uc_mcontext = mctx;

					info->stopped_domain = mono_mach_arch_get_tls_value_from_thread (t, mono_pthread_key_for_tls (mono_domain_get_tls_key ()));
					info->stopped_ip = (gpointer) mono_mach_arch_get_ip (state);
					stack_start = (char*) mono_mach_arch_get_sp (state) - REDZONE_SIZE;
					/* If stack_start is not within the limits, then don't set it in info and we will be restarted. */
					if (stack_start >= info->stack_start_limit && stack_start <= info->stack_end) {
						info->stack_start = stack_start;

						ARCH_COPY_SIGCTX_REGS (regs, &ctx);
						info->stopped_regs = regs;
					} else {
						g_assert (!info->stack_start);
					}

					/* Notify the JIT */
					if (mono_gc_get_gc_callbacks ()->thread_suspend_func)
						mono_gc_get_gc_callbacks ()->thread_suspend_func (info->runtime_data, &ctx);
				}
			} else {
				ret = thread_resume (t);
				if (ret != KERN_SUCCESS) {
					mach_port_deallocate (task, t);
					continue;
				}
			}
			count ++;

			mach_port_deallocate (task, t);
		}
	}

	mach_port_deallocate (task, cur_thread);

	return count;
}
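
The loop above combines three Mach idioms: suspend a thread before reading its state, resume it afterwards, and deallocate every thread port you were handed (each entry of thread_list, plus the mach_thread_self() result, holds a reference). A standalone sketch of the suspend/inspect/resume part, assuming an x86_64 macOS host and a valid thread port t (this is not Mono code):

#include <mach/mach.h>

/* Suspend a thread, read its register state, and resume it. */
static kern_return_t sample_thread(thread_act_t t)
{
    kern_return_t kr = thread_suspend(t);
    if (kr != KERN_SUCCESS)
        return kr;

    x86_thread_state64_t state;
    mach_msg_type_number_t count = x86_THREAD_STATE64_COUNT;
    kr = thread_get_state(t, x86_THREAD_STATE64,
                          (thread_state_t) &state, &count);
    /* ... inspect the instruction/stack pointers here ... */

    (void) thread_resume(t);  /* always resume, even if the read failed */
    return kr;
}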
Example #5
File: sets.c Project: TalAloni/xnu
/*
 * This is the central function for every thread.
 * For each invocation, its role is set by (a pointer to) a stage_info_t.
 */
void *
manager_fn(void *arg)
{
	stage_info_t			*sp = (stage_info_t *) arg;
	line_info_t			*lp = sp->set;
	kern_return_t			ret;
	long				iteration = 0;

	/*
	 * If we're using affinity sets (we are by default)
	 * set our tag to our thread set number.
	 */
#ifdef AVAILABLE_MAC_OS_X_VERSION_10_5_AND_LATER
	thread_extended_policy_data_t	epolicy;
	thread_affinity_policy_data_t	policy;

	epolicy.timeshare = FALSE;
	ret = thread_policy_set(
			mach_thread_self(), THREAD_EXTENDED_POLICY,
			(thread_policy_t) &epolicy,
			THREAD_EXTENDED_POLICY_COUNT);
	if (ret != KERN_SUCCESS)
		printf("thread_policy_set(THREAD_EXTENDED_POLICY) returned %d\n", ret);
	
	if (affinity) {
		policy.affinity_tag = lp->setnum;
		ret = thread_policy_set(
				mach_thread_self(), THREAD_AFFINITY_POLICY,
				(thread_policy_t) &policy,
				THREAD_AFFINITY_POLICY_COUNT);
		if (ret != KERN_SUCCESS)
			printf("thread_policy_set(THREAD_AFFINITY_POLICY) returned %d\n", ret);
	}
#endif

	DBG("Starting %s set: %d stage: %d\n", sp->name, lp->setnum, sp->stagenum);

	/*
	 * Start barrier.
	 * The last thread to get here releases everyone and starts the timer.
	 */
	pthread_mutex_lock(&funnel);
	threads_ready++;
	if (threads_ready == threads) {
		pthread_mutex_unlock(&funnel);
		if (halting) {
			printf("  all threads ready for process %d, "
				"hit any key to start", getpid());
			fflush(stdout);
			(void) getchar();
		}
		pthread_cond_broadcast(&barrier);
		timer = mach_absolute_time();
	} else {
		pthread_cond_wait(&barrier, &funnel);
		pthread_mutex_unlock(&funnel);
	}

	do {
		int		i;
		work_t		*workp;

		/*
		 * Get a buffer from the input queue.
		 * Block if none.
		 */
		pthread_mutex_lock(&sp->input->mtx);
		while (1) {
			workp = TAILQ_FIRST(&(sp->input->queue));
			if (workp != NULL)
				break;
			DBG("    %s[%d,%d] iteration %d waiting for buffer\n",
				sp->name, lp->setnum, sp->stagenum, iteration);
			sp->input->waiters = TRUE;
			pthread_cond_wait(&sp->input->cnd, &sp->input->mtx);
			sp->input->waiters = FALSE;
		}
		TAILQ_REMOVE(&(sp->input->queue), workp, link);
		pthread_mutex_unlock(&sp->input->mtx);

		DBG("  %s[%d,%d] iteration %d work %p data %p\n",
			sp->name, lp->setnum, sp->stagenum, iteration, workp, workp->data);

		/* Do our stuff with the buffer */
		(void) sp->fn(workp->data, lp->isize);

		/*
		 * Place the buffer on the output queue (the next stage's input).
		 * Signal waiters if required.
		 */
		pthread_mutex_lock(&sp->output->mtx);
		TAILQ_INSERT_TAIL(&(sp->output->queue), workp, link);
		if (sp->output->waiters) {
			DBG("    %s[%d,%d] iteration %d signaling work\n",
				sp->name, lp->setnum, sp->stagenum, iteration);
			pthread_cond_signal(&sp->output->cnd);
		}
		pthread_mutex_unlock(&sp->output->mtx);
	} while (++iteration < iterations);

	DBG("Ending %s[%d,%d]\n", sp->name, lp->setnum, sp->stagenum);

	return (void *) iteration;
}
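
The affinity hint used above reduces to one thread_policy_set() call per thread. A standalone sketch (the tag value is caller-chosen; threads sharing a tag are scheduled close together):

#include <mach/mach.h>
#include <mach/thread_policy.h>

/* Hint that the calling thread belongs to affinity group `tag`. */
static kern_return_t set_affinity_tag(integer_t tag)
{
    thread_affinity_policy_data_t policy = { tag };
    thread_port_t self = mach_thread_self();
    kern_return_t kr = thread_policy_set(self, THREAD_AFFINITY_POLICY,
                                         (thread_policy_t) &policy,
                                         THREAD_AFFINITY_POLICY_COUNT);
    mach_port_deallocate(mach_task_self(), self);  /* drop the self reference */
    return kr;
}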
Example #6
/* this initializes the subsystem (sets the exception port, starts the
   exception handling thread, etc) */
static void macosx_init_exception_handler() 
{
  mach_port_t thread_self, exc_port_s;
  mach_msg_type_name_t type;
  kern_return_t retval;

  /* get ids for ourself */
  if(!task_self) task_self = mach_task_self();
  thread_self = mach_thread_self();

  /* allocate the port we're going to get exceptions on */
  retval = mach_port_allocate(task_self, MACH_PORT_RIGHT_RECEIVE, &exc_port);
  if(retval != KERN_SUCCESS) {
    GCPRINT(GCOUTF, "Couldn't allocate exception port: %s\n", 
	   mach_error_string(retval));
    abort();
  }

  /* extract out the send rights for that port, which the OS needs */
  retval = mach_port_extract_right(task_self, exc_port, MACH_MSG_TYPE_MAKE_SEND,
				   &exc_port_s, &type);
  if(retval != KERN_SUCCESS) {
    GCPRINT(GCOUTF, "Couldn't extract send rights: %s\n", mach_error_string(retval));
    abort();
  }

  /* set the exception ports for this thread to the above */
  retval = thread_set_exception_ports(thread_self, EXC_MASK_BAD_ACCESS, 
				      exc_port_s, EXCEPTION_DEFAULT, 
				      ARCH_THREAD_STATE);
  if(retval != KERN_SUCCESS) {
    GCPRINT(GCOUTF, "Couldn't set exception ports: %s\n", mach_error_string(retval));
    abort();
  }

#ifdef PPC_HAND_ROLLED_THREAD 
  /* Old hand-rolled thread creation. pthread_create is fine for our
     purposes. */
 {
   /* set up the subthread */
   mach_port_t exc_thread;
   ARCH_thread_state_t *exc_thread_state;
   void *subthread_stack;

   retval = thread_create(task_self, &exc_thread);
   if(retval != KERN_SUCCESS) {
     GCPRINT(GCOUTF, "Couldn't create exception thread: %s\n", mach_error_string(retval));
     abort();
   }
   subthread_stack = (void*)malloc(page_size);
   subthread_stack += (page_size - C_ARGSAVE_LEN - C_RED_ZONE);
   exc_thread_state = (ARCH_thread_state_t*)malloc(sizeof(ARCH_thread_state_t));
   exc_thread_state->srr0 = (unsigned int)exception_thread;
   exc_thread_state->r1 = (unsigned int)subthread_stack;
   retval = thread_set_state(exc_thread, ARCH_THREAD_STATE,
			     (thread_state_t)exc_thread_state,
			     ARCH_THREAD_STATE_COUNT);
   if(retval != KERN_SUCCESS) {
     GCPRINT(GCOUTF, "Couldn't set subthread state: %s\n", mach_error_string(retval));
     abort();
   }
   retval = thread_resume(exc_thread);
   if(retval != KERN_SUCCESS) {
     GCPRINT(GCOUTF, "Couldn't resume subthread: %s\n", mach_error_string(retval));
     abort();
   }
 }
#else
 {
   pthread_t th;
   pthread_create(&th, NULL, (void *(*)(void *))exception_thread, NULL);
 }
#endif
}
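
The receive-right allocation and send-right extraction above are the standard way to build a port the kernel can deliver exceptions to. A minimal sketch of the same setup; it uses mach_port_insert_right() as an alternative to the mach_port_extract_right() call in the example:

#include <mach/mach.h>

/* Allocate a receive right and add a send right under the same name. */
static kern_return_t make_exception_port(mach_port_t *port)
{
    mach_port_t task = mach_task_self();
    kern_return_t kr = mach_port_allocate(task, MACH_PORT_RIGHT_RECEIVE, port);
    if (kr != KERN_SUCCESS)
        return kr;
    return mach_port_insert_right(task, *port, *port, MACH_MSG_TYPE_MAKE_SEND);
}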
Example #7
void runtime_terminate() {
  thread_terminate(mach_thread_self());
}
Example #8
int
parallel_set_affinity(int cpu)
{
#if defined(HAVE_PTHREAD_H) && defined(CPU_ZERO)

    /* Linux and BSD pthread affinity */

#ifdef HAVE_CPU_SET_T
    cpu_set_t set_of_cpus; /* linux */
#else
    cpuset_t set_of_cpus; /* bsd */
#endif
    int ret;
    int ncpus = bu_avail_cpus();

    CPU_ZERO(&set_of_cpus);

    /* Set affinity to a single CPU core */
    CPU_SET(cpu % ncpus, &set_of_cpus);
    ret = pthread_setaffinity_np(pthread_self(), sizeof(set_of_cpus), &set_of_cpus);

    return ret;

#elif defined(HAVE_MACH_THREAD_POLICY_H)

    /* Mac OS X mach thread affinity hinting.  Mach implements a CPU
     * affinity policy by default so this just sets up an additional
     * hint on how threads can be grouped/ungrouped.  Here we set all
     * threads up into their own group so threads will get their own
     * cpu and hopefully be kept in place by Mach from there.
     */

    thread_extended_policy_data_t epolicy;
    thread_affinity_policy_data_t apolicy;
    thread_t curr_thread = mach_thread_self();
    kern_return_t ret;

    /* discourage interrupting this thread */
    epolicy.timeshare = FALSE;
    ret = thread_policy_set(curr_thread, THREAD_EXTENDED_POLICY, (thread_policy_t) &epolicy, THREAD_EXTENDED_POLICY_COUNT);
    if (ret != KERN_SUCCESS)
	return -1;

    /* put each thread into a separate group */
    apolicy.affinity_tag = cpu % bu_avail_cpus();
    ret = thread_policy_set(curr_thread, THREAD_AFFINITY_POLICY, (thread_policy_t) &apolicy, THREAD_AFFINITY_POLICY_COUNT);
    if (ret != KERN_SUCCESS)
	return -1;

    return 0;

#elif defined(HAVE_WINDOWS_H)

    BOOL ret = SetThreadAffinityMask(GetCurrentThread(), 1ul << cpu % bu_avail_cpus());
    if (ret  == 0)
	return -1;

    return 0;

#else

    /* don't know how to set thread affinity on this platform */
    cpu = 0;	/* do something with cpu to avoid an unused parameter warning */
    return cpu;

#endif
}
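
A typical caller invokes this once per worker, right after thread start. A hypothetical pthread entry point (worker_main and its id argument are illustrative, not part of the library above):

#include <pthread.h>
#include <stdio.h>

/* Hypothetical worker entry point: pin the thread before doing work. */
static void *worker_main(void *arg)
{
    int id = *(int *) arg;
    if (parallel_set_affinity(id) != 0)
        fprintf(stderr, "worker %d: affinity not set, running unpinned\n", id);
    /* ... CPU-bound work ... */
    return NULL;
}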
Example #9
PASTIX_INT sopalin_bindthread(PASTIX_INT cpu)
{
#ifdef MARCEL

  {
    marcel_vpset_t vpset = MARCEL_VPSET_ZERO;
    marcel_vpset_vp(&vpset, cpu);
    marcel_apply_vpset(&vpset);
  }

#else /* Dans les autres cas on se preoccupe de l'archi */

#ifdef WITH_HWLOC
  {
    hwloc_topology_t topology; /* Topology object */
    hwloc_obj_t      obj;      /* Hwloc object    */
    hwloc_cpuset_t   cpuset;   /* HwLoc cpuset    */

    /* Allocate and initialize topology object.  */
    hwloc_topology_init(&topology);

    /* Perform the topology detection.  */
    hwloc_topology_load(topology);

    /* Get last one.  */
    obj = hwloc_get_obj_by_type(topology, HWLOC_OBJ_CORE, cpu);
    if (!obj)
      return 0;

    /* Get a copy of its cpuset that we may modify.  */
    /* Get only one logical processor (in case the core is SMT/hyperthreaded).  */
#if !defined(HWLOC_BITMAP_H)
    cpuset = hwloc_cpuset_dup(obj->cpuset);
    hwloc_cpuset_singlify(cpuset);
#else
    cpuset = hwloc_bitmap_dup(obj->cpuset);
    hwloc_bitmap_singlify(cpuset);
#endif

    /* And try to bind ourself there.  */
    if (hwloc_set_cpubind(topology, cpuset, HWLOC_CPUBIND_THREAD)) {
      char *str = NULL;
#if !defined(HWLOC_BITMAP_H)
      hwloc_cpuset_asprintf(&str, obj->cpuset);
#else
      hwloc_bitmap_asprintf(&str, obj->cpuset);
#endif
      printf("Couldn't bind to cpuset %s\n", str);
      free(str);
    }

    /* Get the number at Proc level */
    cpu = obj->children[0]->os_index;

    /* Free our cpuset copy */
#if !defined(HWLOC_BITMAP_H)
    hwloc_cpuset_free(cpuset);
#else
    hwloc_bitmap_free(cpuset);
#endif

    /* Destroy topology object.  */
    hwloc_topology_destroy(topology);
  }
#else /* WITH_HWLOC */
#ifdef X_ARCHpower_ibm_aix
  {
    tid_t self_ktid = thread_self ();

    bindprocessor(BINDTHREAD, self_ktid, cpu);
  }
#elif (defined X_ARCHalpha_compaq_osf1)
  {
    bind_to_cpu_id(getpid(), cpu, 0);
  }
#elif (defined X_ARCHi686_pc_linux)

#ifndef X_ARCHi686_mac
  {
    cpu_set_t mask;
    CPU_ZERO(&mask);
    CPU_SET(cpu, &mask);

#ifdef HAVE_OLD_SCHED_SETAFFINITY
    if(sched_setaffinity(0,&mask) < 0)
#else /* HAVE_OLD_SCHED_SETAFFINITY */
    if(sched_setaffinity(0,sizeof(mask),&mask) < 0)
#endif /* HAVE_OLD_SCHED_SETAFFINITY */
      {
  perror("sched_setaffinity");
  EXIT(MOD_SOPALIN, INTERNAL_ERR);
      }
  }
#else /* X_ARCHi686_mac */
  {
    thread_affinity_policy_data_t ap;
    int                           ret;

    ap.affinity_tag = 1; /* non-null affinity tag */
    ret = thread_policy_set(
          mach_thread_self(),
          THREAD_AFFINITY_POLICY,
          (integer_t*) &ap,
          THREAD_AFFINITY_POLICY_COUNT
          );
    if(ret != 0)
      {
  perror("thread_policy_set");
  EXIT(MOD_SOPALIN, INTERNAL_ERR);
      }
  }
#endif /* X_ARCHi686_mac */
#endif /* X_ARCHxxx      */
#endif /* WITH_HWLOC     */
#endif /* MARCEL         */

  return cpu;
}
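
With a bitmap-era hwloc (roughly 1.1 and later), the WITH_HWLOC branch above boils down to a handful of calls. A minimal sketch, without the legacy cpuset API or the error reporting:

#include <hwloc.h>

/* Bind the calling thread to core `cpu`, one logical PU only. */
static int bind_thread_hwloc(int cpu)
{
    hwloc_topology_t topo;
    hwloc_topology_init(&topo);
    hwloc_topology_load(topo);

    hwloc_obj_t core = hwloc_get_obj_by_type(topo, HWLOC_OBJ_CORE, cpu);
    int rc = -1;
    if (core) {
        hwloc_cpuset_t set = hwloc_bitmap_dup(core->cpuset);
        hwloc_bitmap_singlify(set);  /* one PU only, in case of SMT */
        rc = hwloc_set_cpubind(topo, set, HWLOC_CPUBIND_THREAD);
        hwloc_bitmap_free(set);
    }
    hwloc_topology_destroy(topo);
    return rc;
}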
Example #10
/** PsychRealtimePriority: Temporarily boost priority to THREAD_TIME_CONSTRAINT_POLICY.
    PsychRealtimePriority(true) enables realtime-scheduling (like Priority(9) would do in Matlab).
    PsychRealtimePriority(false) restores scheduling to the state before last invocation of PsychRealtimePriority(true),
    it undoes whatever the previous switch did.

    We switch to RT scheduling during PsychGetMonitorRefreshInterval() and a few other timing tests in
    PsychOpenWindow() to reduce measurement jitter caused by possible interference of other tasks.
*/
psych_bool PsychRealtimePriority(psych_bool enable_realtime)
{
    psych_bool				isError;
    thread_policy_flavor_t	flavorConstant;
    int						kernError;
    task_t					threadID;
    thread_policy_t			threadPolicy;
    static thread_policy_t	old_threadPolicy;
    mach_msg_type_number_t	policyCount, policyCountFilled;
    static mach_msg_type_number_t	old_policyCountFilled;
    boolean_t				isDefault;
    
    static psych_bool old_enable_realtime = FALSE;
    static psych_bool oldModeWasStandard = FALSE;
    
    if (old_enable_realtime == enable_realtime) {
        // No transition with respect to previous state -> Nothing to do.
        return(TRUE);
    }
    
    // Transition requested:
    old_enable_realtime = enable_realtime;
    
    // Determine our threadID:
    threadID = mach_thread_self();
    
    if (enable_realtime) {
        // Transition to realtime requested:

        // Get current scheduling policy and its settings and back it up for later restore:
        old_threadPolicy = (thread_policy_t) malloc(sizeof(thread_time_constraint_policy_data_t));
        policyCount = THREAD_TIME_CONSTRAINT_POLICY_COUNT;
        old_policyCountFilled = policyCount;
        isDefault = FALSE;
        // Check whether STANDARD_POLICY is active and, if so, query its settings:
        kernError = thread_policy_get(threadID, THREAD_STANDARD_POLICY, old_threadPolicy, &old_policyCountFilled, &isDefault);
        if (kernError) {
            // Failed!
            old_enable_realtime = FALSE;
            free(old_threadPolicy);
			printf("PsychRealtimePriority: ERROR! COULDN'T QUERY CURRENT SCHEDULING SETTINGS!!!\n");
            return(FALSE);
        }
        
        // oldModeWasStandard == TRUE --> We need to revert to STANDARD POLICY later...
        oldModeWasStandard = !isDefault;

        // printf("PRE-RT: CURRENTLY IN %s mode\n", oldModeWasStandard ? "STANDARD" : "REALTIME");

		if (!oldModeWasStandard) {
			// We are already RT scheduled. Backup settings for later switch-back:
			policyCount = THREAD_TIME_CONSTRAINT_POLICY_COUNT;
			old_policyCountFilled = policyCount;
			isDefault = FALSE;
			// Query the current TIME_CONSTRAINT settings and back them up for the later switch-back:
			kernError = thread_policy_get(threadID, THREAD_TIME_CONSTRAINT_POLICY, old_threadPolicy, &old_policyCountFilled, &isDefault);
			if (kernError) {
				// Failed!
				old_enable_realtime = FALSE;
				free(old_threadPolicy);
				printf("PsychRealtimePriority: ERROR! COULDN'T QUERY CURRENT RT SCHEDULING SETTINGS!!!\n");
				return(FALSE);
			}
		}

		// Switch to our ultra-high priority realtime mode: Guaranteed up to 3 msecs of uninterrupted
		// runtime as soon as we want to run: Perfect for swap completion timestamping in refresh rate
		// calibration - our only use-case:
		PsychSetThreadPriority(NULL, 10, 2);
    }
    else {
        // Transition from RT to Non-RT scheduling requested: We just reestablish the backed-up old
        // policy:
		kernError = thread_policy_set(threadID, (oldModeWasStandard) ? THREAD_STANDARD_POLICY : THREAD_TIME_CONSTRAINT_POLICY, old_threadPolicy, old_policyCountFilled);
		if (kernError) {
			// Failed!
			old_enable_realtime = TRUE;
			free((void*) old_threadPolicy);
			
			printf("PsychRealtimePriority: ERROR! COULDN'T SWITCH BACK TO NON-RT SCHEDULING!!!\n");
			fflush(NULL);
			return(FALSE);
		}
        
        // Successfully switched back to non-RT scheduling:
        free((void*) old_threadPolicy);
    }

    // Success.
    return(TRUE);
}
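
The switch to THREAD_TIME_CONSTRAINT_POLICY hidden inside PsychSetThreadPriority() follows a well-known pattern: convert the desired times from milliseconds to Mach absolute-time units via mach_timebase_info(), then call thread_policy_set(). A sketch with illustrative numbers (5 ms of computation inside a 10 ms window; these figures are not taken from the code above):

#include <stdint.h>
#include <mach/mach.h>
#include <mach/mach_time.h>

/* Request time-constraint (realtime) scheduling for the calling thread. */
static kern_return_t go_realtime(void)
{
    mach_timebase_info_data_t tb;
    mach_timebase_info(&tb);
    double abs_per_ms = 1e6 * (double) tb.denom / (double) tb.numer;

    thread_time_constraint_policy_data_t ttc;
    ttc.period      = 0;                             /* not strictly periodic */
    ttc.computation = (uint32_t)(5.0  * abs_per_ms); /* CPU time we need */
    ttc.constraint  = (uint32_t)(10.0 * abs_per_ms); /* deadline window */
    ttc.preemptible = FALSE;

    thread_port_t self = mach_thread_self();
    kern_return_t kr = thread_policy_set(self, THREAD_TIME_CONSTRAINT_POLICY,
                                         (thread_policy_t) &ttc,
                                         THREAD_TIME_CONSTRAINT_POLICY_COUNT);
    mach_port_deallocate(mach_task_self(), self);
    return kr;
}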
Example #11
long do_mach_syscall(void *cpu_env, int num, uint32_t arg1, uint32_t arg2, uint32_t arg3,
                uint32_t arg4, uint32_t arg5, uint32_t arg6, uint32_t arg7,
                uint32_t arg8)
{
    extern uint32_t mach_reply_port();

    long ret = 0;

    arg1 = tswap32(arg1);
    arg2 = tswap32(arg2);
    arg3 = tswap32(arg3);
    arg4 = tswap32(arg4);
    arg5 = tswap32(arg5);
    arg6 = tswap32(arg6);
    arg7 = tswap32(arg7);
    arg8 = tswap32(arg8);

    DPRINTF("mach syscall %d : " , num);

    switch(num) {
    /* see xnu/osfmk/mach/syscall_sw.h */
    case -26:
        DPRINTF("mach_reply_port()\n");
        ret = mach_reply_port();
        break;
    case -27:
        DPRINTF("mach_thread_self()\n");
        ret = mach_thread_self();
        break;
    case -28:
        DPRINTF("mach_task_self()\n");
        ret = mach_task_self();
        break;
    case -29:
        DPRINTF("mach_host_self()\n");
        ret = mach_host_self();
        break;
    case -31:
        DPRINTF("mach_msg_trap(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
                arg1, arg2, arg3, arg4, arg5, arg6, arg7);
        ret = target_mach_msg_trap((mach_msg_header_t *)arg1, arg2, arg3, arg4, arg5, arg6, arg7);
        break;
/* may need more translation if target arch is different from host */
#if (defined(TARGET_I386) && defined(__i386__)) || (defined(TARGET_PPC) && defined(__ppc__))
    case -33:
        DPRINTF("semaphore_signal_trap(0x%x)\n", arg1);
        ret = semaphore_signal_trap(arg1);
        break;
    case -34:
        DPRINTF("semaphore_signal_all_trap(0x%x)\n", arg1);
        ret = semaphore_signal_all_trap(arg1);
        break;
    case -35:
        DPRINTF("semaphore_signal_thread_trap(0x%x)\n", arg1, arg2);
        ret = semaphore_signal_thread_trap(arg1,arg2);
        break;
#endif
    case -36:
        DPRINTF("semaphore_wait_trap(0x%x)\n", arg1);
        extern int semaphore_wait_trap(int); // XXX: is there any header for that?
        ret = semaphore_wait_trap(arg1);
        break;
/* may need more translation if target arch is different from host */
#if (defined(TARGET_I386) && defined(__i386__)) || (defined(TARGET_PPC) && defined(__ppc__))
    case -37:
        DPRINTF("semaphore_wait_signal_trap(0x%x, 0x%x)\n", arg1, arg2);
        ret = semaphore_wait_signal_trap(arg1,arg2);
        break;
#endif
    case -43:
        DPRINTF("map_fd(0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n",
                arg1, arg2, arg3, arg4, arg5);
        ret = map_fd(arg1, arg2, (void*)arg3, arg4, arg5);
        tswap32s((uint32_t*)arg3);
        break;
/* may need more translation if target arch is different from host */
#if (defined(TARGET_I386) && defined(__i386__)) || (defined(TARGET_PPC) && defined(__ppc__))
    case -61:
        DPRINTF("syscall_thread_switch(0x%x, 0x%x, 0x%x)\n",
                arg1, arg2, arg3);
        ret = syscall_thread_switch(arg1, arg2, arg3);  // just a hint to the scheduler; can drop?
        break;
#endif
    case -89:
        DPRINTF("mach_timebase_info(0x%x)\n", arg1);
        struct mach_timebase_info info;
        ret = mach_timebase_info(&info);
        if(!is_error(ret))
        {
            struct mach_timebase_info *outInfo = (void*)arg1;
            outInfo->numer = tswap32(info.numer);
            outInfo->denom = tswap32(info.denom);
        }
        break;
    case -90:
        DPRINTF("mach_wait_until()\n");
        extern int mach_wait_until(uint64_t); // XXX: is there any header for that?
        ret = mach_wait_until(((uint64_t)arg2<<32) | (uint64_t)arg1);
        break;
    case -91:
        DPRINTF("mk_timer_create()\n");
        extern int mk_timer_create(); // XXX: is there any header for that?
        ret = mk_timer_create();
        break;
    case -92:
        DPRINTF("mk_timer_destroy()\n");
        extern int mk_timer_destroy(int); // XXX: is there any header for that?
        ret = mk_timer_destroy(arg1);
        break;
    case -93:
        DPRINTF("mk_timer_create()\n");
        extern int mk_timer_arm(int, uint64_t); // XXX: is there any header for that?
        ret = mk_timer_arm(arg1, ((uint64_t)arg3<<32) | (uint64_t)arg2);
        break;
    case -94:
        DPRINTF("mk_timer_cancel()\n");
        extern int mk_timer_cancel(int, uint64_t *); // XXX: is there any header for that?
        ret = mk_timer_cancel(arg1, (uint64_t *)arg2);
        if((!is_error(ret)) && arg2)
            tswap64s((uint64_t *)arg2);
        break;
    default:
        gemu_log("qemu: Unsupported mach syscall: %d(0x%x)\n", num, num);
        gdb_handlesig (cpu_env, SIGTRAP);
        exit(0);
        break;
    }
    return ret;
}
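
Several cases above (mach_wait_until, mk_timer_arm) rebuild a 64-bit value from two 32-bit syscall words, low word first. A tiny helper makes the convention explicit:

#include <stdint.h>

/* Join two 32-bit syscall words into one 64-bit value, low word first,
   matching the ((uint64_t)hi << 32) | lo expressions above. */
static inline uint64_t join64(uint32_t lo, uint32_t hi)
{
    return ((uint64_t) hi << 32) | (uint64_t) lo;
}

/* e.g. the mach_wait_until case is join64(arg1, arg2),
   and the mk_timer_arm case is join64(arg2, arg3). */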
Example #12
void *mach_profile_listener(void *arg)
{
    (void)arg;
    int i;
    const int max_size = 512;
    attach_exception_port(mach_thread_self());
#ifdef LIBOSXUNWIND
    mach_profiler_thread = mach_thread_self();
#endif
    mig_reply_error_t *bufRequest = (mig_reply_error_t *) malloc(max_size);
    while (1) {
        kern_return_t ret = mach_msg(&bufRequest->Head, MACH_RCV_MSG,
                                     0, max_size, profile_port,
                                     MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
        HANDLE_MACH_ERROR("mach_msg", ret);
        // sample each thread, round-robin style in reverse order
        // (so that thread zero gets notified last)
        for (i = jl_n_threads; i-- > 0; ) {
            // if there is no space left, break early
            if (bt_size_cur >= bt_size_max - 1)
                break;

            unw_context_t *uc;
            jl_thread_suspend_and_get_state(i, &uc, -1);

#ifdef LIBOSXUNWIND
            /*
             *  Unfortunately, compact unwind info is incorrectly generated for quite a number
             *  of libraries by quite a large number of compilers. We can fall back to DWARF
             *  unwind info in some cases, but in many others (especially for libraries not
             *  compiled in debug mode) only the compact unwind info may be available. Even more
             *  unfortunately, there is no way to detect such bogus compact unwind info other
             *  than noticing the resulting segfault. What we do here is ugly, but necessary
             *  until the compact unwind info situation improves: we try the compact unwind
             *  info and, if that results in a segfault, retry with DWARF info. Note that in a
             *  small number of cases this may produce bogus stack traces, but at least the
             *  topmost entry will always be correct, and the number of cases in which this is
             *  an issue is rather small. Other than that, this implementation is not incorrect:
             *  the other thread is paused while we profile it, and during stack unwinding we
             *  only ever read memory, never write it.
             */

            forceDwarf = 0;
            unw_getcontext(&profiler_uc); // will resume from this point if the next lines segfault at any point

            if (forceDwarf == 0) {
                // Save the backtrace
                bt_size_cur += rec_backtrace_ctx((ptrint_t*)bt_data_prof + bt_size_cur, bt_size_max - bt_size_cur - 1, uc);
            }
            else if (forceDwarf == 1) {
                bt_size_cur += rec_backtrace_ctx_dwarf((ptrint_t*)bt_data_prof + bt_size_cur, bt_size_max - bt_size_cur - 1, uc);
            }
            else if (forceDwarf == -1) {
                jl_safe_printf("WARNING: profiler attempt to access an invalid memory location\n");
            }

            forceDwarf = -2;
#else
            bt_size_cur += rec_backtrace_ctx((ptrint_t*)bt_data_prof + bt_size_cur, bt_size_max - bt_size_cur - 1, uc);
#endif

            // Mark the end of this block with 0
            bt_data_prof[bt_size_cur++] = 0;

            // We're done! Resume the thread.
            jl_thread_resume(i, 0);

            if (running) {
                // Reset the alarm
                kern_return_t ret = clock_alarm(clk, TIME_RELATIVE, timerprof, profile_port);
                HANDLE_MACH_ERROR("clock_alarm", ret)
            }
        }
    }
}
Example #13
/*
 * This is the central function for every thread.
 * For each invocation, its role is set by (a pointer to) a stage_info_t.
 */
void *
manager_fn(void *arg)
{
	worker_info_t	*wp = (worker_info_t *) arg;
	stage_info_t	*sp = wp->stage;
	boolean_t	is_producer = (sp->stagenum == 0);
	long		iteration = 0;
	int		current_tag = 0;

	kern_return_t			ret;
	thread_extended_policy_data_t	epolicy;
	epolicy.timeshare = FALSE;
	ret = thread_policy_set(
			mach_thread_self(), THREAD_EXTENDED_POLICY,
			(thread_policy_t) &epolicy,
			THREAD_EXTENDED_POLICY_COUNT);
	if (ret != KERN_SUCCESS)
		printf("thread_policy_set(THREAD_EXTENDED_POLICY) returned %d\n", ret);
	
	/*
	 * If we're using affinity sets and we're a producer
	 * set our tag to our thread set number.
	 */
	if (affinity && is_producer) {
		affinity_set(wp->setnum);
		current_tag = wp->setnum;
	}

	DBG("Starting %s %d, stage: %d\n", sp->name, wp->setnum, sp->stagenum);

	/*
	 * Start barrier.
	 * The last thread to get here releases everyone and starts the timer.
	 */
	pthread_mutex_lock(&funnel);
	threads_ready++;
	if (threads_ready == threads) {
		pthread_mutex_unlock(&funnel);
		if (halting) {
			printf("  all threads ready for process %d, "
				"hit any key to start", getpid());
			fflush(stdout);
			(void) getchar();
		}
		pthread_cond_broadcast(&barrier);
		timer = mach_absolute_time();
	} else {
		pthread_cond_wait(&barrier, &funnel);
		pthread_mutex_unlock(&funnel);
	}

	do {
		work_t		*workp;

		/*
		 * Get a buffer from the input queue.
		 * Block if none.
		 * Quit if all work done.
		 */
		pthread_mutex_lock(&sp->input->mtx);
		while (1) {
			if (sp->work_todo == 0) {
				pthread_mutex_unlock(&sp->input->mtx);
				goto out;
			}
			workp = TAILQ_FIRST(&(sp->input->queue));
			if (workp != NULL)
				break;
			DBG("    %s[%d,%d] todo %d waiting for buffer\n",
				sp->name, wp->setnum, sp->stagenum, sp->work_todo);
			sp->input->waiters++;
			pthread_cond_wait(&sp->input->cnd, &sp->input->mtx);
			sp->input->waiters--;
		}
		TAILQ_REMOVE(&(sp->input->queue), workp, link);
		iteration = sp->work_todo--;
		pthread_mutex_unlock(&sp->input->mtx);

		if (is_producer) {
			workp->number = iteration;
			workp->tag = wp->setnum;
		} else {
			if (affinity && current_tag != workp->tag) {
				affinity_set(workp->tag);
				current_tag = workp->tag;
			}
		}

		DBG("  %s[%d,%d] todo %d work %p data %p\n",
			sp->name, wp->setnum, sp->stagenum, iteration, workp, workp->data);

		/* Do our stuff with the buffer */
		(void) sp->fn(workp->data, workp->isize);

		/*
		 * Place the buffer on the input queue of the next stage.
		 * Signal waiters if required.
		 */
		pthread_mutex_lock(&sp->output->mtx);
		TAILQ_INSERT_TAIL(&(sp->output->queue), workp, link);
		if (sp->output->waiters) {
			DBG("    %s[%d,%d] todo %d signaling work\n",
				sp->name, wp->setnum, sp->stagenum, iteration);
			pthread_cond_signal(&sp->output->cnd);
		}
		pthread_mutex_unlock(&sp->output->mtx);

	} while (1);

out:
	pthread_cond_broadcast(&sp->output->cnd);

	DBG("Ending %s[%d,%d]\n", sp->name, wp->setnum, sp->stagenum);

	return (void *) iteration;
}
Example #14
void
mono_threads_platform_register (MonoThreadInfo *info)
{
	info->native_handle = mach_thread_self ();
	mono_threads_install_dead_letter ();
}
Example #15
uint64_t current_thread() {
    uint64_t thread_port = find_port(mach_thread_self());
    return rk64(thread_port + koffset(KSTRUCT_OFFSET_IPC_PORT_IP_KOBJECT));
}
Example #16
void *mach_profile_listener(void *arg)
{
    (void)arg;
    int max_size = 512;
    mach_profiler_thread = mach_thread_self();
    mig_reply_error_t *bufRequest = (mig_reply_error_t *) malloc(max_size);
    while (1) {
        kern_return_t ret = mach_msg(&bufRequest->Head, MACH_RCV_MSG,
                                     0, max_size, profile_port,
                                     MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
        HANDLE_MACH_ERROR("mach_msg",ret);
        if (bt_size_cur < bt_size_max) {
            kern_return_t ret;
            // Suspend the thread so we may safely sample it
            ret = thread_suspend(main_thread);
            HANDLE_MACH_ERROR("thread_suspend",ret);

            // Do the actual sampling
            unsigned int count = MACHINE_THREAD_STATE_COUNT;
            x86_thread_state64_t state;

            // Get the state of the suspended thread
            ret = thread_get_state(main_thread,x86_THREAD_STATE64,(thread_state_t)&state,&count);
            HANDLE_MACH_ERROR("thread_get_state",ret);

            // Initialize the unwind context with the suspend thread's state
            unw_context_t uc;
            memset(&uc,0,sizeof(unw_context_t));
            memcpy(&uc,&state,sizeof(x86_thread_state64_t));

            /*
             *  Unfortunately, compact unwind info is incorrectly generated for quite a number
             *  of libraries by quite a large number of compilers. We can fall back to DWARF
             *  unwind info in some cases, but in many others (especially for libraries not
             *  compiled in debug mode) only the compact unwind info may be available. Even more
             *  unfortunately, there is no way to detect such bogus compact unwind info other
             *  than noticing the resulting segfault. What we do here is ugly, but necessary
             *  until the compact unwind info situation improves: we try the compact unwind
             *  info and, if that results in a segfault, retry with DWARF info. Note that in a
             *  small number of cases this may produce bogus stack traces, but at least the
             *  topmost entry will always be correct, and the number of cases in which this is
             *  an issue is rather small. Other than that, this implementation is not incorrect:
             *  the other thread is paused while we profile it, and during stack unwinding we
             *  only ever read memory, never write it.
             */

            forceDwarf = 0;
            unw_getcontext(&profiler_uc);

            if (forceDwarf == 0) {
                // Save the backtrace
                bt_size_cur += rec_backtrace_ctx((ptrint_t*)bt_data_prof+bt_size_cur, bt_size_max-bt_size_cur-1, &uc);
            }
            else if (forceDwarf == 1) {
                bt_size_cur += rec_backtrace_ctx_dwarf((ptrint_t*)bt_data_prof+bt_size_cur, bt_size_max-bt_size_cur-1, &uc);
            }
            else if (forceDwarf == -1) {
                JL_PRINTF(JL_STDERR, "Warning: Profiler attempt to access an invalid memory location\n");
            }

            forceDwarf = -2;

            // Mark the end of this block with 0
            bt_data_prof[bt_size_cur] = 0;
            bt_size_cur++;

            // We're done! Resume the thread.
            ret = thread_resume(main_thread);
            HANDLE_MACH_ERROR("thread_resume",ret)

            if (running) {
                // Reset the alarm
                ret = clock_alarm(clk, TIME_RELATIVE, timerprof, profile_port);
                HANDLE_MACH_ERROR("clock_alarm",ret)
            }
        }
    }
}
Example #17
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    bool						isError;
    thread_policy_flavor_t		flavorConstant;
    int							kernError;
    task_t						threadID;
    thread_policy_t				threadPolicy;
    mach_msg_type_number_t		policyCount, policyCountFilled;
    boolean_t					isDefault;
    char						commandString[COMMAND_STRING_LENGTH];
    
    
    threadID= mach_thread_self();
    kernError=0;
    
    //get the policy flavor constant specified by the user and the getDefault argument
    if(nrhs<1)
        mexErrMsgTxt("MachGetPriorityMex requires at least one argument.  See help MachGetPriorityMex.");
    if(!mxIsChar(prhs[0]))
        mexErrMsgTxt("First input argument is not a string.  See help MachGetPriorityMex.");
    mxGetString(prhs[0], commandString, COMMAND_STRING_LENGTH);
    isError=GetFlavorConstantFromFlavorString(commandString, mxGetM(prhs[0]) * mxGetN(prhs[0]), &flavorConstant);  //case sensitive.  
    if(isError)
        mexErrMsgTxt("Unrecognized command.  See help MachGetPriorityMex.");


    //branch according to the first argument
    switch(flavorConstant){
        case THREAD_STANDARD_POLICY:
            if(nrhs>2)
                mexErrMsgTxt("Extra argument(s) detected.  See help MachGetPriorityMex.");
            if(nrhs==2){
                if(!mxIsChar(prhs[1]))
                    mexErrMsgTxt("Expecting string in second argument.  See help MachGetPriorityMex.");
                mxGetString(prhs[1], commandString, COMMAND_STRING_LENGTH);
                commandString[COMMAND_STRING_LENGTH-1]= '\0';  //guarantee strcmp an end of string character
                if(strcmp(commandString, "default"))
                    mexErrMsgTxt("Unrecognized second argument.  See help MachGetPriorityMex.");
            }
            threadPolicy=(thread_policy_t)malloc(sizeof(thread_standard_policy_data_t));
            policyCount=THREAD_STANDARD_POLICY_COUNT;
            policyCountFilled=policyCount;
            isDefault=TRUE;
            kernError=thread_policy_get(threadID, THREAD_STANDARD_POLICY, threadPolicy, &policyCountFilled, &isDefault);
            if (kernError==0) kernError=thread_policy_set(threadID, THREAD_STANDARD_POLICY, threadPolicy, policyCountFilled);
            free((void*)threadPolicy);  /* release the queried policy buffer */
            break; 
        case THREAD_TIME_CONSTRAINT_POLICY:
            if(nrhs==1)
                mexErrMsgTxt("Missing argument detected.  See help MachGetPriorityMex.");
            else if(nrhs==2){
                if(!mxIsChar(prhs[1]))
                    mexErrMsgTxt("Expecting string in second argument.  See help MachGetPriorityMex.");
                mxGetString(prhs[1], commandString, COMMAND_STRING_LENGTH);
                commandString[COMMAND_STRING_LENGTH-1]= '\0';  //guarantee strcmp an end of string character
                if(strcmp(commandString, "default"))
                    mexErrMsgTxt("Unrecognized second argument.  See help MachGetPriorityMex.");
                threadPolicy=(thread_policy_t)malloc(sizeof(thread_time_constraint_policy_data_t));
                policyCount=THREAD_TIME_CONSTRAINT_POLICY_COUNT;
                policyCountFilled=policyCount;
                isDefault=TRUE;
                kernError=thread_policy_get(threadID, THREAD_TIME_CONSTRAINT_POLICY, threadPolicy, &policyCountFilled, &isDefault);
                if (kernError==0) kernError=thread_policy_set(threadID, THREAD_TIME_CONSTRAINT_POLICY, threadPolicy, policyCountFilled);
                free((void*)threadPolicy);        
                break;
            }else if(nrhs != 5)
                mexErrMsgTxt("Incorrect number of arguments.  See help MachGetPriorityMex.");
            else{
                if(! (mxIsDouble(prhs[1]) && mxGetM(prhs[1]) * mxGetN(prhs[1]) == 1)) 
                    mexErrMsgTxt("Expected double in second argument.  See help MachSetPriorityMex.");
                if(! (mxIsDouble(prhs[2]) && mxGetM(prhs[2]) * mxGetN(prhs[2]) == 1)) 
                    mexErrMsgTxt("Expected double in third argument.  See help MachSetPriorityMex.");
                if(! (mxIsDouble(prhs[3]) && mxGetM(prhs[3]) * mxGetN(prhs[3]) == 1)) 
                    mexErrMsgTxt("Expected double in fourth argument.  See help MachGetPriorityMex.");
                if(!((mxIsDouble(prhs[4]) || mxIsLogical(prhs[4])) && (mxGetM(prhs[4]) * mxGetN(prhs[4]) == 1)))
                    mexErrMsgTxt("Expected double or logical in fifth argument.  See help MachSetPriorityMex.");
                threadPolicy=(thread_policy_t)malloc(sizeof(thread_time_constraint_policy_data_t));
                ((thread_time_constraint_policy_t)threadPolicy)->period=(uint32_t)mxGetPr(prhs[1])[0];
                ((thread_time_constraint_policy_t)threadPolicy)->computation=(uint32_t)mxGetPr(prhs[2])[0];
                ((thread_time_constraint_policy_t)threadPolicy)->constraint=(uint32_t)mxGetPr(prhs[3])[0];
                ((thread_time_constraint_policy_t)threadPolicy)->preemptible= (boolean_t)(mxIsDouble(prhs[4]) ? mxGetPr(prhs[4])[0] : mxGetLogicals(prhs[4])[0]);
                policyCount=THREAD_TIME_CONSTRAINT_POLICY_COUNT;
                policyCountFilled=policyCount;
                kernError=thread_policy_set(threadID, THREAD_TIME_CONSTRAINT_POLICY, threadPolicy, policyCountFilled);
                free((void*)threadPolicy);
                break;
            }
        case THREAD_PRECEDENCE_POLICY:
            if(nrhs>2)
                mexErrMsgTxt("Extra argument(s) detected.  See help MachGetPriorityMex.");
            if(nrhs<2)
                mexErrMsgTxt("Missing argument detected.  See help MachGetPriorityMex.");
            if(mxIsChar(prhs[1])){  //set the default
                mxGetString(prhs[1], commandString, COMMAND_STRING_LENGTH);
                commandString[COMMAND_STRING_LENGTH-1]= '\0';  //guarantee strcmp an end of string character
                if(strcmp(commandString, "default"))
                    mexErrMsgTxt("Unrecognized second argument.  See help MachGetPriorityMex.");
                threadPolicy=(thread_policy_t)malloc(sizeof(thread_precedence_policy_data_t));
                policyCount=THREAD_PRECEDENCE_POLICY_COUNT;
                policyCountFilled=policyCount;
                isDefault=TRUE;
                kernError=thread_policy_get(threadID, THREAD_PRECEDENCE_POLICY, threadPolicy, &policyCountFilled, &isDefault);
                if (kernError==0) kernError=thread_policy_set(threadID, THREAD_PRECEDENCE_POLICY, threadPolicy, policyCountFilled);
                free((void*)threadPolicy);
                break; 
            }else if(mxIsDouble(prhs[1]) && mxGetM(prhs[1]) * mxGetN(prhs[1]) == 1){  //set a specified value
                threadPolicy=(thread_policy_t)malloc(sizeof(thread_precedence_policy_data_t));
                ((thread_precedence_policy_t)threadPolicy)->importance=(integer_t)mxGetPr(prhs[1])[0];
                policyCount=THREAD_PRECEDENCE_POLICY_COUNT;
                policyCountFilled=policyCount;
                kernError=thread_policy_set(threadID, THREAD_PRECEDENCE_POLICY, threadPolicy, policyCountFilled);
                free((void*)threadPolicy);  /* release the policy buffer */
            }
    }

    // Check for and report errors in thread_policy_set:
    if (kernError!=0) {
        mexErrMsgTxt("ERROR: Failed to set requested thread scheduling policy! thread_policy_set() failed!");
    }
}
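
The get-then-set "restore default" idiom repeated for each flavor above can be isolated. A sketch for THREAD_PRECEDENCE_POLICY (the other flavors work the same way; note get_default is set to TRUE so the kernel returns the default rather than the current values):

#include <mach/mach.h>
#include <mach/thread_policy.h>

/* Reset the calling thread's precedence policy to the system default. */
static kern_return_t reset_precedence_to_default(void)
{
    thread_precedence_policy_data_t prec;
    mach_msg_type_number_t count = THREAD_PRECEDENCE_POLICY_COUNT;
    boolean_t get_default = TRUE;  /* fetch default, not current, values */

    thread_port_t self = mach_thread_self();
    kern_return_t kr = thread_policy_get(self, THREAD_PRECEDENCE_POLICY,
                                         (thread_policy_t) &prec,
                                         &count, &get_default);
    if (kr == KERN_SUCCESS)
        kr = thread_policy_set(self, THREAD_PRECEDENCE_POLICY,
                               (thread_policy_t) &prec, count);
    mach_port_deallocate(mach_task_self(), self);
    return kr;
}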
Example #18
void
default_pager_initialize(
	mach_port_t host_port)
{
	kern_return_t		kr;
	static char		here[] = "default_pager_initialize";

	/* 
	 * Initial thread and task ports.
	 */
	default_pager_self = mach_task_self();
	default_pager_default_thread = mach_thread_self();

	PRINTF_LOCK_INIT();

	/*
	 * Make ourselves unswappable.
	 */
	kr = task_swappable(default_pager_host_port, default_pager_self, FALSE);
	if (kr != KERN_SUCCESS)
		dprintf(("task_swappable failed 0x%x %s\n",
			 kr, mach_error_string(kr)));

	/*
	 * Exported DMM port.
	 */
	kr = mach_port_allocate(default_pager_self, MACH_PORT_RIGHT_RECEIVE,
				&default_pager_default_port);
	if (kr != KERN_SUCCESS)
		Panic("default port");

	/*
	 * Port sets.
	 */
	kr = mach_port_allocate(default_pager_self, MACH_PORT_RIGHT_PORT_SET,
				&default_pager_internal_set);
	if (kr != KERN_SUCCESS)
		Panic("internal set");

	kr = mach_port_allocate(default_pager_self, MACH_PORT_RIGHT_PORT_SET,
				&default_pager_external_set);
	if (kr != KERN_SUCCESS)
		Panic("external set");

	/*
	 * Export pager interfaces.
	 */
#ifdef	USER_PAGER
	if ((kr = netname_check_in(name_server_port, "UserPager",
				   default_pager_self,
				   default_pager_default_port))
	    != KERN_SUCCESS) {
		dprintf(("netname_check_in returned 0x%x %s\n",
			 kr, mach_error_string(kr)));
		exit(1);
	}
#else	/* USER_PAGER */
	{
		int clsize;
		memory_object_t DMM;

		/* get a send right for vm_set_default_memory_manager */
		kr = mach_port_insert_right(default_pager_self,
					    default_pager_default_port,
					    default_pager_default_port,
					    MACH_MSG_TYPE_MAKE_SEND);
		DMM = default_pager_default_port;
		clsize = (vm_page_size << vstruct_def_clshift);

		kr = host_default_memory_manager(host_port, &DMM, clsize);
		if ((kr != KERN_SUCCESS) || (DMM != MACH_PORT_NULL))
			Panic("default memory manager");

		/* release the extra send right */
		(void) mach_port_mod_refs(default_pager_self,
					  default_pager_default_port,
					  MACH_PORT_RIGHT_SEND,
					  -1);
	}
#endif	/* USER_PAGER */

	kr = mach_port_allocate(default_pager_self, MACH_PORT_RIGHT_PORT_SET,
				&default_pager_default_set);
	if (kr != KERN_SUCCESS)
		Panic("default set");

	kr = mach_port_move_member(default_pager_self,
				   default_pager_default_port,
				   default_pager_default_set);
	if (kr != KERN_SUCCESS)
		Panic("set up default");

	/*
	 * Arrange for wiring privileges.
	 */
	wire_setup(host_port);

	/*
	 * Find out how many CPUs we have, to determine the number
	 * of threads to create.
	 */
	if (default_pager_internal_count == 0) {
		host_basic_info_data_t h_info;
		mach_msg_type_number_t h_info_count;

		h_info_count = HOST_BASIC_INFO_COUNT;
		(void) host_info(host_port, HOST_BASIC_INFO,
				(host_info_t) &h_info, &h_info_count);

		/*
		 * Random computation to get more parallelism on
		 * multiprocessors.
		 */
		default_pager_internal_count = ((h_info.avail_cpus > 32)
						? 32
						: h_info.avail_cpus) / 4 + 3;
	}

	/*
	 * Vm variables.
	 */
	vm_page_mask = vm_page_size - 1;
	vm_page_shift = log2(vm_page_size);

	/*
	 * List of all vstructs.
	 */
	VSL_LOCK_INIT();
	queue_init(&vstruct_list.vsl_queue);
	queue_init(&vstruct_list.vsl_leak_queue);
	vstruct_list.vsl_count = 0;

	VSTATS_LOCK_INIT(&global_stats.gs_lock);

	bs_initialize();
}
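
The CPU-count probe near the end uses the standard host_info() call; stripped of the pager-specific thread-count heuristic, it looks like this:

#include <mach/mach.h>

/* Number of available CPUs as reported by the Mach host. */
static int available_cpus(void)
{
    host_name_port_t host = mach_host_self();
    host_basic_info_data_t info;
    mach_msg_type_number_t count = HOST_BASIC_INFO_COUNT;
    kern_return_t kr = host_info(host, HOST_BASIC_INFO,
                                 (host_info_t) &info, &count);
    mach_port_deallocate(mach_task_self(), host);
    return (kr == KERN_SUCCESS) ? info.avail_cpus : 1;
}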
Example #19
    resource()
    {
#if defined RUSAGE_THREAD
      struct rusage  ruse;
      struct rusage  ruse_thread;
      struct timeval utime;
    
      gettimeofday(&utime, NULL);
      getrusage(RUSAGE_SELF, &ruse);
      getrusage(RUSAGE_THREAD, &ruse_thread);
      
      __cpu_time = (double(ruse.ru_utime.tv_sec + ruse.ru_stime.tv_sec)
		    + 1e-6 * (ruse.ru_utime.tv_usec + ruse.ru_stime.tv_usec));
      __user_time = double(utime.tv_sec) + 1e-6 * utime.tv_usec;
      __thread_time = (double(ruse_thread.ru_utime.tv_sec + ruse_thread.ru_stime.tv_sec)
		       + 1e-6 * (ruse_thread.ru_utime.tv_usec + ruse_thread.ru_stime.tv_usec));
#elif defined HAVE_CLOCK_GETTIME
      struct rusage   ruse;
      struct timeval  utime;
      struct timespec tspec;
      
      gettimeofday(&utime, NULL);
      getrusage(RUSAGE_SELF, &ruse);
#if defined CLOCK_THREAD_CPUTIME_ID
      ::clock_gettime(CLOCK_THREAD_CPUTIME_ID, &tspec);
#else
      // get the current thread
      pthread_t pth=pthread_self();
      // get the clock_id associated to the current thread
      clockid_t clock_id;
      pthread_getcpuclockid(pth, &clock_id);
      // get the timespec associated to the thread clock
      ::clock_gettime(clock_id, &tspec);
#endif
      
      __cpu_time = (double(ruse.ru_utime.tv_sec + ruse.ru_stime.tv_sec)
		    + 1e-6 * (ruse.ru_utime.tv_usec + ruse.ru_stime.tv_usec));
      __user_time = double(utime.tv_sec) + 1e-6 * utime.tv_usec;
      __thread_time = double(tspec.tv_sec) + 1e-9 * tspec.tv_nsec;
#elif defined HAVE_THREAD_INFO
      struct timeval utime;
      struct rusage  ruse;
      struct thread_basic_info th_info;
      mach_msg_type_number_t th_info_count = THREAD_BASIC_INFO_COUNT;
      
      gettimeofday(&utime, NULL);
      getrusage(RUSAGE_SELF, &ruse);
      thread_info(mach_thread_self(), THREAD_BASIC_INFO, (thread_info_t)&th_info, &th_info_count);
      
      __cpu_time = (double(ruse.ru_utime.tv_sec + ruse.ru_stime.tv_sec)
		    + 1e-6 * (ruse.ru_utime.tv_usec + ruse.ru_stime.tv_usec));
      __user_time = double(utime.tv_sec) + 1e-6 * utime.tv_usec;
      __thread_time = (double(th_info.user_time.seconds + th_info.system_time.seconds)
		       + 1e-6 * (th_info.user_time.microseconds + th_info.system_time.microseconds));
#else
      struct rusage  ruse;
      struct timeval utime;
      
      gettimeofday(&utime, NULL);
      getrusage(RUSAGE_SELF, &ruse);
      
      __cpu_time = (double(ruse.ru_utime.tv_sec + ruse.ru_stime.tv_sec)
		    + 1e-6 * (ruse.ru_utime.tv_usec + ruse.ru_stime.tv_usec));
      __user_time = double(utime.tv_sec) + 1e-6 * utime.tv_usec;
      __thread_time = __cpu_time;
#endif
    }
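
On POSIX systems that lack RUSAGE_THREAD but provide the thread CPU clock, the fallback shown in the HAVE_CLOCK_GETTIME branch stands alone; a minimal sketch:

#include <pthread.h>
#include <time.h>

/* Per-thread CPU time in seconds via the calling thread's CPU clock. */
static double thread_cpu_seconds(void)
{
    struct timespec ts;
#if defined(CLOCK_THREAD_CPUTIME_ID)
    clock_gettime(CLOCK_THREAD_CPUTIME_ID, &ts);
#else
    clockid_t cid;
    pthread_getcpuclockid(pthread_self(), &cid);
    clock_gettime(cid, &ts);
#endif
    return (double) ts.tv_sec + 1e-9 * ts.tv_nsec;
}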