Example #1
static int SetThreadToPriority(pthread_t thread, UInt32 inPriority, Boolean inIsFixed, UInt64 period, UInt64 computation, UInt64 constraint)
{
    if (inPriority == 96) {
        // REAL-TIME / TIME-CONSTRAINT THREAD
        thread_time_constraint_policy_data_t theTCPolicy;
        theTCPolicy.period = period;
        theTCPolicy.computation = computation;
        theTCPolicy.constraint = constraint;
        theTCPolicy.preemptible = true;
        kern_return_t res = thread_policy_set(pthread_mach_thread_np(thread), THREAD_TIME_CONSTRAINT_POLICY, (thread_policy_t)&theTCPolicy, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
        return (res == KERN_SUCCESS) ? 0 : -1;
    } else {
        // OTHER THREADS
        thread_extended_policy_data_t theFixedPolicy;
        thread_precedence_policy_data_t thePrecedencePolicy;
        SInt32 relativePriority;
        
        // [1] SET FIXED / NOT FIXED
        theFixedPolicy.timeshare = !inIsFixed;
        thread_policy_set(pthread_mach_thread_np(thread), THREAD_EXTENDED_POLICY, (thread_policy_t)&theFixedPolicy, THREAD_EXTENDED_POLICY_COUNT);
        
        // [2] SET PRECEDENCE
        // N.B.: We expect that if thread A created thread B, and the program wishes to change
        // the priority of thread B, then the call to change the priority of thread B must be
        // made by thread A.
        // This assumption allows us to use pthread_self() to correctly calculate the priority
        // of the feeder thread (since precedency policy's importance is relative to the
        // spawning thread's priority.)
        relativePriority = inPriority - GetThreadSetPriority(pthread_self());
        
        thePrecedencePolicy.importance = relativePriority;
        kern_return_t res = thread_policy_set(pthread_mach_thread_np(thread), THREAD_PRECEDENCE_POLICY, (thread_policy_t)&thePrecedencePolicy, THREAD_PRECEDENCE_POLICY_COUNT);
        return (res == KERN_SUCCESS) ? 0 : -1;
    }
}
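The period, computation and constraint arguments are in Mach absolute-time units, not nanoseconds, so callers convert first. A minimal usage sketch, assuming a 10 ms cycle and illustrative duty-cycle figures (the helper name and percentages are assumptions, not part of the example above):

#include <mach/mach_time.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdint.h>

/* Assumed helper: convert nanoseconds to Mach absolute-time ticks. */
static uint64_t nanos_to_abs(uint64_t ns)
{
    mach_timebase_info_data_t tb;
    mach_timebase_info(&tb);               /* ticks = ns * denom / numer */
    return ns * tb.denom / tb.numer;
}

/* Request the time-constraint policy for a thread driving a 10 ms cycle. */
static void make_render_thread_realtime(pthread_t thread)
{
    uint64_t period      = nanos_to_abs(10 * 1000 * 1000);  /* 10 ms cycle   */
    uint64_t computation = period / 2;                       /* ~50% duty     */
    uint64_t constraint  = (period * 85) / 100;              /* done by 85%   */
    SetThreadToPriority(thread, 96, true, period, computation, constraint);
}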
Example #2
bool ph_thread_set_affinity(ph_thread_t *me, int affinity)
{
#ifdef HAVE_PTHREAD_SETAFFINITY_NP
# ifdef __linux__
  cpu_set_t set;
# else /* FreeBSD */
  cpuset_t set;
# endif

  CPU_ZERO(&set);
  CPU_SET(affinity, &set);

  return pthread_setaffinity_np(me->thr, sizeof(set), &set) == 0;
#elif defined(__APPLE__)
  thread_affinity_policy_data_t data;

  data.affinity_tag = affinity + 1;
  return thread_policy_set(pthread_mach_thread_np(me->thr),
      THREAD_AFFINITY_POLICY,
      (thread_policy_t)&data, THREAD_AFFINITY_POLICY_COUNT) == 0;
#elif defined(HAVE_CPUSET_SETAFFINITY)
  /* untested bsdish */
  cpuset_t set;

  CPU_ZERO(&set);
  CPU_SET(affinity, &set);
  return cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_TID, -1, sizeof(set), &set) == 0;
#elif defined(HAVE_PROCESSOR_BIND)
  return processor_bind(P_LWPID, me->lwpid, affinity, NULL) == 0;
#endif
  return true;
}
Example #3
void 
o_thrSuspend(o_thr_t thr) {
    mach_port_t mthread;

    mthread = pthread_mach_thread_np(thr);
    thread_suspend(mthread);
}
Example #4
static void pid_callback(int *const pid, int *const tid)
{
#if !_BT_LOG_MESSAGE_FORMAT_CONTAINS(PID, BT_LOG_MESSAGE_CTX_FORMAT)
	VAR_UNUSED(pid);
#else
	#if defined(_WIN32) || defined(_WIN64)
	*pid = GetCurrentProcessId();
	#else
	*pid = getpid();
	#endif
#endif

#if !_BT_LOG_MESSAGE_FORMAT_CONTAINS(TID, BT_LOG_MESSAGE_CTX_FORMAT)
	VAR_UNUSED(tid);
#else
	#if defined(_WIN32) || defined(_WIN64)
	*tid = GetCurrentThreadId();
	#elif defined(__CYGWIN__)
	pthread_t thr = pthread_self();
	*tid = (int)pthread_getsequence_np(&thr);
	#elif defined(__sun__)
	*tid = (int)pthread_self();
	#elif defined(__ANDROID__)
	*tid = gettid();
	#elif defined(__linux__)
	*tid = syscall(SYS_gettid);
	#elif defined(__MACH__)
	*tid = (int)pthread_mach_thread_np(pthread_self());
	#else
		#error "Platform not supported"
	#endif
#endif
}
Example #5
static void allocate_segv_handler()
{
#ifdef JULIA_ENABLE_THREADING
    arraylist_new(&suspended_threads, jl_n_threads);
#endif
    pthread_t thread;
    pthread_attr_t attr;
    kern_return_t ret;
    mach_port_t self = mach_task_self();
    ret = mach_port_allocate(self, MACH_PORT_RIGHT_RECEIVE, &segv_port);
    HANDLE_MACH_ERROR("mach_port_allocate",ret);
    ret = mach_port_insert_right(self, segv_port, segv_port, MACH_MSG_TYPE_MAKE_SEND);
    HANDLE_MACH_ERROR("mach_port_insert_right",ret);
    // Alright, create a thread to serve as the listener for exceptions
    if (pthread_attr_init(&attr) != 0) {
        jl_error("pthread_attr_init failed");
    }
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    if (pthread_create(&thread, &attr, mach_segv_listener, NULL) != 0) {
        jl_error("pthread_create failed");
    }
    pthread_attr_destroy(&attr);
    for (int16_t tid = 0;tid < jl_n_threads;tid++) {
        attach_exception_port(pthread_mach_thread_np(jl_all_task_states[tid].system_id));
    }
}
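attach_exception_port() is defined elsewhere in the Julia runtime; a plausible sketch of such a helper, assuming the EXC_BAD_ACCESS mask and default behavior (the exact mask, behavior and state flavor are assumptions):

#include <mach/mach.h>

/* Hypothetical sketch: route EXC_BAD_ACCESS for one thread to segv_port. */
static void attach_exception_port_sketch(thread_act_t thread, mach_port_t segv_port)
{
    kern_return_t ret = thread_set_exception_ports(
        thread,
        EXC_MASK_BAD_ACCESS,      /* which exceptions to redirect             */
        segv_port,                /* the receive right allocated above        */
        EXCEPTION_DEFAULT,        /* deliver a catch_exception_raise message  */
        MACHINE_THREAD_STATE);    /* thread-state flavor to include           */
    if (ret != KERN_SUCCESS) {
        /* report the error, e.g. via HANDLE_MACH_ERROR in the code above */
    }
}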
Example #6
void 
o_thrResume(o_thr_t thr) {
    mach_port_t mthread;

    mthread = pthread_mach_thread_np(thr);
    thread_resume(mthread);
}
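Example #7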
void NaClSetCurrentMachThreadForThreadIndex(uint32_t nacl_thread_index) {
  /*
   * This implementation relies on the Mach port for the thread stored by the
   * pthread library, and assumes that the pthread library does not close and
   * re-acquire the Mach port for the thread. If that happens, Mach could
   * theoretically assign the port a different number in the process' port
   * table. This approach avoids having to deal with ownership of the port and
   * the system call overhead to obtain and deallocate it as would be the case
   * with mach_thread_self().
   *
   * When used by the Mach exception handler, this also assumes that the
   * thread port number when received for an exception will match the port
   * stored in the mach_threads table. This is guaranteed by how the kernel
   * coalesces ports in a single port namespace. (A task, or process, is a
   * single port namespace.)
   *
   * An alternative implementation that works on Mac OS X 10.6 or higher is to
   * use pthread_threadid_np() to obtain a thread ID to use as the key for
   * this thread map. Such thread IDs are unique system-wide. An exception
   * handler can find the thread ID for a Mach thread by calling thread_info()
   * with flavor THREAD_IDENTIFIER_INFO. This approach is not used here
   * because of the added system call overhead at exception time.
   */
  mach_port_t mach_thread = pthread_mach_thread_np(pthread_self());
  CHECK(mach_thread != MACH_PORT_NULL);

  DCHECK(nacl_thread_index > NACL_TLS_INDEX_INVALID &&
         nacl_thread_index < NACL_THREAD_MAX);
  DCHECK(mach_threads[nacl_thread_index] == MACH_PORT_NULL);
  DCHECK(NaClGetThreadIndexForMachThread(mach_thread) ==
         NACL_TLS_INDEX_INVALID);

  mach_threads[nacl_thread_index] = mach_thread;
}
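Example #8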
int nacl_thread_nice(int nacl_nice) {
    kern_return_t kr;

    /*
     * Don't use mach_thread_self() because it requires a separate
     * mach_port_deallocate() system call to release it. Instead, rely on
     * pthread's cached copy of the port.
     */
    thread_act_t mthread = pthread_mach_thread_np(pthread_self());

    switch (nacl_nice) {
    case NICE_REALTIME: {
        struct thread_time_constraint_policy tcpolicy;
        const int kPeriodInNanoseconds = 2902490;
        const float kDutyCycle = 0.5;
        const float kDutyMax = 0.85;
        tcpolicy.period = MyConvertToHostTime(kPeriodInNanoseconds);
        tcpolicy.computation = kDutyCycle * tcpolicy.period;
        tcpolicy.constraint = kDutyMax * tcpolicy.period;
        tcpolicy.preemptible = 1;
        /* Sadly it appears that a MacOS system can be locked up by too
         * many real-time threads. So use normal priority until we figure
         * out a way to control things globally.
         */
        /* kr = thread_policy_set(mthread, THREAD_TIME_CONSTRAINT_POLICY,
         *                        (thread_policy_t)&tcpolicy,
         *                        THREAD_TIME_CONSTRAINT_POLICY_COUNT);
         */
        kr = thread_policy_set(mthread, THREAD_PRECEDENCE_POLICY,
                               (thread_policy_t)&tcpolicy,
                               THREAD_PRECEDENCE_POLICY_COUNT);
    }
    break;
    case NICE_BACKGROUND: {
        struct thread_precedence_policy tppolicy;
        tppolicy.importance = 0;  /* IDLE_PRI */
        kr = thread_policy_set(mthread, THREAD_PRECEDENCE_POLICY,
                               (thread_policy_t)&tppolicy,
                               THREAD_PRECEDENCE_POLICY_COUNT);
    }
    break;
    case NICE_NORMAL: {
        struct thread_standard_policy tspolicy;
        kr = thread_policy_set(mthread, THREAD_STANDARD_POLICY,
                               (thread_policy_t)&tspolicy,
                               THREAD_STANDARD_POLICY_COUNT);
    }
    break;
    default:
        NaClLog(LOG_WARNING, "nacl_thread_nice() failed (bad nice value).\n");
        return -1;
        break;
    }
    if (kr != KERN_SUCCESS) {
        NaClLog(LOG_WARNING, "nacl_thread_nice() failed.\n");
        return -1;
    } else {
        return 0;
    }
}
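The comments above are about port ownership: mach_thread_self() returns a reference the caller must release with mach_port_deallocate(), while pthread_mach_thread_np() only reads the port pthread already caches. A minimal sketch of the two patterns:

#include <mach/mach.h>
#include <pthread.h>

/* Owning variant: each mach_thread_self() adds a reference that must be
 * released again, otherwise the task leaks a port reference per call. */
static void use_owned_port(void)
{
    thread_act_t port = mach_thread_self();
    /* ... use port with thread_info() / thread_policy_set() ... */
    mach_port_deallocate(mach_task_self(), port);
}

/* Non-owning variant: pthread's cached copy, nothing to release. */
static void use_cached_port(void)
{
    thread_act_t port = pthread_mach_thread_np(pthread_self());
    /* ... use port; do not deallocate it ... */
    (void)port;
}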
Example #9
BOOL WINAPI GetThreadTimes (
  HANDLE hThread,
  LPFILETIME lpCreationTime,
  LPFILETIME lpExitTime,
  LPFILETIME lpKernelTime,
  LPFILETIME lpUserTime
) {
  if (!hThread)
    return false;

  // Handle the pseudo-handle for the current thread before dereferencing it:
  // (HANDLE)-1 does not point at a real thread record.
  if (hThread == (HANDLE)-1) {
    if (lpCreationTime)
      TimeTToFileTime(0,lpCreationTime);
    if (lpExitTime)
      TimeTToFileTime(time(NULL),lpExitTime);
    if (lpKernelTime)
      TimeTToFileTime(0,lpKernelTime);
    if (lpUserTime)
      TimeTToFileTime(0,lpUserTime);

    return true;
  }

  if (!hThread->m_threadValid)
    return false;

  if (lpCreationTime)
    TimeTToFileTime(hThread->m_tmCreation,lpCreationTime);
  if (lpExitTime)
    TimeTToFileTime(time(NULL),lpExitTime);
  if (lpKernelTime)
    TimeTToFileTime(0,lpKernelTime);

#ifdef __APPLE__
  thread_info_data_t     threadInfo;
  mach_msg_type_number_t threadInfoCount = THREAD_INFO_MAX;

  if (hThread->m_machThreadPort == MACH_PORT_NULL)
    hThread->m_machThreadPort = pthread_mach_thread_np(hThread->m_hThread);

  kern_return_t ret = thread_info(hThread->m_machThreadPort, THREAD_BASIC_INFO, (thread_info_t)threadInfo, &threadInfoCount);
  if (ret == KERN_SUCCESS)
  {
    thread_basic_info_t threadBasicInfo = (thread_basic_info_t)threadInfo;

    if (lpUserTime)
    {
      // User time.
      unsigned long long time = ((__int64)threadBasicInfo->user_time.seconds * 10000000L) + threadBasicInfo->user_time.microseconds*10L;
      lpUserTime->dwLowDateTime = (time & 0xFFFFFFFF);
      lpUserTime->dwHighDateTime = (time >> 32);
    }

    if (lpKernelTime)
    {
      // System time.
      unsigned long long time = ((__int64)threadBasicInfo->system_time.seconds * 10000000L) + threadBasicInfo->system_time.microseconds*10L;
      lpKernelTime->dwLowDateTime = (time & 0xFFFFFFFF);
      lpKernelTime->dwHighDateTime = (time >> 32);
    }
  }
#endif

  return true;
}
Example #10
int ksbt_backtracePthread(const pthread_t thread,
                          uintptr_t* const backtraceBuffer,
                          const int maxEntries)
{
    const thread_t mach_thread = pthread_mach_thread_np(thread);
    return ksbt_backtraceThread(mach_thread, backtraceBuffer, maxEntries);
}
Example #11
void
gdb_pthread_kill(pthread_t pthread)
{
  mach_port_t mthread;
  kern_return_t kret;
  int ret;

  mthread = pthread_mach_thread_np(pthread);

  kret = thread_suspend(mthread);
  MACH_CHECK_ERROR(kret);

  ret = pthread_cancel(pthread);
  if (ret != 0)
    {
/* in case a macro has re-defined this function: */
#undef strerror
      warning("Unable to cancel thread: %s (%d)", strerror(errno), errno);
      thread_terminate(mthread);
    }

  kret = thread_abort (mthread);
  MACH_CHECK_ERROR (kret);

  kret = thread_resume (mthread);
  MACH_CHECK_ERROR (kret);

  ret = pthread_join (pthread, NULL);
  if (ret != 0)
    {
      warning ("Unable to join to canceled thread: %s (%d)", strerror (errno),
               errno);
    }
}
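Example #12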
void
_dispatch_introspection_init(void)
{
	TAILQ_INSERT_TAIL(&_dispatch_introspection_queues,
			&_dispatch_main_q, diq_list);
	TAILQ_INSERT_TAIL(&_dispatch_introspection_queues,
			&_dispatch_mgr_q, diq_list);
#if DISPATCH_ENABLE_PTHREAD_ROOT_QUEUES
	TAILQ_INSERT_TAIL(&_dispatch_introspection_queues,
			_dispatch_mgr_q.do_targetq, diq_list);
#endif
	for (size_t i = 0; i < DISPATCH_ROOT_QUEUE_COUNT; i++) {
		TAILQ_INSERT_TAIL(&_dispatch_introspection_queues,
				&_dispatch_root_queues[i], diq_list);
	}

	// Hack to determine queue TSD offset from start of pthread structure
	uintptr_t thread = _dispatch_thread_self();
	thread_identifier_info_data_t tiid;
	mach_msg_type_number_t cnt = THREAD_IDENTIFIER_INFO_COUNT;
	kern_return_t kr = thread_info(pthread_mach_thread_np((void*)thread),
			THREAD_IDENTIFIER_INFO, (thread_info_t)&tiid, &cnt);
	if (!dispatch_assume_zero(kr)) {
		_dispatch_introspection_thread_queue_offset =
				(void*)(uintptr_t)tiid.dispatch_qaddr - (void*)thread;
	}
	_dispatch_thread_key_create(&dispatch_introspection_key,
			_dispatch_introspection_thread_remove);
	_dispatch_introspection_thread_add(); // add main thread
}
Example #13
static int gettid(void)
{
    #ifdef __APPLE__
        return pthread_mach_thread_np(pthread_self());
    #else
        return syscall(__NR_gettid);
    #endif
}
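On macOS this returns the thread's Mach port name, which is only unique within the process and can be reused once a thread exits. Where a stable, system-wide ID is wanted, pthread_threadid_np() (macOS 10.6+) is an alternative; a minimal sketch:

#include <pthread.h>
#include <stdint.h>

/* Sketch: 64-bit, system-wide unique thread ID on macOS 10.6 and later. */
static uint64_t get_tid64(void)
{
    uint64_t tid = 0;
    pthread_threadid_np(NULL, &tid);   /* NULL means the calling thread */
    return tid;
}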
Example #14
File: Test.C Project: B-Rich/EBBlib
int
create_bound_thread(pthread_t *tid, int id,  void *(*func)(void *), void *arg)
{
  int numcores, pid, rc;
  numcores = num_phys_cores();
  pid = id % numcores;

  if (id < 0) return -1;

#ifdef __APPLE__
  // Apple bind code
  thread_affinity_policy_data_t affinityinfo;
  rc = pthread_create_suspended_np(tid, NULL, func, arg);// TODO: verify value of arg is correct for OSX
  if (rc != 0) {
    perror("pthread_create_suspended_np");
    return -1;
  }
  affinityinfo.affinity_tag = pid+2; // FIXME: confirm +2

  rc = thread_policy_set(pthread_mach_thread_np(*tid),
			 THREAD_AFFINITY_POLICY,
			 (thread_policy_t)&affinityinfo,
			 THREAD_AFFINITY_POLICY_COUNT);
  if (rc != KERN_SUCCESS) {
    perror("thread_policy_set");
    return -1;
  }
  thread_resume(pthread_mach_thread_np(*tid));
#else

  // Linux bind code
  struct linux_thread_init_arg *lnxargs;
  lnxargs = (linux_thread_init_arg *)malloc(sizeof(linux_thread_init_arg));// FIXME: memory leak...
  lnxargs->func = func;
  lnxargs->args = arg;
  lnxargs->proc = pid;
  rc = pthread_create(tid, NULL, linux_thread_init, lnxargs);
  if (rc != 0) {
    perror("pthread_create");
    return -1;
  }

#endif
  return 0;
}
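linux_thread_init and struct linux_thread_init_arg are referenced but not shown; a plausible sketch of the trampoline, assuming it pins the new thread with pthread_setaffinity_np() before chaining to the user function (the field names follow the usage above, the rest is an assumption):

#define _GNU_SOURCE
#include <pthread.h>
#include <sched.h>
#include <stdlib.h>

struct linux_thread_init_arg {
  void *(*func)(void *);
  void *args;
  int proc;
};

/* Hypothetical trampoline: bind the new thread to its core, then run func. */
static void *linux_thread_init(void *p)
{
  struct linux_thread_init_arg *a = (struct linux_thread_init_arg *)p;
  void *(*func)(void *) = a->func;
  void *args = a->args;
  cpu_set_t set;

  CPU_ZERO(&set);
  CPU_SET(a->proc, &set);
  pthread_setaffinity_np(pthread_self(), sizeof(set), &set);

  free(a);   /* freeing here would address the leak noted in the FIXME above */
  return func(args);
}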
Example #15
static void set_affinity(pthread_t thread, int tag)
{
    thread_affinity_policy_data_t theTCPolicy;
    theTCPolicy.affinity_tag = tag;
    kern_return_t res = thread_policy_set(pthread_mach_thread_np(thread), THREAD_AFFINITY_POLICY, (thread_policy_t)&theTCPolicy, THREAD_AFFINITY_POLICY_COUNT);
    if (res == KERN_SUCCESS) {
        //printf("set_affinity = %d\n", theTCPolicy.affinity_tag);
    }
}
Example #16
ThreadId_t ThreadPosix::getThreadId() 
{
    pthread_t t = pthread_self();
#if defined (__MACH__)
    return pthread_mach_thread_np(t);
#else 
    return reinterpret_cast<ThreadId_t>(pthread_self());
#endif
}
Example #17
static unsigned long callbackThreadID() {
  return static_cast<unsigned long>(
#ifdef __APPLE__
    pthread_mach_thread_np(pthread_self())
#else
    pthread_self()
#endif
  );
}
Example #18
/*
 * Signal a condition variable, waking a specified thread.
 */
int       
pthread_cond_signal_thread_np(pthread_cond_t *ocond, pthread_t thread)
{
	mach_port_t mp = MACH_PORT_NULL;
	if (thread) {
		mp = pthread_mach_thread_np(thread);
	}
	return _pthread_cond_signal(ocond, false, mp);
}
Example #19
UInt32 ZKMORHP_ForeignThread::getScheduledPriority(pthread_t inThread, int inPriorityKind)
{
    thread_basic_info_data_t			threadInfo;
	policy_info_data_t					thePolicyInfo;
	unsigned int						count;

	if (inThread == NULL)
		return 0;
    
    // get basic info
    count = THREAD_BASIC_INFO_COUNT;
    thread_info (pthread_mach_thread_np (inThread), THREAD_BASIC_INFO, (thread_info_t)&threadInfo, &count);
    
	switch (threadInfo.policy) {
		case POLICY_TIMESHARE:
			count = POLICY_TIMESHARE_INFO_COUNT;
			thread_info(pthread_mach_thread_np (inThread), THREAD_SCHED_TIMESHARE_INFO, (thread_info_t)&(thePolicyInfo.ts), &count);
            if (inPriorityKind == CAPTHREAD_SCHEDULED_PRIORITY) {
                return thePolicyInfo.ts.cur_priority;
            }
            return thePolicyInfo.ts.base_priority;
            break;
            
        case POLICY_FIFO:
			count = POLICY_FIFO_INFO_COUNT;
			thread_info(pthread_mach_thread_np (inThread), THREAD_SCHED_FIFO_INFO, (thread_info_t)&(thePolicyInfo.fifo), &count);
            if ( (thePolicyInfo.fifo.depressed) && (inPriorityKind == CAPTHREAD_SCHEDULED_PRIORITY) ) {
                return thePolicyInfo.fifo.depress_priority;
            }
            return thePolicyInfo.fifo.base_priority;
            break;
            
		case POLICY_RR:
			count = POLICY_RR_INFO_COUNT;
			thread_info(pthread_mach_thread_np (inThread), THREAD_SCHED_RR_INFO, (thread_info_t)&(thePolicyInfo.rr), &count);
			if ( (thePolicyInfo.rr.depressed) && (inPriorityKind == CAPTHREAD_SCHEDULED_PRIORITY) ) {
                return thePolicyInfo.rr.depress_priority;
            }
            return thePolicyInfo.rr.base_priority;
            break;
	}
    
    return 0;
}
Example #20
static void get_affinity(pthread_t thread)
{
    thread_affinity_policy_data_t theTCPolicy;
    mach_msg_type_number_t count = THREAD_AFFINITY_POLICY_COUNT;
    boolean_t get_default = false;
    kern_return_t res = thread_policy_get(pthread_mach_thread_np(thread), THREAD_AFFINITY_POLICY, (thread_policy_t)&theTCPolicy, &count, &get_default);
    if (res == KERN_SUCCESS) {
        //printf("get_affinity = %d\n", theTCPolicy.affinity_tag);
    }
}
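Example #21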
static inline PlatformThread getCurrentPlatformThread()
{
#if OS(DARWIN)
    return pthread_mach_thread_np(pthread_self());
#elif OS(WINDOWS)
    return GetCurrentThread();
#elif USE(PTHREADS)
    return pthread_self();
#endif
}
Example #22
static int
sys_thread_create (struct sys_thread *td, const int is_affin)
{
#ifndef _WIN32
  pthread_attr_t attr;
  int res;

  if ((res = pthread_attr_init(&attr)))
    goto err;
  if ((res = pthread_attr_setstacksize(&attr, td->vmtd->stack_size))) {
    pthread_attr_destroy(&attr);
    goto err;
  }

#ifdef USE_MACH_AFFIN
  if (is_affin) {
    res = pthread_create_suspended_np(&td->tid, &attr,
     (thread_func_t) thread_start, td);
    if (!res) {
      mach_port_t mt = pthread_mach_thread_np(td->tid);
      affin_cpu_set(mt, td->vmtd->cpu);
      thread_resume(mt);
    }
  } else
#endif
  res = pthread_create(&td->tid, &attr, (thread_func_t) thread_start, td);
  pthread_attr_destroy(&attr);
  if (!res) {
#if defined(USE_PTHREAD_AFFIN)
    if (is_affin)
      affin_cpu_set(td->tid, td->vmtd->cpu);
#else
    (void) is_affin;
#endif
    return 0;
  }
 err:
  errno = res;
#else
  unsigned int tid;
  const uintptr_t hThr = _beginthreadex(NULL,
   (unsigned int) td->vmtd->stack_size,
   (thread_func_t) thread_start, td, 0, &tid);

  (void) is_affin;

  if (hThr) {
    td->tid = (HANDLE) hThr;
    if (is_WinNT && td->vmtd->cpu)
      affin_cpu_set(td->tid, td->vmtd->cpu);
    return 0;
  }
#endif
  return -1;
}
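affin_cpu_set() is not shown; for the USE_MACH_AFFIN branch a plausible sketch is an affinity-tag hint on the still-suspended thread (the tag mapping is an assumption):

#include <mach/mach.h>
#include <mach/thread_policy.h>

/* Hypothetical sketch of the Mach branch of affin_cpu_set(): turn a CPU
 * index into an affinity-tag hint for the (still suspended) thread. */
static void affin_cpu_set_sketch(mach_port_t mt, int cpu)
{
    thread_affinity_policy_data_t policy;
    policy.affinity_tag = cpu + 1;        /* tag 0 means "no affinity" */
    thread_policy_set(mt, THREAD_AFFINITY_POLICY,
                      (thread_policy_t)&policy, THREAD_AFFINITY_POLICY_COUNT);
}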
Example #23
void setThreadToPriority (pthread_t inThread, UInt32 inPriority, Boolean inIsFixed, UInt64 inHALIOProcCycleDurationInNanoseconds)
{
	if (inPriority == 96)
	{
        // REAL-TIME / TIME-CONSTRAINT THREAD
        thread_time_constraint_policy_data_t		theTCPolicy;
		UInt64										theComputeQuanta;
		UInt64										thePeriod;
		UInt64										thePeriodNanos;
		
        thePeriodNanos = inHALIOProcCycleDurationInNanoseconds;
        theComputeQuanta = AudioConvertNanosToHostTime ( thePeriodNanos * 0.15 );
		thePeriod = AudioConvertNanosToHostTime (thePeriodNanos);
        
		theTCPolicy.period = thePeriod;
		theTCPolicy.computation = theComputeQuanta;
		theTCPolicy.constraint = thePeriod;
		theTCPolicy.preemptible = true;
		thread_policy_set (pthread_mach_thread_np(inThread), THREAD_TIME_CONSTRAINT_POLICY, (thread_policy_t)&theTCPolicy, THREAD_TIME_CONSTRAINT_POLICY_COUNT);
	} else {
        // OTHER THREADS
		thread_extended_policy_data_t		theFixedPolicy;
        thread_precedence_policy_data_t		thePrecedencePolicy;
        SInt32								relativePriority;
        
		// [1] SET FIXED / NOT FIXED
        theFixedPolicy.timeshare = !inIsFixed;
        thread_policy_set (pthread_mach_thread_np(inThread), THREAD_EXTENDED_POLICY, (thread_policy_t)&theFixedPolicy, THREAD_EXTENDED_POLICY_COUNT);
        
		// [2] SET PRECEDENCE
        // N.B.: We expect that if thread A created thread B, and the program wishes to change
        // the priority of thread B, then the call to change the priority of thread B must be
        // made by thread A.
        // This assumption allows us to use pthread_self() to correctly calculate the priority
        // of the feeder thread (since precedency policy's importance is relative to the
        // spawning thread's priority.)
        relativePriority = inPriority - getThreadSetPriority (pthread_self());
        
        thePrecedencePolicy.importance = relativePriority;
        thread_policy_set (pthread_mach_thread_np(inThread), THREAD_PRECEDENCE_POLICY, (thread_policy_t)&thePrecedencePolicy, THREAD_PRECEDENCE_POLICY_COUNT);
	}
}
Example #24
UInt32 _getThreadPriority (pthread_t inThread, int inWhichPriority)
{
    thread_basic_info_data_t			threadInfo;
	policy_info_data_t					thePolicyInfo;
	unsigned int						count;
    
    // get basic info
    count = THREAD_BASIC_INFO_COUNT;
    thread_info (pthread_mach_thread_np (inThread), THREAD_BASIC_INFO, (thread_info_t)&threadInfo, &count);
    
	switch (threadInfo.policy) {
		case POLICY_TIMESHARE:
			count = POLICY_TIMESHARE_INFO_COUNT;
			thread_info(pthread_mach_thread_np (inThread), THREAD_SCHED_TIMESHARE_INFO, (thread_info_t)&(thePolicyInfo.ts), &count);
            if (inWhichPriority == THREAD_SCHEDULED_PRIORITY) {
                return thePolicyInfo.ts.cur_priority;
            } else {
                return thePolicyInfo.ts.base_priority;
            }
            break;
            
        case POLICY_FIFO:
			count = POLICY_FIFO_INFO_COUNT;
			thread_info(pthread_mach_thread_np (inThread), THREAD_SCHED_FIFO_INFO, (thread_info_t)&(thePolicyInfo.fifo), &count);
            if ( (thePolicyInfo.fifo.depressed) && (inWhichPriority == THREAD_SCHEDULED_PRIORITY) ) {
                return thePolicyInfo.fifo.depress_priority;
            }
            return thePolicyInfo.fifo.base_priority;
            break;
            
		case POLICY_RR:
			count = POLICY_RR_INFO_COUNT;
			thread_info(pthread_mach_thread_np (inThread), THREAD_SCHED_RR_INFO, (thread_info_t)&(thePolicyInfo.rr), &count);
			if ( (thePolicyInfo.rr.depressed) && (inWhichPriority == THREAD_SCHEDULED_PRIORITY) ) {
                return thePolicyInfo.rr.depress_priority;
            }
            return thePolicyInfo.rr.base_priority;
            break;
	}
    
    return 0;
}
Example #25
Thread::Thread(bool external, int stackSize) :
    _id(threadIdInvalid),
    _stackSize(stackSize),
    _lock(new honey::Mutex)
{
    if (external)
    {
        _handle = pthread_self();
        _id = pthread_mach_thread_np(_handle);
    }
}
Example #26
int ParaEngine::GetThisThreadID()
{
#ifdef WIN32
	return (int)(::GetCurrentThreadId());
#elif (PARA_TARGET_PLATFORM == PARA_PLATFORM_IOS || PARA_TARGET_PLATFORM == PARA_PLATFORM_MAC)
	return (int)pthread_mach_thread_np(pthread_self());
#else
    //return (int)gettid();
    return (int)pthread_self();
#endif
}