/**
 * Prepend a new entry value to the list
 *
 * @param value The value to be prepended.
 *
 * @warning This method is not async safe.
 */
template <typename V> void async_list<V>::nasync_prepend (V value) {
    /* Lock the list from other writers. */
    OSSpinLockLock(&_write_lock); {
        /* Construct the new entry, or recycle an existing one. */
        node *new_node;
        if (_free != NULL) {
            /* Fetch a node from the free list */
            new_node = _free;
            new_node->reset(value);
            
            /* Update the free list */
            _free = _free->_next;
        } else {
            new_node = new node(value);
        }
        
        /* Issue a memory barrier to ensure a consistent view of the value. */
        OSMemoryBarrier();
        
        /* If this is the first entry, initialize the list. */
        if (_tail == NULL) {
            
            /* Update the list tail. This need not be done atomically, as tail is never accessed by a lockless reader. */
            _tail = new_node;
            
            /* Atomically update the list head; this will be iterated upon by lockless readers. */
            if (!OSAtomicCompareAndSwapPtrBarrier(NULL, new_node, (void **) (&_head))) {
                /* Should never occur */
                PLCF_DEBUG("An async image head was set with tail == NULL despite holding lock.");
            }
        }
        
        /* Otherwise, prepend to the head of the list */
        else {
            new_node->_next = _head;
            new_node->_prev = NULL;
            
            /* Update the prev pointers. This is never accessed without a lock, so no additional synchronization
             * is required here. */
            _head->_prev = new_node;

            /* Issue a memory barrier to ensure a consistent view of the nodes. */
            OSMemoryBarrier();

            /* Atomically slot the new record into place; this may be iterated on by a lockless reader. */
            if (!OSAtomicCompareAndSwapPtrBarrier(new_node->_next, new_node, (void **) (&_head))) {
                PLCF_DEBUG("Failed to prepend to image list despite holding lock");
            }
        }
    } OSSpinLockUnlock(&_write_lock);
}
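For context, a hedged sketch of the lockless reader this ordering protects. It assumes the same node layout used by the writer above (_head, _next, a stored _value); the traversal itself is not part of the original source:

template <typename V> void async_list<V>::async_for_each (void (*callback)(V)) {
    /* Hypothetical reader: it never takes _write_lock, so it relies on the
     * CAS-with-barrier above to observe a fully constructed head node.
     * _prev and _tail are writer-side bookkeeping and are never read here. */
    for (node *n = _head; n != NULL; n = n->_next) {
        callback(n->_value);
    }
}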
Example #2
void OSXAddEntry(platform_work_queue* Queue, platform_work_queue_callback* Callback, void* Data)
{
    // TODO(casey): Switch to InterlockedCompareExchange eventually
    // so that any thread can add?
    uint32 NewNextEntryToWrite = (Queue->NextEntryToWrite + 1) % ArrayCount(Queue->Entries);
    Assert(NewNextEntryToWrite != Queue->NextEntryToRead);
    platform_work_queue_entry *Entry = Queue->Entries + Queue->NextEntryToWrite;
    Entry->Callback = Callback;
    Entry->Data = Data;
    ++Queue->CompletionGoal;
    OSMemoryBarrier();
    // Not needed: _mm_sfence();
    Queue->NextEntryToWrite = NewNextEntryToWrite;
	dispatch_semaphore_signal(Queue->SemaphoreHandle);

#if 0
	int r = dispatch_semaphore_signal(Queue->SemaphoreHandle);
	if (r > 0)
	{
		printf("  dispatch_semaphore_signal: A thread was woken\n");
	}
	else
	{
		printf("  dispatch_semaphore_signal: No thread was woken\n");
	}
#endif
}
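For contrast, a hedged sketch of the worker-side dequeue this ordering enables. The helper name, the CompletionCount field, and the callback's (Queue, Data) calling convention are assumptions, not shown in the original:

static bool OSXDoNextWorkQueueEntry(platform_work_queue* Queue)
{
    uint32 OriginalNextEntryToRead = Queue->NextEntryToRead;
    uint32 NewNextEntryToRead = (OriginalNextEntryToRead + 1) % ArrayCount(Queue->Entries);
    if (OriginalNextEntryToRead == Queue->NextEntryToWrite)
    {
        return false; // nothing published yet
    }

    // Because the producer writes Callback/Data before publishing
    // NextEntryToWrite (with OSMemoryBarrier in between), any entry visible
    // here is fully initialized.
    if (OSAtomicCompareAndSwap32Barrier((int32_t)OriginalNextEntryToRead,
                                        (int32_t)NewNextEntryToRead,
                                        (volatile int32_t *)&Queue->NextEntryToRead))
    {
        platform_work_queue_entry Entry = Queue->Entries[OriginalNextEntryToRead];
        Entry.Callback(Queue, Entry.Data);                                        // assumed signature
        OSAtomicIncrement32Barrier((volatile int32_t *)&Queue->CompletionCount);  // assumed field
    }
    return true;
}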
__private_extern__ void *__CFStartSimpleThread(void *func, void *arg) {
#if DEPLOYMENT_TARGET_MACOSX || DEPLOYMENT_TARGET_EMBEDDED || DEPLOYMENT_TARGET_LINUX || DEPLOYMENT_TARGET_FREEBSD
    pthread_attr_t attr;
    pthread_t tid = 0;
    pthread_attr_init(&attr);
    pthread_attr_setscope(&attr, PTHREAD_SCOPE_SYSTEM);
    pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
    pthread_attr_setstacksize(&attr, 60 * 1024);	// 60K stack for our internal threads is sufficient
    OSMemoryBarrier(); // ensure arg is fully initialized and set in memory
    pthread_create(&tid, &attr, func, arg);
    pthread_attr_destroy(&attr);
//warning CF: we don't actually know that a pthread_t is the same size as void *
    return (void *)tid;
#elif DEPLOYMENT_TARGET_WINDOWS
    unsigned tid;
    struct _args *args = (struct _args*)CFAllocatorAllocate(kCFAllocatorSystemDefault, sizeof(struct _args), 0);
    if (__CFOASafe) __CFSetLastAllocationEventName(args, "CFUtilities (thread-args)");
    HANDLE handle;
    args->func = func;
    args->arg = arg;
    /* The thread is created suspended, because otherwise there would be a race between the assignment below of the handle field, and its possible use in the thread func above. */
    args->handle = (HANDLE)_beginthreadex(NULL, 0, __CFWinThreadFunc, args, CREATE_SUSPENDED, &tid);
    handle = args->handle;
    ResumeThread(handle);
    return handle;
#endif
}
IOReturn IOInterruptController::enableInterrupt(IOService *nub, int source)
{
  IOInterruptSource *interruptSources;
  IOInterruptVectorNumber vectorNumber;
  IOInterruptVector *vector;
  OSData            *vectorData;
  
  interruptSources = nub->_interruptSources;
  vectorData = interruptSources[source].vectorData;
  vectorNumber = *(IOInterruptVectorNumber *)vectorData->getBytesNoCopy();
  vector = &vectors[vectorNumber];
  
  if (vector->interruptDisabledSoft) {
    vector->interruptDisabledSoft = 0;
#if !defined(__i386__) && !defined(__x86_64__)
    OSMemoryBarrier();
#endif

    if (!getPlatform()->atInterruptLevel()) {
      while (vector->interruptActive)
	{}
    }
    if (vector->interruptDisabledHard) {
      vector->interruptDisabledHard = 0;
      
      enableVector(vectorNumber, vector);
    }
  }
  
  return kIOReturnSuccess;
}
IOReturn IOSharedInterruptController::disableInterrupt(IOService *nub,
						       int source)
{
  IOInterruptSource *interruptSources;
  IOInterruptVectorNumber vectorNumber;
  IOInterruptVector *vector;
  OSData            *vectorData;
  IOInterruptState  interruptState;
  
  interruptSources = nub->_interruptSources;
  vectorData = interruptSources[source].vectorData;
  vectorNumber = *(IOInterruptVectorNumber *)vectorData->getBytesNoCopy();
  vector = &vectors[vectorNumber];
  
  interruptState = IOSimpleLockLockDisableInterrupt(controllerLock); 
  if (!vector->interruptDisabledSoft) {
    vector->interruptDisabledSoft = 1;
#if !defined(__i386__) && !defined(__x86_64__)
    OSMemoryBarrier();
#endif

    vectorsEnabled--;
  }
  IOSimpleLockUnlockEnableInterrupt(controllerLock, interruptState);
  
  if (!getPlatform()->atInterruptLevel()) {
    while (vector->interruptActive)
	{}
  }
  
  return kIOReturnSuccess;
}
Example #6
/**
 \brief use this method to reserve a blob of data to fill.
 \param inCount count of bytes you need to reserve
 \param inOutRangeList RangeList to hold new state
 
 This method should only be called from the storing thread
 */
LockFreeQueueReturnCode    LockFreeQueue::ReserveRange(unsigned long inCount, RangeList* inOutRangeList)
{
    OSMemoryBarrier();

    RangeList *oldRangeList = (RangeList*)mRangeList;
    
    if (oldRangeList == inOutRangeList)
    {
        printf("reserve: RangeList in use!\n");
        return LockFreeQueue_rangeListInUse;
    }
    
    if (oldRangeList)
    {
        if (oldRangeList->mHasReserved)
        {
            // someone has already reserved space, try again later!
            return LockFreeQueue_alreadyReserved;
        }
        
        if (FreeBytesWithList(oldRangeList) < inCount)
        {
            // not enough space!
            return LockFreeQueue_notEnoughSpaceLeft;
        }
        
        memcpy(inOutRangeList, oldRangeList, sizeof(RangeList));
    }
    else
    {
        memset(inOutRangeList, 0, sizeof(RangeList));
    }

    unsigned long firstReserved = FirstEmptyByteIndexWithList(inOutRangeList);
    inOutRangeList->mReservedRange.mPosition = firstReserved;
    inOutRangeList->mReservedRange.mLength = inCount;
    inOutRangeList->mHasReserved = true;
    
    bool result = OSAtomicCompareAndSwapPtr(oldRangeList, inOutRangeList, (void* volatile*)&mRangeList);
    
    if (result)
    {
        Range firstByteRange;
        Range secondByteRange;
        
        RangePartsOfByteRange(&firstByteRange, &secondByteRange, &inOutRangeList->mReservedRange);
        
        if (secondByteRange.mLength == 0)
        {
            memset(&(mDataRing[firstByteRange.mPosition]), 'r', inCount);
        }
        else
        {
            memset(&(mDataRing[firstByteRange.mPosition]), 'r', firstByteRange.mLength);
            memset(&(mDataRing[secondByteRange.mPosition]), 'r', inCount-firstByteRange.mLength);
        }        
    }
    
    return result ? LockFreeQueue_OK : LockFreeQueue_casUnsuccessful;
}
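A hedged caller sketch for the storing thread; the queue instance, payload size, and the later commit step are assumptions for illustration:

RangeList scratch;                               // caller-owned state handed back in
LockFreeQueueReturnCode rc = queue.ReserveRange(payloadSize, &scratch);

if (rc == LockFreeQueue_casUnsuccessful) {
    // The fetching thread swapped mRangeList first; call again with fresh state.
    rc = queue.ReserveRange(payloadSize, &scratch);
}

if (rc == LockFreeQueue_OK) {
    // scratch.mReservedRange now describes the (possibly wrapped) region of
    // mDataRing that only this thread may fill before committing it.
}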
__private_extern__ int       
_pthread_cond_init(_pthread_cond *cond, const pthread_condattr_t *attr, int conforming)
{
	volatile uint32_t *c_lseqcnt, *c_useqcnt, *c_sseqcnt;

	cond->busy = NULL;
	cond->c_seq[0] = 0;
	cond->c_seq[1] = 0;
	cond->c_seq[2] = 0;
	cond->unused = 0;

	cond->misalign = (((uintptr_t)&cond->c_seq[0]) & 0x7) != 0;
	COND_GETSEQ_ADDR(cond, &c_lseqcnt, &c_useqcnt, &c_sseqcnt);
	*c_sseqcnt = PTH_RWS_CV_CBIT; // set Sword to 0c
	
	if (conforming) {
		if (attr) {
			cond->pshared = attr->pshared;
		} else {
			cond->pshared = _PTHREAD_DEFAULT_PSHARED;
		}
	} else {
		cond->pshared = _PTHREAD_DEFAULT_PSHARED;
	}
	
	// Ensure all contents are properly set before setting signature.
	OSMemoryBarrier();
	cond->sig = _PTHREAD_COND_SIG;
	
	return 0;
}
Example #8
/**
 \brief Fetch a blob of data
 \param inOutBuffer buffer to hold the fetched data
 \param inBufferLength length of supplied buffer in inOutBuffer
 \param inOutRangeList RangeList to hold new state
 \param outReturnedBytesCount count of bytes which are returned
 
 This method should only be called from the fetching thread
 */
LockFreeQueueReturnCode LockFreeQueue::Fetch(char *inOutBuffer, unsigned long inBufferLength, RangeList* inOutRangeList, unsigned long * outReturnedBytesCount)
{
    OSMemoryBarrier();
    RangeList *oldRangeList = (RangeList*)mRangeList;
    
    if (oldRangeList == inOutRangeList)
    {
        printf("fetch: RangeList in use!\n");
        *outReturnedBytesCount = 0;
        return LockFreeQueue_rangeListInUse;
    }
    
    if (oldRangeList == NULL || oldRangeList->mFullRangeCount == 0)
    {
        // nothing to fetch!
        *outReturnedBytesCount = 0;
        return LockFreeQueue_empty;
    }

    if (oldRangeList->mFullRanges[0].mLength > inBufferLength)
    {
        printf("inBuffer not large enough!\n");
        *outReturnedBytesCount = 0;
        return LockFreeQueue_bufferToSmall;
    }

    Range firstRange;
    Range secondRange;
    
    RangePartsOfByteRange(&firstRange, &secondRange, &oldRangeList->mFullRanges[0]);
    
    const bool doClearBuffer = true;
    
    memcpy(inOutBuffer, &mDataRing[firstRange.mPosition], firstRange.mLength);
    
    if (secondRange.mLength)
    {
        memcpy(&inOutBuffer[firstRange.mLength], &mDataRing[secondRange.mPosition], secondRange.mLength);
    }

    inOutRangeList->mHasReserved = oldRangeList->mHasReserved;
    inOutRangeList->mReservedRange = oldRangeList->mReservedRange;
    for (unsigned long i=0; i+1<oldRangeList->mFullRangeCount ; i++)
    {
        inOutRangeList->mFullRanges[i] = oldRangeList->mFullRanges[i+1];
    }
    inOutRangeList->mFullRangeCount = oldRangeList->mFullRangeCount-1;
    
    bool result = OSAtomicCompareAndSwapPtr(oldRangeList, inOutRangeList, (void* volatile*)&mRangeList);
    
    if (result && doClearBuffer)
    {
        memset(&mDataRing[firstRange.mPosition], '-', firstRange.mLength);
        if (secondRange.mLength) memset(&mDataRing[secondRange.mPosition], '-', secondRange.mLength);
    }
    
    *outReturnedBytesCount = result ? oldRangeList->mFullRanges[0].mLength : 0;
    return result ? LockFreeQueue_OK : LockFreeQueue_casUnsuccessful;
}
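And the matching fetch-side sketch; the queue instance and buffer size are again assumed:

char buffer[4096];
RangeList scratch;
unsigned long returned = 0;
LockFreeQueueReturnCode rc = queue.Fetch(buffer, sizeof(buffer), &scratch, &returned);

if (rc == LockFreeQueue_OK) {
    // 'returned' bytes of the oldest full range were copied into 'buffer' and
    // 'scratch' became the current RangeList via the CAS.
} else if (rc == LockFreeQueue_empty || rc == LockFreeQueue_casUnsuccessful) {
    // Nothing to read, or the storing thread won the swap; try again later.
}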
Example #9
void
ecc_log_init()
{
	ecc_prefer_panic = !PE_reboot_on_panic();
	ecc_data_lock_group = lck_grp_alloc_init("ecc-data", NULL);
	lck_spin_init(&ecc_data_lock, ecc_data_lock_group, NULL);
	OSMemoryBarrier();
}
void Semaphore::close()
{
    if (mValid)
    {
        mValid = false;
        OSMemoryBarrier();
        semaphore_signal_all(mInternal);
    }
}
Example #11
/* release hv_*_trap traps */
void
hv_release_traps(hv_trap_type_t trap_type) {
	hv_trap_table_t *trap_table = &hv_trap_table[trap_type];

	lck_mtx_lock(hv_support_lck_mtx);
	trap_table->trap_count = 0;
	OSMemoryBarrier();
	trap_table->traps = NULL;
	lck_mtx_unlock(hv_support_lck_mtx);
}
Example #12
static VALUE ir_get(VALUE self) {
#if HAVE_GCC_SYNC
    __sync_synchronize();
#elif defined _MSC_VER
    MemoryBarrier();
#elif __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1050
    OSMemoryBarrier();
#endif
    return (VALUE) DATA_PTR(self);
}
Example #13
static VALUE ir_set(VALUE self, VALUE new_value) {
    DATA_PTR(self) = (void *) new_value;
#if HAVE_GCC_SYNC
    __sync_synchronize();
#elif defined _MSC_VER
    MemoryBarrier();
#elif __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1050
    OSMemoryBarrier();
#endif
    return new_value;
}
void Thread::close()
{
    if (mValid)
    {
        mValid = false;
        OSMemoryBarrier();
        
        // Wait for thread to join before we allow the program to continue
        
        pthread_join(mInternal, NULL);
    }
}
Example #15
Atomic::operator unsigned() const
{
#if defined(_OPENTHREADS_ATOMIC_USE_GCC_BUILTINS)
    __sync_synchronize();
    return _value;
#elif defined(_OPENTHREADS_ATOMIC_USE_WIN32_INTERLOCKED)
    MemoryBarrier();
    return _value;
#elif defined(_OPENTHREADS_ATOMIC_USE_BSD_ATOMIC)
    OSMemoryBarrier();
    return static_cast<unsigned const volatile>(_value);
#else
# error This implementation should happen inline in the include file
#endif
}
Example #16
void*
AtomicPtr::get() const
{
#if defined(_OPENTHREADS_ATOMIC_USE_GCC_BUILTINS)
    __sync_synchronize();
    return _ptr;
#elif defined(_OPENTHREADS_ATOMIC_USE_WIN32_INTERLOCKED)
    MemoryBarrier();
    return _ptr;
#elif defined(_OPENTHREADS_ATOMIC_USE_BSD_ATOMIC)
    OSMemoryBarrier();
    return _ptr;
#else
# error This implementation should happen inline in the include file
#endif
}
Example #17
/**
 \brief Copy the supplied RangeList to the internal RangeList and make that the valid one.
 \param inRangeList the RangeList to internalize
 
 This method can be used to free a locally used RangeList. It can be called from any thread.
 */
LockFreeQueueReturnCode LockFreeQueue::InternalizeRangeList(RangeList* inRangeList)
{
    OSMemoryBarrier();
    RangeList *oldRangeList = (RangeList*)mRangeList;
    
    if (inRangeList != oldRangeList)
    {
        // supplied range list is not the valid one --> nothing to do
        return LockFreeQueue_OK;
    }

    memcpy(&mInternalRangeList, oldRangeList, sizeof(RangeList));
    
    bool result = OSAtomicCompareAndSwapPtr(oldRangeList, &mInternalRangeList, (void* volatile*)&mRangeList);
    
    return result ? LockFreeQueue_OK : LockFreeQueue_casUnsuccessful;
}
IOReturn IOSharedInterruptController::handleInterrupt(void * /*refCon*/,
						      IOService * nub,
						      int /*source*/)
{
  IOInterruptVectorNumber vectorNumber;
  IOInterruptVector *vector;
  
  for (vectorNumber = 0; vectorNumber < numVectors; vectorNumber++) {
    vector = &vectors[vectorNumber];
    
    vector->interruptActive = 1;
#if !defined(__i386__) && !defined(__x86_64__)
    OSMemoryBarrier();
#endif

    if (!vector->interruptDisabledSoft) {

      // Call the handler if it exists.
      if (vector->interruptRegistered) {

        bool trace = (gIOKitTrace & kIOTraceInterrupts) ? true : false;

        if (trace)
          timeStampInterruptHandlerStart(vectorNumber, vector);

        // Call handler.
        vector->handler(vector->target, vector->refCon, vector->nub, vector->source);

        if (trace)
          timeStampInterruptHandlerEnd(vectorNumber, vector);
      }
    }
    
    vector->interruptActive = 0;
  }
  
  // If any of the vectors are disabled, then disable this controller.
  IOSimpleLockLock(controllerLock);
  if (vectorsEnabled != vectorsRegistered) {
    nub->disableInterrupt(0);
    controllerDisabled = 1;
  }
  IOSimpleLockUnlock(controllerLock);
  
  return kIOReturnSuccess;
}
Example #19
/* register a list of trap handlers for the hv_*_trap syscalls */
kern_return_t
hv_set_traps(hv_trap_type_t trap_type, const hv_trap_t *traps,
	unsigned trap_count)
{
	hv_trap_table_t *trap_table = &hv_trap_table[trap_type];
	kern_return_t kr = KERN_FAILURE;

	lck_mtx_lock(hv_support_lck_mtx);
	if (trap_table->trap_count == 0) {	
		trap_table->traps = traps;
		OSMemoryBarrier();
		trap_table->trap_count = trap_count;
		kr = KERN_SUCCESS;
	}
	lck_mtx_unlock(hv_support_lck_mtx);

	return kr;
}
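The barrier here mirrors hv_release_traps above: traps is stored before trap_count becomes nonzero, and the count is cleared before traps is torn down. A hedged dispatcher sketch that relies on that ordering (the function below is illustrative, not the original hv_*_trap dispatch path):

static kern_return_t
hv_dispatch_trap_sketch(hv_trap_type_t trap_type, unsigned index)
{
	hv_trap_table_t *trap_table = &hv_trap_table[trap_type];

	/* Checking the count before the pointer means a dispatcher that sees a
	 * nonzero trap_count also sees the filled-in traps array. */
	if (index >= trap_table->trap_count || trap_table->traps == NULL) {
		return KERN_INVALID_ARGUMENT;
	}

	hv_trap_t trap = trap_table->traps[index];
	(void)trap; /* the trap's calling convention is not shown above */
	return KERN_SUCCESS;
}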
Example #20
static void
kperf_timer_handler( void *param0, __unused void *param1 )
{
	struct time_trigger *trigger = param0;
	unsigned ntimer = (unsigned)(trigger - timerv);
	unsigned ncpus  = machine_info.logical_cpu_max;

	trigger->active = 1;

	/* along the lines of do not ipi if we are all shutting down */
	if( kperf_sampling_status() == KPERF_SAMPLING_SHUTDOWN )
		goto deactivate;

	/* clean-up the thread-on-CPUs cache */
	bzero(kperf_thread_on_cpus, ncpus * sizeof(*kperf_thread_on_cpus));

	/* ping all CPUs */
#ifndef USE_SIMPLE_SIGNALS
	kperf_mp_broadcast( kperf_ipi_handler, trigger );
#else
	trigger->fire_count++;
	OSMemoryBarrier();
	kperf_mp_signal();
#endif

	/* release the pet thread? */
	if( ntimer == pet_timer )
	{
		/* timer re-enabled when thread done */
		kperf_pet_thread_go();
	}
	else
	{
		/* re-enable the timer
		 * FIXME: get the current time from elsewhere
		 */
		uint64_t now = mach_absolute_time();
		kperf_timer_schedule( trigger, now );
	}

deactivate:
	trigger->active = 0;
}
Example #21
/* if we can't pass a (function, arg) pair through a signal properly,
 * we do it the simple way. When a timer fires, we increment a counter
 * in the time trigger and broadcast a generic signal to all cores. Cores
 * search the time trigger list for any triggers for which their last seen
 * firing counter is lower than the current one.
 */
void
kperf_signal_handler(void)
{
	int i, cpu;
	struct time_trigger *tr = NULL;

	OSMemoryBarrier();

	cpu = chudxnu_cpu_number();
	for( i = 0; i < (int) timerc; i++ )
	{
		tr = &timerv[i];
		if( tr->fire_count <= tr->last_cpu_fire[cpu] )
			continue; /* this trigger hasn't fired */

		/* fire the trigger! */
		tr->last_cpu_fire[cpu] = tr->fire_count;
		kperf_ipi_handler( tr );
	}
}
Example #22
void
initialiseSystemHeap(virt_ptr<void> base,
                     uint32_t size)
{
   if (internal::isAppDebugLevelVerbose()) {
      COSInfo(COSReportModule::Unknown2,
              "RPL_SYSHEAP:Event,Change,Hex Addr,Bytes,Available");
      COSInfo(
         COSReportModule::Unknown2,
         fmt::format("RPL_SYSHEAP:SYSHEAP START,CREATE,=\"{}\",{}",
                     base, size));
   }

   sSystemHeapData->handle = MEMCreateExpHeapEx(base,
                                                size,
                                                MEMHeapFlags::ThreadSafe);
   sSystemHeapData->numAllocs = 0u;
   sSystemHeapData->numFrees = 0u;
   OSMemoryBarrier();
}
Example #23
__private_extern__
void __sync_synchronize(void)
{
	OSMemoryBarrier();
}
Example #24
int
main(int argc, char **argv)
{
	uint64_t iterations, i;
	double *jitter_arr, *fraction_arr;
	double *wakeup_second_jitter_arr;
	uint64_t target_time;
	uint64_t sleep_length_abs;
	uint64_t min_sleep_ns = 0;
	uint64_t max_sleep_ns = DEFAULT_MAX_SLEEP_NS;
	uint64_t wake_time;
	unsigned random_seed;
	boolean_t need_seed = TRUE;
	char ch;
	int res;
	kern_return_t kret;
	my_policy_type_t pol;
	boolean_t wakeup_second_thread = FALSE;
	semaphore_t wakeup_semaphore, return_semaphore;

	double avg, stddev, max, min;
	double avg_fract, stddev_fract, max_fract, min_fract;
	uint64_t too_much;

	struct second_thread_args secargs;
	pthread_t secthread;

	mach_timebase_info(&g_mti);

	/* Seed random */
	opterr = 0;
	while ((ch = getopt(argc, argv, "m:n:hs:w")) != -1 && ch != '?') {
		switch (ch) {
			case 's':
				/* Specified seed for random() */
				random_seed = (unsigned)atoi(optarg);
				srandom(random_seed);
				need_seed = FALSE;
				break;
			case 'm':
				/* Maximum sleep length per timer */
				max_sleep_ns = strtoull(optarg, NULL, 10);	
				break;
			case 'n':
				/* Minimum sleep length per timer */
				min_sleep_ns = strtoull(optarg, NULL, 10);	
				break;
			case 'w':
				/* After each timed wait, wakeup another thread */
				wakeup_second_thread = TRUE;
				break;
			case 'h':
				print_usage();
				exit(0);
				break;
			default:
				fprintf(stderr, "Got unexpected result from getopt().\n");
				exit(1);
				break;
		}
	}

	argc -= optind;
	argv += optind;

	if (argc != 3) {
		print_usage();
		exit(1);
	}

	if (min_sleep_ns >= max_sleep_ns) {
		print_usage();
		exit(1);
	}

	if (need_seed) {
		srandom(time(NULL));
	}

	/* What scheduling policy? */
	pol = parse_thread_policy(argv[0]);

	/* How many timers? */
	iterations = strtoull(argv[1], NULL, 10);

	/* How much jitter is so extreme that we should cut a trace point */
	too_much = strtoull(argv[2], NULL, 10);
	
	/* Array for data */
	jitter_arr = (double*)malloc(sizeof(*jitter_arr) * iterations);
	if (jitter_arr == NULL) {
		printf("Couldn't allocate array to store results.\n");
		exit(1);
	}

	fraction_arr = (double*)malloc(sizeof(*fraction_arr) * iterations);
	if (fraction_arr == NULL) {
		printf("Couldn't allocate array to store results.\n");
		exit(1);
	}

	if (wakeup_second_thread) {
		/* Array for data */
		wakeup_second_jitter_arr = (double*)malloc(sizeof(*jitter_arr) * iterations);
		if (wakeup_second_jitter_arr == NULL) {
			printf("Couldn't allocate array to store results.\n");
			exit(1);
		}

		kret = semaphore_create(mach_task_self(), &wakeup_semaphore, SYNC_POLICY_FIFO, 0);
		if (kret != KERN_SUCCESS) {
			printf("Couldn't allocate semaphore %d\n", kret);
			exit(1);
		}

		kret = semaphore_create(mach_task_self(), &return_semaphore, SYNC_POLICY_FIFO, 0);
		if (kret != KERN_SUCCESS) {
			printf("Couldn't allocate semaphore %d\n", kret);
			exit(1);
		}


		secargs.wakeup_semaphore = wakeup_semaphore;
		secargs.return_semaphore = return_semaphore;
		secargs.iterations = iterations;
		secargs.pol = pol;
		secargs.wakeup_second_jitter_arr = wakeup_second_jitter_arr;
		secargs.woke_on_same_cpu = 0;
		secargs.too_much = too_much;
		secargs.last_poke_time = 0ULL;
		secargs.cpuno = 0;

		res = pthread_create(&secthread, NULL, second_thread, &secargs);
		if (res) {
			err(1, "pthread_create");
		}

		sleep(1); /* Time for other thread to start up */
	}

	/* Set scheduling policy */
	res = thread_setup(pol);
	if (res != 0) {
		printf("Couldn't set thread policy.\n");
		exit(1);
	}

	/* 
	 * Repeatedly pick a random timer length and 
	 * try to sleep exactly that long 
	 */
	for (i = 0; i < iterations; i++) {
		sleep_length_abs = (uint64_t) (get_random_sleep_length_abs_ns(min_sleep_ns, max_sleep_ns) * (((double)g_mti.denom) / ((double)g_mti.numer)));
		target_time = mach_absolute_time() + sleep_length_abs;
		
		/* Sleep */
		kret = mach_wait_until(target_time);
		wake_time = mach_absolute_time();
	
		jitter_arr[i] = (double)(wake_time - target_time);
		fraction_arr[i] = jitter_arr[i] / ((double)sleep_length_abs);
		
		/* Too much: cut a tracepoint for a debugger */
		if (jitter_arr[i] >= too_much) {
			kdebug_trace(0xeeeee0 | DBG_FUNC_NONE, 0, 0, 0, 0);
		}

		if (wakeup_second_thread) {
			secargs.last_poke_time = mach_absolute_time();
			secargs.cpuno = cpu_number();
			OSMemoryBarrier();
			kret = semaphore_signal(wakeup_semaphore);
			if (kret != KERN_SUCCESS) {
				errx(1, "semaphore_signal");
			}

			kret = semaphore_wait(return_semaphore);
			if (kret != KERN_SUCCESS) {
				errx(1, "semaphore_wait");
			}

		}
	}

	/*
	 * Compute statistics and output results. 
	 */
	compute_stats(jitter_arr, iterations, &avg, &max, &min, &stddev);
	compute_stats(fraction_arr, iterations, &avg_fract, &max_fract, &min_fract, &stddev_fract);

	putchar('\n');
	print_stats_us("jitter", avg, max, min, stddev);
	print_stats_fract("%", avg_fract, max_fract, min_fract, stddev_fract);

	if (wakeup_second_thread) {

		res = pthread_join(secthread, NULL);
		if (res) {
			err(1, "pthread_join");
		}

		compute_stats(wakeup_second_jitter_arr, iterations, &avg, &max, &min, &stddev);
		
		putchar('\n');
		print_stats_us("second jitter", avg, max, min, stddev);

		putchar('\n');
		printf("%llu/%llu (%.1f%%) wakeups on same CPU\n", secargs.woke_on_same_cpu, iterations,
			   100.0*((double)secargs.woke_on_same_cpu)/iterations);
	}

	return 0;
}
Example #25
static int atomic_get(sp_counted_base_atomic_type volatile *pw)
{
    OSMemoryBarrier();
    return pw->i;
}
Example #26
 // Destructor.
 ~macos_fenced_block()
 {
   OSMemoryBarrier();
 }
Example #27
 // Constructor for a full fenced block.
 explicit macos_fenced_block(full_t)
 {
   OSMemoryBarrier();
 }
Example #28
static void atomic_set(sp_counted_base_atomic_type volatile *pw,int v)
{
    pw->i=v;
    OSMemoryBarrier();
}
Example #29
void __sync_synchronize(void)
{
  OSMemoryBarrier();
}
Example #30
boolean_t 
ecc_log_prefer_panic(void)
{
	OSMemoryBarrier();
	return ecc_prefer_panic;
}