Example #1
void exit_np(void)
{
	if (likely(ctrl_page != NULL) &&
	    ctrl_page->sched.np.flag &&
	    !(--ctrl_page->sched.np.flag)) {
		/* became preemptive, let's check for delayed preemptions */
		__sync_synchronize();
		if (ctrl_page->sched.np.preempt)
			sched_yield();
	}
}
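exit_np() only ends a non-preemptive section; the matching entry routine would bump the same flag before the critical work. A minimal sketch of what that entry side could look like, assuming the same ctrl_page layout (enter_np is a hypothetical name here, not taken from the snippet):

void enter_np(void)
{
	if (likely(ctrl_page != NULL)) {
		/* mark the start of a non-preemptive section */
		ctrl_page->sched.np.flag++;
		/* publish the flag before the critical work begins */
		__sync_synchronize();
	}
}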
Example #2
VALUE ir_set(VALUE self, VALUE new_value) {
  DATA_PTR(self) = (void *) new_value;
#if HAVE_GCC_SYNC
  __sync_synchronize();
#elif defined _MSC_VER
  MemoryBarrier();
#elif __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ >= 1050
  OSMemoryBarrier();
#endif
  return new_value;
}
Example #3
void lock(spinlock_t* lock) {
	if (lock == NULL) {
		return;
	}

	const uint16_t ticket = __sync_fetch_and_add(&lock->users, 1);
	while (lock->ticket != ticket) {
		barrier();
		thread_yield();
	}
	__sync_synchronize(); /* we don't use cmpxchg explicitly */
}
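Releasing this ticket lock is a matter of publishing the next ticket number once the critical section's writes are visible. A minimal unlock sketch under the same spinlock_t layout (the unlock function is a hypothetical counterpart, not part of the snippet):

void unlock(spinlock_t* lock) {
	if (lock == NULL) {
		return;
	}

	/* make writes from the critical section visible before
	 * the next ticket holder is admitted */
	__sync_synchronize();
	lock->ticket++;
}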
Example #4
void ReleaseKernelThread(Thread *thread)
{
	// busy-wait until the thread terminates
	while ((thread->flags & THREAD_TERMINATED) == 0)
	{
		__sync_synchronize();
	}
	
	// release the stack and thread description
	kfree(thread->stack);
	kfree(thread);
}
Example #5
//flush the cache
inline void cache_flush()
{
#if defined BGQ
 #if ! defined BGQ_EMU
  mbar();
 #else
  __sync_synchronize();
 #endif
#else
 #pragma omp flush
#endif
}
Example #6
static bool notifier_is_online(void)
{
	unsigned int i;

	__sync_synchronize();
	for (i = 0; i < MAX_CONNMAN_SERVICE_TYPES; i++) {
		if (online[i] > 0)
			return true;
	}

	return false;
}
Example #7
int __connman_technology_add_device(struct connman_device *device)
{
	struct connman_technology *technology;
	enum connman_service_type type;

	type = __connman_device_get_service_type(device);

	DBG("device %p type %s", device, get_name(type));

	technology = technology_get(type);
	if (technology == NULL) {
		/*
		 * Since no driver can be found for this device at the moment we
		 * add it to the techless device list.
		*/
		techless_device_list = g_slist_prepend(techless_device_list,
								device);

		return -ENXIO;
	}

	__sync_synchronize();
	if (technology->rfkill_driven == TRUE) {
		if (technology->enabled == TRUE && global_offlinemode == FALSE)
			__connman_device_enable(device);
		else
			__connman_device_disable(device);

		goto done;
	}

	if (technology->enable_persistent == TRUE &&
					global_offlinemode == FALSE) {
		int err = __connman_device_enable(device);
		/*
		 * connman_technology_add_device() calls __connman_device_enable()
		 * but since the device is already enabled, the call does not
		 * propagate through to connman_technology_enabled via
		 * connman_device_set_powered.
		 */
		if (err == -EALREADY)
			__connman_technology_enabled(type);
	}
	/* if technology persistent state is offline */
	if (technology->enable_persistent == FALSE)
		__connman_device_disable(device);

done:
	technology->device_list = g_slist_prepend(technology->device_list,
								device);

	return 0;
}
Example #8
void
lvm_pool_add(struct lvm_pool* pool, struct lvm vm) {
    pool->queue[pool->tail] = vm;

    __sync_synchronize();

    pool->tail = (pool->tail + 1) % LVM_POOL_MAX;

    if (pool->sleep && lvm_pool_count(pool) > 10) {
        pthread_cond_signal(&pool->cond);
    }
}
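The barrier orders the slot write before the tail publish, so a consumer that reads in the mirrored order never sees an unfilled slot. A minimal consumer sketch, assuming the pool also keeps a head index and that the caller has already verified the pool is non-empty (lvm_pool_take is a hypothetical name):

struct lvm
lvm_pool_take(struct lvm_pool* pool) {
    /* read the slot before publishing that it has been consumed */
    struct lvm vm = pool->queue[pool->head];

    __sync_synchronize();

    pool->head = (pool->head + 1) % LVM_POOL_MAX;

    return vm;
}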
Example #9
bool __connman_notifier_is_connected(void)
{
	unsigned int i;

	__sync_synchronize();
	for (i = 0; i < MAX_CONNMAN_SERVICE_TYPES; i++) {
		if (connected[i] > 0)
			return true;
	}

	return false;
}
Example #10
unsigned int __connman_notifier_count_connected(void)
{
	unsigned int i, count = 0;

	__sync_synchronize();
	for (i = 0; i < MAX_TECHNOLOGIES; i++) {
		if (connected[i] > 0)
			count++;
	}

	return count;
}
Example #11
static int technology_disabled(struct connman_technology *technology)
{
	__sync_synchronize();
	if (technology->enabled == FALSE)
		return -EALREADY;

	technology->enabled = FALSE;

	powered_changed(technology);

	return 0;
}
Example #12
/*
** Try to provide a memory barrier operation, needed for initialization
** and also for the xShmBarrier method of the VFS in cases when SQLite is
** compiled without mutexes (SQLITE_THREADSAFE=0).
*/
void sqlite3MemoryBarrier(void){
#if defined(SQLITE_MEMORY_BARRIER)
  SQLITE_MEMORY_BARRIER;
#elif defined(__GNUC__)
  __sync_synchronize();
#elif !defined(SQLITE_DISABLE_INTRINSIC) && \
      defined(_MSC_VER) && _MSC_VER>=1300
  _ReadWriteBarrier();
#elif defined(MemoryBarrier)
  MemoryBarrier();
#endif
}
Example #13
void
_gst_disable_interrupts (mst_Boolean from_signal_handler)
{
  sigset_t newSet;

  __sync_synchronize ();
  if (_gst_signal_count++ == 0)
    {
      __sync_synchronize ();
      if (from_signal_handler)
        return;

      sigfillset (&newSet);
      sigdelset (&newSet, SIGSEGV);
      sigdelset (&newSet, SIGBUS);
      sigdelset (&newSet, SIGILL);
      sigdelset (&newSet, SIGQUIT);
      sigdelset (&newSet, SIGABRT);
      pthread_sigmask (SIG_BLOCK, &newSet, &oldSet);
    }
}
Example #14
/******************************************************************************
* Thread safe instance function for singleton behavior
******************************************************************************/
Cmem* Cmem::instance() 
{
    static Mutex Cmem_instance_mutex;
    Cmem* tmp = pInstance;

    __sync_synchronize();

    if (tmp == 0) 
    {
        ScopedLock lck(Cmem_instance_mutex);

        tmp = pInstance;
        if (tmp == 0) 
        {
            tmp = new Cmem;
            __sync_synchronize();
            pInstance = tmp;
        }
    }
    return tmp;
}
Example #15
int pthread_spin_trylock(pthread_spinlock_t* lock)
{
  int ret;

  if (__sync_fetch_and_or(lock, 1) == 0)
    ret = 0;
  else
    ret = EBUSY;

  __sync_synchronize();

  return ret;
}
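The release path is the mirror image: a full barrier so the critical section's writes are visible, then the lock word is cleared. A minimal sketch assuming the same int-like pthread_spinlock_t representation (a plausible counterpart, not the snippet's own code):

int pthread_spin_unlock(pthread_spinlock_t* lock)
{
  __sync_synchronize();
  *lock = 0;

  return 0;
}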
Example #16
connman_bool_t __connman_notifier_is_enabled(enum connman_service_type type)
{
	DBG("type %d", type);

	if (technology_supported(type) == FALSE)
		return FALSE;

	__sync_synchronize();
	if (enabled[type] > 0)
		return TRUE;

	return FALSE;
}
Example #17
int
fair_futex_lock(fair_futex_t *lock) {

	uint32_t ticket;
	uint32_t old_futex;
	int pause_cnt;

	/*
	 * Possibly wrap: if we have more than 64K lockers waiting, the ticket
	 * value will wrap and two lockers will simultaneously be granted the
	 * lock.
	 */
	ticket = __atomic_fetch_add(&lock->fairlock.fair_lock_waiter, 1,
				      __ATOMIC_SEQ_CST);
retry:
	__sync_synchronize();
	old_futex = lock->futex;
	if(old_futex == (uint32_t)ticket / SPIN_CONTROL) {
//		printf("ticket %d spins (lo: %d)\n", ticket,
//		       lock->fairlock.fair_lock_owner);
		while (ticket != lock->fairlock.fair_lock_owner) ;
	}
	else {
//		printf("ticket %d sleeps (lo: %d)\n", ticket,
//		       lock->fairlock.fair_lock_owner);
		sys_futex((void*)&lock->futex, FUTEX_WAIT, old_futex, 0, 0, 0);
		goto retry;
	}

	/*
	 * Applications depend on a barrier here so that operations holding the
	 * lock see consistent data.
	 */
	__sync_synchronize();

//	printf("ticket %d got lock\n", ticket);

	return ticket;
}
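The release side is not shown; one plausible shape, given the fields used above, is to advance the owner ticket, publish the new futex cohort, and wake sleepers so they re-check their turn. This is only a sketch under those assumptions, not the snippet's actual unlock:

void
fair_futex_unlock(fair_futex_t *lock) {
	/* make the critical section's writes visible before handing over */
	__sync_synchronize();
	lock->fairlock.fair_lock_owner++;
	/* publish the cohort of the new owner, then wake all sleepers so
	 * they can re-evaluate whether it is their turn */
	lock->futex = lock->fairlock.fair_lock_owner / SPIN_CONTROL;
	__sync_synchronize();
	sys_futex((void*)&lock->futex, FUTEX_WAKE, INT_MAX, 0, 0, 0);
}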
Example #18
struct queue_root *ALLOC_QUEUE_ROOT()
{
	struct queue_root *root =
		malloc(sizeof(struct queue_root));
	if (root == NULL)
		return NULL;
	root->head_lock = 0;
	root->tail_lock = 0;
	__sync_synchronize();

	root->divider.next = NULL;
	root->head = &root->divider;
	root->tail = &root->divider;
	return root;
}
Example #19
/**
 * MIDI callback.
 *   @key: The key.
 *   @val: The value.
 *   @arg: The argument.
 */
static void callback(uint16_t key, uint16_t val, void *arg)
{
	struct inst_t *inst = arg;
	struct amp_comm_t *comm = inst->comm;

	sys_mutex_lock(&comm->lock);

	comm->event[comm->wr] = (struct amp_event_t){ inst->dev, key, val };
	__sync_synchronize();
	comm->wr = (comm->wr + 1) % AMP_COMM_LEN;

	sys_mutex_unlock(&comm->lock);
}
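The reader mirrors that order under the same mutex: copy the slot first, then advance the read index. A small sketch assuming comm also carries an rd index (comm_read is a hypothetical helper, not from the source):

static int comm_read(struct amp_comm_t *comm, struct amp_event_t *out)
{
	int ok = 0;

	sys_mutex_lock(&comm->lock);

	if (comm->rd != comm->wr) {
		*out = comm->event[comm->rd];
		__sync_synchronize();
		comm->rd = (comm->rd + 1) % AMP_COMM_LEN;
		ok = 1;
	}

	sys_mutex_unlock(&comm->lock);

	return ok;
}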
Example #20
void
GOMP_critical_name_start (void **pptr)
{
  gomp_mutex_t *plock;

  /* If a mutex fits within the space for a pointer, and is zero initialized,
     then use the pointer space directly.  */
  if (GOMP_MUTEX_INIT_0
      && sizeof (gomp_mutex_t) <= sizeof (void *)
      && __alignof (gomp_mutex_t) <= sizeof (void *))
    plock = (gomp_mutex_t *)pptr;

  /* Otherwise we have to be prepared to malloc storage.  */
  else
    {
      plock = *pptr;

      if (plock == NULL)
	{
#ifdef HAVE_SYNC_BUILTINS
	  gomp_mutex_t *nlock = gomp_malloc (sizeof (gomp_mutex_t));
	  gomp_mutex_init (nlock);

	  plock = __sync_val_compare_and_swap (pptr, NULL, nlock);
	  if (plock != NULL)
	    {
	      gomp_mutex_destroy (nlock);
	      gomp_free (nlock);
	    }
	  else
	    plock = nlock;
#else
	  gomp_mutex_lock (&create_lock_lock);
	  plock = *pptr;
	  if (plock == NULL)
	    {
	      plock = gomp_malloc (sizeof (gomp_mutex_t));
	      gomp_mutex_init (plock);
	      __sync_synchronize ();
	      *pptr = plock;
	    }
	  gomp_mutex_unlock (&create_lock_lock);
#endif
	}
    }

  gomp_mutex_lock (plock);

  /* OMP v3.1, 2.8.6 p81,l16 - "At entry to critical regions" */
  gomp_flush0();
}
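The matching end routine resolves the lock pointer the same way and simply unlocks it. A rough sketch assuming the same pointer-as-mutex convention (details of the real libgomp GOMP_critical_name_end may differ):

void
GOMP_critical_name_end (void **pptr)
{
  gomp_mutex_t *plock;

  /* Resolve the lock exactly as the start routine did.  */
  if (GOMP_MUTEX_INIT_0
      && sizeof (gomp_mutex_t) <= sizeof (void *)
      && __alignof (gomp_mutex_t) <= sizeof (void *))
    plock = (gomp_mutex_t *)pptr;
  else
    plock = *pptr;

  gomp_mutex_unlock (plock);
}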
Example #21
int32_t scap_readbuf(scap_t* handle, uint32_t cpuid, bool blocking, OUT char** buf, OUT uint32_t* len)
{
	uint32_t thead;
	uint32_t ttail;
	uint32_t read_size;

	//
	// Update the tail based on the amount of data read in the *previous* call.
	// Tail is never updated when we serve the data, because we assume that the caller is using
	// the buffer we give to her until she calls us again.
	//
	ttail = handle->m_devs[cpuid].m_bufinfo->tail + handle->m_devs[cpuid].m_lastreadsize;

	//
	// Make sure every read of the old buffer is completed before we move the tail and the
	// producer (on another CPU) can start overwriting it.
	// I use this instead of asm(mfence) because it should be portable even on the weirdest
	// CPUs
	//
	__sync_synchronize();

	if(ttail < RING_BUF_SIZE)
	{
		handle->m_devs[cpuid].m_bufinfo->tail = ttail;
	}
	else
	{
		handle->m_devs[cpuid].m_bufinfo->tail = ttail - RING_BUF_SIZE;
	}

	//
	// Read the pointers.
	//
	get_buf_pointers(handle->m_devs[cpuid].m_bufinfo,
	                 &thead,
	                 &ttail,
	                 &read_size);

	//
	// Remember read_size so we can update the tail at the next call
	//
	handle->m_devs[cpuid].m_lastreadsize = read_size;

	//
	// Return the results
	//
	*len = read_size;
	*buf = handle->m_devs[cpuid].m_buffer + ttail;

	return SCAP_SUCCESS;
}
Example #22
void init_array()
{
  int i, j;

  for (i=0; i<N; i++) {
    for (j=0; j<N; j++) {
      A[i][j] = (i + j);
      // We do not want to optimize this.
      __sync_synchronize();
      B[i][j] = (double)(i*j);
      C[i][j] = 0.0;
    }
  }
}
Example #23
/*
 * Iterate through all the pending sampled events in `pe' and pass each one to
 * `consume'.
 */
void consume_events(PerfEvent kind, perf_event_handle& pe,
                    perf_event_consume_fn_t consume) {
  auto const data_tail = pe.meta->data_tail;
  auto const data_head = pe.meta->data_head;

  asm volatile("" : : : "memory"); // smp_rmb()
  if (data_head == data_tail) return;

  auto const base = reinterpret_cast<char*>(pe.meta) + s_pagesz;

  auto const begin = base + data_tail % buffer_sz();
  auto const end = base + data_head % buffer_sz();

  auto cur = begin;

  while (cur != end) {
    auto header = reinterpret_cast<struct perf_event_header*>(cur);

    if (cur + header->size > base + buffer_sz()) {
      // The current entry wraps around the ring buffer.  Copy it into a stack
      // buffer, and update `cur' to wrap around appropriately.
      auto const prefix_len = base + buffer_sz() - cur;

      ensure_buffer_capacity(pe, header->size);

      memcpy(pe.buf, cur, prefix_len);
      memcpy(pe.buf + prefix_len, base, header->size - prefix_len);
      header = reinterpret_cast<struct perf_event_header*>(pe.buf);

      cur = base + header->size - prefix_len;
    } else if (cur + header->size == base + buffer_sz()) {
      // Perfect wraparound.
      cur = base;
    } else {
      cur += header->size;
    }

    if (header->type == PERF_RECORD_SAMPLE) {
      auto const sample = reinterpret_cast<perf_event_sample*>(header + 1);

      assertx(header->size == sizeof(struct perf_event_header) +
                              sizeof(perf_event_sample) +
                              sample->nr * sizeof(*sample->ips));
      consume(kind, sample);
    }
  }

  __sync_synchronize(); // smp_mb()
  pe.meta->data_tail = data_head;
}
Example #24
static void sn_enable_interrupt(struct sn_queue *rx_queue)
{
	__sync_synchronize();
	rx_queue->rx.rx_regs->irq_disabled = 0;
	__sync_synchronize();

	/* NOTE: make sure to check again whether the queue is really empty,
	 * to avoid potential race conditions when you call this function:
	 *
	 * Driver:			BESS:
	 * [IRQ is disabled]
	 * [doing polling]
	 * if (no pending packet)
	 * 				push a packet
	 * 				if (IRQ enabled)
	 * 					inject IRQ <- not executed
	 *     stop polling
	 *     enable IRQ
	 *
	 * [at this point, IRQ is enabled but pending packets are never
	 *  polled by the driver. So the driver needs to double check.]
	 */
}
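A driver-side poll loop illustrating the double check the note above asks for might look like this; sn_process_packets() and sn_queue_empty() are placeholder names, not part of the original driver:

static void sn_poll_loop(struct sn_queue *rx_queue)
{
	for (;;) {
		/* drain whatever is currently queued while the IRQ is off */
		sn_process_packets(rx_queue);
		if (!sn_queue_empty(rx_queue))
			continue;

		sn_enable_interrupt(rx_queue);

		/* double check: a packet may have been pushed between the
		 * emptiness check and the IRQ enable, in which case no IRQ
		 * was injected and we must keep polling */
		if (sn_queue_empty(rx_queue))
			break;
	}
}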
Example #25
extern "C" int
ebbos_gthread_once(__gthread_once_t *once, void (*func) (void))
{
  __gthread_once_t val = __sync_val_compare_and_swap(once, 0, 1);
  if (val == 0) {
    func();
    __sync_synchronize();
    *once = reinterpret_cast<__gthread_once_t>(2);
  } else {
    while (access_once(*once) != reinterpret_cast<__gthread_once_t>(2))
      ;
  }
  return 0;
}
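Typical use funnels every caller through the once-guard so the initializer runs exactly once across threads. A minimal usage sketch (table_once and init_table are hypothetical names):

static __gthread_once_t table_once;   /* zero-initialized: not run yet */

static void init_table(void)
{
  /* one-time setup goes here */
}

void lookup(void)
{
  ebbos_gthread_once(&table_once, init_table);
  /* from here on, init_table() has completed in some thread */
}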
Example #26
void *process_two(void *a) {
    int i;
    for (i = 0; i < TIMES; i++) {
        own_tasks();
        wants2 = 1;
        __sync_synchronize();
        whoWaits = 2;
        while (wants1 && (whoWaits == 2)) { }
        critical_section();
        wants2 = 0;
    }
    pthread_exit(NULL);
    return NULL;
}
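The algorithm is symmetric: the other thread raises its own flag, points whoWaits at itself, and spins while the peer also wants in. A sketch of the matching process_one under the same wants1/wants2/whoWaits globals:

void *process_one(void *a) {
    int i;
    for (i = 0; i < TIMES; i++) {
        own_tasks();
        wants1 = 1;
        __sync_synchronize();
        whoWaits = 1;
        while (wants2 && (whoWaits == 1)) { }
        critical_section();
        wants1 = 0;
    }
    pthread_exit(NULL);
    return NULL;
}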
Example #27
  void set(int v)
  {
    if (FLAGS_v != v) {
      VLOG(FLAGS_v) << "Setting verbose logging level to " << v;
      FLAGS_v = v;

      // Ensure 'FLAGS_v' visible in other threads.
#ifdef __WINDOWS__
      MemoryBarrier();
#else
      __sync_synchronize();
#endif // __WINDOWS__
    }
  }
Example #28
int cobalt_event_inquire(cobalt_event_t *event, unsigned long *bits_r)
{
	struct cobalt_event_data *datp = get_event_data(event);

	/*
	 * We don't guarantee clean readings, this service is
	 * primarily for debug purposes when the caller won't bet the
	 * house on the values returned.
	 */
	__sync_synchronize();
	*bits_r = datp->value;

	return datp->nwaiters;
}
Example #29
static void sig_intr_handler(int sig)
{
#if defined( __GNUC__ )
  __sync_synchronize(); 
#endif /* defined( __GNUC__ ) */
  int fd = (int)sig_intr_pipe;
  if( 0 < fd ){
    char b[1] = {0};
    if( sizeof( b ) != write( fd , b , sizeof( b ) )){
      abort();
    }
  }
  return;
}
Example #30
int SRPCStreamer::streamEnd(std::string *ErrMsg) {
  Q.SetDone();
  int err = pthread_join(CompileThread, NULL);
  __sync_synchronize();
  if (Error) {
    if (ErrMsg)
      *ErrMsg = std::string("PNaCl Translator Error: " + ErrorMessage);
    return 1;
  } else if (err) {
    if (ErrMsg) *ErrMsg = std::string(strerror(errno));
    return err;
  }
  return 0;
}