Example #1
void *thread_func(void *arg) {
  int i = 0;
  long long prev = 0;
  long long sum = 0;

  for (i = 0; i < num_iterations; i++) {
    switch (opt_sync) {
      case 'c':
        do {
          prev = counter;
          sum = prev + 1;
          if (opt_yield) {
            pthread_yield();
          }
        } while (__sync_val_compare_and_swap(&counter, prev, sum) != prev);
        break;
      case 'm':
        pthread_mutex_lock(&add_mutex);
        add(&counter, 1);
        pthread_mutex_unlock(&add_mutex);
        break;
      case 's':
        while (__sync_lock_test_and_set(&add_spin, 1));
        add(&counter, 1);
        __sync_lock_release(&add_spin);
        break;
      default:
        add(&counter, 1);
        break;
    }
  }

  for (i = 0; i < num_iterations; i++) {
    switch (opt_sync) {
      case 'c':
        do {
          prev = counter;
          sum = prev - 1;
          if (opt_yield) {
            pthread_yield();
          }
        } while (__sync_val_compare_and_swap(&counter, prev, sum) != prev);
        break;
      case 'm':
        pthread_mutex_lock(&add_mutex);
        add(&counter, -1);
        pthread_mutex_unlock(&add_mutex);
        break;
      case 's':
        while (__sync_lock_test_and_set(&add_spin, 1));
        add(&counter, -1);
        __sync_lock_release(&add_spin);
        break;
      default:
        add(&counter, -1);
        break;
    }
  }
  return NULL;
}
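
Taken together, the branches above show three interchangeable ways to guard the shared counter: compare-and-swap, a pthread mutex, and a test-and-set spin lock. For context, here is a minimal, self-contained sketch of just the spin-lock variant; the counter, add_spin, and add names are assumptions mirroring the snippet, not the original program:

#include <pthread.h>
#include <stdio.h>

static long long counter = 0;
static int add_spin = 0;               /* 0 = unlocked, 1 = locked */

static void add(long long *value, long long delta) { *value += delta; }

static void *worker(void *arg) {
    (void)arg;
    for (int i = 0; i < 100000; i++) {
        while (__sync_lock_test_and_set(&add_spin, 1))
            ;                          /* spin until we win the 0 -> 1 swap */
        add(&counter, 1);
        __sync_lock_release(&add_spin);
    }
    return NULL;
}

int main(void) {
    pthread_t t[4];
    for (int i = 0; i < 4; i++) pthread_create(&t[i], NULL, worker, NULL);
    for (int i = 0; i < 4; i++) pthread_join(t[i], NULL);
    printf("%lld\n", counter);         /* 400000 every run with the lock */
    return 0;
}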
Example #2
  inline void writelock(request *I) {
    I->lockclass = QUEUED_RW_LOCK_REQUEST_WRITE;
    I->next = NULL;
    I->s.stateu = 0;
    I->s.state.blocked = true;
    I->s.state.successor_class = QUEUED_RW_LOCK_REQUEST_NONE;
    __sync_synchronize();
    request* predecessor = __sync_lock_test_and_set(&tail, I);

    if (predecessor == NULL) {
      next_writer = I;
      __sync_synchronize();
      if (reader_count.value == 0) {
        if (__sync_lock_test_and_set(&next_writer, (request*)NULL) == I) {
          I->s.state.blocked = false;
        }
      }
    }
    else {
      predecessor->s.state.successor_class = QUEUED_RW_LOCK_REQUEST_WRITE;
      __sync_synchronize();
      predecessor->next = I;
    }
    // Spin (yielding) until the predecessor clears I->blocked.
    volatile state_union& is = I->s;
    while (is.state.blocked) sched_yield();
    assert(reader_count.value == 0);
  }
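
Despite its name, __sync_lock_test_and_set performs a full atomic exchange on targets that support one, and that is what the append above relies on: swapping I into tail returns the previous tail in a single indivisible step. A stripped-down sketch of the same append idiom (the node type is an assumption, not the lock's real request type):

#include <stddef.h>

struct node { struct node *next; };
static struct node *tail = NULL;

/* Append n and return its predecessor, or NULL if the queue was empty. */
static struct node *enqueue(struct node *n) {
    n->next = NULL;
    __sync_synchronize();                       /* publish n's fields first */
    return __sync_lock_test_and_set(&tail, n);  /* atomic exchange of tail */
}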
Example #3
File: sync-1.c Project: 0day-ci/gcc
void test_lock (void)
{
  sc = __sync_lock_test_and_set (&sc, 1);
  uc = __sync_lock_test_and_set (&uc, 1);
  ss = __sync_lock_test_and_set (&ss, 1);
  us = __sync_lock_test_and_set (&us, 1);
  si = __sync_lock_test_and_set (&si, 1);
  ui = __sync_lock_test_and_set (&ui, 1);
  sl = __sync_lock_test_and_set (&sl, 1);
  ul = __sync_lock_test_and_set (&ul, 1);
  sll = __sync_lock_test_and_set (&sll, 1);
  ull = __sync_lock_test_and_set (&ull, 1);

  __sync_synchronize ();

  __sync_lock_release (&sc);
  __sync_lock_release (&uc);
  __sync_lock_release (&ss);
  __sync_lock_release (&us);
  __sync_lock_release (&si);
  __sync_lock_release (&ui);
  __sync_lock_release (&sl);
  __sync_lock_release (&ul);
  __sync_lock_release (&sll);
  __sync_lock_release (&ull);
}
Example #4
int flowdrv_start_offline_device(const char *dev)
//Start offline device
//dev = offline device that should be used
{
  struct mapiipcbuf qbuf;

  pthread_once(&mapi_is_initialized, (void*)mapi_init);

  if (dev == NULL) {
    printf("ERROR: NULL device in flowdrv_start_offline_device\n");
    return -1;
  }
  while(__sync_lock_test_and_set(mapi_lock,1));
  if (((*get_numflows)() == 0) && ((*get_totalflows)() > 0) && *minit) { // socket has been closed, re-create it
    if (mapiipc_client_init()<0) {
      *mapi_lock = 0;
      *local_err = MCOM_INIT_SOCKET_ERROR;
      return -1;
    }
  }
  *mapi_lock = 0;

  qbuf.mtype = 1;
  qbuf.cmd = START_OFFLINE_DEVICE;
  qbuf.fd = getpid();
  qbuf.pid = getpid();

  strncpy((char *) qbuf.data, dev, DATA_SIZE);

  qbuf.fid = 0;
  while(__sync_lock_test_and_set(mapi_lock,1));
  if (mapiipc_write((struct mapiipcbuf*)&qbuf) < 0)
  {
    *mapi_lock = 0;
    *local_err = MCOM_SOCKET_ERROR;
    return -1;
  }
  if (mapiipc_read((struct mapiipcbuf*)&qbuf) < 0)
  {
    *mapi_lock = 0;
    *local_err = MCOM_SOCKET_ERROR;
    return -1;
  }
  *mapi_lock = 0;
  switch(qbuf.cmd)
  {
    case START_OFFLINE_DEVICE_ACK:
      return 0;
    case ERROR_ACK:
      *local_err = qbuf.remote_errorcode;
      return -1;
    default:
      *local_err = MCOM_UNKNOWN_ERROR;
      return -1;
  }
}
Example #5
int flowdrv_delete_offline_device(char *dev)
// Delete offline device
// dev = offline device that should be deleted
{
  struct mapiipcbuf qbuf;

  pthread_once(&mapi_is_initialized, (void*)mapi_init);

  if (dev == NULL){
    printf("ERROR: NULL device in flowdrv_delete_offline_device\n");
    return -1;
  }

  qbuf.mtype = 1;
  qbuf.cmd = DELETE_OFFLINE_DEVICE;
  qbuf.fd = getpid();
  qbuf.pid = getpid();

  strncpy((char *) qbuf.data, dev, DATA_SIZE);

  free(dev);
  qbuf.fid = 0;
  while(__sync_lock_test_and_set(mapi_lock,1));
  if (mapiipc_write((struct mapiipcbuf*)&qbuf) < 0)
  {
    *mapi_lock = 0;
    *local_err = MCOM_SOCKET_ERROR;
    return -1;
  }
  if (mapiipc_read((struct mapiipcbuf*)&qbuf) < 0)
  {
    *mapi_lock = 0;
    *local_err = MCOM_SOCKET_ERROR;
    return -1;
  }

  *mapi_lock = 0;
  switch(qbuf.cmd)
  {
    case DELETE_OFFLINE_DEVICE_ACK:
      while(__sync_lock_test_and_set(mapi_lock,1));
      if(((*get_numflows)() == 0) && --offline_devices == 0)
        mapiipc_client_close();
      *mapi_lock = 0;
      return 0;
    case ERROR_ACK:
      *local_err = qbuf.remote_errorcode;
      return -1;
    default:
      *local_err = MCOM_UNKNOWN_ERROR;
      return -1;
  }
}
Example #6
void test_lock (void)
{
  sc = __sync_lock_test_and_set (&sc, -1);
  uc = __sync_lock_test_and_set (&uc, -1);
  ss = __sync_lock_test_and_set (&ss, -1);
  us = __sync_lock_test_and_set (&us, -1);
  si = __sync_lock_test_and_set (&si, -1);
  ui = __sync_lock_test_and_set (&ui, -1);
  sl = __sync_lock_test_and_set (&sl, -1);
  ul = __sync_lock_test_and_set (&ul, -1);
  sll = __sync_lock_test_and_set (&sll, -1);
  ull = __sync_lock_test_and_set (&ull, -1);
}
Example #7
/*
 * Propagate reachability to the start or the end from mcp1 to mcp2.
 * When any thread can see both the start and the end, the maze has
 * been solved, so set the "done" flag.
 *
 * This vaguely resembles a link-state routing algorithm.
 */
void maze_solve_propagate(struct maze_child *mcp1, struct maze_child *mcp2)
{
	struct maze_child_shared *mcsp = mymcp->mcsp;

	if (__sync_fetch_and_add(&mcp1->see_start, 0)) {
		(void)__sync_lock_test_and_set(&mcp2->see_start, 1);
		if (__sync_fetch_and_add(&mcp2->see_end, 0))
			ACCESS_ONCE(mcsp->done) = 1;
	}
	if (__sync_fetch_and_add(&mcp1->see_end, 0)) {
		(void)__sync_lock_test_and_set(&mcp2->see_end, 1);
		if (__sync_fetch_and_add(&mcp2->see_start, 0))
			ACCESS_ONCE(mcsp->done) = 1;
	}
}
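
The __sync_fetch_and_add(&x, 0) calls above add zero: this is a common pre-C11 idiom for reading a value atomically with a full memory barrier, which a plain load does not guarantee. The idiom in isolation, as a sketch:

/* Pre-C11 atomic read: fetch-and-add of zero returns the current
 * value with full-barrier semantics and without modifying it. */
static inline int atomic_read_int(int *p)
{
	return __sync_fetch_and_add(p, 0);
}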
Example #8
int csoundWriteCircularBuffer(CSOUND *csound, void *p, const void *in, int items)
{
    IGN(csound);
    if (p == NULL) return 0;
    int remaining;
    int itemswrite, numelem = ((circular_buffer *)p)->numelem;
    int elemsize = ((circular_buffer *)p)->elemsize;
    int i=0, wp = ((circular_buffer *)p)->wp;
    char *buffer = ((circular_buffer *)p)->buffer;
    if ((remaining = checkspace(p, 1)) == 0) {
        return 0;
    }
    itemswrite = items > remaining ? remaining : items;
    for(i=0; i < itemswrite; i++){
        memcpy(&(buffer[elemsize * wp++]),
                ((char *) in) + (i * elemsize),  elemsize);
        if(wp == numelem) wp = 0;
    }
#if defined(MSVC)
    InterlockedExchange(&((circular_buffer *)p)->wp, wp);
#elif defined(HAVE_ATOMIC_BUILTIN)
    __sync_lock_test_and_set(&((circular_buffer *)p)->wp, wp);
#else
    ((circular_buffer *)p)->wp = wp;
#endif
    return itemswrite;
}
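
Note that the return value of __sync_lock_test_and_set is discarded here; the builtin serves purely as an atomic store that publishes the new write pointer. On GCC 4.7 and later the same publish can be expressed with the __atomic builtins, which state the intended memory order directly; a sketch under that assumption:

/* GCC 4.7+ equivalent of the publish above: a release store guarantees
 * the copied data is visible before the updated index is observed. */
static inline void publish_index(int *slot, int value)
{
    __atomic_store_n(slot, value, __ATOMIC_RELEASE);
}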
Example #9
// The default implementation of halide_acquire_cl_context uses the global
// pointers above, and serializes access with a spin lock.
// Overriding implementations of acquire/release must implement the following
// behavior:
// - halide_acquire_cl_context should always store a valid context/command
//   queue in ctx/q, or return an error code.
// - A call to halide_acquire_cl_context is followed by a matching call to
//   halide_release_cl_context. halide_acquire_cl_context should block while a
//   previous call (if any) has not yet been released via halide_release_cl_context.
WEAK int halide_acquire_cl_context(void *user_context, cl_context *ctx, cl_command_queue *q) {
    // TODO: Should we use a more "assertive" assert? These asserts do
    // not block execution on failure.
    halide_assert(user_context, ctx != NULL);
    halide_assert(user_context, q != NULL);

    // If the context pointers aren't hooked up, use our weak globals.
    if (cl_ctx_ptr == NULL) {
        cl_ctx_ptr = &weak_cl_ctx;
        cl_q_ptr = &weak_cl_q;
        cl_lock_ptr = &weak_cl_lock;
    }

    halide_assert(user_context, cl_lock_ptr != NULL);
    while (__sync_lock_test_and_set(cl_lock_ptr, 1)) { }

    // If the context has not been initialized, initialize it now.
    halide_assert(user_context, cl_ctx_ptr != NULL);
    halide_assert(user_context, cl_q_ptr != NULL);
    if (!(*cl_ctx_ptr)) {
        cl_int error = create_context(user_context, cl_ctx_ptr, cl_q_ptr);
        if (error != CL_SUCCESS) {
            __sync_lock_release(cl_lock_ptr);
            return error;
        }
    }

    *ctx = *cl_ctx_ptr;
    *q = *cl_q_ptr;
    return 0;
}
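
The contract in the comment pairs every acquire with a release, but the release half is not shown. A plausible minimal sketch, not Halide's actual implementation, would simply drop the spin lock taken above:

// Hypothetical counterpart sketch (not the real Halide code): the release
// just clears the spin lock so the next acquirer can proceed.
WEAK int halide_release_cl_context(void *user_context) {
    __sync_lock_release(cl_lock_ptr);   // release-store 0
    return 0;
}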
Example #10
static inline void
lock(struct silly_timer *timer)
{
	while (__sync_lock_test_and_set(&timer->lock, 1))
		;

}
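
The matching unlock for a test-and-set spin lock like this is a single __sync_lock_release; a sketch, assuming silly_timer needs no other bookkeeping:

static inline void
unlock(struct silly_timer *timer)
{
	/* Release-store 0: writes inside the critical section become
	 * visible before the lock is seen as free. */
	__sync_lock_release(&timer->lock);
}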
Example #11
int csoundReadCircularBuffer(CSOUND *csound, void *p, void *out, int items)
{
    IGN(csound);
    if (p == NULL) return 0;
    {
      int remaining;
      int itemsread, numelem = ((circular_buffer *)p)->numelem;
      int elemsize = ((circular_buffer *)p)->elemsize;
      int i=0, rp = ((circular_buffer *)p)->rp;
      char *buffer = ((circular_buffer *)p)->buffer;
      if ((remaining = checkspace(p, 0)) == 0) {
        return 0;
      }
      itemsread = items > remaining ? remaining : items;
      for (i=0; i < itemsread; i++){
        memcpy((char *) out + (i * elemsize),
               &(buffer[elemsize * rp++]),  elemsize);
        if (rp == numelem) {
          rp = 0;
        }
      }
#if defined(MSVC)
      InterlockedExchange(&((circular_buffer *)p)->rp, rp);
#elif defined(HAVE_ATOMIC_BUILTIN)
      __sync_lock_test_and_set(&((circular_buffer *)p)->rp,rp);
#else
      ((circular_buffer *)p)->rp = rp;
#endif
      return itemsread;
    }
}
Example #12
sLONG8 VInterlocked::Exchange(sLONG8* inValue, sLONG8 inNewValue)
{
	sLONG8 val;

#if VERSIONWIN

	//jmo - Ugly workaround for Win XP's lack of InterlockedExchange64:
	//      we use the VoidPtr version to achieve the same result on 64-bit platforms.
	
	val=(sLONG8)VInterlocked::ExchangeVoidPtr((void**)inValue, (void*)inNewValue);

#elif VERSIONMAC

	do {
		val = *inValue;
		// one must loop if a swap occurred between reading the old val and a failed CAS
		// because if we return the old val, the caller may assume that the CAS has succeeded.
	} while(!::OSAtomicCompareAndSwap64Barrier((int64_t) val, (int64_t) inNewValue, (int64_t*) inValue));

#elif VERSION_LINUX

    val=__sync_lock_test_and_set(inValue, inNewValue);

#endif

	return val;

}
Example #13
struct skynet_module *
skynet_module_query(const char * name) {
    struct skynet_module * result = _query(name);
    if (result)
        return result;

    while(__sync_lock_test_and_set(&M->lock,1)) {}

    result = _query(name); // double check

    if (result == NULL && M->count < MAX_MODULE_TYPE) {
        int index = M->count;
        void * dl = _try_open(M,name);
        if (dl) {
            M->m[index].name = name;
            M->m[index].module = dl;
            if (_open_sym(&M->m[index])== 0) {
                M->m[index].name = strdup(name);
                M->count ++;
                result = &M->m[index];
            }
        }
    }

    __sync_lock_release(&M->lock);

    return result;
}
Example #14
/**
 * Prints character ch at the current location of the cursor.
 *
 *  If the character is a newline ('\n'), the cursor is
 *  moved to the next line (scrolling if necessary). If
 *  the character is a carriage return ('\r'), the cursor
 *  is immediately reset to the beginning of the current
 *  line, causing any future output to overwrite any existing
 *  output on the line.
 *  A tab ('\t') advances the cursor four columns.
 *
 * @param ch The character to print.
 * @return The character that was printed.
 */
int console_putc(const int ch) {
#if defined (ARM_ALLOW_MULTI_CORE)
	while (__sync_lock_test_and_set(&lock, 1) == 1);
#endif

	if (ch == (int)'\n') {
		newline();
	} else if (ch == (int)'\r') {
		current_x = 0;
	} else if (ch == (int)'\t') {
		current_x += 4;
	} else {
		draw_char(ch, current_x * CHAR_W, current_y * CHAR_H, cur_fore, cur_back);
		current_x++;
		if (current_x == WIDTH / CHAR_W) {
			newline();
		}
	}

#if defined (ARM_ALLOW_MULTI_CORE)
	__sync_lock_release(&lock);
#endif

	return ch;
}
Example #15
/* 
 * Throughput measurement thread. Sleeps for a while then wakes up and measures
 * the throughput, resetting counters in the process.
 */
void
*measure_thread(void *arg)
{
	int c, i;
	unsigned long long throughput;
	for (c = 1; ; c++) {
		sleep(1);
		throughput = 0;
		for (i = 0; i < ncounters; i++) {
			if (counters[i]->c) {
#if defined(SYNC_FETCH_ADD)
				/* Atomically read and reset the counter. */
				throughput +=
					__sync_lock_test_and_set(&counters[i]->c, 0);
#elif defined(PTHREAD_SPIN)
				pthread_spin_lock(&counters[i]->spin);
				throughput += counters[i]->c;
				counters[i]->c = 0;
				pthread_spin_unlock(&counters[i]->spin);
#elif defined(PTHREAD_MUTEX)
				pthread_mutex_lock(&counters[i]->mutex);
				throughput += counters[i]->c;
				counters[i]->c = 0;
				pthread_mutex_unlock(&counters[i]->mutex);
#endif
			}
		}
		printf("%llu\n", throughput);
		fflush(stdout);
	}
}
Example #16
int timer_dispatch()
{
        int curr = _getms();
        struct timer_node *t;
        struct timer_node *last;

        while(__sync_lock_test_and_set(&TIMER->lock, 1))
                ;

        t = TIMER->list;
        last = TIMER->list;
        while (t) {
                if (t->expire <= curr) {
                        struct timer_node *tmp;
                        struct event_handler e;
                        e.ud = t->ud;
                        e.cb = t->cb;
                        event_add_handler(&e);
                        if (last == TIMER->list)
                                TIMER->list = t->next;
                        else
                                last->next = t->next;
                        tmp = t;
                        t = t->next;
                        free(tmp);
                } else {
                        last = t;
                        t = t->next;
                }
        }

        __sync_lock_release(&TIMER->lock);

        return 0;
}
Example #17
// decreases numflows and returns its new value
int decr_numflows() {
  int n;
  while(__sync_lock_test_and_set(&numflows_lock,1));
  n = --numflows;
  numflows_lock = 0;
  return n;
}
Example #18
static int take_lock(lock_t *lock)
{
   unsigned long one = 1, result = 0;
   unsigned long count = 0;

   if (lock->id != -1 && *lock->lock == 1 && *lock->held_by == lock->id) {
      lock->ref_count++;
      return 0;
   }

   do {
      while (*lock->lock == 1) {
         count++;
         if (count > 100000) {
            usleep(10000); //.01 sec
         }
      }
      result = __sync_lock_test_and_set(lock->lock, one);
   } while (result == 1);

   if (lock->id != -1) {
      *lock->held_by = lock->id;
      lock->ref_count = 1;
   }

   return 0;
}
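
No release function appears in this snippet, but the held_by/ref_count bookkeeping implies one. A hypothetical counterpart, sketched from the fields used above (the real project's logic may differ, e.g. in how it handles recursive holds):

// Hypothetical release_lock() sketch inferred from take_lock()'s fields;
// not the project's actual code.
static int release_lock(lock_t *lock)
{
   if (lock->id != -1) {
      if (--lock->ref_count > 0)        // still held recursively
         return 0;
      *lock->held_by = -1;
   }
   __sync_lock_release(lock->lock);     // release-store 0 frees the lock
   return 0;
}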
Example #19
static void cmd_get_next_flow_info(int fd, int pid, int sock) {
	flist_node_t *f;
	int d = 0;
	struct mapiipcbuf buf;

	while(__sync_lock_test_and_set(&flowlist_lock,1));

	f=flist_head(flowlist);
	//Loop through flows to find the next one
	while (f!=NULL) {
		if (flist_id(f)>fd) {
			if (d==0)
				d=flist_id(f);
			else if (flist_id(f)<d)
				d=flist_id(f);
		}
		f=flist_next(f);
	}

	flowlist_lock = 0;

	if (d != 0)
		cmd_get_flow_info(d, pid, sock);
	else {
		//Send back error message
		buf.mtype = pid;
		buf.fd = fd;
		buf.cmd = GET_FLOW_INFO_NACK;
		mapiipc_daemon_write(&buf, sock);
	}
}
Example #20
void * acl_atomic_xchg(ACL_ATOMIC *self, void *value)
{
#ifndef HAS_ATOMIC
	void *old;

	acl_pthread_mutex_lock(&self->lock);
	old = self->value;
	self->value = value;
	acl_pthread_mutex_unlock(&self->lock);

	return old;
#elif	defined(ACL_WINDOWS)
	return InterlockedExchangePointer((volatile PVOID*)&self->value, value);
#elif	defined(ACL_LINUX)
# if defined(__GNUC__) && (__GNUC__ >= 4)
	return __sync_lock_test_and_set(&self->value, value);
# else
	(void) self;
	(void) value;
	acl_msg_error("%s(%d), %s: not supported!",
		 __FILE__, __LINE__, __FUNCTION__);
	return NULL;
# endif
#endif
}
Example #21
void counter_clear(struct stats_counter *ctr)
{
    if (ctr != NULL)
    {
        __sync_lock_test_and_set(&ctr->ctr_value.val64,0ll);
    }
}
Example #22
/* Atomic operation */
int _xchg32(ATOM32* addr,int value){
#if defined(WIN32) || defined(_WIN64)
	return InterlockedExchange(addr,value);
#else
	return __sync_lock_test_and_set(addr,value);
#endif
}
Example #23
void counter_set(struct stats_counter *ctr, long long val)
{
    if (ctr != NULL)
    {
        __sync_lock_test_and_set(&ctr->ctr_value.val64,val);
    }
}
Example #24
int c_net_io_notifier::popup()
{
    if (!m_init_)
    {
        return -1;
    }

    if (__sync_lock_test_and_set(m_p_mutex_, 1))
    {
        return 0;
    }

    for (;;)
    {
        int ret = write(m_wfd_, "w", 1);

        if (ret < 0 && EINTR == errno)
        {
            continue;
        }

        break;
    }

    return 0;
}
Example #25
/* Lock the futex; no syscall is needed if the futex
 * was free.
 */
PUBLIC int futex_lock(futex_t *f)
{
    int ret, c;

    ret = OK;
    if((c = __sync_val_compare_and_swap(&(f->val), 0, 1)) != 0) {
        if(c != 2)
            c = __sync_lock_test_and_set(&(f->val), 2);
        while(c != 0) {
            ret = futex_wait(f);
            if(ret < 0) break;
            c = __sync_lock_test_and_set(&(f->val), 2);
        }
    }
    return ret;
}
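
The values 0/1/2 follow the classic three-state futex mutex (0 = free, 1 = locked, 2 = locked with waiters) described in Drepper's "Futexes Are Tricky". The matching unlock under that design, sketched here as an assumption rather than this project's code, and presuming a futex_wake() helper exists alongside futex_wait():

/* Hypothetical unlock for the 0/1/2 futex mutex above. */
PUBLIC int futex_unlock(futex_t *f)
{
    /* If the old value was 2, at least one thread may be sleeping. */
    if(__sync_fetch_and_sub(&(f->val), 1) != 1) {
        f->val = 0;
        return futex_wake(f);    /* wake one waiter */
    }
    return OK;
}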
Example #26
void* VInterlocked::ExchangeVoidPtr(void** inValue, void* inNewValue)
{
#if VERSIONWIN

	#pragma warning (push)
	#pragma warning (disable: 4311)
	#pragma warning (disable: 4312)

	void* val = InterlockedExchangePointer( inValue, inNewValue);
//	void* val = (void*) ::InterlockedExchange((long*) inValue, (long)inNewValue);

	#pragma warning (pop)

#elif VERSIONMAC

	void* val;
	do {
		val = *inValue;
		// one must loop if a swap occurred between reading the old val and a failed CAS
		// because if we return the old val, the caller may assume that the CAS has succeeded.
	} while(!::OSAtomicCompareAndSwapPtrBarrier( val, inNewValue, inValue));

#elif VERSION_LINUX

    void* val=__sync_lock_test_and_set(inValue, inNewValue);

#endif
	return val;
}
Example #27
// increases totalflows and returns its new value
int incr_totalflows() {
  int n;
  while(__sync_lock_test_and_set(&numflows_lock,1));
  n = ++totalflows;
  numflows_lock = 0;
  return n;
}
Example #28
static inline void
lock(struct silly_queue *q)
{
	while (__sync_lock_test_and_set(&q->lock, 1))
		;
	return ;
}
Example #29
// The default implementation of halide_acquire_cuda_context uses the global
// pointers above, and serializes access with a spin lock.
// Overriding implementations of acquire/release must implement the following
// behavior:
// - halide_acquire_cuda_context should always store a valid context
//   in ctx, or return an error code.
// - A call to halide_acquire_cuda_context is followed by a matching call to
//   halide_release_cuda_context. halide_acquire_cuda_context should block while a
//   previous call (if any) has not yet been released via halide_release_cuda_context.
WEAK int halide_acquire_cuda_context(void *user_context, CUcontext *ctx) {
    // TODO: Should we use a more "assertive" assert? These asserts do
    // not block execution on failure.
    halide_assert(user_context, ctx != NULL);

    if (cuda_ctx_ptr == NULL) {
        cuda_ctx_ptr = &weak_cuda_ctx;
        cuda_lock_ptr = &weak_cuda_lock;
    }

    halide_assert(user_context, cuda_lock_ptr != NULL);
    while (__sync_lock_test_and_set(cuda_lock_ptr, 1)) { }

    // If the context has not been initialized, initialize it now.
    halide_assert(user_context, cuda_ctx_ptr != NULL);
    if (*cuda_ctx_ptr == NULL) {
        CUresult error = create_context(user_context, cuda_ctx_ptr);
        if (error != CUDA_SUCCESS) {
            __sync_lock_release(cuda_lock_ptr);
            return error;
        }
    }

    *ctx = *cuda_ctx_ptr;
    return 0;
}
Example #30
void mutex_t::lock() {
	int oval = __sync_val_compare_and_swap(&val, 0, 1);

	if (oval) {
		/* Contended: 1 = locked, 2 = locked with waiters. */
		assert(oval == 1 || oval == 2);
		thr::tstate_t *tstate = thr::tstate;

		thr::state_t old_state;

		if (tstate)
			old_state = tstate->set(thr::locked);

		/* Mark the lock contended and sleep until it is handed over. */
		while (__sync_lock_test_and_set(&val, 2) != 0)
			futex_wait(&val, 2);

		if (tstate)
			tstate->set(old_state);
	}

	tid = thr::id;
}