/**
 * Unlock this spin lock.
 *
 * Clears `exclusion` with release-store semantics via the GCC
 * `__sync_lock_release` builtin.  This is a plain atomic store, not a
 * pthread/boost call: it cannot fail and cannot throw, so no
 * exception is documented here.  (The previous claim that this could
 * throw boost::thread_resource_error was incorrect.)
 */
void unlock() { __sync_lock_release(&exclusion); }
/*
 * Release a spinlock previously acquired with the matching lock call.
 * The lock word is cleared with release-store semantics.  Always
 * succeeds; the 0 result mirrors the pthread-style return convention.
 */
static inline int ls_xxx_spin_unlock(ls_spinlock_t *p)
{
    __sync_lock_release(p);
    return 0;
}
/*
 * unlock - release the queue's test-and-set spinlock.
 * @q: queue whose ->lock word was taken with __sync_lock_test_and_set().
 *
 * Stores 0 into q->lock with release semantics so earlier writes to the
 * queue become visible before the lock appears free.  (The redundant
 * bare "return ;" at the end of a void function was removed.)
 */
static inline void unlock(struct silly_queue *q)
{
    __sync_lock_release(&q->lock);
}
/* Drop the per-cell spinlock: release-store 0 into c->lock. */
static inline void cell_unlock(struct cell *c)
{
    __sync_lock_release(&c->lock);
}
/*
 * Release a spinlock word acquired via __sync_lock_test_and_set().
 * Writes 0 to *lock with release ordering; safe to call only by the
 * current lock holder.
 */
static void spin_unlock(int volatile * lock)
{
    __sync_lock_release(lock);
}
/* Unlock using the GCC builtin: release-store 0 into *lock_var. */
static void gcc_builtin_unlock(int *lock_var)
{
    __sync_lock_release(lock_var);
}
/*
 * spinlock_unlock - release the spinlock embedded in @lock.
 *
 * __sync_lock_release() takes only the pointer operand; the stray
 * second argument (0) in the original call was silently ignored by the
 * builtin's variadic signature and has been removed.
 */
void spinlock_unlock(spinlock_t *lock)
{
    __sync_lock_release(&lock->lock);
}
// Store `val` into `mem`, then issue a release barrier.
//
// NOTE(review): the "barrier" is improvised — __sync_lock_release() on a
// dummy local forces a release fence after the store to `mem`, but the
// store itself is only a volatile write, not an atomic RMW.  Presumably T
// is word-sized so the store is naturally atomic on the target platform —
// TODO confirm; otherwise readers can observe a torn value.
// `barrier` is deliberately uninitialized: only its address matters.
void atomicStore(volatile T &mem, typename Identity_<T>::type val) throw() { mem = val; volatile int barrier; __sync_lock_release(&barrier); }
void* thread_func(void* argc) { int i; //insert //printf("%d\n", *(int*)argc); for (i = *(int*)argc; i < operations; i += num_thread) { if (sync_s == 'm') { pthread_mutex_lock(&lock); SortedList_insert(&list[hash_key(element[i].key)], &element[i]); pthread_mutex_unlock(&lock); } else if (sync_s=='s') { while (__sync_lock_test_and_set(&locker, 1)); SortedList_insert(&list[hash_key(element[i].key)], &element[i]); __sync_lock_release(&locker); } else { SortedList_insert(&list[hash_key(element[i].key)], &element[i]); } } int b; //get the length if (sync_s=='m') { pthread_mutex_lock(&lock); for (b=0; b<num_list; b++) { SortedList_length(&list[b]); //printf("%d\n",SortedList_length(&list[b]) ); } pthread_mutex_unlock(&lock); } else if (sync_s=='s') { while (__sync_lock_test_and_set(&locker, 1)); for (b=0; b<num_list; b++) { SortedList_length(&list[b]); } __sync_lock_release(&locker); } else { for (b=0; b<num_list; b++) { SortedList_length(&list[b]); } } //lookup and delete. SortedListElement_t* node_deleted; for ( i = *(int *)argc; i < operations; i += num_thread) { if (sync_s=='m') { pthread_mutex_lock(&lock); node_deleted = SortedList_lookup(&list[hash_key(element[i].key)], element[i].key); SortedList_delete(node_deleted); pthread_mutex_unlock(&lock); } else if (sync_s=='s') { while (__sync_lock_test_and_set(&locker, 1)); node_deleted = SortedList_lookup(&list[hash_key(element[i].key)], element[i].key); SortedList_delete(node_deleted); __sync_lock_release(&locker); } else { node_deleted = SortedList_lookup(&list[hash_key(element[i].key)], element[i].key); SortedList_delete(node_deleted); } } }
/*
 * Release a byte-wide spinlock: release-store 0 into *lock.
 * Counterpart of an acquire via __sync_lock_test_and_set().
 */
void spin_unlock(uint8_t volatile * lock)
{
    __sync_lock_release(lock);
}
/*
 * Release the lock at *p, then wake any threads sleeping on its
 * address.  Order matters: the lock must appear free before waiters
 * are kicked, or they would immediately spin/sleep again.
 */
void sync_unlock(lock_t *p){
    __sync_lock_release(p);
    wakeup( (void*)p );
}
// Default-construct an empty k-mer statistic: quality product 1.0,
// zero count, empty quality data, lock word cleared.
// NOTE(review): lock_(0) already leaves the lock unlocked; the extra
// __sync_lock_release(&lock_) re-clears it with a release barrier —
// presumably to publish the freshly built object to other threads.
// TODO confirm callers rely on this fence.
KMerStat() : totalQual(1.0), count(0), qual(), lock_(0) { __sync_lock_release(&lock_); }
// Construct a k-mer statistic with an explicit count, aggregate quality
// and per-base quality buffer; the lock word starts unlocked.
// NOTE(review): as in the default constructor, the trailing
// __sync_lock_release(&lock_) is a belt-and-braces re-clear of the
// already-zeroed lock with a release barrier — verify whether the
// fence is actually required by callers.
KMerStat(uint32_t cnt, float kquality, const unsigned char *quality) : totalQual(kquality), count(cnt), qual(quality), lock_(0) { __sync_lock_release(&lock_); }
/* Release the group's spinlock word (release-store of 0). */
inline static void _unlock(struct group *g)
{
    __sync_lock_release(&g->lock);
}
/// Unlock: clears v_ with a release-store (GCC builtin), making prior
/// writes in the critical section visible before the lock reads free.
void unlock()
{
    __sync_lock_release(&v_);
}
/* Allocate SIZE bytes for use by the backtrace library.  First tries
   the per-state free list (taking state->lock_alloc when threaded, or
   skipping the free list entirely if the lock is contended), then
   falls back to mmap'ing a fresh page-rounded region.  Returns NULL on
   failure after reporting through ERROR_CALLBACK (if non-NULL).
   Results are rounded to 8-byte alignment; leftover space is pushed
   back onto the free list.  */

void *
backtrace_alloc (struct backtrace_state *state, size_t size,
		 backtrace_error_callback error_callback, void *data)
{
  void *ret;
  int locked;
  struct backtrace_freelist_struct **pp;
  size_t pagesize;
  size_t asksize;
  void *page;

  ret = NULL;

  /* If we can acquire the lock, then see if there is space on the
     free list.  If we can't acquire the lock, drop straight into
     using mmap.  __sync_lock_test_and_set returns the old state of
     the lock, so we have acquired it if it returns 0.  */

  if (!state->threaded)
    locked = 1;
  else
    locked = __sync_lock_test_and_set (&state->lock_alloc, 1) == 0;

  if (locked)
    {
      /* First-fit scan of the free list.  */
      for (pp = &state->freelist; *pp != NULL; pp = &(*pp)->next)
	{
	  if ((*pp)->size >= size)
	    {
	      struct backtrace_freelist_struct *p;

	      p = *pp;
	      *pp = p->next;

	      /* Round for alignment; we assume that no type we care about
		 is more than 8 bytes.  */
	      size = (size + 7) & ~ (size_t) 7;
	      if (size < p->size)
		/* Return the unused tail of the chunk to the free list
		   (we still hold the lock here).  */
		backtrace_free_locked (state, (char *) p + size,
				       p->size - size);

	      ret = (void *) p;

	      break;
	    }
	}

      if (state->threaded)
	__sync_lock_release (&state->lock_alloc);
    }

  if (ret == NULL)
    {
      /* Allocate a new page.  */

      pagesize = getpagesize ();
      asksize = (size + pagesize - 1) & ~ (pagesize - 1);
      page = mmap (NULL, asksize, PROT_READ | PROT_WRITE,
		   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (page == MAP_FAILED)
	{
	  if (error_callback)
	    error_callback (data, "mmap", errno);
	}
      else
	{
	  size = (size + 7) & ~ (size_t) 7;
	  if (size < asksize)
	    /* Hand the rest of the page back to the allocator.  */
	    backtrace_free (state, (char *) page + size, asksize - size,
			    error_callback, data);

	  ret = page;
	}
    }

  return ret;
}
/*
 * Per-thread worker: increments the shared `counter` numIteration
 * times, then decrements it numIteration times, using whichever
 * protection the global `sync` selects:
 *   'm' - global pthread mutex, 's' - test-and-set spinlock on `lock`,
 *   'c' - compare-and-swap retry loop, default - unprotected add().
 * num_iteration is the iteration count smuggled through the void*
 * argument as a long.  Exits via pthread_exit(NULL).
 * NOTE(review): in the 'c' case the plain read `expected = counter` is
 * intentionally racy; the CAS retries until the swap wins.
 */
void *thread_func(void *num_iteration) {
    long numIteration;
    numIteration = (long)num_iteration;
    long long expected;
    //add 1 for n times
    int i;
    for (i=0; i<numIteration; i++) {
        /* switch function to choose which protection to use */
        switch (sync) {
            case 'm':
                pthread_mutex_lock(&count_mutex);
                add(&counter, 1);
                pthread_mutex_unlock(&count_mutex);
                break;
            case 's':
                /* spin until the old lock value is 0 (we acquired it) */
                while (__sync_lock_test_and_set(&lock, 1)==1);
                // critical section
                add(&counter, 1);
                __sync_lock_release(&lock);
                break;
            case 'c':
                /* lock-free retry: re-read and CAS until unchanged */
                do {
                    expected = counter;
                } while (__sync_val_compare_and_swap(&counter, expected, expected+1) != expected);
                break;
            default:
                add(&counter, 1);
                break;
        }
    }
    //subtract 1 for n times
    for (i=0; i<numIteration; i++) {
        /* switch function to choose which protection to use */
        switch (sync) {
            case 'm':
                pthread_mutex_lock(&count_mutex);
                add(&counter, -1);
                pthread_mutex_unlock(&count_mutex);
                break;
            case 's':
                while (__sync_lock_test_and_set(&lock, 1)==1);
                // critical section
                add(&counter, -1);
                __sync_lock_release(&lock);
                break;
            case 'c':
                do {
                    expected = counter;
                } while (__sync_val_compare_and_swap(&counter, expected, expected-1) != expected);
                break;
            default:
                add(&counter, -1);
                break;
        }
    }
    pthread_exit(NULL);
}
/*
 * Per-thread worker: adds 1 to *arg (a long long counter shared among
 * threads) numIterations times, then subtracts 1 the same number of
 * times.  The global `sync` picks the protection scheme:
 *   'm' - global pthread mutex (lock/unlock failures abort the
 *         process), 's' - test-and-set spinlock on `lock`,
 *   'c' - compareAdd() CAS helper, default - unprotected add().
 * Returns NULL.
 */
void *threadFunction(void *arg){
    long long *counter = (long long *)arg;
    // Add and substract for the specified number of iterations
    int i;
    for(i = 0; i < numIterations; i++){
        if(sync == 'm'){
            // Lock the mutex
            int status = pthread_mutex_lock(&mutex);
            if(status){
                fprintf(stderr, "Error: An error occurred while locking\n");
                exit(EXIT_FAILURE);
            }
            add(counter, 1);
            // Unlock the mutex
            status = pthread_mutex_unlock(&mutex);
            if(status){
                fprintf(stderr, "Error, An error occurred while unlocking\n");
                exit(EXIT_FAILURE);
            }
        }
        else if(sync == 's'){
            // Test the lock. Spin while waiting for the lock
            while(__sync_lock_test_and_set(&lock, 1) == 1);
            add(counter, 1);
            // Release the lock
            __sync_lock_release(&lock);
        }
        else if(sync == 'c'){
            compareAdd(counter, 1);
        }
        else{
            add(counter, 1);
        }
    }
    for(i = 0; i < numIterations; i++){
        if(sync == 'm'){
            // Lock the mutex
            int status = pthread_mutex_lock(&mutex);
            if(status){
                fprintf(stderr, "Error: An error occurred while locking\n");
                exit(EXIT_FAILURE);
            }
            add(counter, -1);
            // Unlock the mutex
            status = pthread_mutex_unlock(&mutex);
            if(status){
                fprintf(stderr, "Error, An error occurred while unlocking\n");
                exit(EXIT_FAILURE);
            }
        }
        else if(sync == 's'){
            // Test the lock. Spin while waiting for the lock
            while(__sync_lock_test_and_set(&lock, 1) == 1);
            add(counter, -1);
            // Release the lock
            __sync_lock_release(&lock);
        }
        else if(sync == 'c'){
            compareAdd(counter, -1);
        }
        else{
            add(counter, -1);
        }
    }
    return NULL;
}
// Destructor. ~gcc_sync_fenced_block() { __sync_lock_release(&value_); }
/*
 * Unlock a TCI mutex by release-storing 0 into the lock word.
 * Always reports success (returns 0), matching the pthread-style
 * int-returning convention of the other tci_mutex_* calls.
 */
int tci_mutex_unlock(tci_mutex_t* mutex)
{
    __sync_lock_release(mutex);
    return 0;
}