/**
 * Pops the front value from the queue and places it in value.
 *
 * @param value Will have the T at the front of the queue copied into it.
 * @returns whether value was changed (if the queue had at least one item).
 */
bool pop(T &value) {
    volatile Node* formerHead = NULL;
    bool headAlreadyAdvanced = false;
    while (!headAlreadyAdvanced) {
        formerHead = mHead;
        if (formerHead == NULL) {
            // fork() function is operating on mTail.
            continue;
        }
        volatile Node* formerHeadNext = formerHead->mNext;
        volatile Node* formerTail = mTail;
        if (formerHead == mHead) {
            if (formerHead == formerTail) {
                if (formerHeadNext == NULL) {
                    value = T();
                    return false;
                }
                compare_and_swap(&mTail, formerTail, formerHeadNext);
            } else {
                value = ((Node*)formerHeadNext)->mContent; // FIXME volatile cast only allowed if mContent is primitive type of pointer size or less
                headAlreadyAdvanced = compare_and_swap(&mHead, formerHead, formerHeadNext);
            }
        }
    }
    mFreeNodePool.release((Node*)formerHead); // FIXME volatile cast only allowed if mContent is primitive type of pointer size or less
    return true;
}
int mmc_process(int flag)
{
    int ret = -1;
    if (flag) {
        if ((mmc_status == 0) && 0 == access(MMC_DEVICE_P1, F_OK)) {
            ret = mount(MMC_DEVICE_P1, MMC_PATH, "vfat", MS_MGC_VAL | MS_SYNCHRONOUS, NULL);
            if (ret == 0)
                compare_and_swap(&mmc_status, 0, 1);
            dbg_printf("mount1 ret == %d \n", ret);
        } else if ((mmc_status == 0) && 0 == access(MMC_DEVICE, F_OK)) {
            ret = mount(MMC_DEVICE, MMC_PATH, "vfat", MS_MGC_VAL | MS_SYNCHRONOUS, NULL);
            if (ret == 0)
                compare_and_swap(&mmc_status, 0, 1);
            dbg_printf("mount2 ret == %d \n", ret);
        }
    } else {
        if ((mmc_status == 1) && 0 != access(MMC_DEVICE_P1, F_OK) && 0 != access(MMC_DEVICE, F_OK)) {
            ret = umount(MMC_PATH);
            if (ret == 0)
                compare_and_swap(&mmc_status, 1, 0);
            dbg_printf("umount ret == %d \n", ret);
        }
    }
    return (ret);
}
int mythread_mutex_unlock(mythread_mutex_t *mutex)
{
    int status;
    if (mutex->lockvariable == -1)   // if the lock was already destroyed, return an error code
    {
        return 1;                    // return the error code
    }
    status = compare_and_swap(&mutex->lockvariable, 0, 1);  // change the state of the lockvariable to unlocked atomically
    while (status == 0) {
        status = compare_and_swap(&mutex->lockvariable, 0, 1);
    }
    mythread_enter_kernel();
    mutex->noofwaitingthreads -= 1;  // decrement the number of threads waiting on the mutex by one
    mutex->owner_thread = NULL;
    // Check if there are any threads that are blocked in the queue due to exceeding the threshold
    if (mutex->noofwaitingthreads == mutex->noofblockedthreads && mutex->noofwaitingthreads > 0) {
        mutex->noofblockedthreads -= 1;
        mythread_unblock(mutex->blocked_threads, 1);
    } else {
        mythread_leave_kernel();
    }
    return 0;
}
int lf_ordlist_remove(struct lf_ordlist *lst, void *value)
{
    struct lf_ordlist_node *right_node, *right_node_next, *left_node;

    for ( ; ; ) {
        right_node = search(lst, value, &left_node);
        if ((right_node == lst->tail) || lst->cmp(right_node->value, value) != 0) {
            mem_release(lst->fl, right_node);
            mem_release(lst->fl, left_node);
            return 0;
        }
        right_node_next = mem_safe_read(lst->fl, &NEXT(right_node));
        if (!IS_MARKED(right_node_next)) {
            assert(right_node != lst->tail);
            if (compare_and_swap(&NEXT(right_node),
                                 (intptr_t) right_node_next,
                                 (intptr_t) GET_MARKED(right_node_next)))
                break;
        }
        mem_release(lst->fl, right_node_next);
        mem_release(lst->fl, right_node);
        mem_release(lst->fl, left_node);
    }

    /* if the CAS succeeds, NEXT(left_node) gets our ref to
     * 'right_node_next' */
    assert(left_node != lst->tail);
    if (!compare_and_swap(&NEXT(left_node),
                          (intptr_t) right_node,
                          (intptr_t) right_node_next)) {
        mem_release(lst->fl, right_node_next);
        mem_release(lst->fl, right_node);
        mem_release(lst->fl, left_node);
        /* delete it via a search. */
        right_node = search(lst, value, &left_node);
        mem_release(lst->fl, right_node);
        mem_release(lst->fl, left_node);
    } else {
        /* safely deleted. */
        mem_release(lst->fl, right_node);  /* our ref */
        mem_release(lst->fl, right_node);  /* NEXT(left_node) ref */
        mem_release(lst->fl, left_node);
    }
    return 1;
}
int sem_wait(sem_t * sem)
{
    long oldstatus, newstatus;
    volatile pthread_t self = thread_self();
    pthread_t * th;

    while (1) {
        do {
            oldstatus = sem->sem_status;
            if ((oldstatus & 1) && (oldstatus != 1))
                newstatus = oldstatus - 2;
            else {
                newstatus = (long) self;
                self->p_nextwaiting = (pthread_t) oldstatus;
            }
        } while (! compare_and_swap(sem, oldstatus, newstatus));
        if (newstatus & 1)
            /* We got the semaphore. */
            return 0;
        /* Wait for sem_post or cancellation */
        suspend_with_cancellation(self);
        /* This is a cancellation point */
        if (self->p_canceled && self->p_cancelstate == PTHREAD_CANCEL_ENABLE) {
            /* Remove ourselves from the waiting list if we're still on it */
            /* First check if we're at the head of the list. */
            do {
                oldstatus = sem->sem_status;
                if (oldstatus != (long) self) break;
                newstatus = (long) self->p_nextwaiting;
            } while (! compare_and_swap(sem, oldstatus, newstatus));
            /* Now, check if we're somewhere in the list.
               There's a race condition with sem_post here, but it does not matter:
               the net result is that at the time pthread_exit is called,
               self is no longer reachable from sem->sem_status. */
            if (oldstatus != (long) self && (oldstatus & 1) == 0) {
                th = &(((pthread_t) oldstatus)->p_nextwaiting);
                while (*th != (pthread_t) 1 && *th != NULL) {
                    if (*th == self) {
                        *th = self->p_nextwaiting;
                        break;
                    }
                    th = &((*th)->p_nextwaiting);
                }
            }
            pthread_exit(PTHREAD_CANCELED);
        }
    }
}
inline void* RefCountAllocator_alloc(RefCountAllocator rc, size_t size, int clear)
{
    struct refCons* head = NULL;
#ifndef WITH_MEMORYAREA_TAGS
    JNIEnv* env = FNI_GetJNIEnv();
#endif
#ifdef RTJ_DEBUG
    printf("RefCountAllocator_alloc(%p, %d)\n", rc, size);
    checkException();
#endif
    RefCountAllocator_INC(rc);
    if (clear) {
        head = (struct refCons*)RTJ_CALLOC_UNCOLLECTABLE(sizeof(struct refCons)+size, 1);
    } else {
        head = (struct refCons*)RTJ_MALLOC_UNCOLLECTABLE(sizeof(struct refCons)+size);
        ((struct oobj*)(&(head->obj)))->claz = NULL;
        head->finalize = NULL;
        head->nextFree = NULL;
    }
    head->refCount = 1;
    while (!compare_and_swap((long int*)(&(rc->in_use)),
                             (long int)(head->next = rc->in_use),
                             (long int)head)) {}
    RefCountAllocator_DEC(rc);
#ifndef WITH_MEMORYAREA_TAGS
    RTJ_tagObject(env, FNI_WRAP(&(head->obj)));
#endif
#ifdef RTJ_DEBUG
    checkException();
    printf(" = %p\n", &(head->obj));
#endif
    return (void*)(&(head->obj));
}
int testandtestandset(int *mem)
{
    int numberoftries = 100;
    int currenttry = 0;
    int oldvalue;
    while (currenttry < numberoftries)  // test and test and set lock with a busy wait threshold of 100 times
    {
        while (*mem == 1)  // wait until the lock becomes available
        {
        }
        oldvalue = compare_and_swap(mem, 1, 0);  // try to acquire the lock
        if (oldvalue == 0) {
            return oldvalue;  // if lock acquiring is successful
        } else {
            currenttry += 1;  // if lock acquiring fails then increment the number of unsuccessful attempts
        }
    }
    return oldvalue;  // if the lock is not acquired after 100 tries, return "FAILURE"
}
void *mqueue_writer_parpare(struct mqueue *q)
{
    struct item *ret = NULL;
    uint64_t oldf, newf;
    uint32_t freen, seq;

    for (;;) {
        rmb();
        oldf = atomic_load(q->free);
        freen = oldf >> 32;
        seq = (oldf & 0xFFFFFFFF) + 1;
        if (unlikely(freen == TAIL_IDX))
            return NULL;
        ret = ITEM(q, freen);
        newf = ((uint64_t)ret->next << 32) | seq;
        if (compare_and_swap(&q->free, oldf, newf)) {
            ret->next = UNUSED_FLAG;
            return ret->content;
        }
        cpu_relax();
    }
    return NULL;
}
/*
 * mcount is called on entry to each function compiled with the profiling
 * switch set.  _mcount(), which is declared in a machine-dependent way
 * with _MCOUNT_DECL, does the actual work and is either inlined into a
 * C routine or called by an assembly stub.  In any case, this magic is
 * taken care of by the MCOUNT definition in <machine/profile.h>.
 *
 * _mcount updates data structures that represent traversals of the
 * program's call graph edges.  frompc and selfpc are the return
 * address and function address that represents the given call graph edge.
 *
 * Note: the original BSD code used the same variable (frompcindex) for
 * both frompcindex and frompc.  Any reasonable, modern compiler will
 * perform this optimization.
 */
_MCOUNT_DECL(frompc, selfpc)    /* _mcount; may be static, inline, etc */
{
    register u_short *frompcindex;
    register struct tostruct *top, *prevtop;
    register struct gmonparam *p;
    register long toindex;
    int i;

    p = &_gmonparam;
    /*
     * check that we are profiling
     * and that we aren't recursively invoked.
     */
    if (! compare_and_swap (&p->state, GMON_PROF_ON, GMON_PROF_BUSY))
        return;

    /*
     * check that frompcindex is a reasonable pc value.
     * for example: signal catchers get called from the stack,
     * not from text space.  too bad.
     */
    frompc -= p->lowpc;
    if (frompc > p->textsize)
        goto done;

    /* The following test used to be
           if (p->log_hashfraction >= 0)
       But we can simplify this if we assume the profiling data is always
       initialized by the functions in gmon.c.  But then it is possible to
       avoid a runtime check and use the same `if' as in gmon.c.  So keep
       these tests in sync.  */
    if ((HASHFRACTION & (HASHFRACTION - 1)) == 0) {
        /* avoid integer divide if possible: */
        i = frompc >> p->log_hashfraction;
    } else {
// -----------------------------------------------------------------------
//  Miscellaneous synchronization methods
// -----------------------------------------------------------------------
void* XMLPlatformUtils::compareAndSwap ( void**            toFill
                                       , const void* const newValue
                                       , const void* const toCompare)
{
    boolean_t boolVar = compare_and_swap((atomic_p)toFill, (int *)&toCompare, (int)newValue);
    return (void *)toCompare;
}
int sem_post(sem_t * sem)
{
    long oldstatus, newstatus;
    pthread_t th, next_th;

    do {
        oldstatus = sem->sem_status;
        if ((oldstatus & 1) == 0)
            newstatus = 3;
        else {
            if (oldstatus >= SEM_VALUE_MAX) {
                /* Overflow */
                errno = ERANGE;
                return -1;
            }
            newstatus = oldstatus + 2;
        }
    } while (! compare_and_swap(sem, oldstatus, newstatus));
    if ((oldstatus & 1) == 0) {
        th = (pthread_t) oldstatus;
        do {
            next_th = th->p_nextwaiting;
            th->p_nextwaiting = NULL;
            restart(th);
            th = next_th;
        } while (th != (pthread_t) 1);
    }
    return 0;
}
void push(Stack *s, Node *n) {
    while (1) {
        Node *old_top = s->top;
        n->next = old_top;
        if (compare_and_swap(&s->top, old_top, n) == old_top)
            return;
    }
}
int goldilocks_init (void) {
    const char *res = compare_and_swap(&goldilocks_global.state, NULL, G_INITING);
    if (res == G_INITED) return GOLDI_EALREADYINIT;
    else if (res) {
        return GOLDI_ECORRUPT;
    }

#if GOLDILOCKS_USE_PTHREAD
    int ret = pthread_mutex_init(&goldilocks_global.mutex, NULL);
    if (ret) goto fail;
#endif

    struct extensible_t ext;
    struct tw_extensible_t text;

    /* Sanity check: the base point is on the curve. */
    assert(validate_affine(&goldilocks_base_point));

    /* Convert it to twisted Edwards. */
    convert_affine_to_extensible(&ext, &goldilocks_base_point);
    twist_even(&text, &ext);

    /* Precompute the tables. */
    mask_t succ;
    succ = precompute_fixed_base(&goldilocks_global.fixed_base, &text,
                                 COMB_N, COMB_T, COMB_S, goldilocks_global.combs);
    succ &= precompute_fixed_base_wnaf(goldilocks_global.wnafs, &text, WNAF_PRECMP_BITS);

    int criff_res = crandom_init_from_file(&goldilocks_global.rand,
                                           GOLDILOCKS_RANDOM_INIT_FILE,
                                           GOLDILOCKS_RANDOM_RESEED_INTERVAL,
                                           GOLDILOCKS_RANDOM_RESEEDS_MANDATORY);

#ifdef SUPERCOP_WONT_LET_ME_OPEN_FILES
    if (criff_res == EMFILE) {
        crandom_init_from_buffer(&goldilocks_global.rand, "SUPERCOP won't let me open files");
        criff_res = 0;
    }
#endif

    if (succ & !criff_res) {
        if (!bool_compare_and_swap(&goldilocks_global.state, G_INITING, G_INITED)) {
            abort();
        }
        return 0;
    }

    /* it failed! fall through... */
fail:
    if (!bool_compare_and_swap(&goldilocks_global.state, G_INITING, G_FAILED)) {
        /* ok something is seriously wrong */
        abort();
    }
    return -1;
}
unsigned long udp_get_packet_index(unsigned long * value)
{
    unsigned long return_value = 0;
    compare_and_swap(value, 65535, 0);
    fetch_and_add(value, 1);
    return_value = *value;
    return (return_value);
}
Node *pop(Stack *s) {
    while (1) {
        Node *old_top = s->top;
        if (old_top == NULL)
            return NULL;
        Node *new_top = old_top->next;
        if (compare_and_swap(&s->top, old_top, new_top) == old_top)
            return old_top;
    }
}
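For context, the push/pop pair above is a Treiber-style lock-free stack built on a value-returning CAS. The surrounding declarations are not shown in the snippets, so the following is only a minimal sketch of the assumed Stack/Node layout and CAS wrapper; the names and the use of GCC's __sync_val_compare_and_swap builtin are assumptions, not the original code.

// Hypothetical node and stack layout assumed by the push/pop snippets above.
struct Node {
    Node *next;
    void *data;
};

struct Stack {
    Node *top;
};

// Value-returning CAS: yields the previous contents of *ptr, so callers test
// success with `== expected` (sketched here with a GCC/Clang builtin).
static inline Node *compare_and_swap(Node **ptr, Node *expected, Node *desired) {
    return __sync_val_compare_and_swap(ptr, expected, desired);
}

As with any Treiber stack, pop() is exposed to the ABA problem if popped nodes can be freed and reused while another thread still holds a stale old_top; counted pointers or a safe-reclamation scheme are needed in practice.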
PRInt32
_AIX_AtomicSet(PRInt32 *val, PRInt32 newval)
{
    PRIntn oldval;
    boolean_t stored;
    oldval = fetch_and_add((atomic_p)val, 0);
    do {
        stored = compare_and_swap((atomic_p)val, &oldval, newval);
    } while (!stored);
    return oldval;
}  /* _AIX_AtomicSet */
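The AIX-style compare_and_swap used here (and in the atomic_conditional_increment snippet below) differs from the two-value form in the other snippets: it takes the expected value by pointer and, when the swap fails, writes the currently stored value back through that pointer, so the retry loop never has to re-read by hand. Below is a minimal sketch of a wrapper with that contract, expressed with GCC's __atomic builtin; the wrapper name and types are assumptions.

// AIX-like CAS contract: on failure, *expected is refreshed with the value
// currently stored in *word, so the caller can retry immediately.
static inline bool compare_and_swap_ptr_expected(int32_t *word, int32_t *expected, int32_t desired)
{
    return __atomic_compare_exchange_n(word, expected, desired,
                                       false /* strong */,
                                       __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}

That failure-time refresh is why _AIX_AtomicSet above can keep looping on the same oldval variable without calling fetch_and_add again.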
int ring_queue_pop(ring_queue_t *queue, void **ele)
{
    if (!(queue->num > 0))
        return -1;
    int cur_head_index = queue->head;
    char * cur_head_flag_index = queue->flags + cur_head_index;
    while (!compare_and_swap(cur_head_flag_index, 2, 3)) {
        cur_head_index = queue->head;
        cur_head_flag_index = queue->flags + cur_head_index;
    }
    int update_head_index = (cur_head_index + 1) % queue->size;
    compare_and_swap(&queue->head, cur_head_index, update_head_index);
    *ele = *(queue->data + cur_head_index);
    fetch_and_sub(cur_head_flag_index, 3);
    fetch_and_sub(&queue->num, 1);
    return 0;
}
Node* allocate() {
    volatile Node* node = NULL;
    do {
        node = mHead->mNext;
        if (node == NULL)
            return new Node(); // FIXME should probably be aligned to size(Node) bytes
    } while (!compare_and_swap(&mHead->mNext, node, node->mNext));
    Node * return_node = (Node*)node; // FIXME volatile cast only allowed if mContent is primitive type of pointer size or less
    return_node->mNext = NULL;
    return_node->mContent = T();
    return return_node;
}
inline int32_t atomic_conditional_increment( int32_t * pw )
{
    // if( *pw != 0 ) ++*pw;
    // return *pw;

    int32_t tmp = fetch_and_add( pw, 0 );
    for( ;; )
    {
        if( tmp == 0 )
            return 0;
        if( compare_and_swap( pw, &tmp, tmp + 1 ) )
            return (tmp + 1);
    }
}
int pthread_once(pthread_once_t *once_control, void (*init_routine)(void)) {
    if (compare_and_swap((void *)once_control, (void *)0, (void *)1))
        init_routine();
    //TODO: "The pthread_once() function is not a cancellation point.
    //However, if init_routine is a cancellation point and is canceled,
    //the effect on once_control shall be as if pthread_once() was never called."

    //TODO: "The pthread_once() function may fail if:
    //[EINVAL]
    //    If either once_control or init_routine is invalid."
    //(Question: what does 'invalid' mean?)
    return 0;
}
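A minimal usage sketch for the pthread_once variant above; init_logging and worker are hypothetical names, and <pthread.h> is assumed to supply pthread_once_t and PTHREAD_ONCE_INIT as a zero-valued control word matching the CAS from 0 to 1 above.

#include <pthread.h>
#include <stdio.h>

static pthread_once_t log_once = PTHREAD_ONCE_INIT;  // starts at 0: not yet initialized

// Hypothetical one-time initializer.
static void init_logging(void) {
    puts("logging initialized exactly once");
}

void worker(void) {
    // Any number of threads may call this; only the caller that wins the
    // CAS from 0 to 1 runs init_logging().
    pthread_once(&log_once, init_logging);
}

Note that with the CAS-only implementation shown above, a losing caller returns immediately, possibly before init_routine has finished; POSIX requires pthread_once to return only after the initialization has completed.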
/**
 * Pushes value onto the queue
 *
 * @param value Will be copied and placed onto the end of the queue.
 */
void push(const T &value) {
    volatile Node* formerTail = NULL;
    volatile Node* formerTailNext = NULL;

    Node* newerNode = mFreeNodePool.allocate();
    newerNode->mContent = value;
    volatile Node* newNode = newerNode;

    bool successfulAddNode = false;
    while (!successfulAddNode) {
        formerTail = mTail;
        formerTailNext = formerTail->mNext;
        if (mTail == formerTail) {
            if (formerTailNext == 0)
                successfulAddNode = compare_and_swap(&mTail->mNext, (volatile Node*)0, newNode);
            else
                compare_and_swap(&mTail, formerTail, formerTailNext);
        }
    }
    compare_and_swap(&mTail, formerTail, newNode);
}
Node *fork() {
    volatile Node *newHead = mFreeNodePool.allocate();
    volatile Node *oldHead = mHead;

    // Acquire "lock" on head, for multiple people fork()ing at once.
    {
        while (oldHead == 0 || !compare_and_swap(&mHead, oldHead, (volatile Node*)0)) {
            oldHead = mHead;
        }
    }
    {
        volatile Node *oldTail = mTail;
        while (!compare_and_swap(&mTail, oldTail, newHead)) {
            oldTail = mTail;
        }
    }
    mHead = newHead;
    return const_cast<Node*>(oldHead);
}
int ring_queue_push(ring_queue_t *queue, void * ele)
{
    if (!(queue->num < queue->size)) {
        return -1;
    }
    int cur_tail_index = queue->tail;
    char * cur_tail_flag_index = queue->flags + cur_tail_index;
    while (!compare_and_swap(cur_tail_flag_index, 0, 1)) {
        cur_tail_index = queue->tail;
        cur_tail_flag_index = queue->flags + cur_tail_index;
    }
    int update_tail_index = (cur_tail_index + 1) % queue->size;
    compare_and_swap(&queue->tail, cur_tail_index, update_tail_index);
    *(queue->data + cur_tail_index) = ele;
    fetch_and_add(cur_tail_flag_index, 1);
    fetch_and_add(&queue->num, 1);
    return 0;
}
int sem_trywait(sem_t * sem)
{
    long oldstatus, newstatus;

    do {
        oldstatus = sem->sem_status;
        if ((oldstatus & 1) == 0 || (oldstatus == 1)) {
            errno = EAGAIN;
            return -1;
        }
        newstatus = oldstatus - 2;
    } while (! compare_and_swap(sem, oldstatus, newstatus));
    return 0;
}
inline uint32_t atomic_fetch_and_add( uint32_t * pw, uint32_t dv )
{
    // long r = *pw;
    // *pw += dv;
    // return r;

    for( ;; )
    {
        uint32_t r = *pw;
        if( __builtin_expect((compare_and_swap(pw, r, r + dv) == r), 1) )
        {
            return r;
        }
    }
}
inline void* LListAllocator_alloc(LListAllocator ls, size_t size) {
    struct cons* newCons;
    if (ls->size && (exchange_and_add((uint32_t*)(&(ls->used)), size) > (ls->size))) {
        return NULL; /* Out of space. */
    }
    newCons = (struct cons*)RTJ_MALLOC_UNCOLLECTABLE(sizeof(struct cons));
    newCons->car = (struct oobj*)RTJ_MALLOC_UNCOLLECTABLE(size);
#ifdef WITH_PRECISE_GC
    ((struct oobj*)(newCons->car))->claz = NULL;
#endif
    while (!compare_and_swap((long int*)(&(ls->data)),
                             (long int)(newCons->cdr = ls->data),
                             (long int)newCons)) {}
    return (void*)(newCons->car);
}
int loin_stop(void * dev)
{
    camera_handle_t * camera_dev = (camera_handle_t*)dev;
    process_loin_handle_t * handle = (process_loin_handle_t*)camera_dev->loin;
    if (NULL == handle) {
        dbg_printf("check the handle ! \n");
        return (-1);
    }
    pthread_mutex_lock(&(handle->mutex_loin));
    volatile unsigned int *task_num = &handle->is_run;
    compare_and_swap(task_num, 1, 0);
    pthread_mutex_unlock(&(handle->mutex_loin));
    return (0);
}
int heartbeat_stop(void * dev)
{
    camera_handle_t * camera_dev = (camera_handle_t*)dev;
    if (NULL == camera_dev) {  // check the device before dereferencing it
        dbg_printf("check the handle ! \n");
        return (-1);
    }
    heart_beat_handle_t * handle = (heart_beat_handle_t*)camera_dev->beatheart;
    if (NULL == handle) {
        dbg_printf("check the handle ! \n");
        return (-1);
    }
    pthread_mutex_lock(&(handle->mutex_heart));
    volatile unsigned int *task_num = &handle->is_run;
    compare_and_swap(task_num, 1, 0);
    pthread_mutex_unlock(&(handle->mutex_heart));
    return (0);
}
//! Sorting networks for a_size <= 4
void sort( double *a, unsigned a_size )
{
    switch ( a_size ) {
        case 4:
            compare_and_swap( a[0], a[2] );
            compare_and_swap( a[1], a[3] );
            compare_and_swap( a[0], a[1] );
            compare_and_swap( a[2], a[3] );
            compare_and_swap( a[1], a[2] );
            break;
        case 3:
            compare_and_swap( a[0], a[1] );
            compare_and_swap( a[0], a[2] );
            compare_and_swap( a[1], a[2] );
            break;
        case 2:
            compare_and_swap( a[0], a[1] );
            break;
        default:
            break;
    }
}
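Unlike the atomic CAS in the other snippets, compare_and_swap here is the comparator-exchange step of a sorting network; the name collision with the atomic primitive is coincidental. The helper itself is not shown, so this is only a minimal sketch, assuming ascending order and pass-by-reference doubles.

#include <algorithm>  // std::swap

// Hypothetical comparator-exchange: leave the pair (x, y) in ascending order.
static inline void compare_and_swap( double &x, double &y )
{
    if ( y < x )
        std::swap( x, y );
}

With this definition, the five fixed exchanges in the case 4 branch sort any four doubles regardless of input order, and the three exchanges in case 3 do the same for three.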
void mqueue_writer_commit(struct mqueue *q, void *ptr)
{
    uint32_t head = 0;
    struct item *item = container_of(ptr, struct item, content);
    uint32_t idx = ITEM_IDX(q, item);

    assert(item->next == UNUSED_FLAG && idx < q->nmemb);
    for (;;) {
        rmb();
        head = item->next = atomic_load(q->head);
        wmb();
        if (compare_and_swap(&q->head, head, idx))
            return;
        cpu_relax();
    }
}