void cvwait(cond_t* cond, lock_t* lock)
{
    // Queue ourselves at the tail of the condition's wait list.
    // (Assumes the list always holds a node for the next waiter; an
    // empty list would leave this process unrecorded.)
    struct node* temp = cond->head;
    if (temp != NULL) {
        while (temp->next != NULL)
            temp = temp->next;
        temp->p = proc;
    }

    // Release the ticket lock so other threads can make progress
    // while we sleep.
    fetch_and_add(&lock->turn, 1);

    // Copied from the relevant parts of xv6's sleep(): sleep on our
    // own proc pointer as the channel.
    if (proc == 0)
        panic("sleep");
    acquire(&ptable.lock);
    proc->chan = proc;
    proc->state = SLEEPING;
    sched();
    proc->chan = 0;
    release(&ptable.lock);
}
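A matching cvsignal is not shown; here is a minimal sketch, assuming the same cond_t node list and xv6's wakeup(), which wakes processes sleeping on the given channel (cvwait sleeps with chan == proc):

// Hypothetical companion to cvwait above, not from the original source.
void cvsignal(cond_t* cond)
{
    struct node* head = cond->head;
    if (head != NULL && head->p != NULL) {
        wakeup(head->p);          // cvwait set chan = proc, so wake on it
        cond->head = head->next;  // drop the woken waiter from the list
    }
}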
void * run_one_test(void * arg)
{
    list_element * t[MAX_NTHREADS + 1];
    int index = (int)(size_t)arg;
    int i;
# ifdef VERBOSE
    int j = 0;

    printf("starting thread %d\n", index);
# endif
    while (fetch_and_add(&ops_performed, index + 1) + index + 1 < LIMIT) {
        for (i = 0; i < index + 1; ++i) {
            t[i] = (list_element *)AO_stack_pop(&the_list);
            if (0 == t[i]) {
                fprintf(stderr, "FAILED\n");
                abort();
            }
        }
        for (i = 0; i < index + 1; ++i) {
            AO_stack_push(&the_list, (AO_t *)t[i]);
        }
# ifdef VERBOSE
        j += index + 1;
# endif
    }
# ifdef VERBOSE
    printf("finished thread %d: %d total ops\n", index, j);
# endif
    return 0;
}
// Need to lock ourself before calling delete since the
// destructor may make a local REFptr to itself.  When the
// REFptr goes out of scope, Free() gets called again, leading
// to an infinite loop.
void Free() const
{
    // The #if chain below forms the condition of the IF statement:
    // atomically decrement the reference count and test whether the
    // new value is 0.
#if defined(_AIX)
    if ( fetch_and_add((int *) &REF_ME(this)->_u._all, -1) == 1 )
#elif defined(sgi)
    // hack to do atomic decrement: adding UINT_MAX wraps the count
    // around to one less than its old value
    if ( test_then_add((unsigned long *) &REF_ME(this)->_u._all, UINT_MAX) == 1 )
#else
#ifdef USE_PTHREAD
    CriticalSection cs((ThreadMutex*)&_mutex);
#endif
    if (--REF_ME(this)->_u._all == 0)
#endif
    // THEN
    {
#if !defined(_AIX) && !defined(sgi)
        ((ThreadMutex*)&_mutex)->unlock();
#endif
        REF_ME(this)->Lock();
#if !defined(_AIX) && !defined(sgi)
        ((ThreadMutex*)&_mutex)->lock();
#endif
        delete REF_ME(this);
    } // end of the IF statement
}
void tlock_acquire(tlock_t* lock)
{
    // Take a ticket exactly once, then spin until it is our turn.
    // (Calling fetch_and_add inside the spin loop would grab a new
    // ticket on every iteration and the lock would never be acquired.)
    int turn = fetch_and_add(&lock->next_ticket, 1);
    while (turn != lock->currently_serving)
        ;  // spin
}
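The matching release just advances currently_serving; a minimal sketch, assuming the tlock_t fields used in tlock_acquire above:

// Hypothetical companion to tlock_acquire, not from the original source.
void tlock_release(tlock_t* lock)
{
    // Hand the lock to the holder of the next ticket.  The holder is
    // the only writer of currently_serving, but fetch_and_add keeps
    // the update atomic with respect to the spinning readers.
    fetch_and_add(&lock->currently_serving, 1);
}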
// make sure the static xid is initialized before any threads are started
__attribute__((constructor))
int32_t get_xid()
{
    static int32_t xid = -1;
    if (xid == -1) {
        xid = time(0);
    }
    return fetch_and_add(&xid, 1);
}
int32_t inc_ref_counter(zhandle_t* zh, int i)
{
    int incr = (i < 0 ? -1 : (i > 0 ? 1 : 0));
    // fetch_and_add implements atomic post-increment, but
    // inc_ref_counter wants pre-increment semantics
    int v = fetch_and_add(&zh->ref_counter, incr);
    v += incr;   // simulate pre-increment
    return v;
}
unsigned long udp_get_packet_index(unsigned long * value)
{
    // Wrap the counter back to 0 once it reaches 65535, then claim the
    // next index.  fetch_and_add returns the old value, so old + 1 is
    // the index this caller owns; re-reading *value afterwards would
    // race with concurrent callers.
    compare_and_swap(value, 65535, 0);
    return fetch_and_add(value, 1) + 1;
}
PRInt32
_AIX_AtomicSet(PRInt32 *val, PRInt32 newval)
{
    PRIntn oldval;
    boolean_t stored;

    /* fetch_and_add with 0 is an atomic read of the current value. */
    oldval = fetch_and_add((atomic_p)val, 0);
    do
    {
        /* On failure, AIX's compare_and_swap copies the current value
           of *val back into oldval, so the retry uses fresh state. */
        stored = compare_and_swap((atomic_p)val, &oldval, newval);
    } while (!stored);
    return oldval;
}  /* _AIX_AtomicSet */
void Own() const
{
#ifdef _AIX
    fetch_and_add((int *) &REF_ME(this)->_u._all, 1);
#elif defined(sgi)
    test_then_add((unsigned long *) &REF_ME(this)->_u._all, 1);
#else
#ifdef USE_PTHREAD
    CriticalSection cs((ThreadMutex*)&_mutex);
#endif
    REF_ME(this)->_u._a._ref++;
#endif
}
inline int32_t atomic_decrement( int32_t * pw )
{
    // return --*pw;
    int32_t originalValue;

    __lwsync();   // release barrier: order prior writes before the decrement
    originalValue = fetch_and_add( pw, -1 );
    __isync();    // acquire barrier: order later reads after the decrement
    return (originalValue - 1);
}
inline int32_t atomic_conditional_increment( int32_t * pw )
{
    // if( *pw != 0 ) ++*pw;
    // return *pw;
    int32_t tmp = fetch_and_add( pw, 0 );   // atomic read of *pw
    for( ;; )
    {
        if( tmp == 0 )
            return 0;   // count already hit zero: do not resurrect
        // On failure, compare_and_swap refreshes tmp with the current
        // value of *pw, so the loop retries with up-to-date state.
        if( compare_and_swap( pw, &tmp, tmp + 1 ) )
            return (tmp + 1);
    }
}
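A typical caller only wants the reference if the object is still alive; a hedged sketch of that usage (the name add_ref_if_alive is illustrative, not from the source):

// Returns false when the count has already dropped to zero, i.e. the
// object is being (or has been) destroyed.  This mirrors how shared-count
// implementations use a conditional increment to lock a weak reference.
inline bool add_ref_if_alive( int32_t* use_count )
{
    return atomic_conditional_increment( use_count ) != 0;
}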
static void buffer_recv(buffer_t* b, data_t* d)
{
    int out;

    sem_wait(b->data);
    if (use_locking)
        pthread_mutex_lock(&b->mutex_out);
    out = fetch_and_add(&b->out, 1);
    if (out >= BUFFER_MAX) {
        fetch_and_add(&b->out, -BUFFER_MAX);
        out -= BUFFER_MAX;
    }
    *d = b->buffer[out];
    if (use_locking)
        pthread_mutex_unlock(&b->mutex_out);
    if (! quiet) {
        printf("received %d from buffer[%d]\n", *d, out);
        fflush(stdout);
    }
    sem_post(b->free);
}
static void buffer_send(buffer_t* b, data_t* d)
{
    int in;

    sem_wait(b->free);
    if (use_locking)
        pthread_mutex_lock(&b->mutex_in);
    in = fetch_and_add(&b->in, 1);
    if (in >= BUFFER_MAX) {
        fetch_and_add(&b->in, -BUFFER_MAX);
        in -= BUFFER_MAX;
    }
    b->buffer[in] = *d;
    if (use_locking)
        pthread_mutex_unlock(&b->mutex_in);
    if (! quiet) {
        printf("sent %d to buffer[%d]\n", *d, in);
        fflush(stdout);
    }
    sem_post(b->data);
}
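For context, a minimal producer/consumer pair driving buffer_send and buffer_recv; a sketch assuming data_t is an integer type and a buffer_t whose data semaphore starts at 0 and free semaphore at BUFFER_MAX (DATA_COUNT is a hypothetical item count, not from the source):

enum { DATA_COUNT = 1000 };   // hypothetical number of items to exchange

static void* producer(void* arg)
{
    buffer_t* b = (buffer_t*)arg;
    data_t d;
    for (d = 0; d < DATA_COUNT; d++)
        buffer_send(b, &d);   // blocks on b->free when the buffer is full
    return NULL;
}

static void* consumer(void* arg)
{
    buffer_t* b = (buffer_t*)arg;
    data_t d;
    int i;
    for (i = 0; i < DATA_COUNT; i++)
        buffer_recv(b, &d);   // blocks on b->data when the buffer is empty
    return NULL;
}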
int ring_queue_push(ring_queue_t *queue, void * ele)
{
    if (!(queue->num < queue->size)) {
        return -1;   // queue full (best-effort check)
    }
    // Claim the current tail slot by flipping its flag from 0 (free)
    // to 1 (being written).
    int cur_tail_index = queue->tail;
    char * cur_tail_flag_index = queue->flags + cur_tail_index;
    while (!compare_and_swap(cur_tail_flag_index, 0, 1)) {
        cur_tail_index = queue->tail;
        cur_tail_flag_index = queue->flags + cur_tail_index;
    }
    // Advance the tail; only one thread succeeds, the rest pick up the
    // new tail on their next read.
    int update_tail_index = (cur_tail_index + 1) % queue->size;
    compare_and_swap(&queue->tail, cur_tail_index, update_tail_index);
    // Publish the element, then mark the slot occupied (flag 1 -> 2).
    *(queue->data + cur_tail_index) = ele;
    fetch_and_add(cur_tail_flag_index, 1);
    fetch_and_add(&queue->num, 1);
    return 0;
}
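The matching pop is not shown; here is a hedged sketch mirroring the flag protocol above (0 = free, 1 = being written, 2 = occupied, 3 = being read), assuming a queue->head field symmetric to tail:

// Hypothetical companion to ring_queue_push, not from the original source.
int ring_queue_pop(ring_queue_t *queue, void **ele)
{
    if (!(queue->num > 0)) {
        return -1;   // queue empty (best-effort check, like push)
    }
    // Claim the current head slot by flipping its flag from 2 to 3.
    int cur_head_index = queue->head;
    char *cur_head_flag_index = queue->flags + cur_head_index;
    while (!compare_and_swap(cur_head_flag_index, 2, 3)) {
        cur_head_index = queue->head;
        cur_head_flag_index = queue->flags + cur_head_index;
    }
    // Advance the head, read the element, then free the slot (3 -> 0).
    int update_head_index = (cur_head_index + 1) % queue->size;
    compare_and_swap(&queue->head, cur_head_index, update_head_index);
    *ele = *(queue->data + cur_head_index);
    fetch_and_add(cur_head_flag_index, -3);
    fetch_and_add(&queue->num, -1);
    return 0;
}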
// Loops through the array to be sorted and puts every element into the
// correct helper sub-array based on its relation to the pivots.
void* fast_sort_partition(void* vargs)
{
    parallel_args* args = (parallel_args*)vargs;
    int i;
    for (i = args->from; i < args->to; i++) {
        int j = 0;
        const int cur = args->array[i];
        const int offset = i % NB_THREADS;
        while (cur > pivots[j] || (cur == pivots[j] && offset > j)) {
            ++j;
            if (j >= NB_THREADS - 1) {
                break;
            }
        }
        // fetch_and_add hands out a unique slot in the target sub-array
        temp_array[j][fetch_and_add(&current_index[j], 1)] = cur;
    }
    return NULL;
}
void start(void)
{
    sys_share(USER_DEFINED_SHARE);
    sys_priority(USER_DEFINED_PRIORITY);

    int i;
    for (i = 0; i < RUNCOUNT; i++) {
        // Write characters to the console, yielding after each one.
        if (MECHANISM == 0)
            *cursorpos++ = PRINTCHAR;
        else if (MECHANISM == 1) {
            // Atomically claim the current screen cell and advance the
            // shared cursor by one 2-byte cell.
            uint16_t *curpos = (uint16_t *)fetch_and_add((uint32_t *)&cursorpos, 2);
            *curpos = PRINTCHAR;
        }
        else if (MECHANISM == 2)
            sys_print(PRINTCHAR);
        sys_yield();
    }

    // Yield forever.
    sys_exit(1);
}
HIDDEN void *
sos_alloc (size_t size)
{
    size_t pos;

    size = UNW_ALIGN(size, MAX_ALIGN);

#if defined(__GNUC__) && defined(HAVE_FETCH_AND_ADD)
    /* Assume `sos_memory' is suitably aligned. */
    assert(((uintptr_t) &sos_memory[0] & (MAX_ALIGN-1)) == 0);

    pos = fetch_and_add (&sos_memory_freepos, size);
#else
    static define_lock (sos_lock);
    intrmask_t saved_mask;

    lock_acquire (&sos_lock, saved_mask);
    {
        /* No assumptions about `sos_memory' alignment. */
        if (sos_memory_freepos == 0)
        {
            unsigned align = UNW_ALIGN((uintptr_t) &sos_memory[0], MAX_ALIGN)
                             - (uintptr_t) &sos_memory[0];
            sos_memory_freepos = align;
        }
        pos = sos_memory_freepos;
        sos_memory_freepos += size;
    }
    lock_release (&sos_lock, saved_mask);
#endif

    assert (((uintptr_t) &sos_memory[pos] & (MAX_ALIGN-1)) == 0);
    assert ((pos+size) <= SOS_MEMORY_SIZE);

    return &sos_memory[pos];
}
int XMLPlatformUtils::atomicDecrement(int &location)
{
    // fetch_and_add returns the value before the add, so subtract one
    // to report the post-decrement value.
    int retVal = fetch_and_add( (atomic_p)&location, -1);
    return retVal - 1;
}
void klock_release(lock_t * lock)
{
    // Pass the lock to the next ticket holder.
    fetch_and_add(&lock->turn, 1);
}
void klock_acquire(lock_t * lock)
{
    int myTurn = fetch_and_add(&lock->ticket, 1);
    while (lock->turn != myTurn)
        ;  // spin
}
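Usage is the usual acquire/increment/release pattern; a minimal sketch assuming a lock_t that starts zero-initialized (ticket == turn == 0) so the first caller gets ticket 0 (counter_lock and counter are illustrative names):

lock_t counter_lock;   // zero-initialized: ticket = 0, turn = 0
long counter;

void increment_counter(void)
{
    klock_acquire(&counter_lock);   // threads enter in FIFO ticket order
    counter++;                      // critical section
    klock_release(&counter_lock);
}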
void armci_completion_handler(lapi_handle_t *t_hndl, void *save)
{
    lapi_handle_t hndl = *t_hndl;
    int need_data;
    void *message;
    int whofrom, msglen;
    request_header_t *msginfo = (request_header_t *)save;
    char *descr = (char*)(msginfo+1), *buf = MessageRcvBuffer;
    int buflen = MSG_BUFLEN;
#if ARMCI_ENABLE_GPC_CALLS
    extern pthread_t data_server;
    data_server = pthread_self();
#endif

    if(DEBUG_)
        fprintf(stderr,"%d:CH:op=%d from=%d datalen=%d dscrlen=%d\n", armci_me,
                msginfo->operation, msginfo->from, msginfo->datalen,
                msginfo->dscrlen);

    /*** assure that descriptor and data are in the right format and place ***/
    if( msginfo->dscrlen < 0 || msginfo->datalen < 0 ){
        /* for large put/acc/scatter need to get the data */
        int rc;
        lapi_cntr_t req_cntr;
        int bytes = 0;
        char *origin_ptr = msginfo->tag.buf;

        if (msginfo->dscrlen < 0) {
            descr = MessageRcvBuffer;
            msginfo->dscrlen = -msginfo->dscrlen;
            buf = descr + msginfo->dscrlen;
            buflen += msginfo->dscrlen;
            bytes += msginfo->dscrlen;
        }
        if (msginfo->datalen < 0){
            msginfo->datalen = -msginfo->datalen;
            bytes += msginfo->datalen;
        }

        if((rc = LAPI_Setcntr(hndl, &req_cntr, 0)))
            ERROR("CH:setcntr failed", rc);
        if((rc = LAPI_Get(hndl, (uint)msginfo->from, bytes, origin_ptr,
                          MessageRcvBuffer, msginfo->tag.cntr, &req_cntr)))
            ERROR("CH:LAPI_Get failed", rc);
        if((rc = LAPI_Waitcntr(hndl, &req_cntr, 1, NULL)))
            ERROR("CH:Waitcntr failed", rc);

    } else {
        /* desc is in save, data could be but not for GET */
        if(msginfo->operation != GET)
            buf = descr + msginfo->dscrlen;
        buflen = MSG_BUFLEN;
    }

    /*** dispatch request to the appropriate handler function ***/
    switch(msginfo->operation){
    case LOCK:   armci_server_lock(msginfo);
                 break;
    case UNLOCK: armci_server_unlock(msginfo, descr);
                 break;
    default:     if(msginfo->format == STRIDED)
                     armci_server(msginfo, descr, buf, buflen);
                 else
                     armci_server_vector(msginfo, descr, buf, buflen);
    }

    free(msginfo);
#ifdef LINUX
    (void)fetch_and_add(&num_malloc, (long)-1);
#else
    (void)fetch_and_addlp(&num_malloc, (long)-1);
#endif
}
void* armci_header_handler(lapi_handle_t *t_hndl, void *uhdr, uint *t_uhdrlen,
                           uint *msglen, compl_hndlr_t **handler, void** psave)
{
    lapi_handle_t hndl = *t_hndl;
    uint uhdrlen = *t_uhdrlen;
    request_header_t *msginfo = (request_header_t *)uhdr;

    if(DEBUG_)
        fprintf(stderr,"%d:HH: op=%d from %d\n", armci_me, msginfo->operation,
                msginfo->from);
    if(msginfo->to != armci_me)
        armci_die("wrong message delivered", msginfo->to);

    /* process small requests that do not require comms in header handler */
    if(msginfo->datalen > 0 && msginfo->dscrlen > 0 && msginfo->operation != GET
       && msginfo->operation != LOCK && msginfo->operation != UNLOCK){

        /* If another thread is in accumulate use compl. handler path:
         * Try to avoid blocking inside HH which degrades Lapi performance.
         * The completion handler path requires malloc to save request info.
         * Only up to approx. MAX_NUM_MALLOC requests can be rescheduled to
         * run in CH instead of HH.
         * MAX_NUM_MALLOC is a soft limit to avoid cost of locking when reading
         */
        if( msginfo->operation == PUT || num_malloc > MAX_NUM_MALLOC || kevin_ok){
            char *descr = (char*)(msginfo+1);
            char *buf = descr + msginfo->dscrlen;
            int buflen = uhdrlen - sizeof(request_header_t) - msginfo->dscrlen;

            if(DEBUG_)
                fprintf(stderr,"%d:HH: buf =%lf\n", armci_me, *(double*)buf);
            if(msginfo->format == STRIDED)
                armci_server(msginfo, descr, buf, buflen);
            else
                armci_server_vector(msginfo, descr, buf, buflen);

            *psave = NULL;
            *handler = NULL;
            return(NULL);
        }
    }

#ifdef LINUX
    (void)fetch_and_add(&num_malloc, (long)1);
#else
    (void)fetch_and_addlp(&num_malloc, (long)1); /* AIX atomic increment */
#endif

    msginfo = (request_header_t*) malloc(uhdrlen); /* recycle pointer */
    if(!msginfo)
        ERROR("HH: malloc failed in header handler", num_malloc);

    /* save the request info for processing in compl. handler */
    memcpy((char*)msginfo, uhdr, uhdrlen);
    *psave = msginfo;
    *handler = armci_completion_handler;

    return(NULL);
}
long use_count() const // nothrow
{
    // Adding 0 is an atomic read of the current count.
    return fetch_and_add( const_cast<int32_t*>(&use_count_), 0 );
}
int acquire()
{
    // Take the next ticket, then spin until our number is being served.
    int mytick = fetch_and_add(&ticket, 1);
    while (mytick != serving) {}   // spin
    return mytick;
}
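The matching release advances serving; a minimal sketch assuming the same global ticket/serving pair used by acquire above:

// Hypothetical companion to acquire, not from the original source.
void release()
{
    // Let the holder of the next ticket proceed.
    fetch_and_add(&serving, 1);
}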
inline void atomic_increment( int32_t* pw )
{
    // ++*pw;
    fetch_and_add( pw, 1 );
}
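Tying these primitives together: a hedged sketch of an intrusive reference count built on atomic_increment and atomic_decrement above (ref_counted, add_ref, and release_ref are illustrative names, not from the source):

struct ref_counted
{
    int32_t use_count_;
};

inline void add_ref( ref_counted* p )
{
    atomic_increment( &p->use_count_ );
}

inline void release_ref( ref_counted* p )
{
    // atomic_decrement returns the new count; the thread that drops it
    // to zero is the one responsible for destroying the object.
    if( atomic_decrement( &p->use_count_ ) == 0 )
        delete p;
}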