Example no. 1
/* Each thread calls this to allocate a private hazard-pointer (HP) record */
void cs_lfqueue_register_thread(cs_lfqueue_t* q) {
	/* First try to reuse a retired HP record */
	for (cs_hp_record_t* record = q->HP->head_hp_record; record; record = record->next) {
		if (record->active || !CAS(&record->active, 0, 1)) {
			continue;
		}
		my_record = record;
		return;
	}

	/* No HP records available for reuse.
	   Increment H, then allocate a new HP record and push it onto the list */
	INC_N_ATOMIC(&q->HP->H, K);
	cs_hp_record_t* new_record = hp_record_new();
	cs_hp_record_t* old_record;

	do {
		old_record = q->HP->head_hp_record;
		new_record->next = old_record;
	} while (!CAS(&q->HP->head_hp_record, old_record, new_record));

	my_record = new_record;
}
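Here and in most of the later examples (e.g. no. 2, no. 14, no. 18) CAS is the boolean-result flavour: it returns whether the swap took place. A minimal sketch of such a macro, assuming GCC/Clang __sync builtins (the projects above may define it differently):

/* Boolean-result CAS sketch: true iff *ptr held `oldval` and now holds `newval`. */
#define CAS(ptr, oldval, newval) __sync_bool_compare_and_swap((ptr), (oldval), (newval))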
Example no. 2
bool ListInsert(LinkedList* entryHead, int key) {
	Entry *cur, **prev;
	while (true) {

		if (find(entryHead, &prev, &cur, key)) 	// key exists
			return false; 

		Entry* newEntry = new Entry();			// create entry
		newEntry->key	= key;
		newEntry->nextEntry = cur;

		if (CAS(prev, cur, newEntry)) 			//connect
			return true;
	} // end of while
}
Example no. 3
		void EnQueue(Node* q) 
		{
			volatile Node* p;
			do {

				p = tail;
				while (p->_next != NULL)
					p = p->_next; 
#ifdef _WIN32
#pragma   warning(disable   :   4311)
#endif
			}while(CAS((volatile LONG*)(&(p->_next)), (LONG)q, NULL) != (LONG)NULL); 			
			tail = q;

			ADD((volatile LONG*)&count);
		}
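Example no. 3 (and the matching DeQueue in Example no. 6) uses the value-returning flavour with Win32 argument order: CAS(destination, exchange, comparand) yields the initial value, so the caller tests it against the comparand. A plausible set of wrappers, assuming the Win32 Interlocked API (an assumption; the snippet's own macros are not shown):

#ifdef _WIN32
#include <windows.h>
/* Returns the prior value of *dest; the swap happened iff that equals comparand. */
#define CAS(dest, exchange, comparand) InterlockedCompareExchange((dest), (exchange), (comparand))
#define ADD(dest) InterlockedIncrement(dest)
#define SUB(dest) InterlockedDecrement(dest)
#endif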
Example no. 4
//! Acquire write lock on the given mutex.
bool spin_rw_mutex_v3::internal_acquire_writer()
{
    ITT_NOTIFY(sync_prepare, this);
    for( internal::atomic_backoff backoff;;backoff.pause() ){
        state_t s = const_cast<volatile state_t&>(state); // ensure reloading
        if( !(s & BUSY) ) { // no readers, no writers
            if( CAS(state, WRITER, s)==s )
                break; // successfully stored writer flag
            backoff.reset(); // we could be very close to complete op.
        } else if( !(s & WRITER_PENDING) ) { // no pending writers
            __TBB_AtomicOR(&state, WRITER_PENDING);
        }
    }
    ITT_NOTIFY(sync_acquired, this);
    return false;
}
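Example no. 4 spins with TBB's atomic_backoff between attempts. A minimal sketch of the same pause-based exponential backoff idea (a hypothetical helper, not TBB's implementation; assumes x86 and the _mm_pause intrinsic):

#include <immintrin.h>

typedef struct { int count; } backoff_t;           /* hypothetical helper type */

static inline void backoff_init (backoff_t* b) { b->count = 1; }
static inline void backoff_reset(backoff_t* b) { b->count = 1; }
static inline void backoff_pause(backoff_t* b) {
    for (int i = 0; i < b->count; ++i) _mm_pause(); /* be polite to the sibling hyperthread */
    if (b->count < 16) b->count *= 2;               /* back off exponentially, capped */
}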
Example no. 5
void SolverPNS2::solve(double time){
	if(rootboard.won() >= 0){
		outcome = rootboard.won();
		return;
	}

	start_threads();

	timeout = false;
	Alarm timer(time, std::bind(&SolverPNS2::timedout, this));
	Time start;

//	logerr("max memory: " + to_str(memlimit/(1024*1024)) + " Mb\n");

	//wait for the timer to stop them
	runbarrier.wait();
	CAS(threadstate, Thread_Wait_End, Thread_Wait_Start);
	assert(threadstate == Thread_Wait_Start);

	if(root.phi == 0 && root.delta == LOSS){ //look for the winning move
		for(PNSNode * i = root.children.begin() ; i != root.children.end(); i++){
			if(i->delta == 0){
				bestmove = i->move;
				break;
			}
		}
		outcome = rootboard.toplay();
	}else if(root.phi == 0 && root.delta == DRAW){ //look for the move to tie
		for(PNSNode * i = root.children.begin() ; i != root.children.end(); i++){
			if(i->delta == DRAW){
				bestmove = i->move;
				break;
			}
		}
		outcome = 0;
	}else if(root.delta == 0){ //loss
		bestmove = M_NONE;
		outcome = 3 - rootboard.toplay();
	}else{ //unknown
		bestmove = M_UNKNOWN;
		outcome = -3;
	}

	time_used = Time() - start;
}
Example no. 6
		Node* DeQueue()
		{
			volatile Node* p;
			do{
				p = head;
#ifdef _WIN32
#pragma   warning(disable   :   4311)
#endif
			}while(CAS((volatile LONG*)&head, (LONG)p->_next, (LONG)p) != (LONG)p);

			SUB((volatile LONG*)&count);

			Node* ret = p->_next;
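			// Note: freeing p below is only safe if no other thread can still be
			// dereferencing it; this snippet uses no reclamation scheme such as the
			// hazard pointers of Examples no. 1 and no. 14.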
			delete p->_value;
			delete p;

			return ret;
		}
Example no. 7
char htable_find_or_put(htable_ctx_t *ctx, uint64_t data) {
	data &= HTABLE_MASK_DATA;

	uint64_t h = hash(data);
	uint64_t i, j;

	query_chunk(ctx, h, 0);

	for (i = 0; i < HTABLE_MAX_NR_OF_CHUNKS; i++) {
		if (i + 1 < HTABLE_MAX_NR_OF_CHUNKS) {
			query_chunk(ctx, h, i + 1);
		}

		ADD_TO_REQUIRED_RTRIPS(1);

		sync_on_chunk(ctx, i);

		for (j = 0; j < HTABLE_CHUNK_SIZE; j++) {
			uint64_t index = i * HTABLE_CHUNK_SIZE + j;

			if (!(ctx->chunks[index] & HTABLE_MASK_OCCUPIED)) {

				// try to claim the empty bucket with CAS
				bucket_t result = CAS(&ctx->table[HTABLE_ADDR(h + index)], 
					ctx->chunks[index], data | HTABLE_MASK_OCCUPIED); 

				// check if the CAS operation succeeded..
				if (ctx->chunks[index] == result) {
					return HTABLE_INSERTED;
				}

				// if not, check if some other thread has inserted 'data' in the bucket we wanted to claim..
				else if ((result & HTABLE_MASK_DATA) == data) {
					return HTABLE_FOUND;
				}
			}
			else if ((ctx->chunks[index] & HTABLE_MASK_DATA) == data) {
				return HTABLE_FOUND;
			}
		}
	}

	return HTABLE_FULL;
}
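Examples no. 7, 16, 17 and 23 use the value-returning flavour in GCC argument order: CAS(ptr, expected, desired) yields the prior contents of *ptr, and success is detected by comparing that result with expected. A minimal sketch, assuming the GCC/Clang __sync builtin (each project's own macro is not shown):

/* Value-returning CAS sketch: yields the prior value of *ptr; the swap happened iff it equals `expected`. */
#define CAS(ptr, expected, desired) __sync_val_compare_and_swap((ptr), (expected), (desired))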
Example no. 8
//----------------------------------------------------------------
void fifoput (fifo * ff, fifocell * cl) 
{
	fifocell * volatile tail;

	cl->link = fifo_end(ff);	/* set the cell next pointer to the end marker */
	while (1) {
		tail = ff->tail;		/* read the tail cell */
		/* try to link the cell to the tail cell */
		if (CAS (&tail->link, fifo_end(ff), cl)) {
			break;
		}
		else {
			/* tail was not pointing to the last cell, try to set tail to the next cell */
			CASLNE (&ff->tail, (void *)tail, fifo_end(ff));
		}
	}
	/* enqueue is done, try to set tail to the enqueued cell */
	CASLNE (&ff->tail, (void *)tail, fifo_end(ff));
	msAtomicInc (&ff->count);
}
Example no. 9
Player::Node * Player::genmove(double time, int max_runs, bool flexible){
	time_used = 0;
	int toplay = rootboard.toplay();

	if(rootboard.won() >= 0 || (time <= 0 && max_runs == 0))
		return NULL;

	Time starttime;

	stop_threads();

	if(runs)
		logerr("Pondered " + to_str(runs) + " runs\n");

	runs = 0;
	maxruns = max_runs;
	for(unsigned int i = 0; i < threads.size(); i++)
		threads[i]->reset();

	// if the move is forced and the time can be added to the clock, don't bother running at all
	if(!flexible || root.children.num() != 1){
		//let them run!
		start_threads();

		Alarm timer;
		if(time > 0)
			timer(time - (Time() - starttime), std::bind(&Player::timedout, this));

		//wait for the timer to stop them
		runbarrier.wait();
		CAS(threadstate, Thread_Wait_End, Thread_Wait_Start);
		assert(threadstate == Thread_Wait_Start);
	}

	if(ponder && root.outcome < 0)
		start_threads();

	time_used = Time() - starttime;

//return the best one
	return return_move(& root, toplay);
}
Example no. 10
//----------------------------------------------------------------
void fifoput (fifo * ff, fifocell * cl)
{
    long ic;
    fifocell * volatile tail;

    cl->link = fifo_end(ff);	/* set the cell next pointer to the end marker */
    while (1) {
        ic = ff->ic;			/* read the tail modification count */
        tail = ff->tail;		/* read the tail cell */
        /* try to link the cell to the tail cell */
        if (CAS (&tail->link, fifo_end(ff), cl))
            break;
        else
            /* tail was not pointing to the last cell, try to set tail to the next cell */
            CAS2 (&ff->tail, tail, ic, tail->link, ic+1);
    }
    /* enqueue is done, try to set tail to the enqueued cell */
    CAS2 (&ff->tail, tail, ic, cl, ic+1);
    msAtomicInc (&ff->count);
}
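CAS2 above is a double-width CAS on a {pointer, modification-count} pair; the counter (ic) is what defeats the ABA problem when tail is recycled. An illustrative sketch, assuming GCC/Clang __atomic builtins on a 16-byte aligned pair (may require -mcx16 / libatomic on x86-64; not the original macro):

#include <stdbool.h>
#include <stdint.h>

typedef struct { void* ptr; uintptr_t count; } __attribute__((aligned(16))) tagged_ptr_t;

static inline bool CAS2(tagged_ptr_t* target,
                        void* old_ptr, uintptr_t old_count,
                        void* new_ptr, uintptr_t new_count) {
    tagged_ptr_t expected = { old_ptr, old_count };
    tagged_ptr_t desired  = { new_ptr, new_count };
    /* Compare both words at once; succeed only if neither has changed. */
    return __atomic_compare_exchange(target, &expected, &desired,
                                     false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
}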
Example no. 11
//------------------------------------------------------
Task *Scheduler::newTask(Func *func, Value *args) {
	Task *oldtop = freelist;
	if(oldtop != NULL) {
		Task *newtop = oldtop->next;
		if(CAS(freelist, oldtop, newtop)) {
			// init
			Task *task = oldtop;
#ifdef USING_THCODE
			task->pc = func->thcode;
#else
			task->pc = func->code;
#endif
			task->sp = task->stack + 2;
			task->sp[-1].pc = &endcode;
			task->stat = TASK_RUN;
			memcpy(task->sp, args, func->argc * sizeof(Value));
			return task;
		}
	}
	return NULL;
}
Example no. 12
int dequeue(struct queue* q) {
	int data;
	node* headSave = NULL;
	
	headSave = q->head;
	
	if(!headSave) {
		return 0;
	}
	
	do {
		headSave = q->head; // only takes effect from the second pass onward
	} while(!CAS(&(q->head), headSave, headSave->next));
	
	if(!q->head) {
		q->tail = NULL;
	}
	
	data = headSave->data;	
	return data;
}
Example no. 13
void AgentPNS::reset_threads(){ //start and end with threadstate = Thread_Wait_Start
	assert(threadstate == Thread_Wait_Start);

//wait for them to all get to the barrier
	assert(CAS(threadstate, Thread_Wait_Start, Thread_Wait_Start_Cancelled));
	runbarrier.wait();

//make sure they exited cleanly
	for(unsigned int i = 0; i < threads.size(); i++)
		threads[i]->join();

	threads.clear();

	threadstate = Thread_Wait_Start;

	runbarrier.reset(numthreads + 1);
	gcbarrier.reset(numthreads);

//start new threads
	for(int i = 0; i < numthreads; i++)
		threads.push_back(new PNSThread(this));
}
Example no. 14
static void
help_scan(cs_hp_t* hp, cs_hp_record_t* private_record) {
	cs_hp_record_t* head_record = hp->head_hp_record;
	for (; head_record; head_record = head_record->next) {
		if (head_record->active || !CAS(&head_record->active, 0, 1)) {
			continue;
		}

		while (head_record->rcount > 0) {
			void* node = hp_list_pop(head_record->rlist);
			head_record->rcount--;
			hp_list_insert(private_record->rlist, node);
			private_record->rcount++;

			if (private_record->rcount >= R(hp)) {
				scan(hp, private_record);
			}
		}

		head_record->active = 0;
  	}
}
Example no. 15
static void *handle(void *ud) {
	struct thread *th = (struct thread*)ud;
	while (true) {
		struct context *ctx = contextQueuePop(M->queue); 
		bool sleep = false;
		if (ctx) {
			do {
				int result = contextDispatchMessage(ctx, th);
				if (result == -1 ||
					result == -2) {
					assert(CAS(&ctx->inGlobal, 1, 0));
					ctxMgrReleaseContext(ctx);
					sleep = true;
					break;
				} else {
					if (workerPoolHaveWork()) { 
						contextQueuePush(M->queue, ctx); 
						break;
					}
				}
			} while(true);
		} else {
			sleep = true;
		}

	
		int num = ctxMgrGetContextNum(); 
		if (num == 0) {
			workerPoolStop(); 
			break;
		} 

		if (sleep) {
			threadSleep();
		}
	}
	return NULL;
}
Example no. 16
int dom_serial_fifo_read(dom_fifo_t *df, char *dest, int size)
{
	serial_fifo_t *ff;
	long bom, offset, nbom;
	int rsize;

        
	if(df == NULL || dest == NULL || size < 0) 
		return -EINVAL;
	
	ff = df->serf;

	if(!is_bit_set(ff->flag, RT_SERIAL_dom_READ))
		return -EPERM;
					        
	/* Only one domain (the device domain) reads from the serial FIFO,
	 * so there is no concurrent-reader problem to worry about. */
	/* If less data is left in the FIFO than requested, read only what is available. */
	do {
		rsize = (ff->top + ff->size - ff->bom) % ff->size;
					        
		if(rsize > size) rsize = size;
		
		bom = ff->bom;
		nbom = (bom + rsize) % ff->size;
		if ((ff->top < bom) && (rsize > ff->size - ff->bom)) {
			offset = ff->size - bom;
			memcpy(dest, df->addr + bom, offset);
			memcpy(dest + offset, df->addr, rsize - offset);
		} else {
			memcpy(dest, df->addr + bom, rsize);
		}

	} while (CAS(&ff->bom, bom, nbom) != bom);									        
											        
	return rsize;
}
Example no. 17
value_t l_insert(node_t *head, key_t key, value_t val) {
    node_t *pred, *item, *new_item;
    while (TRUE) {
        if (l_find(&pred, &item, head, key)) { /* update its value */
            /* When updating, item->val itself serves as the point of mutual exclusion; NULL_VALUE means the item has been removed */
            node_t *sitem = STRIP_MARK(item);
            value_t old_val = sitem->val;
            while (old_val != NULL_VALUE) {
                value_t ret = CAS_V(&sitem->val, old_val, val);
                if (ret == old_val) {
                    trace("update item %p value success\n", item);
                    return ret;
                }
                trace("update item lost race %p\n", item);
                old_val = ret;
            }
            trace("item %p removed, retry\n", item);
            continue; /* the item has been removed, we retry */
        }
        new_item = (node_t*) malloc(sizeof(node_t));
        new_item->key = key;
        new_item->val = val;
        new_item->next = item;
        /* a). Validity of pred: pred can only become stale while pred->next is in use.
               If l_remove is removing that pred, the two threads contend in
               CAS(&item->next): if the remove succeeds, this insert's CAS fails and we
               retry; conversely, if the remove fails, it retries. So pred stays valid.
           b). Tagging item itself also lowers the odds of the ABA problem to some extent.
        */
        if (CAS(&pred->next, item, new_item)) {
            trace("insert item %p(next) %p success\n", item, new_item);
            return val;
        }
        trace("cas item %p failed, retry\n", item);
        free(new_item);
    }
    return NULL_VALUE;
}
Example no. 18
bool find(LinkedList *entryHead, Entry ***outprev, Entry **outcur, int key) {
	int ckey;
	Entry **prev, *cur, *next;

	try_again:
	prev = &entryHead->head;
	cur = *(prev);

	while (cur != NULL) {
		next = cur->nextEntry;

		if(isDeleted(next)) {
			if (!CAS(prev, cur, clearDeleted(next)))
				goto try_again; 		// disconnect of the deleted entry failed, try again

			cur = clearDeleted(next);
		} else {
			ckey = cur->key;

			if (*(prev) != cur) {
				goto try_again;
			}

			if (ckey >= key) {
				*outprev = prev;
				*outcur	 = cur;
				return (ckey == key); 	//compare search key
			}

			prev = &(cur->nextEntry);
			cur = next;
		}
	} //end of while
	*outprev=prev;
	*outcur=cur;
	return false;
}
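find() relies on a deletion mark carried in a node's nextEntry pointer (Harris-style logical deletion). Plausible definitions for the helpers it calls, assuming the mark lives in the pointer's low bit (an assumption; the original definitions are not shown):

#include <cstdint>

static inline bool   isDeleted(Entry* p)    { return ((uintptr_t)p & 1u) != 0; }
static inline Entry* clearDeleted(Entry* p) { return (Entry*)((uintptr_t)p & ~(uintptr_t)1); }
static inline Entry* markDeleted(Entry* p)  { return (Entry*)((uintptr_t)p | 1u); } /* used by the delete path, not shown */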
Example no. 19
File: main.c Project: olivo/BP
inline unsigned dec() {
	unsigned dec__v, dec__vn, dec__casret;

	do {
		dec__v = value;

		if(dec__v == 0) {
			bassume(dec__v == 0);
			return 0u-1; /*decrement failed, return max*/
		}else{
			bassume(!(dec__v == 0));
		}

		dec__vn = dec__v - 1;

		CAS(value,dec__v,dec__vn,dec__casret,dec_flag);

	}
	while (dec__casret==0);
	bassume(!(dec__casret==0));

	atomic_assert(inc_flag || value < dec__v);
	return dec__vn;
}
Example no. 20
File: main.c Project: olivo/BP
inline unsigned inc() {
	unsigned inc__v, inc__vn, inc__casret;

	do {
		inc__v = value;

		if(inc__v == 0u-1) {
			bassume(inc__v == 0u-1);
			return 0; /*increment failed, return min*/
		}else{
			bassume(!(inc__v == 0u-1));
		}

		inc__vn = inc__v + 1;

		CAS(value,inc__v,inc__vn,inc__casret,inc_flag);
	}
	while (inc__casret==0);
	bassume(!(inc__casret==0));

	atomic_assert(dec_flag || value > inc__v);

	return inc__vn;
}
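The five-argument CAS in Examples no. 19 and no. 20 reports success through an out-parameter and raises a per-operation flag that the opposite operation's atomic_assert consults. One illustrative stand-in, assuming GCC/Clang __atomic builtins (an assumption; the benchmark's actual macro is not shown and its flag semantics may differ):

#include <stdbool.h>

#define CAS(var, expected, desired, ret, flag)                        \
	do {                                                              \
		unsigned _e = (expected);                                     \
		(ret) = __atomic_compare_exchange_n(&(var), &_e, (desired),   \
		                                    false, __ATOMIC_SEQ_CST,  \
		                                    __ATOMIC_SEQ_CST);        \
		if ((ret)) (flag) = 1;                                        \
	} while (0)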
Example no. 21
/** Returns whether the upgrade happened without releasing and re-acquiring the lock */
bool spin_rw_mutex_v3::internal_upgrade()
{
    state_t s = state;
    __TBB_ASSERT( s & READERS, "invalid state before upgrade: no readers " );
    // check and set writer-pending flag
    // required conditions: either no pending writers, or we are the only reader
    // (with multiple readers and pending writer, another upgrade could have been requested)
    while( (s & READERS)==ONE_READER || !(s & WRITER_PENDING) ) {
        state_t old_s = s;
        if( (s=CAS(state, s | WRITER | WRITER_PENDING, s))==old_s ) {
            ITT_NOTIFY(sync_prepare, this);
            for( internal::atomic_backoff backoff; (state & READERS) != ONE_READER; )
                backoff.pause(); // while more than 1 reader
            __TBB_ASSERT((state&(WRITER_PENDING|WRITER))==(WRITER_PENDING|WRITER),"invalid state when upgrading to writer");
            // both new readers and writers are blocked at this time
            __TBB_FetchAndAddW( &state,  - (intptr_t)(ONE_READER+WRITER_PENDING));
            ITT_NOTIFY(sync_acquired, this);
            return true; // successfully upgraded
        }
    }
    // slow reacquire
    internal_release_reader();
    return internal_acquire_writer(); // always returns false
}
Example no. 22
void AgentPNS::start_threads(){
	assert(threadstate == Thread_Wait_Start);
	runbarrier.wait();
	CAS(threadstate, Thread_Wait_Start, Thread_Running);
}
Example no. 23
void* lf_table_find(marked_ptr_t* head, hash_key_t key, marked_ptr_t** prev, marked_ptr_t* cur)
{
    marked_ptr_t* tp_prev;
    marked_ptr_t tp_cur;
    marked_ptr_t* tp_next;

    hash_key_t cur_key;
    void* cur_value;

    if(PTR_OF(*head) == NULL)
    {
        if(prev) {*prev = head;};
        if(cur){*cur = *head;};

        return NULL;
    }

    while(1)
    {
        tp_prev = head;
        tp_cur = *head;

        while(1)
        {
            if (PTR_OF(tp_cur) == NULL)
            {
                if(prev){*prev = tp_prev;};
                if(cur){*cur = tp_cur;};

                return NULL;
            }

            tp_next = &PTR_OF(tp_cur)->next;

            cur_key = PTR_OF(tp_cur)->key;
            cur_value = PTR_OF(tp_cur)->value;

            if(*tp_prev != tp_cur)
            {
                break; // someone has mucked with the list, start over
            }

            if(MARK_OF(tp_cur))
            {
                if (CAS(tp_prev, CONSTRUCT(1, tp_cur), tp_next) == CONSTRUCT(1, tp_cur)) {
                    free(PTR_OF(tp_cur));

                    tp_cur = *tp_next;
                    continue;
                } else {
                    break; //start over
                }
            }

            if (key >= cur_key)
            {
                if(prev){*prev = tp_prev;};
                if(cur){*cur = tp_cur;};

                return key == cur_key ? cur_value : NULL;
            }

            tp_prev = tp_next;
            tp_cur = *tp_next;
        }
    }
}
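Example no. 23 keeps the deletion mark inside the pointer value itself via PTR_OF/MARK_OF/CONSTRUCT. Plausible low-bit-tagging definitions, assuming marked_ptr_t is a pointer-sized integer and node_t is the hash-node type used above (an assumption; the project's own macros are not shown):

#include <stdint.h>

typedef uintptr_t marked_ptr_t;                 /* pointer bits plus a 1-bit deletion mark */
#define PTR_OF(m)          ((node_t*)((m) & ~(marked_ptr_t)1))
#define MARK_OF(m)         ((m) & (marked_ptr_t)1)
#define CONSTRUCT(mark, m) (((marked_ptr_t)(uintptr_t)PTR_OF(m)) | ((mark) & 1))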
Example no. 24
  inline bool updateAtomic (uintE s, uintE d, intE edgeLen){ //atomic Update
    intE newDist = ShortestPathLen[s] + edgeLen;
    return (writeMin(&ShortestPathLen[d],newDist) &&
	    CAS(&Visited[d],0,1));
  }
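Examples no. 24 to 29 are Ligra-style updateAtomic callbacks that edgeMap may invoke concurrently for the same destination vertex. A sketch of the CAS and writeMin helpers they lean on, close in spirit to Ligra's utilities but written here as an assumption (integral element types only; Ligra's real helpers also handle other widths):

template <class T>
inline bool CAS(T* ptr, T oldv, T newv) {
	return __sync_bool_compare_and_swap(ptr, oldv, newv);
}

// Retry until *a is no larger than b; true iff this call is the one that stored b.
template <class T>
inline bool writeMin(T* a, T b) {
	T c; bool r = false;
	do { c = *a; } while (c > b && !(r = CAS(a, c, b)));
	return r;
}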
Example no. 25
 inline bool updateAtomic(const uintE &s, const uintE &d) {
   return (CAS(&Dist[d],make_pair(UINT_E_MAX,UINT_E_MAX),make_pair(round,Dist[s].second))); }
Example no. 26
void AgentPNS::timedout() {
	CAS(threadstate, Thread_Running, Thread_Wait_End);
	CAS(threadstate, Thread_GC, Thread_GC_End);
	timeout = true;
}
Example no. 27
void Player::timedout() {
	CAS(threadstate, Thread_Running, Thread_Wait_End);
	CAS(threadstate, Thread_GC, Thread_GC_End);
}
Example no. 28
 inline bool updateAtomic(const uintE &s, const uintE &d) {
   return (CAS(&Dist[d],UINT_E_MAX,round)); }
Example no. 29
File: BFSCC.C Project: mindis/ligra
 inline bool updateAtomic (uintE s, uintE d){ //atomic version of Update
   return (CAS(&Parents[d],UINT_E_MAX,label));
 }
Example no. 30
void SolverPNS2::timedout() {
	CAS(threadstate, Thread_Running, Thread_Wait_End);
	CAS(threadstate, Thread_GC, Thread_GC_End);
	timeout = true;
}