Example 1
	ReadGuard ReadWriteLock::Read()
	{
		const u32 lock_key(EXCL_ENCODE(exclusive_write, 0));
		// No need to encode since we know the insert counter exclusively owns the lower 28 bits of mMutualExclusivityMask.
		// Mask off the mutual exclusivity bits to get the insert count, then acquire (or keep) insert exclusivity by incrementing that count.

		const ThreadID threadID = GetCurrentThreadId();
		u32 current_key	= (mMutualExclusivityMask & READ_COUNT_MASK);
		u32 insert_key	= current_key + 1;

		while (atomic_compare_and_swap(&mMutualExclusivityMask, current_key, insert_key) != current_key)
		{
			if (threadID == mWriteEntryThread && mMutualExclusivityMask == lock_key)
			{
				atomic_increment((u32*)&mReentrancyCount);
				break;
			}

			cond_wait();
			current_key = (mMutualExclusivityMask & READ_COUNT_MASK);
			insert_key = current_key + 1;
		}

		return ReadGuard(this);
	}
Example 2
    void free(T* p) {
      // Is this pointer from the pool? If it lies outside the pool's
      // address range it was heap-allocated, so hand it back to delete.
      if (__unlikely__(p < lower_ptrlimit || p > upper_ptrlimit)) {
        delete p;
        return;
      }
    
      index_type cur = index_type(p - &(data[0]));

      // prepare for free-list insertion:
      // atomically set freelisthead = cur and freelist[cur] = old freelisthead
      queue_ref_type oldhead;
      queue_ref_type newhead;
      do{
        oldhead.combined = freelisthead.combined;
        freelist[cur] = oldhead.q.val;
        newhead.q.val = cur;
        newhead.q.counter = oldhead.q.counter + 1;
        // now try to atomically move freelisthead
      } while(!atomic_compare_and_swap(freelisthead.combined, 
                                       oldhead.combined, 
                                       newhead.combined));
    }
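
The free() above (and the matching alloc() in Example 19) relies on a queue_ref_type that packs the free-list head index together with a modification counter into one word, so the compare-and-swap can detect ABA-style reuse of the same index. The actual definition is not part of the excerpt; a minimal sketch of one plausible layout (the 32/64-bit widths are assumptions) is:

    // Minimal sketch: the member names mirror the fields used above, but the
    // exact widths and the 64-bit combined word are assumptions.
    typedef uint32_t index_type;
    union queue_ref_type {
      struct {
        index_type val;      // index of the current free-list head, index_type(-1) when empty
        uint32_t   counter;  // bumped on every update to defeat the ABA problem
      } q;
      uint64_t combined;     // the single word handed to atomic_compare_and_swap
    };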
Example 3
static inline void wunlock(unsigned int *lock)
{
	unsigned int test;

	// Release write lock
	do {
		test = *lock;
	} while (!atomic_compare_and_swap(lock, test, test + 1));
}
Example 4
static int
ccwgroup_set_offline(struct ccwgroup_device *gdev)
{
	struct ccwgroup_driver *gdrv;
	int ret;

	if (atomic_compare_and_swap(0, 1, &gdev->onoff))
		return -EAGAIN;
	if (gdev->state == CCWGROUP_OFFLINE) {
		ret = 0;
		goto out;
	}
	if (!gdev->dev.driver) {
		ret = -EINVAL;
		goto out;
	}
	gdrv = to_ccwgroupdrv (gdev->dev.driver);
	if ((ret = gdrv->set_offline(gdev)))
		goto out;

	gdev->state = CCWGROUP_OFFLINE;
 out:
	atomic_set(&gdev->onoff, 0);
	return ret;
}
Example 5
void intrusive_ptr_release(T *t) {
  if (atomic_compare_and_swap(&t->intrusive_count_, 1, 0)) {
    delete t;
  } else {
    atomic_dec(&t->intrusive_count_, 1);
  }
}
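
Note that the ordering above can leak under contention: with a count of 2, two concurrent releases both fail the CAS against 1, both fall through to the plain decrement, and neither one deletes. A minimal sketch of the more conventional ordering, assuming a hypothetical atomic_dec_ret() primitive that decrements and returns the new value:

// Sketch only; atomic_dec_ret() is an assumed primitive, not part of the excerpt.
void intrusive_ptr_release(T *t) {
  // Decrement first; whichever thread observes the count hit zero owns destruction.
  if (atomic_dec_ret(&t->intrusive_count_) == 0) {
    delete t;
  }
}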
Example 6
static inline void wlock(unsigned int *lock)
{
	unsigned int value;

	do {
		value = (*lock);
	} while ((value&0x80000001) || !atomic_compare_and_swap(lock, value, value + 1));
}
Example 7
 bool atomic_eat() {
   if (num_edges == forks_acquired.value) {
     return atomic_compare_and_swap(state, 
                                    (unsigned char)HUNGRY, 
                                    (unsigned char)EATING);
   }
   return false;
 }
Example 8
static inline void runlock(unsigned int *lock)
{
	unsigned int test;

	// Decrement the reader counter
	do {
		test = *lock;
	} while (!atomic_compare_and_swap(lock, test, test - 1));
}
Example 9
	WriteGuard ReadWriteLock::TryWrite()
	{
		const u32 lock_key(EXCL_ENCODE(exclusive_write, 0));
		const bool success = (atomic_compare_and_swap(&mMutualExclusivityMask, exclusive_none, lock_key) == exclusive_none);
		if (success)
			mWriteEntryThread = GetCurrentThreadId();

		return success ? WriteGuard(this) : WriteGuard(nullptr);
	}
Example 10
	ReadGuard ReadWriteLock::TryRead()
	{
		u32 current_key = (mMutualExclusivityMask & READ_COUNT_MASK);
		u32 insert_key = current_key + 1;

		const bool success = (atomic_compare_and_swap(&mMutualExclusivityMask, current_key, insert_key) == current_key);

		return success ? ReadGuard(this) : ReadGuard(nullptr);
	}
Example 11
 //! Performs an atomic decrement by 'val', returning the value held just before the decrement
 T dec_ret_last(const T val) { 
   T prev_value;
   T new_value;
   do {
     prev_value = value;
     new_value = prev_value - val;
   } while(!atomic_compare_and_swap(value, prev_value, new_value));
   return prev_value; 
 }
Example 12
 //! Performs an atomic increment by 'val', returning the new value
 T inc(const T val) { 
   T prev_value;
   T new_value;
   do {
     prev_value = value;
     new_value = prev_value + val;
   } while(!atomic_compare_and_swap(value, prev_value, new_value));
   return new_value; 
 }
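
For comparison, the same fetch-and-add loop written against C++11 std::atomic would use compare_exchange_weak, which reloads the expected value on failure by itself; this is a standalone sketch, not the wrapper the snippets above belong to:

#include <atomic>

template <typename T>
T inc(std::atomic<T>& value, const T val) {
  T prev_value = value.load();
  // On failure compare_exchange_weak stores the freshly observed value into
  // prev_value, so the loop simply retries with the updated operand.
  while (!value.compare_exchange_weak(prev_value, prev_value + val)) {}
  return prev_value + val;
}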
Example 13
  void dc_buffered_stream_send2::send_data(procid_t target,
                                           unsigned char packet_type_mask,
                                           char* data, size_t len) {
    if ((packet_type_mask & CONTROL_PACKET) == 0) {
      if (packet_type_mask & (STANDARD_CALL)) {
        dc->inc_calls_sent(target);
      }
      bytessent.inc(len - sizeof(packet_hdr));
    }
    
    // build the packet header
    packet_hdr* hdr = reinterpret_cast<packet_hdr*>(data);
    memset(hdr, 0, sizeof(packet_hdr));

    hdr->len = len - sizeof(packet_hdr);
    hdr->src = dc->procid();
    hdr->sequentialization_key = dc->get_sequentialization_key();
    hdr->packet_type_mask = packet_type_mask;
    iovec msg;
    msg.iov_base = data;
    msg.iov_len = len;
    bool trigger = false;
    size_t insertloc;
    while(1) {
      size_t curid;
      while(1) {
        curid = bufid;
        int32_t cref = buffer[curid].ref_count;
        if (cref < 0 || 
            !atomic_compare_and_swap(buffer[curid].ref_count, cref, cref + 1)) continue;

        if (curid != bufid) {
          __sync_fetch_and_sub(&(buffer[curid].ref_count), 1);
        }
        else {
          break;
        }
      }
      // ok, we have a reference count into curid, we can write to it
      insertloc = buffer[curid].numel.inc_ret_last();
      // oops, out of buffer room: release the reference count, back off and retry
      if (insertloc >= buffer[curid].buf.size()) {
        __sync_fetch_and_sub(&(buffer[curid].ref_count), 1);
        usleep(1);
        continue;
      }
      buffer[curid].buf[insertloc] = msg;
      buffer[curid].numbytes.inc(len);    
      trigger = ((writebuffer_totallen.inc_ret_last(len)) == 0);
      // decrement the reference count
      __sync_fetch_and_sub(&(buffer[curid].ref_count), 1);
      break;
    }
    
    if (trigger || (packet_type_mask & CONTROL_PACKET)) comm->trigger_send_timeout(target);
  }
Example 14
static inline void rlock(unsigned int *lock)
{
	unsigned int test;

	// 1. Wait for exclusive write lock to be released, if any
	// 2. Increment reader counter
	do {
		test = *lock;
	} while (test & WBIT || !atomic_compare_and_swap(lock, test, test + 1));
}
Example 15
static inline void wlock(unsigned int *lock)
{
	unsigned int test;

	// 1. Wait for exclusive write lock to be released, if any
	// 2. Take exclusive write lock
	do {
		test = *lock;
	} while (test & 1 || !atomic_compare_and_swap(lock, test, test + 1));
}
Example 16
static inline void wlock(unsigned int *lock)
{
	unsigned int test;

	// 1. Wait for exclusive write lock to be released, if any
	// 2. Take exclusive write lock
	do {
		test = *lock;
	} while (test & WBIT || !atomic_compare_and_swap(lock, test, test | WBIT));
	// 3. Wait for readers to complete before proceeding
	while ((*lock) & RBITS);
}
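
The matching release for this WBIT/RBITS variant is not shown. Under the usual layout, where WBIT is a dedicated writer bit and RBITS masks the reader count, unlocking only has to clear WBIT; a minimal sketch under that assumption:

static inline void wunlock(unsigned int *lock)
{
	unsigned int test;

	// Assumed counterpart to the wlock() above: clear the writer bit so that
	// readers spinning on WBIT in rlock() can proceed.
	do {
		test = *lock;
	} while (!atomic_compare_and_swap(lock, test, test & ~WBIT));
}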
Example 17
/** Write lock 
 **/
extern "C" void wlock(unsigned int *lock)
{
	unsigned int timeout = MAXSPIN;
	unsigned int value;
	extern unsigned int wlock_count, wlock_spin;
	check_lock(lock,true,false);
	atomic_increment(&wlock_count);
	do {
		value = (*lock);
		atomic_increment(&wlock_spin);
		if ( timeout--==0 ) 
			throw_exception("write lock timeout");
	} while ((value&1) || !atomic_compare_and_swap(lock, value, value + 1));
}
Example 18
	void ReadWriteLock::Release(WriteGuard&)
	{
		KS_ASSERT(mWriteEntryThread == GetCurrentThreadId() && mReentrancyCount >= 0);
		int reentrants = mReentrancyCount > 0 ? atomic_decrement((u32*)&mReentrancyCount) + 1 : 0;
		if (reentrants == 0)
		{
			mWriteEntryThread = 0;
			const u32 lock_key(EXCL_ENCODE(exclusive_write, 0));
			if (atomic_compare_and_swap(&mMutualExclusivityMask, lock_key, exclusive_none) != lock_key)
			{
				KS_ASSERT( ! "ReadWriteLockException::eAlreadyUnlocked" );
			}
		}
	}
Example 19
 T* alloc() {
   // I need to atomically advance freelisthead to the freelist[head]
   queue_ref_type oldhead;
   queue_ref_type newhead;
   do {
     oldhead.combined = freelisthead.combined;
     if (oldhead.q.val == index_type(-1)) return new T; // ran out of pool elements
     newhead.q.val = freelist[oldhead.q.val];
     newhead.q.counter = oldhead.q.counter + 1;
   } while(!atomic_compare_and_swap(freelisthead.combined, 
                                    oldhead.combined, 
                                    newhead.combined));
   freelist[oldhead.q.val] = index_type(-1);
   return &(data[oldhead.q.val]);
 }
Example 20
    uint32_t find(uint32_t x) {
      if (is_root(x)) return x;
      
      uint32_t y = x;
      // get the id of the root element
      while (!is_root(x)) { x = setid[x].d.next; }

      // update the parents and ranks all the way up
      while (setid[y].d.rank < setid[x].d.rank) {
        uint32_t t = setid[y].d.next;
        atomic_compare_and_swap(setid[y].d.next, t, x);
        y = setid[t].d.next;
      }
      return x;
    }
Example 21
int
irq_alloc_vectors(unsigned count, unsigned* _base)
{
	uint32_t base;

	assert(count < MAX_IRQ_COUNT);

	do {
		base = *(volatile uint32_t*)&free_vector;
		if (base + count >= max_vector)
			return E_NO_DEV;
	} while (!atomic_compare_and_swap(&free_vector, base, base + count));

	*_base = base;
	return E_OK;
}
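
A hypothetical call site, only to illustrate the contract (E_OK, E_NO_DEV and the out-parameter come from the function above; the count of 4 is made up):

	unsigned base;

	/* Reserve 4 contiguous interrupt vectors for some device. */
	if (irq_alloc_vectors(4, &base) == E_OK) {
		/* vectors base .. base + 3 now belong to this driver */
	} else {
		/* E_NO_DEV: the shared vector space is exhausted */
	}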
Example 22
 inline void readlock(request *I)  {
   I->lockclass =QUEUED_RW_LOCK_REQUEST_READ;
   I->next = NULL;
   I->s.stateu = 0;
   I->s.state.successor_class = QUEUED_RW_LOCK_REQUEST_NONE;
   I->s.state.blocked = true;
   __sync_synchronize(); 
   request* predecessor = __sync_lock_test_and_set(&tail, I);
   if (predecessor == NULL) {
     reader_count.inc();
     I->s.state.blocked = false;
   }
   else {
     
     state_union tempold, tempnew;
     tempold.state.blocked = true;
     tempold.state.successor_class = QUEUED_RW_LOCK_REQUEST_NONE;
     tempnew.state.blocked = true;
     tempnew.state.successor_class = QUEUED_RW_LOCK_REQUEST_READ;
     __sync_synchronize();
     if (predecessor->lockclass == QUEUED_RW_LOCK_REQUEST_WRITE ||
         atomic_compare_and_swap(predecessor->s.stateu,
                                 tempold.stateu,
                                 tempnew.stateu)) {
       
       predecessor->next = I;
       // wait
       __sync_synchronize(); 
       volatile state_union& is = I->s;
       while(is.state.blocked) sched_yield();
     }
     else {
       reader_count.inc();
       predecessor->next = I;
       __sync_synchronize();
       I->s.state.blocked = false;
     }
   }
   __sync_synchronize();
   if (I->s.state.successor_class == QUEUED_RW_LOCK_REQUEST_READ) {
     
     // wait
     while(I->next == NULL) sched_yield();
     reader_count.inc();
     I->next->s.state.blocked = false;
   }
 }
Example 23
	WriteGuard ReadWriteLock::Write()
	{
		const ThreadID threadID = GetCurrentThreadId();
		const u32 lock_key(EXCL_ENCODE(exclusive_write, 0));
		while (atomic_compare_and_swap(&mMutualExclusivityMask, exclusive_none, lock_key) != exclusive_none)
		{
			if (threadID == mWriteEntryThread)
			{
				atomic_increment((u32*)&mReentrancyCount);
				break;
			}
			cond_wait();
		}

		KS_ASSERT(mWriteEntryThread == 0 || mWriteEntryThread == threadID);
		mWriteEntryThread = threadID;

		return WriteGuard(this);
	}
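
Putting Example 18 and Example 23 together, a write section pairs Write() with Release() through the returned guard (whether WriteGuard also calls Release() from its destructor is not visible in these excerpts); a hypothetical caller, sketched under that reading:

	ReadWriteLock lock;

	WriteGuard guard = lock.Write();   // blocks, but re-entrant for the owning thread
	// ... mutate the state protected by 'lock' ...
	lock.Release(guard);               // Example 18: clears mWriteEntryThread and the mask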
Example 24
/** Check a lock trace
 **/
void check_lock(unsigned int *lock, bool write, bool unlock)
{
	LOCKLIST *item;

	// lock locklist
	static unsigned int check_lock=0;
	unsigned int timeout = MAXSPIN;
	unsigned int value;
	do {
		value = check_lock;
		if ( timeout--==0 ) 
			throw_exception("check lock timeout");
	} while ((value&1) || !atomic_compare_and_swap(&check_lock, value, value + 1));

	for ( item=locklist ; item!=NULL ; item=item->next )
	{
		if ( item->lock==lock )
			break;
	}
	if ( item==NULL )
	{
		printf("%s %slock(%p) = %d (unregistered)\n", 
			write?"write":"read ", 
			unlock?"un":"  ",
			lock,
			*lock);
		register_lock("unregistered",lock);
	}
	else 
	{
		bool no_lock = unlock&&((*lock&1)!=1);
//		bool damage = abs(item->last_value-*lock)>1;
//		if ( damage ) // found a registered lock that was damaged
//			printf("%s %slock(%p) = %d (%s) - possible damage (last=%d)\n", write?"write":"read ", unlock?"un":"  ",lock,*lock,item->name, item->last_value);
		if ( no_lock ) // found a registered lock that was not locked
			printf("%s %slock(%p) = %d (%s) - no lock detected (last=%d)\n", write?"write":"read ", unlock?"un":"  ",lock,*lock,item->name, item->last_value);
		item->last_value = *lock;
	}

	// unlock locklist
	atomic_increment(&check_lock);
}
Example 25
/*
 * Provide an 'ungroup' attribute so the user can remove group devices no
 * longer needed or accidentally created. Saves memory :)
 */
static ssize_t
ccwgroup_ungroup_store(struct device *dev, const char *buf, size_t count)
{
	struct ccwgroup_device *gdev;

	gdev = to_ccwgroupdev(dev);
	/* Prevent concurrent online/offline processing and ungrouping. */
	if (atomic_compare_and_swap(0, 1, &gdev->onoff))
		return -EAGAIN;
	if (gdev->state != CCWGROUP_OFFLINE) {
		/* Release onoff "lock" when ungrouping failed. */
		atomic_set(&gdev->onoff, 0);
		return -EINVAL;
	}

	__ccwgroup_remove_symlinks(gdev);
	device_unregister(dev);

	return count;
}
Example 26
	void ReadWriteLock::Release(ReadGuard&)
	{
		const u32 lock_key = EXCL_ENCODE(exclusive_write, 0);
		u32 mutual_mask = mMutualExclusivityMask;
		u32 current_key = mutual_mask & READ_COUNT_MASK;
		u32 exit_key = current_key - 1;
		while (atomic_compare_and_swap(&mMutualExclusivityMask, current_key, exit_key) != current_key )
		{
			if (mutual_mask == lock_key && mWriteEntryThread == GetCurrentThreadId())	// it's a re-entrant read
			{
				int reentrants = atomic_decrement((u32*)&mReentrancyCount);
				KS_ASSERT(reentrants >= 0);
				break;
			}

			cond_wait();
			mutual_mask = mMutualExclusivityMask;
			current_key = mutual_mask & READ_COUNT_MASK;
			exit_key = current_key - 1;
		}
	}
Example 27
    /** Changes the fork owner if the fork is dirty and the other side
     *  has requested it. The fork must be locked.
     *  Returns true if the fork moved, false otherwise.
     */
    inline bool advance_fork_state_on_lock(size_t forkid,
                                           vertex_id_type source,
                                           vertex_id_type target) {
        while(1) {
            unsigned char forkval = forkset[forkid];
            unsigned char currentowner = forkval & OWNER_BIT;
            // edge_ids for the request bits
            unsigned char my_request_bit = request_bit(currentowner);
            unsigned char other_request_bit = request_bit(!currentowner);

            bool current_owner_is_eating =
                (currentowner == OWNER_SOURCE && philosopherset[source].state == EATING) ||
                (currentowner == OWNER_TARGET && philosopherset[target].state == EATING);
            bool current_owner_is_hungry =
                (currentowner == OWNER_SOURCE && philosopherset[source].state == HUNGRY) ||
                (currentowner == OWNER_TARGET && philosopherset[target].state == HUNGRY);

            // if the current owner is not eating, and the
            // fork is dirty and other side has placed a request
            if (current_owner_is_eating == false &&
                    (forkval & DIRTY_BIT) &&
                    (forkval & other_request_bit)) {
                // change the owner and clean the fork
                unsigned char newforkval = (!currentowner);
                if (current_owner_is_hungry) {
                    newforkval |= my_request_bit;
                }

                if (atomic_compare_and_swap(forkset[forkid], forkval, newforkval)) {
                    return true;
                }
            }
            else {
                return false;
            }
        }
    }
Example 28
 bool updateroot(uint32_t x, uint32_t oldrank,
                 uint32_t y, uint32_t newrank) {
   elem old; old.d.next = x; old.d.rank = oldrank;
   elem newval; newval.d.next = y; newval.d.rank = newrank;
   return atomic_compare_and_swap(setid[x].val, old.val, newval.val);
 }
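
updateroot() (and find() in Example 20) treats each entry of setid[] as a single word that overlays a parent index and a rank, compared and swapped through .val. The actual definition is not included; a plausible sketch, with the field widths assumed, is:

 // Minimal sketch; only the member names come from the excerpts, the widths are assumed.
 union elem {
   struct {
     uint32_t next : 24;   // parent ("next") index within setid[]
     uint32_t rank : 8;    // union-by-rank height estimate
   } d;
   uint32_t val;           // the single word handed to atomic_compare_and_swap
 };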