/**
 * Increase the reference count of a piece of memory.
 * Returns false if already released.
 */
bool lock(pointer m)
{
    // Recover the bookkeeping Item from the user pointer.
    // NOTE(review): this relies on \a m aliasing the start of its Item
    // (i.e. content presumably being the first member) — confirm.
    Item* item = reinterpret_cast<Item*>(m);
    // A zero refcount means the slot was already handed back to the pool.
    // NOTE(review): the read and the increment below are not one atomic
    // step; a concurrent release in between is not guarded against here —
    // identical window to the original code.
    if ( oro_atomic_read( &item->rc ) == 0 )
        return false;
    oro_atomic_inc( &item->rc );
    return true;
}
/**
 * Acquire a pointer to memory of type \a T.
 */
pointer allocate()
{
    void* slot;
    // Try to pull a free slot from the pool; fail fast when it is empty.
    if ( mpool.dequeue( slot ) == false )
        return 0;
    Item* item = static_cast<Item*>(slot);
    oro_atomic_inc( &item->rc );   // take the first reference on the slot
    return &item->content;
}
/**
 * Acquire and lock() a pointer to previously reserve()'d memory of type \a T.
 */
pointer allocate()
{
    void* slot;
    // Walk every queue of the pool until one hands out a free slot.
    typename PoolType::iterator pit = mpool.begin();
    for ( ; pit != mpool.end(); ++pit ) {
        if ( pit->first->dequeue( slot ) ) {
            // Slot obtained: take a reference before exposing it.
            oro_atomic_inc( &static_cast<Item*>(slot)->rc );
            return static_cast<pointer>( slot );
        }
    }
    // No queue had a free slot.
    return 0;
}
/**
 * Get a copy of the Data (non allocating).
 * If pull has reserved enough memory to store the copy,
 * no memory will be allocated.
 *
 * @param pull A copy of the data.
 */
virtual void Get( DataType& pull ) const
{
    PtrType buf;
    // Retry loop combining the read of read_ptr with the counter update.
    // Without it, read_ptr could become write_ptr between the snapshot and
    // the pin, and we would copy data the writer is overwriting.
    while ( true ) {
        buf = read_ptr;                    // snapshot the buffer location
        oro_atomic_inc( &buf->counter );   // pin the buffer: no more writes
        if ( buf == read_ptr )             // still the current read buffer?
            break;                         // yes: safe to read from it
        oro_atomic_dec( &buf->counter );   // no: unpin and start over
    }
    // From here on 'buf' is guaranteed to be a valid buffer to read from.
    pull = buf->data;                      // the copy may take some time
    oro_atomic_dec( &buf->counter );       // release the buffer
}
/** Atomically increment this channel element's reference count. */
void ChannelElementBase::ref()
{
    oro_atomic_inc(&refcount);
}
/** Atomically increment the stored value by one. */
void inc() { oro_atomic_inc( &_val ); }