//------------------------------------------------------------------------ // micro_queue //------------------------------------------------------------------------ void micro_queue::push( const void* item, ticket k, concurrent_queue_base& base ) { k &= -concurrent_queue_rep::n_queue; page* p = NULL; size_t index = (k/concurrent_queue_rep::n_queue & base.items_per_page-1); if ( !index ) { size_t n = sizeof(page) + base.items_per_page*base.item_size; p = static_cast<page*>(operator new( n )); p->mask = 0; p->next = NULL; } { push_finalizer finalizer( *this, k+concurrent_queue_rep::n_queue ); spin_wait_until_eq( tail_counter, k ); if ( p ) { spin_mutex::scoped_lock lock( page_mutex ); if ( page* q = tail_page ) q->next = p; else head_page = p; tail_page = p; } else { p = tail_page; } base.copy_item( *p, index, item ); // If no exception was thrown, mark item as present. p->mask |= uintptr(1)<<index; } }
// Package the arguments into an Event record and forward it to the
// registered callback (m_cb). The handle field identifies this object.
void invokeEvent(EventType type, const char* path, int32 ret, int32 param) {
    Event ev;
    ev.handle = uintptr(this);
    ev.type   = type;
    ev.path   = path;
    ev.ret    = ret;
    ev.param  = param;
    m_cb.invoke(ev);
}
// Remove the item for ticket k from this micro-queue into dst.
// Spin-waits until it is this ticket's turn AND a matching push has
// started (tail_counter moved past k). Returns true if an item was
// actually present (its mask bit set); false if the corresponding push
// failed partway (bit left clear), in which case the slot is skipped.
bool micro_queue::pop( void* dst, ticket k, concurrent_queue_base& base ) {
    // Round the ticket down to this micro-queue's slot sequence.
    k &= -concurrent_queue_rep::n_queue;
    // Wait for our turn among poppers...
    spin_wait_until_eq( head_counter, k );
    // ...and for the matching push to have claimed the slot.
    spin_wait_while_eq( tail_counter, k );
    page& p = *head_page;
    __TBB_ASSERT( &p, NULL );
    // Slot index within the head page (items_per_page is a power of two).
    size_t index = (k/concurrent_queue_rep::n_queue & base.items_per_page-1);
    bool success = false;
    {
        // Finalizer advances head_counter on scope exit (even on exception),
        // and frees the head page when this was its last slot.
        pop_finalizer finalizer( *this, k+concurrent_queue_rep::n_queue, index==base.items_per_page-1 ? &p : NULL );
        if ( p.mask & uintptr(1)<<index ) {
            // Item is present: move it out and destroy the source copy.
            success = true;
            base.assign_and_destroy_item( dst, p, index );
        }
    }
    return success;
}
// Install a new pointer into the tagged word: the low tag bits selected
// by MASK are preserved, the pointer bits are replaced.
INLINE void setptr(const T *ptr) {
    const uintptr tagBits = prim & uintptr(MASK);
    prim = tagBits | uintptr(ptr);
}