/**
 * The multipush method pushes a batch of elements (an array) into the
 * queue.
 * NOTE: len should be a multiple of longxCacheLine/sizeof(void*).
 */
inline bool multipush(void * const data[], int len) {
    if ((unsigned)len >= size) return false;
    unsigned long last = pwrite + ((pwrite + --len >= size) ? (len - size) : len);
    unsigned long r = len - (last + 1), l = last;
    unsigned long i;
    if (buf[last] == NULL) {
        if (last < pwrite) {
            // the batch wraps around the end of the circular buffer
            for (i = len; i > r; --i, --l)
                buf[l] = data[i];
            for (i = (size - 1); i >= pwrite; --i, --r)
                buf[i] = data[r];
        } else
            for (int i = len; i >= 0; --i)
                buf[pwrite + i] = data[i];
        WMB();
        pwrite = (last + 1 >= size) ? 0 : (last + 1);
#if defined(SWSR_MULTIPUSH)
        mcnt = 0; // reset mpush counter
#endif
        return true;
    }
    return false;
}
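/*
 * Minimal usage sketch (an assumption, not part of the original sources):
 * a producer accumulates items into a local batch whose length is a
 * multiple of longxCacheLine/sizeof(void*) and hands the whole batch to
 * multipush(), retrying while the queue has no room for it. The queue type
 * SomeQueue and this helper are hypothetical.
 */
void producer_batch_example(SomeQueue &q, void *items[], int n) {
    // n is assumed to be a multiple of longxCacheLine/sizeof(void*)
    while (!q.multipush(items, n)) {
        // queue (still) full: back off and retry until the consumer frees slots
    }
}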
/**
 * Push method (dynamic linked-list variant): write the element into the
 * current tail node when the next slot is free, otherwise allocate a new
 * node and splice it in right after the tail.
 */
inline bool push(void * const data) {
    assert(data != NULL);
    if (likely(tail->next_data == NULL)) {
        tail->data = data;
        tail = tail->next;
        WMB();
        return true;
    }
    Node * n = (Node *)::malloc(sizeof(Node));
    n->data = data;
    n->next = tail->next;
    n->next_data = &(tail->next->data);
    tail->next = n;
    WMB();
    return true;
}
/**
 * Blocking push: spin until the slot pointed to by tail becomes empty,
 * then store the element and advance tail.
 */
inline bool push(void * const data) {
#if POINTERS_VERSION
    do {} while (likely(CAST_TO_VUL(tail->data) != 0));           // wait for a free slot
    WMB();
    tail->data = CAST_TO_UL(data);
    tail = tail->next;
#else
    do {} while (likely(CAST_TO_VUL(min_cache[tail].data) != 0)); // wait for a free slot
    WMB();
    min_cache[tail].data = CAST_TO_UL(data);
    tail = min_cache[tail].next;
#endif
    return true;
}
void _init_ptst_subsystem(void) {
    ptst_list = NULL;
    next_id   = 0;
    WMB();
    if ( pthread_key_create(&ptst_key, (void (*)(void *))ptst_destructor) ) {
        exit(1);
    }
}
/**
 * Push method (unbounded list-based variant): allocate a new node for the
 * element and link it after the current tail.
 */
inline bool push(void * const data) {
    if (!data) return false;
    Node * n = allocnode();
    n->data = data;
    n->next = NULL;
    WMB();
    tail->next = n;
    tail = n;
    return true;
}
/**
 * Non-blocking push: store the element only if the slot pointed to by tail
 * is empty, otherwise fail immediately.
 */
inline bool push(void * const data) {
#if POINTERS_VERSION
    if (likely(CAST_TO_VUL(tail->data) == 0)) {
        WMB();
        tail->data = CAST_TO_UL(data);
        tail = tail->next;
        return true;
    }
    return false;
#else
    if (likely(CAST_TO_VUL(min_cache[tail].data) == 0)) {
        WMB();
        min_cache[tail].data = CAST_TO_UL(data);
        tail = min_cache[tail].next;
        return true;
    }
    return false;
#endif
}
/**
 * Push method (unbounded list-based variant with node recycling): reuse a
 * node from the internal cache if one is available, otherwise allocate a
 * fresh one, then link it after the current tail.
 */
inline bool push(void * const data) {
    if (!data) return false;
    union { Node * n; void * n2; } p;
    if (!cachepop(&p.n2))
        p.n = (Node *)::malloc(sizeof(Node));
    p.n->data = data;
    p.n->next = NULL;
    WMB();
    tail->next = p.n;
    tail = p.n;
    return true;
}
/**
 * Put an item into the buffer
 *
 * Precondition: item != NULL
 *
 * Implementation note:
 * - modifies only the tail pointer (not head)
 *
 * @param buf   buffer to write to
 * @param item  data item (a pointer) to write
 * @pre no concurrent writes
 * @pre item != NULL
 */
void LpelBufferPut( buffer_t *buf, void *item)
{
    assert( item != NULL );
    /* WRITE TO BUFFER */

    /* Write Memory Barrier: ensure all previous memory writes
     * are visible to the other processors before any later
     * writes are executed. This is an "expensive" memory fence
     * operation needed on all architectures with a weak-ordering
     * memory model, where stores can be executed out-of-order
     * (e.g. PowerPC). This is a no-op on Intel x86/x86-64 CPUs.
     */
    WMB();
    buf->tail->next = createEntry(item);
    buf->tail = buf->tail->next;
}
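/*
 * For reference, a sketch of how a WMB() macro is commonly defined (an
 * assumption about the build environment, not the definition taken from any
 * of the sources above): on x86/x86-64 a compiler barrier is sufficient
 * because stores are not reordered with other stores, while weakly ordered
 * architectures need a real store fence.
 */
#if defined(__x86_64__) || defined(__i386__)
#define WMB() __asm__ __volatile__ ("" ::: "memory")      /* compiler barrier only */
#elif defined(__powerpc__) || defined(__powerpc64__)
#define WMB() __asm__ __volatile__ ("lwsync" ::: "memory") /* lightweight sync */
#else
#define WMB() __sync_synchronize()                          /* full fence as a fallback */
#endif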
inline bool push(void * const data) {
    if (!data) return false;

    const unsigned long next = pwrite + ((pwrite+1 >= size) ? (1-size) : 1);
    if (next != pread) {
        buf[pwrite] = data;
        /* We have to ensure that all writes have been committed
         * in memory before we change the value of the pwrite
         * reference, otherwise the reader can read stale data.
         */
        WMB();
        pwrite = next;
        return true;
    }
    return false;
}
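/*
 * Consumer-side counterpart, sketched under the assumption that the same
 * buf/pread/pwrite/size fields are visible to the single reader; it is not
 * taken verbatim from the source above. The reader only advances pread; on
 * very weakly ordered architectures a read barrier between the index check
 * and the load of buf[pread] may additionally be required (omitted here).
 */
inline bool pop(void ** data) {
    if (pread == pwrite) return false;                    // queue empty
    *data = buf[pread];
    pread += (pread+1 >= size) ? (1-size) : 1;            // circular buffer
    return true;
}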
/**
 * Push an element into the internal cache if the current write slot is
 * free; return false if the slot is still occupied.
 */
inline bool cachepush(void * const data) {
    if (!cache[pwrite]) {
        /* Write Memory Barrier: ensure all previous memory writes
         * are visible to the other processors before any later
         * writes are executed. This is an "expensive" memory fence
         * operation needed on all architectures with a weak-ordering
         * memory model, where stores can be executed out-of-order
         * (e.g. PowerPC). This is a no-op on Intel x86/x86-64 CPUs.
         */
        WMB();
        cache[pwrite] = data;
        pwrite += (pwrite+1 >= cachesize) ? (1-cachesize) : 1;
        return true;
    }
    return false;
}
void _init_ptst_subsystem(gc_global_t *gc_global) {
    int e;

    gc_global->ptst_list = NULL;
#ifdef NEED_ID
    gc_global->next_id   = 0;
#endif
    WMB();
    e = pthread_key_create(&gc_global->ptst_key,
                           (void (*)(void *))ptst_destructor);
    if ( e ) {
#if !defined(KERNEL)
        printf("MCAS can't make ptst key error=%d, aborting\n", e);
#endif
        abort();
    }
}
/**
 * Push method: push the input value into the queue. A Write Memory
 * Barrier (WMB) ensures that all previous memory writes are visible to
 * the other processors before any later write is executed. This is an
 * "expensive" memory fence operation needed on all architectures with a
 * weak-ordering memory model, where stores can be executed out-of-order
 * (e.g. PowerPC). This is a no-op on Intel x86/x86-64 CPUs.
 *
 * \param data Element to be pushed into the buffer
 *
 * \return true if the element has been pushed, false if the buffer is
 *         full or data is NULL
 */
inline bool push(void * const data) {  /* modify only pwrite pointer */
    if (!data) return false;

    if (available()) {
        /**
         * Write Memory Barrier: ensure all previous memory writes
         * are visible to the other processors before any later
         * writes are executed. This is an "expensive" memory fence
         * operation needed on all architectures with a weak-ordering
         * memory model, where stores can be executed out-of-order
         * (e.g. PowerPC). This is a no-op on Intel x86/x86-64 CPUs.
         */
        WMB();
        buf[pwrite] = data;
        pwrite += (pwrite+1 >= size) ? (1-size) : 1; // circular buffer
        return true;
    }
    return false;
}
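/*
 * Usage sketch (an assumption about the surrounding API, not code from the
 * sources above): with a single-producer/single-consumer queue `q` of the
 * hypothetical type SomeSPSCQueue, the producer thread only calls push()
 * and the consumer thread only calls pop(); no locks are taken, and the
 * WMB() inside push() is the only ordering point the producer needs.
 */
void producer_thread(SomeSPSCQueue &q, void **items, size_t n) {
    for (size_t i = 0; i < n; ++i)
        while (!q.push(items[i]))
            ;   // buffer full: spin (or yield) until the consumer drains a slot
}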
/* Nodes p, x, y must be locked. */
static void right_rotate(ptst_t *ptst, node_t *x)
{
    node_t *y = x->l, *p = x->p, *nx;

    nx    = gc_alloc(ptst, gc_id);
    nx->p = y;
    nx->l = y->r;
    nx->r = x->r;
    nx->k = x->k;
    nx->v = x->v;
    mcs_init(&nx->lock);

    WMB();

    y->p    = p;
    x->r->p = nx;
    y->r->p = nx;
    y->r    = nx;
    if ( x == p->l ) p->l = y; else p->r = y;

    MK_GARBAGE(x);
    gc_free(ptst, x, gc_id);
}
void ContainerObjectsMap::loadObjects() {
    if (oids == NULL)
        return;

    Locker locker(&loadMutex);

    WMB();

    if (oids == NULL)
        return;

    for (int i = 0; i < oids->size(); ++i) {
        uint64 oid = oids->elementAt(i).getKey();

        Reference<SceneObject*> object = Core::getObjectBroker()->lookUp(oid).castTo<SceneObject*>();

        if (object != NULL)
            containerObjects.put(oid, object);
    }

    delete oids;
    oids = NULL;
}
inline void unlock() {
    WMB();
    _lock[0] = UNLOCKED;
}
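/*
 * A possible acquire-side counterpart, sketched under the assumption that
 * _lock[0] is a word-sized flag holding either LOCKED or UNLOCKED (LOCKED
 * and the exact field layout are not shown in the source above). It spins
 * with an atomic exchange until the flag is observed UNLOCKED.
 */
inline void lock() {
    // __sync_lock_test_and_set returns the previous value and has acquire semantics
    while (__sync_lock_test_and_set(&_lock[0], LOCKED) != UNLOCKED)
        ;   // busy-wait until the holder stores UNLOCKED in unlock()
}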
setval_t set_update(set_t *s, setkey_t k, setval_t v, int overwrite)
{
    ptst_t  *ptst;
    qnode_t  y_qn, z_qn;
    node_t  *y, *z, *new_internal, *new_leaf;
    int      fix_up = 0;
    setval_t ov = NULL;

    k = CALLER_TO_INTERNAL_KEY(k);

    ptst = critical_enter();

 retry:
    z = &s->root;
    while ( (y = (k <= z->k) ? z->l : z->r) != NULL )
        z = y;

    y = z->p;
    mcs_lock(&y->lock, &y_qn);
    if ( (((k <= y->k) ? y->l : y->r) != z) || IS_GARBAGE(y) )
    {
        mcs_unlock(&y->lock, &y_qn);
        goto retry;
    }

    mcs_lock(&z->lock, &z_qn);
    assert(!IS_GARBAGE(z) && IS_LEAF(z));

    if ( z->k == k )
    {
        ov = GET_VALUE(z->v);
        if ( overwrite || (ov == NULL) )
            SET_VALUE(z->v, v);
    }
    else
    {
        new_leaf     = gc_alloc(ptst, gc_id);
        new_internal = gc_alloc(ptst, gc_id);

        new_leaf->k = k;
        new_leaf->v = MK_BLACK(v);
        new_leaf->l = NULL;
        new_leaf->r = NULL;
        new_leaf->p = new_internal;
        mcs_init(&new_leaf->lock);

        if ( z->k < k )
        {
            new_internal->k = z->k;
            new_internal->l = z;
            new_internal->r = new_leaf;
        }
        else
        {
            new_internal->k = k;
            new_internal->l = new_leaf;
            new_internal->r = z;
        }

        new_internal->p = y;
        mcs_init(&new_internal->lock);

        if ( IS_UNBALANCED(z->v) )
        {
            z->v = MK_BALANCED(z->v);
            new_internal->v = MK_BLACK(INTERNAL_VALUE);
        }
        else if ( IS_RED(y->v) )
        {
            new_internal->v = MK_UNBALANCED(MK_RED(INTERNAL_VALUE));
            fix_up = 1;
        }
        else
        {
            new_internal->v = MK_RED(INTERNAL_VALUE);
        }

        /* Make the new subtree visible only after it is fully initialised. */
        WMB();

        z->p = new_internal;
        if ( y->l == z ) y->l = new_internal; else y->r = new_internal;
    }

    mcs_unlock(&y->lock, &y_qn);
    mcs_unlock(&z->lock, &z_qn);

    if ( fix_up )
        fix_unbalance_up(ptst, new_internal);

 out:
    critical_exit(ptst);

    return ov;
}