/*
 * Full garbage-collection pass over the database heap.
 *
 * Phases:
 *   1. Mark every heap block as a candidate for freeing.
 *   2. Synchronize (write-lock then unlock the root lock) so anyone who
 *      was inside the database during the mark phase has left.
 *   3. Un-mark ("keep") everything still reachable: the allocator's own
 *      structures, the meta table, the cache, the root data objects and
 *      the per-process pidcache.
 *   4. Free whatever is still marked.
 *
 * ctx   - database context.
 * stats - GC statistics out-parameter; not referenced in this body
 *         (TODO confirm whether callers expect it to be filled in).
 * Returns 0.  Per-phase timings are emitted at debug log level.
 */
int _db_gc(pgctx_t *ctx, gcstats_t *stats)
{
    int64_t t0, t1, t2, t3, t4, t5;
    memheap_t *heap = _ptr(ctx, ctx->root->heap);

    t0 = utime_now();
    // Phase 1: mark all blocks as free candidates.
    pmem_gc_mark(&ctx->mm, heap, 0);
    t1 = utime_now();
    // Synchronize here.  All this does is make sure anyone who was
    // in the database during the mark phase is out before we do the
    // walk phase.
    _dblockop(ctx, MLCK_WR, ctx->root->lock);
    _dblockop(ctx, MLCK_UN, ctx->root->lock);
    // Eliminate the structures used by the memory subsystem itself
    gc_keep(ctx, heap);
    gc_keep(ctx, _ptr(ctx, heap->pool));
    // Eliminate references in the meta table
    if (isPtr(ctx->root->meta.id.type)) {
        gc_keep(ctx, dbptr(ctx, ctx->root->meta.id));
    }
    t2 = utime_now();
    // Keep everything reachable from the in-memory cache.
    gc_walk(ctx, ctx->cache);
    t3 = utime_now();
    // Eliminate references that have parents that extend back to
    // the root "data" objects
    gc_walk(ctx, ctx->data);
    // Also any references owned by all currently running processes
    gc_walk(ctx, ctx->root->pidcache);
    t4 = utime_now();
    // Free everything that remains
    //pmem_gc_free(&ctx->mm, heap, 0, (gcfreecb_t)dbcache_del, ctx);
    pmem_gc_free(&ctx->mm, heap, 0, NULL, ctx);
    t5 = utime_now();
    log_debug("GC timing:");
    log_debug("   mark: %lldus", t1-t0);
    log_debug("   sync: %lldus", t2-t1);
    log_debug("  cache: %lldus", t3-t2);
    log_debug("   walk: %lldus", t4-t3);
    log_debug("   free: %lldus", t5-t4);
    log_debug("  total: %lldus", t5-t0);
    return 0;
}
void PrlHandleSmartPtrTest::testPointerAssignOperatorOnNullPointer() { PrlHandleTestPtr _ptr((PrlHandleTest *)m_pHandle); QCOMPARE(quint32(m_pHandle->GetRefCount()), quint32(2)); _ptr = NULL; QCOMPARE(quint32(m_pHandle->GetRefCount()), quint32(1)); }
void PrlHandleSmartPtrTest::testConstructorDestructor()
{
    {
        // Construction grabs a reference on top of the fixture's own one.
        PrlHandleTestPtr pWrapper((PrlHandleTest *)m_pHandle);
        QCOMPARE(quint32(m_pHandle->GetRefCount()), quint32(2));
    }
    // Leaving the scope destroys the wrapper, which must release it.
    QCOMPARE(quint32(m_pHandle->GetRefCount()), quint32(1));
}
// Perform a library-mediated cast: resolve the cast function registered
// under <id_cast, type> for the current value, then invoke it.
// lib - library object the cast implementation is looked up in.
// val - in/out: holds the value to cast on entry; cleared, then
//       presumably receives the cast result from the call (passed by
//       non-const reference) — TODO confirm against func.call semantics.
inline void doCast(const PMaybe &lib, PMaybe &val)
{
    PMaybe func(nullptr);
    // Resolve the cast callable for this value.
    libGet<id_cast, type>(PVal(val), lib, func);
    // Build the receiver from a quoted copy of val BEFORE val is cleared
    // below — the statement order matters here.
    PMaybe caller(_ptr(_quote(PVal(val)), val->getType())); // TODO simplify
    val = nullptr;
    func.call<true>(caller, lib, val);
}
/* Close a database context: drop it from the global context registry,
 * hand the heap back to the pmem layer, then unmap the file. */
void dbfile_close(pgctx_t *ctx)
{
    int slot;

    /* Clear every registry slot that still refers to this context. */
    for (slot = 0; slot < NR_DB_CONTEXT; slot++) {
        if (dbctx[slot] == ctx)
            dbctx[slot] = NULL;
    }

    pmem_retire(&ctx->mm, _ptr(ctx, ctx->root->heap), 0);
    mm_close(&ctx->mm);
}
// Two-stage dispatch: first resolve the dispatch table registered for
// 'val' in 'lib', then resolve the callable for 'self' inside that
// table, and finally invoke it.
// val    - value whose dispatch table is consulted.
// self   - selector looked up within that table; also supplies the type
//          used for the call receiver.
// lib    - library providing the lookup context.
// tunnel - in/out: presumably receives the call's result (passed by
//          non-const reference) — TODO confirm against func.call.
inline void doDispatch( const PVal &val, const PVal &self, const PMaybe &lib, PMaybe &tunnel )
{
    PMaybe vlib1(nullptr);
    PMaybe func(nullptr);
    // Stage 1: dispatch table for 'val'.
    libGet<id_dispatch>(val, lib, vlib1);
    // Stage 2: callable for 'self' inside that table.
    libGet(self, vlib1, func);
    // Invoke with a quoted copy of 'val' typed after 'self'.
    func.call(_ptr(_quote(val), self->getType()), lib, tunnel);
}
/*
 * Fast garbage-collection pass: frees only the blocks previously
 * suggested for release (the '1' mark/free arguments select that set),
 * without walking the live-object graph as _db_gc() does.
 *
 * ctx - database context.
 * Returns 0.
 */
int _db_gc_fast(pgctx_t *ctx)
{
    memheap_t *heap = _ptr(ctx, ctx->root->heap);
    // Synchronize here.  All this does is make sure anyone who was
    // in the database using the blocks that were suggested to be
    // freed is now out of the database and the blocks can be
    // safely freed.
    pmem_gc_mark(&ctx->mm, heap, 1);
    // The lock/unlock pair is purely a barrier: acquiring the write
    // lock waits for all current readers to leave.
    _dblockop(ctx, MLCK_WR, ctx->root->lock);
    _dblockop(ctx, MLCK_UN, ctx->root->lock);
    pmem_gc_free(&ctx->mm, heap, 1, NULL, ctx);
    //pmem_gc_free(&ctx->mm, heap, 1, (gcfreecb_t)dbcache_del, ctx);
    return 0;
}
/*
 * Iterator 'next' implementation: return the next item of the wrapped
 * List, Object or Collection, or set StopIteration and return NULL.
 *
 * Lists yield items; Objects yield (key, value) tuples; Collections
 * (Bonsai trees) are walked in-order using the explicit stack in
 * self->stack, with self->depth as the stack top index.
 *
 * ob - the PongoIter instance (as a generic PyObject*).
 * Returns a new Python object reference, or NULL with StopIteration set.
 */
static PyObject *
PongoIter_next_item(PyObject *ob)
{
    PongoIter *self = (PongoIter*)ob;
    dbval_t *internal, *pn;
    dbtype_t node;
    PyObject *ret=NULL, *k, *v;
    _list_t *list;
    _obj_t *obj;

    dblock(self->ctx);
    internal = dbptr(self->ctx, self->dbptr);
    if (!internal) {
        // The {List,Object,Collection} internal pointer is NULL, so
        // there is no iteration to do
        PyErr_SetNone(PyExc_StopIteration);
    } else if (self->tag == List) {
        // Lists: emit item[pos] until pos reaches len.
        list = (_list_t*)internal;
        if (self->pos < self->len) {
            ret = to_python(self->ctx, list->item[self->pos], TP_PROXY);
            self->pos++;
        } else {
            PyErr_SetNone(PyExc_StopIteration);
        }
    } else if (self->tag == Object) {
        // Objects: emit (key, value) tuples until pos reaches len.
        obj = (_obj_t*)internal;
        if (self->pos < self->len) {
            k = to_python(self->ctx, obj->item[self->pos].key, TP_PROXY);
            v = to_python(self->ctx, obj->item[self->pos].value, TP_PROXY);
            ret = PyTuple_Pack(2, k, v);
            self->pos++;
        } else {
            PyErr_SetNone(PyExc_StopIteration);
        }
    } else if (self->tag == Collection || self->tag == MultiCollection) {
        // Tree walk already started (pos != 0) and the stack is empty:
        // the traversal is complete.
        if (self->pos && self->depth == -1) {
            PyErr_SetNone(PyExc_StopIteration);
        } else {
            // NOTE: I'm overloading the lowest bit to mean "already traversed the left
            // side".  Normally, this bit is part of the 'type' field and would encode
            // this value as "Int".  However, the BonsaiNode left/right can only point
            // to other BonsaiNodes and the stack (where the bit overloading is happening)
            // lives only in main memory, so we'll never write this bit modification
            // to disk.

            // If we're at position 0, put the internal node onto the stack.
            // I'm reusing pos as a flag since pos is otherwise unused by the tree iterator
            if (self->pos == 0) {
                self->stack[++self->depth] = self->dbptr;
                self->pos = 1;
            }
            // Get current top of stack.  If we've already traversed the left side
            // of this node, go directly to the emit stage and traverse the right side.
            node = self->stack[self->depth];
            pn = _ptr(self->ctx, node.all & ~1);
            if (node.all & 1) {
                node.all &= ~1;
            } else {
                // Walk as far left as possible, pushing onto stack as we
                // follow each link
                while (pn->left.all) {
                    node = pn->left;
                    self->stack[++self->depth] = node;
                    pn = _ptr(self->ctx, node.all);
                }
            }
            // Now node, pn and top of stack all reference the same object,
            // so convert the object to python, pop the top of stack and
            // mark the new top as "left side traversed"
            ret = to_python(self->ctx, node, TP_NODEKEY|TP_NODEVAL|TP_PROXY);
            if (--self->depth >= 0) {
                self->stack[self->depth].all |= 1;
            }
            // Now check if there is a right branch in the tree and push
            // it for the next call to the iterator
            if (pn->right.all) {
                self->stack[++self->depth] = pn->right;
            }
        }
    }
    dbunlock(self->ctx);
    return ret;
}
// Invoke the wrapped callable with no arguments and return its result.
TRet operator()(void)
{
    return _ptr();
}
void PrlHandleSmartPtrTest::testConstructorDestructorOnNullPointer()
{
    // A smart pointer built around NULL must construct and destroy
    // without touching any refcount (and without crashing).
    PrlHandleTestPtr pNullWrapper(NULL);
}
/*
 * Iterator 'next' for range-filtered iteration: like PongoIter_next_item,
 * but only yields entries that satisfy the half-open/closed bounds
 * (self->lhdata/lhex for the lower side, self->rhdata/rhex for the
 * upper side).  A zero lhdata.all/rhdata.all means "no bound on that
 * side"; lhex/rhex select inclusive vs. exclusive comparison.
 *
 * ob - the PongoIter instance (as a generic PyObject*).
 * Returns a new Python object reference, or NULL with StopIteration set.
 */
static PyObject *
PongoIter_next_expr(PyObject *ob)
{
    PongoIter *self = (PongoIter*)ob;
    dbval_t *internal, *pn;
    dbtype_t node, key, val;
    PyObject *ret=NULL, *k, *v;
    int ls, rs;  // "left side satisfied" / "right side satisfied" flags
    _list_t *list;
    _obj_t *obj;

    dblock(self->ctx);
    internal = dbptr(self->ctx, self->dbptr);
    if (!internal) {
        PyErr_SetNone(PyExc_StopIteration);
    } else if (self->tag == List) {
        // Lists: linear scan, emitting only in-range items.
        list = (_list_t*)internal;
        for(;;) {
            if (self->pos == self->len) {
                PyErr_SetNone(PyExc_StopIteration);
                break;
            }
            node = list->item[self->pos++];
            if ((!self->lhdata.all || dbcmp(self->ctx, node, self->lhdata) >= self->lhex) &&
                (!self->rhdata.all || dbcmp(self->ctx, node, self->rhdata) <= self->rhex)) {
                ret = to_python(self->ctx, node, TP_PROXY);
                break;
            }
        }
    } else if (self->tag == Object) {
        // Objects: linear scan over sorted keys.
        obj = (_obj_t*)internal;
        for(;;) {
            // If we've reached the end, quit with StopIteration
            if (self->pos == self->len) {
                PyErr_SetNone(PyExc_StopIteration);
                break;
            }
            key = obj->item[self->pos].key;
            val = obj->item[self->pos].value;
            self->pos++;
            // If the key doesn't satisfy the RHS, and since Objects are
            // sorted, we can quit with StopIteration
            if (!(!self->rhdata.all || dbcmp(self->ctx, key, self->rhdata) <= self->rhex)) {
                PyErr_SetNone(PyExc_StopIteration);
                break;
            }
            // If the key does satisfy the LHS, return it
            if (!self->lhdata.all || dbcmp(self->ctx, key, self->lhdata) >= self->lhex) {
                k = to_python(self->ctx, key, TP_PROXY);
                v = to_python(self->ctx, val, TP_PROXY);
                ret = PyTuple_Pack(2, k, v);
                break;
            }
        }
    } else if (self->tag == Collection || self->tag == MultiCollection) {
        // Traversal started and stack exhausted: iteration is done.
        if (self->pos && self->depth == -1) {
            PyErr_SetNone(PyExc_StopIteration);
        } else {
            // NOTE: I'm overloading the lowest bit to mean "already traversed the left
            // side".  Normally, this bit is part of the 'type' field and would encode
            // this value as "Int".  However, the BonsaiNode left/right can only point
            // to other BonsaiNodes and the stack (where the bit overloading is happening)
            // lives only in main memory, so we'll never write this bit modification
            // to disk.

            // If we're at position 0, put the internal node onto the stack.
            // I'm reusing pos as a flag since pos is otherwise unused by the tree iterator
            if (self->pos == 0) {
                // Descend from the root to the first in-range node,
                // pruning subtrees that fall outside the bounds.
                node = self->dbptr;
                for(;;) {
                    pn = _ptr(self->ctx, node.all);
                    ls = (!self->lhdata.all || dbcmp(self->ctx, pn->key, self->lhdata) >= self->lhex);
                    rs = (!self->rhdata.all || dbcmp(self->ctx, pn->key, self->rhdata) <= self->rhex);
                    if (ls && rs) {
                        self->stack[++self->depth] = node;
                        self->pos = 1;
                        break;
                    } else if (ls && pn->left.all) {
                        node = pn->left;
                    } else if (rs && pn->right.all) {
                        node = pn->right;
                    } else {
                        // No node in the tree satisfies both bounds.
                        PyErr_SetNone(PyExc_StopIteration);
                        goto exitproc;
                    }
                }
            }
            // Get current top of stack.  If we've already traversed the left side
            // of this node, go directly to the emit stage and traverse the right side.
            node = self->stack[self->depth];
            pn = _ptr(self->ctx, node.all & ~1);
            if (node.all & 1) {
                node.all &= ~1;
            } else {
                // Walk as far left as possible, pushing onto stack as we
                // follow each link
                if (pn->left.all) {
                    node = pn->left;
                    for(;;) {
                        pn = _ptr(self->ctx, node.all);
                        ls = (!self->lhdata.all || dbcmp(self->ctx, pn->key, self->lhdata) >= self->lhex);
                        rs = (!self->rhdata.all || dbcmp(self->ctx, pn->key, self->rhdata) <= self->rhex);
                        if (ls && rs) {
                            // In-range node: remember it for emission.
                            self->stack[++self->depth] = node;
                        }
                        if (ls && pn->left.all) {
                            node = pn->left;
                        } else if (rs && pn->right.all) {
                            node = pn->right;
                        } else {
                            break;
                        }
                    }
                    // Reset node and pn to whatever is on the top of stack now
                    node = self->stack[self->depth];
                    pn = _ptr(self->ctx, node.all);
                }
            }
            // Now node, pn and top of stack all reference the same object,
            // so convert the object to python, pop the top of stack and
            // mark the new top as "left side traversed"
            ret = to_python(self->ctx, node, TP_NODEKEY|TP_NODEVAL|TP_PROXY);
            if (--self->depth >= 0) {
                self->stack[self->depth].all |= 1;
            }
            // Now check if there is a right branch in the tree and push
            // it for the next call to the iterator
            if (pn->right.all) {
                // Same bounded descent as above, seeded from the right child.
                node = pn->right;
                for(;;) {
                    pn = _ptr(self->ctx, node.all);
                    ls = (!self->lhdata.all || dbcmp(self->ctx, pn->key, self->lhdata) >= self->lhex);
                    rs = (!self->rhdata.all || dbcmp(self->ctx, pn->key, self->rhdata) <= self->rhex);
                    if (ls && rs) {
                        self->stack[++self->depth] = node;
                    }
                    if (ls && pn->left.all) {
                        node = pn->left;
                    } else if (rs && pn->right.all) {
                        node = pn->right;
                    } else {
                        break;
                    }
                }
            }
        }
    }
exitproc:
    dbunlock(self->ctx);
    return ret;
}
// Forward four arguments to the wrapped callable and return its result.
TRet operator()(TArg1 arg1, TArg2 arg2, TArg3 arg3, TArg4 arg4)
{
    return _ptr(arg1, arg2, arg3, arg4);
}
// Element count reported by the underlying representation.
// Read-only, so no copy-on-write is triggered.
unsigned int size() const
{
    return _ptr()->size();
}
// Forward a single argument to the wrapped callable (result discarded).
void operator()(TArg1 arg1)
{
    _ptr(arg1);
}
// Forward two arguments to the wrapped callable (result discarded).
void operator()(TArg1 arg1, TArg2 arg2)
{
    _ptr(arg1, arg2);
}
// In-place accumulate: detach from any shared representation, then
// delegate the += to the underlying representations.
SmartArray<T>& operator+=(const SmartArray<T>& rhs)
{
    cow();
    _ptr()->operator+=(*rhs._ptr());
    return *this;
}
// Invoke the wrapped callable with no arguments (result discarded).
void operator()(void)
{
    _ptr();
}
/* Allocate 'size' bytes out of the database heap via the pmem layer.
 * Returns the new block's address (whatever pmem_alloc yields). */
void *dballoc(pgctx_t *ctx, unsigned size)
{
    return pmem_alloc(&ctx->mm, _ptr(ctx, ctx->root->heap), size);
}
// Resize the array.  cow() first so a shared representation is never
// mutated in place.
void set_size(unsigned int size)
{
    cow();
    _ptr()->set_size(size);
}
// Equality is delegated to the underlying representations; no
// copy-on-write needed for a read-only comparison.
bool operator==(const SmartArray<T>& rhs) const
{
    return _ptr()->operator==(*rhs._ptr());
}
// Read-only pointer to the raw element storage.
const T* ptr() const
{
    return _ptr()->ptr();
}
// Mutable pointer to the raw element storage.
// NOTE(review): no cow() here, so writes through this pointer may affect
// sharers of the representation — confirm that is intended.
T* ptr()
{
    return _ptr()->ptr();
}
// Forward a single argument to the wrapped callable and return its result.
TRet operator()(TArg1 arg1)
{
    return _ptr(arg1);
}
/* Dump allocator/heap information for this database context. */
void dbmem_info(pgctx_t *ctx)
{
    memheap_t *heap = _ptr(ctx, ctx->root->heap);

    pmem_print_mem(&ctx->mm, heap);
}
// Forward two arguments to the wrapped callable and return its result.
TRet operator()(TArg1 arg1, TArg2 arg2)
{
    return _ptr(arg1, arg2);
}
void PrlHandleSmartPtrTest::testPointerAssignOperatorOnNullPointer3() { PrlHandleTestPtr _ptr(NULL); _ptr = NULL; }
// Forward three arguments to the wrapped callable (result discarded).
void operator()(TArg1 arg1, TArg2 arg2, TArg3 arg3)
{
    _ptr(arg1, arg2, arg3);
}
// Assign 'rhs' across the array via the representation's set_all().
// Detach first so sharers are unaffected.
SmartArray<T>& set_all(const T& rhs)
{
    cow();
    _ptr()->set_all(rhs);
    return *this;
}
// Forward four arguments to the wrapped callable (result discarded).
void operator()(TArg1 arg1, TArg2 arg2, TArg3 arg3, TArg4 arg4)
{
    _ptr(arg1, arg2, arg3, arg4);
}
// Byte-fill the storage via the representation's memset().  Detach
// first so a shared representation is never mutated in place.
void memset(unsigned char value)
{
    cow();
    _ptr()->memset(value);
}