/* Mapping-protocol assignment hook: d[key] = value, or del d[key] when
 * value is NULL.  Returns 0 on success, -1 with a Python exception set
 * on failure.
 */
static int
PongoDict_SetItem(PongoDict *self, PyObject *key, PyObject *value)
{
    int status = -1;
    dbtype_t k, v;

    dblock(self->ctx);
    k = from_python(self->ctx, key);
    if (!PyErr_Occurred()) {
        if (value != NULL) {
            /* Store path: convert the value and insert it. */
            v = from_python(self->ctx, value);
            if (!PyErr_Occurred() &&
                dbobject_setitem(SELF_CTX_AND_DBPTR, k, v, self->ctx->sync) == 0)
                status = 0;
        } else if (dbobject_delitem(SELF_CTX_AND_DBPTR, k, &v, self->ctx->sync) == 0) {
            /* Delete path: NULL value means "del d[key]". */
            status = 0;
        } else {
            PyErr_SetObject(PyExc_KeyError, key);
        }
    }
    dbunlock(self->ctx);
    return status;
}
/* iter.expr(lhs, rhs, lhex, rhex): configure the iterator as a range
 * expression over [lhs, rhs].  lhex/rhex select whether each endpoint is
 * exclusive.  Returns a new reference to the (reconfigured) iterator.
 */
static PyObject *
PongoIter_expr(PyObject *ob, PyObject *args, PyObject *kwargs)
{
    PongoIter *iter = (PongoIter*)ob;
    PyObject *lhs = Py_None;
    PyObject *rhs = Py_None;
    int lhex = 0;
    int rhex = 0;
    char *kwlist[] = {"lhs", "rhs", "lhex", "rhex", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|OOii:expr", kwlist,
                                     &lhs, &rhs, &lhex, &rhex))
        return NULL;

    dblock(iter->ctx);
    /* Normalize the exclusivity flags: lhex becomes 0 or 1, rhex becomes
     * 0 or -1 (the signs match the dbcmp comparisons done at iteration
     * time). */
    iter->lhex = (lhex != 0);
    iter->rhex = (rhex != 0) ? -1 : 0;
    /* Convert both endpoints to pongo values and pin them in the pidcache
     * so the GC does not reclaim them while the iterator is alive. */
    iter->lhdata = from_python(iter->ctx, lhs);
    if (iter->lhdata.all)
        pidcache_put(iter->ctx, &iter->lhdata, iter->lhdata);
    iter->rhdata = from_python(iter->ctx, rhs);
    if (iter->rhdata.all)
        pidcache_put(iter->ctx, &iter->rhdata, iter->rhdata);
    dbunlock(iter->ctx);

    Py_INCREF(iter);
    return (PyObject*)iter;
}
/* d.pop(key[, default][, sync]): remove key and return its value.
 * Falls back to `default` when the key is absent, or raises KeyError if
 * no default was supplied.
 */
static PyObject *
PongoDict_pop(PongoDict *self, PyObject *args, PyObject *kwargs)
{
    PyObject *key, *dflt = NULL;
    PyObject *result = NULL;
    dbtype_t k, v;
    int sync = self->ctx->sync;
    char *kwlist[] = {"key", "default", "sync", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|Oi:pop", kwlist,
                                     &key, &dflt, &sync))
        return NULL;

    dblock(self->ctx);
    k = from_python(self->ctx, key);
    if (!PyErr_Occurred()) {
        if (dbobject_delitem(SELF_CTX_AND_DBPTR, k, &v, sync) >= 0) {
            /* Key existed: hand back the removed value as a proxy. */
            result = to_python(self->ctx, v, TP_PROXY);
        } else if (dflt) {
            Py_INCREF(dflt);
            result = dflt;
        } else {
            PyErr_SetObject(PyExc_KeyError, key);
        }
    }
    dbunlock(self->ctx);
    return result;
}
/* tp_dealloc for PongoDict: drop the proxy's pidcache entry under the
 * database lock, then free the Python object.
 */
void
PongoDict_Del(PyObject *ob)
{
    PongoDict *pd = (PongoDict*)ob;

    dblock(pd->ctx);
    pidcache_del(pd->ctx, pd);
    dbunlock(pd->ctx);
    PyObject_Del(ob);
}
/* d.search(path, relop, value) -> Collection of matching entries.
 *
 * path is either a dotted string ("a.b.c", split on ".") or a sequence
 * of keys; relop is one of ==, !=, <, <=, >, >= (or eq/ne/lt/le/gt/ge).
 * Returns a new reference, or NULL with an exception set.
 */
static PyObject *
PongoDict_search(PongoDict *self, PyObject *args)
{
    PyObject *path, *value, *ret = NULL;
    dbtype_t dbpath, dbvalue, dbrslt;
    char *rel;
    char *sep = ".";
    int decpath = 0;
    relop_t relop;

    if (!PyArg_ParseTuple(args, "OsO:search", &path, &rel, &value))
        return NULL;

    /* Map the relational-operator string onto the relop_t enum. */
    if (!strcmp(rel, "==") || !strcmp(rel, "eq")) {
        relop = db_EQ;
    } else if (!strcmp(rel, "!=") || !strcmp(rel, "ne")) {
        relop = db_NE;
    } else if (!strcmp(rel, "<") || !strcmp(rel, "lt")) {
        relop = db_LT;
    } else if (!strcmp(rel, "<=") || !strcmp(rel, "le")) {
        relop = db_LE;
    } else if (!strcmp(rel, ">") || !strcmp(rel, "gt")) {
        relop = db_GT;
    } else if (!strcmp(rel, ">=") || !strcmp(rel, "ge")) {
        relop = db_GE;
    } else {
        PyErr_Format(PyExc_ValueError, "Unknown relop '%s'", rel);
        return NULL;
    }

    if (PyString_Check(path)) {
        path = PyObject_CallMethod(path, "split", "s", sep);
        /* BUGFIX: split can fail (e.g. MemoryError); the old code passed
         * NULL on to PySequence_Check and crashed. */
        if (!path)
            return NULL;
        decpath = 1;
    }
    if (!PySequence_Check(path)) {
        PyErr_Format(PyExc_TypeError, "path must be a sequence");
        /* BUGFIX: release the list we created from the split above. */
        if (decpath)
            Py_DECREF(path);
        return NULL;
    }
    dblock(self->ctx);
    dbpath = from_python(self->ctx, path);
    if (decpath)
        Py_DECREF(path);
    if (dbtype(self->ctx, dbpath) == List) {
        dbvalue = from_python(self->ctx, value);
        if (!PyErr_Occurred()) {
            dbrslt = dbcollection_new(self->ctx, 0);
            db_search(SELF_CTX_AND_DBPTR, dbpath, -1, relop, dbvalue, dbrslt);
            // PROXYCHLD means turn the root object into a real dict, but
            // create proxy objects for all children.
            ret = to_python(self->ctx, dbrslt, TP_PROXYCHLD);
        }
    } else {
        PyErr_Format(PyExc_Exception, "path type isn't List (%d)",
                     dbtype(self->ctx, dbpath));
    }
    dbunlock(self->ctx);
    return ret;
}
/* d.native(): deep-convert the dictionary into plain Python objects
 * (no proxies).  Returns a new reference.
 */
static PyObject *
PongoDict_native(PongoDict *self)
{
    PyObject *result;

    dblock(self->ctx);
    result = to_python(SELF_CTX_AND_DBPTR, 0);
    dbunlock(self->ctx);
    return result;
}
/* tp_iter hook: build a PongoIter over a List, Object, Collection or
 * MultiCollection proxy.  Returns a new reference, or NULL (with no
 * exception set) if the container tag is unsupported or allocation
 * fails -- behavior preserved from the original.
 */
PyObject *
PongoIter_Iter(PyObject *ob)
{
    PongoDict *po = (PongoDict*)ob;
    PongoIter *self = NULL;
    dbtype_t internal;
    _list_t *list = NULL;
    _obj_t *obj = NULL;
    dbnode_t *node = NULL;
    dbtag_t tag;
    int len;

    dblock(po->ctx);
    // Take advantage of the fact that all of the Pongo container types
    // have the same object layout of the first few fields.
    internal.ptr = dbptr(po->ctx, po->dbptr);
    tag = internal.ptr->type;
    if (tag == List) {
        internal = internal.ptr->list;
        list = dbptr(po->ctx, internal);
        len = list ? list->len : 0;
    } else if (tag == Object) {
        internal = internal.ptr->obj;
        obj = dbptr(po->ctx, internal);
        len = obj ? obj->len : 0;
    } else if (tag == Collection || tag == MultiCollection) {
        /* Both collection flavors share the same internal layout. */
        internal = internal.ptr->obj;
        node = dbptr(po->ctx, internal);
        len = node ? node->size : 0;
    } else {
        goto exitproc;
    }

    self = (PongoIter *)PyObject_New(PongoIter, &PongoIter_Type);
    if (!self)
        goto exitproc;
    self->ctx = po->ctx;
    self->dbptr = internal;
    self->tag = tag;
    self->pos = 0;
    self->len = len;
    self->depth = -1;
    self->lhex = 0;
    self->rhex = 0;
    self->lhdata = self->rhdata = DBNULL;
    pidcache_put(po->ctx, self, self->dbptr);
exitproc:
    // BUGFIX: unlock through po->ctx.  The old code did
    // dbunlock(self->ctx), which dereferenced NULL whenever the tag was
    // unsupported or PyObject_New failed (self == NULL on both paths).
    dbunlock(po->ctx);
    return (PyObject *)self;
}
/* Mapping-protocol length hook: len(d). */
static int
PongoDict_length(PongoDict *self)
{
    int n;

    dblock(self->ctx);
    n = dbobject_len(SELF_CTX_AND_DBPTR);
    dbunlock(self->ctx);
    return n;
}
/* tp_dealloc for PongoIter: unpin the iterator and both range endpoints
 * from the pidcache, then free the Python object.
 */
void
PongoIter_Del(PyObject *ob)
{
    PongoIter *it = (PongoIter*)ob;

    dblock(it->ctx);
    pidcache_del(it->ctx, it);
    pidcache_del(it->ctx, &it->lhdata);
    pidcache_del(it->ctx, &it->rhdata);
    dbunlock(it->ctx);
    PyObject_Del(ob);
}
/* d.set(key, value, sep='.', sync=ctx_default, fail=0).
 *
 * String keys are treated as dotted paths and split on `sep`.  Passing
 * the special `pongo_id` sentinel as the key requests PUT_ID semantics:
 * db_multi generates a new "_id" and the call returns it.  With fail=1
 * on a path key, the set fails rather than overwrites.
 * Returns a new reference (Py_None or the new id), or NULL on error.
 */
static PyObject *
PongoDict_set(PongoDict *self, PyObject *args, PyObject *kwargs)
{
    PyObject *key, *value;
    PyObject *klist = NULL;
    PyObject *ret = NULL;
    dbtype_t k, v;
    int sync = self->ctx->sync;
    int fail = 0;
    multi_t op = multi_SET;
    char *kwlist[] = {"key", "value", "sep", "sync", "fail", NULL};
    char *sep = ".";

    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|sii:set", kwlist,
                                     &key, &value, &sep, &sync, &fail))
        return NULL;

    k = DBNULL;
    dblock(self->ctx);
    if (PyString_Check(key) || PyUnicode_Check(key)) {
        klist = PyObject_CallMethod(key, "split", "s", sep);
        // BUGFIX: split can fail; only convert when it succeeded.  On
        // failure PyErr_Occurred() is set and the store below is skipped.
        if (klist) {
            k = from_python(self->ctx, klist);
            Py_DECREF(klist);
        }
    } else if (key == pongo_id) {
        sync |= PUT_ID;
    } else {
        k = from_python(self->ctx, key);
    }
    v = from_python(self->ctx, value);
    if (!PyErr_Occurred()) {
        if (dbtype(self->ctx, k) == List) {
            if (fail)
                op = multi_SET_OR_FAIL;
            if (db_multi(SELF_CTX_AND_DBPTR, k, op, &v, sync) == 0) {
                ret = Py_None;
            } else {
                PyErr_SetObject(PyExc_KeyError, key);
            }
        } else if (db_multi(SELF_CTX_AND_DBPTR, k, op, &v, sync) == 0) {
            // db_multi will tell us the newly created value of
            // "_id" when PUT_ID is enabled.
            ret = (sync & PUT_ID) ? to_python(self->ctx, v, TP_PROXY) : Py_None;
        } else if (sync & PUT_ID) {
            PyErr_Format(PyExc_ValueError, "value must be a dictionary");
        } else {
            PyErr_SetObject(PyExc_KeyError, key);
        }
    }
    dbunlock(self->ctx);
    Py_XINCREF(ret);
    return ret;
}
/* pongo.meta(db, key[, value]): read (and optionally set) a database
 * metadata field.  Always returns the PREVIOUS value of the field, so a
 * caller can save and later restore it.
 *
 * Recognized keys: "chunksize", "id", ".sync", ".uuid_class",
 * ".uuid_constructor" (the latter two only with WANT_UUID_TYPE), and
 * ".newkey" (a Python callable used to generate keys; Py_None restores
 * the built-in behavior).  Unknown keys raise an exception.
 *
 * NOTE(review): refcounting here is uneven -- some branches return a new
 * reference (PyLong_FromLongLong), others return a borrowed one
 * (uuid_class, pongo_newkey, the initial Py_None) without Py_INCREF.
 * Confirm against the callers before changing.
 */
static PyObject *
pongo_meta(PyObject *self, PyObject *args)
{
    PongoCollection *data;
    pgctx_t *ctx;
    const char *key;
    PyObject *value = NULL, *ret = Py_None;

    if (!PyArg_ParseTuple(args, "Os|O:meta", &data, &key, &value))
        return NULL;
    if (pongo_check(data))
        return NULL;
    ctx = data->ctx;

    dblock(ctx);
    if (!strcmp(key, "chunksize")) {
        // Allocation chunk size; Py_None means "read only, don't set".
        ret = PyLong_FromLongLong(ctx->root->meta.chunksize);
        if (value && value != Py_None)
            ctx->root->meta.chunksize = PyInt_AsLong(value);
    } else if (!strcmp(key, "id")) {
        // Database identity value, stored as a pongo value.
        ret = to_python(ctx, ctx->root->meta.id, 0);
        if (value)
            ctx->root->meta.id = from_python(ctx, value);
    } else if (!strcmp(key, ".sync")) {
        // Default sync flag applied to write operations.
        ret = PyInt_FromLong(ctx->sync);
        if (value && value != Py_None)
            ctx->sync = PyInt_AsLong(value);
#ifdef WANT_UUID_TYPE
    } else if (!strcmp(key, ".uuid_class")) {
        // Python class used to represent UUID values.
        ret = (PyObject*)uuid_class;
        if (value)
            uuid_class = (PyTypeObject*)value;
    } else if (!strcmp(key, ".uuid_constructor")) {
        // Callable invoked to build UUID objects.
        ret = (PyObject*)uuid_constructor;
        if (value)
            uuid_constructor = value;
#endif
    } else if (!strcmp(key, ".newkey")) {
        // Key-generation hook.  Py_None restores the default generator
        // (_newkey when UUIDs are compiled in, otherwise none).
        ret = pongo_newkey;
        if (value) {
            pongo_newkey = value;
            if (value == Py_None) {
#ifdef WANT_UUID_TYPE
                ctx->newkey = _newkey;
#else
                ctx->newkey = NULL;
#endif
            } else {
                ctx->newkey = pongo_newkey_helper;
            }
        }
    } else {
        PyErr_Format(PyExc_Exception, "Unknown meta key %s", key);
        ret = NULL;
    }
    dbunlock(ctx);
    return ret;
}
/* Interpreter-exit hook: tear down the pidcache of every open database
 * context under its lock.
 */
void
pongo_atexit(void)
{
    int i;
    pgctx_t *ctx;

    for (i = 0; i < NR_DB_CONTEXT; i++) {
        ctx = dbctx[i];
        if (!ctx)
            continue;
        dblock(ctx);
        pidcache_destroy(ctx);
        dbunlock(ctx);
    }
}
// Command entry point: invalidate the view catalog of the named database.
// Takes an intent-shared (MODE_IS) database lock; fails with
// NamespaceNotFound if the database does not exist.
bool run(OperationContext* opCtx,
         const std::string& dbName,
         const BSONObj& cmdObj,
         BSONObjBuilder& result) final {
    // Renamed from "dblock" to avoid shadowing confusion; the RAII guard
    // holds the lock for the rest of the function.
    AutoGetDb autoDb(opCtx, dbName, LockMode::MODE_IS);
    auto db = autoDb.getDb();
    uassert(ErrorCodes::NamespaceNotFound,
            str::stream() << "database " << dbName << " does not exist",
            db);
    db->getViewCatalog()->invalidate();
    return true;
}
/* sq_contains hook: `key in d`.  Returns 1/0, or 0 with an exception
 * set if the key could not be converted.
 */
static int
PongoDict_contains(PongoDict *self, PyObject *key)
{
    int found = 0;
    dbtype_t k;

    dblock(self->ctx);
    k = from_python(self->ctx, key);
    if (!PyErr_Occurred())
        found = dbobject_contains(SELF_CTX_AND_DBPTR, k);
    dbunlock(self->ctx);
    return found;
}
/* d.get(key, default=None, sep='.').
 *
 * String keys are treated as dotted paths and split on `sep`, then
 * looked up via db_multi; other keys use a plain getitem.  Returns a
 * new reference to the value (as a proxy), the default, or raises
 * KeyError -- note the default of Py_None means KeyError is only raised
 * when the caller explicitly passed default=NULL, which the Python
 * signature cannot express; preserved from the original.
 */
static PyObject *
PongoDict_get(PongoDict *self, PyObject *args, PyObject *kwargs)
{
    PyObject *key, *dflt = Py_None;
    PyObject *klist = NULL;
    PyObject *ret = NULL;
    dbtype_t k = DBNULL, v;
    char *sep = ".";
    char *kwlist[] = {"key", "default", "sep", NULL};
    int r;

    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|Os:get", kwlist,
                                     &key, &dflt, &sep))
        return NULL;

    dblock(self->ctx);
    if (PyString_Check(key) || PyUnicode_Check(key)) {
        klist = PyObject_CallMethod(key, "split", "s", sep);
        // BUGFIX: split can fail; the old code passed a NULL klist to
        // from_python and left k uninitialized.  On failure,
        // PyErr_Occurred() is set and the lookup below is skipped.
        if (klist) {
            k = from_python(self->ctx, klist);
            Py_DECREF(klist);
        }
    } else {
        k = from_python(self->ctx, key);
    }
    if (!PyErr_Occurred()) {
        if (dbtype(self->ctx, k) == List) {
            /* Dotted-path lookup. */
            r = db_multi(SELF_CTX_AND_DBPTR, k, multi_GET, &v, 0);
            if (r == 0) {
                ret = to_python(self->ctx, v, TP_PROXY);
            } else if (dflt) {
                Py_INCREF(dflt);
                ret = dflt;
            } else {
                PyErr_SetObject(PyExc_KeyError, key);
            }
        } else if (dbobject_getitem(SELF_CTX_AND_DBPTR, k, &v) == 0) {
            ret = to_python(self->ctx, v, TP_PROXY);
        } else if (dflt) {
            Py_INCREF(dflt);
            ret = dflt;
        } else {
            PyErr_SetObject(PyExc_KeyError, key);
        }
    }
    dbunlock(self->ctx);
    return ret;
}
/* Create a fresh, empty database dictionary in the same context as the
 * reference collection and return a proxy for it.
 */
static PyObject *
PongoDict_create(PyObject *self, PyObject *arg)
{
    PongoCollection *ref = (PongoCollection*)arg;
    PyObject *proxy;
    dbtype_t dict;

    if (pongo_check(ref))
        return NULL;
    dblock(ref->ctx);
    dict = dbobject_new(ref->ctx);
    proxy = to_python(ref->ctx, dict, TP_PROXY);
    dbunlock(ref->ctx);
    return proxy;
}
/* pongo.close(db): tear down the context's pidcache and close the
 * underlying database file.
 */
static PyObject *
pongo_close(PyObject *self, PyObject *args)
{
    PongoCollection *data;

    if (!PyArg_ParseTuple(args, "O:close", &data))
        return NULL;
    if (pongo_check(data))
        return NULL;

    dblock(data->ctx);
    pidcache_destroy(data->ctx);
    dbunlock(data->ctx);
    /* Close after releasing the lock -- the lock lives in the file. */
    dbfile_close(data->ctx);
    Py_RETURN_NONE;
}
/* pongo.open(filename[, initsize]): open (or create) a database file
 * and return a proxy of its root collection.
 */
static PyObject *
pongo_open(PyObject *self, PyObject *args)
{
    char *filename;
    pgctx_t *ctx;
    uint32_t initsize = 0;

    if (!PyArg_ParseTuple(args, "s|i:open", &filename, &initsize))
        return NULL;
    ctx = dbfile_open(filename, initsize);
    // BUGFIX: an open failure previously fell straight into dblock(NULL)
    // and crashed the interpreter.  (Assumes dbfile_open returns NULL on
    // failure -- if it can never fail, this branch is simply dead.)
    if (!ctx) {
        PyErr_Format(PyExc_IOError, "unable to open database file '%s'",
                     filename);
        return NULL;
    }
    dblock(ctx);
    pidcache_new(ctx);
    dbunlock(ctx);
    // Create a python proxy of the root data object
    return PongoCollection_Proxy(ctx, ctx->data);
}
/* Mapping-protocol subscript hook: d[key].  Returns a new reference to
 * a proxy of the value, or raises KeyError.
 */
static PyObject *
PongoDict_GetItem(PongoDict *self, PyObject *key)
{
    PyObject *result = NULL;
    dbtype_t k, v;

    dblock(self->ctx);
    k = from_python(self->ctx, key);
    if (!PyErr_Occurred()) {
        if (dbobject_getitem(SELF_CTX_AND_DBPTR, k, &v) != 0)
            PyErr_SetObject(PyExc_KeyError, key);
        else
            result = to_python(self->ctx, v, TP_PROXY);
    }
    dbunlock(self->ctx);
    return result;
}
/* Debug helper: pongo._object(db, offset) materializes the database
 * value stored at a raw file offset as a Python object.
 */
static PyObject *
pongo__object(PyObject *self, PyObject *args)
{
    PongoCollection *data;
    uint64_t offset;
    dbtype_t db;
    PyObject *ob;

    if (!PyArg_ParseTuple(args, "OL:_object", &data, &offset))
        return NULL;
    if (pongo_check(data))
        return NULL;

    db.all = offset;
    dblock(data->ctx);
    ob = to_python(data->ctx, db, 1);
    dbunlock(data->ctx);
    return ob;
}
/* d.json([key_or_json[, value_json]]):
 *   0 args -> serialize the dict to a json unicode string.
 *   1 arg  -> replace the dict contents with the parsed json text.
 *   2 args -> d[key] = json_parse(value).
 * Returns a new reference (Py_None for the mutating forms).
 */
static PyObject *
PongoDict_json(PongoDict *self, PyObject *args)
{
    char *key = NULL, *val = NULL;
    Py_ssize_t klen, vlen;
    PyObject *ret = Py_None;
    dbtype_t dict, obj, k;
    jsonctx_t *jctx;

    if (!PyArg_ParseTuple(args, "|s#s#:json", &key, &klen, &val, &vlen))
        return NULL;

    dblock(self->ctx);
    dict = self->dbptr;
    jctx = json_init(self->ctx);
    if (key) {
        if (val) {
            // 2-arg form is dict.json('key', 'value')
            // inserts dict['key'] = json_parse('value')
            k = dbstring_new(self->ctx, key, klen);
            obj = json_parse(jctx, val, vlen);
            dbobject_setitem(SELF_CTX_AND_DBPTR, k, obj, self->ctx->sync);
        } else {
            // 1-arg form replaces dict's items with the parsed json
            obj = json_parse(jctx, key, klen);
            dict.ptr = dbptr(self->ctx, dict);
            obj.ptr = dbptr(self->ctx, obj);
            dict.ptr->obj = obj.ptr->obj;
        }
        Py_INCREF(ret);
    } else {
        // 0-arg form generates the json string from dictionary contents
        json_emit(jctx, dict);
        if (jctx->outstr) {
            ret = PyUnicode_FromStringAndSize(
                    (const char*)jctx->outstr, jctx->outlen);
        } else {
            // BUGFIX: this path used to return Py_None as a *borrowed*
            // reference, slowly corrupting None's refcount.
            Py_INCREF(ret);
        }
    }
    json_cleanup(jctx);
    dbunlock(self->ctx);
    return ret;
}
/* pongo.atoms(db): expose the context's atom cache as a collection
 * proxy.
 */
static PyObject *
pongo_atoms(PyObject *self, PyObject *args)
{
    PongoCollection *data;
    PyObject *proxy;

    if (!PyArg_ParseTuple(args, "O:atoms", &data))
        return NULL;
    if (pongo_check(data))
        return NULL;

    dblock(data->ctx);
    // Build the proxy directly: the cache is already accounted for by
    // pongogc, so no pidcache entry is needed for this reference.
    proxy = PongoCollection_Proxy(data->ctx, data->ctx->cache);
    dbunlock(data->ctx);
    return proxy;
}
/* d.keys(): return a new Python list of key proxies.
 */
static PyObject *
PongoDict_keys(PongoDict *self)
{
    dbtype_t db;
    _obj_t *obj;
    PyObject *ret = PyList_New(0);
    PyObject *item;
    int i;

    // BUGFIX: PyList_New can fail; bail before taking the lock.
    if (!ret)
        return NULL;

    dblock(self->ctx);
    db.ptr = dbptr(self->ctx, self->dbptr);
    obj = dbptr(self->ctx, db.ptr->obj);
    // BUGFIX: an empty dict can have a NULL internal object pointer
    // (PongoIter_Iter guards the same way); the old code dereferenced
    // obj->len unconditionally.
    if (obj) {
        for (i = 0; i < obj->len; i++) {
            // FIXME: NULL is a valid key?
            if (obj->item[i].key.all) {
                item = to_python(self->ctx, obj->item[i].key, TP_PROXY);
                PyList_Append(ret, item);
                Py_DECREF(item);
            }
        }
    }
    dbunlock(self->ctx);
    return ret;
}
/* d.update(iter[, sync]): bulk-insert from a mapping object, using its
 * items() when available, otherwise its iterator protocol.  Returns
 * None on success, NULL with an exception on failure.
 */
static PyObject *
PongoDict_update(PongoDict *self, PyObject *args, PyObject *kwargs)
{
    PyObject *iter, *items;
    PyObject *ret = NULL;
    int length;
    int sync = self->ctx->sync;
    char *kwlist[] = {"iter", "sync", NULL};

    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O|i:update", kwlist,
                                     &iter, &sync))
        return NULL;

    dblock(self->ctx);
    if (PyMapping_Check(iter)) {
        length = PyMapping_Length(iter);
        items = PyMapping_Items(iter);
        if (items) {
            // mapping object implements "items"
            if (dbobject_update(SELF_CTX_AND_DBPTR, length,
                                _py_mapping_cb, items, sync) == 0)
                ret = Py_None;
            Py_DECREF(items);
        } else {
            // mapping object implements iterator protocol
            // don't have to decref the iterator because it self-decrefs
            // upon StopIteration
            PyErr_Clear();
            items = PyObject_GetIter(iter);
            if (dbobject_update(SELF_CTX_AND_DBPTR, length,
                                _py_itermapping_cb, items, sync) == 0)
                ret = Py_None;
        }
    } else {
        // BUGFIX: previously returned NULL with no exception set, which
        // the interpreter reports as SystemError.
        PyErr_Format(PyExc_TypeError, "update requires a mapping object");
    }
    dbunlock(self->ctx);
    Py_XINCREF(ret);
    return ret;
}
/* tp_iternext in plain (unfiltered) mode.
 *
 * List     -> yields each item in order.
 * Object   -> yields (key, value) tuples by position.
 * Collection / MultiCollection -> in-order traversal of the Bonsai tree
 * using self->stack / self->depth as an explicit traversal stack.
 *
 * Returns a new reference, or NULL with StopIteration set at the end.
 */
static PyObject *
PongoIter_next_item(PyObject *ob)
{
    PongoIter *self = (PongoIter*)ob;
    dbval_t *internal, *pn;
    dbtype_t node;
    PyObject *ret=NULL, *k, *v;
    _list_t *list;
    _obj_t *obj;

    dblock(self->ctx);
    internal = dbptr(self->ctx, self->dbptr);
    if (!internal) {
        // The {List,Object,Collection} internal pointer is NULL, so
        // there is no iteration to do
        PyErr_SetNone(PyExc_StopIteration);
    } else if (self->tag == List) {
        list = (_list_t*)internal;
        if (self->pos < self->len) {
            ret = to_python(self->ctx, list->item[self->pos], TP_PROXY);
            self->pos++;
        } else {
            PyErr_SetNone(PyExc_StopIteration);
        }
    } else if (self->tag == Object) {
        obj = (_obj_t*)internal;
        if (self->pos < self->len) {
            // Emit the (key, value) pair at the current position.
            // NOTE(review): PyTuple_Pack adds its own references to k and
            // v, and the local references are never released -- looks like
            // a leak; confirm against to_python's return convention.
            k = to_python(self->ctx, obj->item[self->pos].key, TP_PROXY);
            v = to_python(self->ctx, obj->item[self->pos].value, TP_PROXY);
            ret = PyTuple_Pack(2, k, v);
            self->pos++;
        } else {
            PyErr_SetNone(PyExc_StopIteration);
        }
    } else if (self->tag == Collection || self->tag == MultiCollection) {
        // pos != 0 means iteration has started; an empty stack
        // (depth == -1) at that point means the traversal is complete.
        if (self->pos && self->depth == -1) {
            PyErr_SetNone(PyExc_StopIteration);
        } else {
            // NOTE: I'm overloading the lowest bit to mean "already traversed the left
            // side".  Normally, this bit is part of the 'type' field and would encode
            // this value as "Int".  However, the BonsaiNode left/right can only point
            // to other BonsaiNodes and the stack (where the bit overloading is happening)
            // lives only in main memory, so we'll never write this bit modification
            // to disk.

            // If were at position 0, put the internal node onto the stack
            // I'm reusing pos as a flag since pos is otherwise unused by the tree iterator
            if (self->pos == 0) {
                self->stack[++self->depth] = self->dbptr;
                self->pos = 1;
            }
            // Get current top of stack.  If we've already traversed the left side
            // of this node, go directly to the emit stage and traverse the right side.
            node = self->stack[self->depth];
            pn = _ptr(self->ctx, node.all & ~1);
            if (node.all & 1) {
                node.all &= ~1;
            } else {
                // Walk as far left as possible, pushing onto stack as we
                // follow each link
                while (pn->left.all) {
                    node = pn->left;
                    self->stack[++self->depth] = node;
                    pn = _ptr(self->ctx, node.all);
                }
            }
            // Now node, pn and top of stack all reference the same object,
            // so convert the object to python, pop the top of stack and
            // mark the new top as "left side traversed"
            ret = to_python(self->ctx, node, TP_NODEKEY|TP_NODEVAL|TP_PROXY);
            if (--self->depth >= 0) {
                self->stack[self->depth].all |= 1;
            }
            // Now check if there is a right branch in the tree and push
            // it for the next call to the iterator
            if (pn->right.all) {
                self->stack[++self->depth] = pn->right;
            }
        }
    }
    dbunlock(self->ctx);
    return ret;
}
/* tp_iternext in range-expression mode (after PongoIter_expr).
 *
 * Yields only elements satisfying lhdata <=(lhex) x <=(rhex) rhdata,
 * where a zero lhdata/rhdata disables that bound.  lhex is 0 or 1 and
 * rhex is 0 or -1, so the dbcmp comparisons below implement
 * inclusive/exclusive endpoints (see PongoIter_expr).
 *
 * List    -> linear scan, filtering each item.
 * Object  -> positional scan; items are key-sorted, so the first key
 *            past the right bound ends the iteration early.
 * Collection / MultiCollection -> pruned in-order Bonsai-tree walk
 *            using self->stack / self->depth, descending only into
 *            subtrees that can still contain in-range keys.
 *
 * Returns a new reference, or NULL with StopIteration set.
 */
static PyObject *
PongoIter_next_expr(PyObject *ob)
{
    PongoIter *self = (PongoIter*)ob;
    dbval_t *internal, *pn;
    dbtype_t node, key, val;
    PyObject *ret=NULL, *k, *v;
    int ls, rs;
    _list_t *list;
    _obj_t *obj;

    dblock(self->ctx);
    internal = dbptr(self->ctx, self->dbptr);
    if (!internal) {
        // Empty container: nothing to iterate.
        PyErr_SetNone(PyExc_StopIteration);
    } else if (self->tag == List) {
        list = (_list_t*)internal;
        for(;;) {
            if (self->pos == self->len) {
                PyErr_SetNone(PyExc_StopIteration);
                break;
            }
            node = list->item[self->pos++];
            // Keep scanning until an item inside both bounds is found.
            if ((!self->lhdata.all || dbcmp(self->ctx, node, self->lhdata) >= self->lhex) &&
                (!self->rhdata.all || dbcmp(self->ctx, node, self->rhdata) <= self->rhex)) {
                ret = to_python(self->ctx, node, TP_PROXY);
                break;
            }
        }
    } else if (self->tag == Object) {
        obj = (_obj_t*)internal;
        for(;;) {
            // If we've reached the end, quit with StopIteration
            if (self->pos == self->len) {
                PyErr_SetNone(PyExc_StopIteration);
                break;
            }
            key = obj->item[self->pos].key;
            val = obj->item[self->pos].value;
            self->pos++;
            // If the key doesn't satisfy the RHS, and since Objects are
            // sorted, we can quit with StopIteration
            if (!(!self->rhdata.all || dbcmp(self->ctx, key, self->rhdata) <= self->rhex)) {
                PyErr_SetNone(PyExc_StopIteration);
                break;
            }
            // If the key does satisfy the LHS, return it
            // NOTE(review): as in PongoIter_next_item, the k and v
            // references appear to leak after PyTuple_Pack -- confirm.
            if (!self->lhdata.all || dbcmp(self->ctx, key, self->lhdata) >= self->lhex) {
                k = to_python(self->ctx, key, TP_PROXY);
                v = to_python(self->ctx, val, TP_PROXY);
                ret = PyTuple_Pack(2, k, v);
                break;
            }
        }
    } else if (self->tag == Collection || self->tag == MultiCollection) {
        if (self->pos && self->depth == -1) {
            // Iteration already started and the stack has drained: done.
            PyErr_SetNone(PyExc_StopIteration);
        } else {
            // NOTE: I'm overloading the lowest bit to mean "already traversed the left
            // side".  Normally, this bit is part of the 'type' field and would encode
            // this value as "Int".  However, the BonsaiNode left/right can only point
            // to other BonsaiNodes and the stack (where the bit overloading is happening)
            // lives only in main memory, so we'll never write this bit modification
            // to disk.

            // If were at position 0, put the internal node onto the stack
            // I'm reusing pos as a flag since pos is otherwise unused by the tree iterator
            if (self->pos == 0) {
                // First call: descend from the root to the first in-range
                // node, pushing it when found; otherwise steer toward the
                // side that could still contain in-range keys.
                node = self->dbptr;
                for(;;) {
                    pn = _ptr(self->ctx, node.all);
                    ls = (!self->lhdata.all || dbcmp(self->ctx, pn->key, self->lhdata) >= self->lhex);
                    rs = (!self->rhdata.all || dbcmp(self->ctx, pn->key, self->rhdata) <= self->rhex);
                    if (ls && rs) {
                        self->stack[++self->depth] = node;
                        self->pos = 1;
                        break;
                    } else if (ls && pn->left.all) {
                        node = pn->left;
                    } else if (rs && pn->right.all) {
                        node = pn->right;
                    } else {
                        // No node in range anywhere in the tree.
                        PyErr_SetNone(PyExc_StopIteration);
                        goto exitproc;
                    }
                }
            }
            // Get current top of stack.  If we've already traversed the left side
            // of this node, go directly to the emit stage and traverse the right side.
            node = self->stack[self->depth];
            pn = _ptr(self->ctx, node.all & ~1);
            if (node.all & 1) {
                node.all &= ~1;
            } else {
                // Walk as far left as possible, pushing onto stack as we
                // follow each link
                if (pn->left.all) {
                    node = pn->left;
                    for(;;) {
                        pn = _ptr(self->ctx, node.all);
                        ls = (!self->lhdata.all || dbcmp(self->ctx, pn->key, self->lhdata) >= self->lhex);
                        rs = (!self->rhdata.all || dbcmp(self->ctx, pn->key, self->rhdata) <= self->rhex);
                        if (ls && rs) {
                            // In-range node: remember it for later emission.
                            self->stack[++self->depth] = node;
                        }
                        if (ls && pn->left.all) {
                            node = pn->left;
                        } else if (rs && pn->right.all) {
                            node = pn->right;
                        } else {
                            break;
                        }
                    }
                    // Reset node and pn to whatever is on the top of stack now
                    node = self->stack[self->depth];
                    pn = _ptr(self->ctx, node.all);
                }
            }
            // Now node, pn and top of stack all reference the same object,
            // so convert the object to python, pop the top of stack and
            // mark the new top as "left side traversed"
            ret = to_python(self->ctx, node, TP_NODEKEY|TP_NODEVAL|TP_PROXY);
            if (--self->depth >= 0) {
                self->stack[self->depth].all |= 1;
            }
            // Now check if there is a right branch in the tree and push
            // it for the next call to the iterator
            if (pn->right.all) {
                // Same pruned descent as above, rooted at the right child.
                node = pn->right;
                for(;;) {
                    pn = _ptr(self->ctx, node.all);
                    ls = (!self->lhdata.all || dbcmp(self->ctx, pn->key, self->lhdata) >= self->lhex);
                    rs = (!self->rhdata.all || dbcmp(self->ctx, pn->key, self->rhdata) <= self->rhex);
                    if (ls && rs) {
                        self->stack[++self->depth] = node;
                    }
                    if (ls && pn->left.all) {
                        node = pn->left;
                    } else if (rs && pn->right.all) {
                        node = pn->right;
                    } else {
                        break;
                    }
                }
            }
        }
    }
exitproc:
    dbunlock(self->ctx);
    return ret;
}