/*
 * Collect the IDs of cx->ei's children into cx->tmp and merge them into
 * cx->ids.  Sources are tried in order: the in-memory IDL cache, the
 * EntryInfo kid cache (when its count matches the on-disk count), and
 * finally the dn2id database itself.  For DN_SUBTREE_PREFIX searches the
 * function recurses into each child (tracking nesting via cx->depth) so
 * grandchildren are gathered as well.  Returns the DB/LDAP result code,
 * which is also left in cx->rc.
 */
static int
hdb_dn2idl_internal( struct dn2id_cookie *cx )
{
	BDB_IDL_ZERO( cx->tmp );

	if ( cx->bdb->bi_idl_cache_size ) {
		/* Cache keys are <one prefix byte><ID>; the prefix byte is
		 * written into the byte immediately preceding cx->id in the
		 * cookie.  NOTE(review): assumes struct dn2id_cookie lays out
		 * a writable byte just before the id member — confirm against
		 * the cookie declaration.
		 */
		char *ptr = ((char *)&cx->id)-1;

		cx->key.data = ptr;
		cx->key.size = sizeof(ID)+1;
		if ( cx->prefix == DN_SUBTREE_PREFIX ) {
			/* At depth 0 a cached subtree IDL can go straight into the
			 * caller's result; in a recursive call it must be merged.
			 */
			ID *ids = cx->depth ? cx->tmp : cx->ids;
			*ptr = cx->prefix;
			cx->rc = bdb_idl_cache_get(cx->bdb, cx->db, &cx->key, ids);
			if ( cx->rc == LDAP_SUCCESS ) {
				if ( cx->depth ) {
					bdb_idl_delete( cx->tmp, cx->id );	/* ITS#6983, drop our own ID */
					bdb_idl_append( cx->ids, cx->tmp );
					cx->need_sort = 1;
				}
				return cx->rc;
			}
		}
		/* Fall back to the cached onelevel IDL for this entry. */
		*ptr = DN_ONE_PREFIX;
		cx->rc = bdb_idl_cache_get(cx->bdb, cx->db, &cx->key, cx->tmp);
		if ( cx->rc == LDAP_SUCCESS ) {
			goto gotit;
		}
		if ( cx->rc == DB_NOTFOUND ) {
			return cx->rc;
		}
	}

	bdb_cache_entryinfo_lock( cx->ei );

	/* If number of kids in the cache differs from on-disk, load
	 * up all the kids from the database
	 */
	if ( cx->ei->bei_ckids+1 != cx->ei->bei_dkids ) {
		EntryInfo ei;
		db_recno_t dkids = cx->ei->bei_dkids;
		ei.bei_parent = cx->ei;

		/* Only one thread should load the cache: spin (yielding) while
		 * another thread holds CACHE_ENTRY_ONELEVEL, and re-check the
		 * counts each time we reacquire the lock in case it finished.
		 */
		while ( cx->ei->bei_state & CACHE_ENTRY_ONELEVEL ) {
			bdb_cache_entryinfo_unlock( cx->ei );
			ldap_pvt_thread_yield();
			bdb_cache_entryinfo_lock( cx->ei );
			if ( cx->ei->bei_ckids+1 == cx->ei->bei_dkids ) {
				goto synced;
			}
		}

		cx->ei->bei_state |= CACHE_ENTRY_ONELEVEL;
		bdb_cache_entryinfo_unlock( cx->ei );

		cx->rc = cx->db->cursor( cx->db, NULL, &cx->dbc,
			cx->bdb->bi_db_opflags );
		if ( cx->rc )
			goto done_one;

		/* Partial read of just the trailing ID of the first duplicate. */
		cx->data.data = &cx->dbuf;
		cx->data.ulen = sizeof(ID);
		cx->data.dlen = sizeof(ID);
		cx->data.flags = DB_DBT_USERMEM | DB_DBT_PARTIAL;

		/* The first item holds the parent ID. Ignore it. */
		cx->key.data = &cx->nid;
		cx->key.size = sizeof(ID);
		cx->rc = cx->dbc->c_get( cx->dbc, &cx->key, &cx->data, DB_SET );
		if ( cx->rc ) {
			cx->dbc->c_close( cx->dbc );
			goto done_one;
		}

		/* If the on-disk count is zero we've never checked it.
		 * Count it now.
		 */
		if ( !dkids ) {
			cx->dbc->c_count( cx->dbc, &dkids, 0 );
			cx->ei->bei_dkids = dkids;
		}

		/* Switch to full-record bulk reads into the scratch buffer. */
		cx->data.data = cx->buf;
		cx->data.ulen = BDB_IDL_UM_SIZE * sizeof(ID);
		cx->data.flags = DB_DBT_USERMEM;

		if ( dkids > 1 ) {
			/* Fetch the rest of the IDs in a loop... */
			while ( (cx->rc = cx->dbc->c_get( cx->dbc, &cx->key, &cx->data,
				DB_MULTIPLE | DB_NEXT_DUP )) == 0 ) {
				u_int8_t *j;
				size_t len;
				void *ptr;
				DB_MULTIPLE_INIT( ptr, &cx->data );
				while (ptr) {
					DB_MULTIPLE_NEXT( ptr, &cx->data, j, len );
					if (j) {
						EntryInfo *ei2;
						diskNode *d = (diskNode *)j;
						short nrlen;

						/* The child's ID is stored in the last
						 * sizeof(ID) bytes of the record.
						 */
						BDB_DISK2ID( j + len - sizeof(ID), &ei.bei_id );
						/* nrdnlen is stored big-endian with the high
						 * bit of the first byte toggled.
						 */
						nrlen = ((d->nrdnlen[0] ^ 0x80) << 8) | d->nrdnlen[1];
						ei.bei_nrdn.bv_len = nrlen;
						/* nrdn/rdn are set in-place.
						 * hdb_cache_load will copy them as needed
						 */
						ei.bei_nrdn.bv_val = d->nrdn;
						ei.bei_rdn.bv_len = len - sizeof(diskNode)
							- ei.bei_nrdn.bv_len;
						ei.bei_rdn.bv_val = d->nrdn + ei.bei_nrdn.bv_len + 1;
						bdb_idl_append_one( cx->tmp, ei.bei_id );
						hdb_cache_load( cx->bdb, &ei, &ei2 );
					}
				}
			}
		}
		cx->rc = cx->dbc->c_close( cx->dbc );
done_one:
		/* Always clear the loader flag, even on error paths. */
		bdb_cache_entryinfo_lock( cx->ei );
		cx->ei->bei_state &= ~CACHE_ENTRY_ONELEVEL;
		bdb_cache_entryinfo_unlock( cx->ei );
		if ( cx->rc )
			return cx->rc;

	} else {
		/* The in-memory cache is in sync with the on-disk data.
		 * do we have any kids? */
synced:
		cx->rc = 0;
		if ( cx->ei->bei_ckids > 0 ) {
			/* Walk the kids tree; order is irrelevant since bdb_idl_sort
			 * will sort it later.
			 */
			avl_apply( cx->ei->bei_kids, apply_func,
				cx->tmp, -1, AVL_POSTORDER );
		}
		bdb_cache_entryinfo_unlock( cx->ei );
	}

	/* Sort the collected onelevel IDL (small/range IDLs need no sort)
	 * and publish it to the IDL cache under the DN_ONE_PREFIX key.
	 */
	if ( !BDB_IDL_IS_RANGE( cx->tmp ) && cx->tmp[0] > 3 )
		bdb_idl_sort( cx->tmp, cx->buf );
	if ( cx->bdb->bi_idl_cache_max_size && !BDB_IDL_IS_ZERO( cx->tmp )) {
		char *ptr = ((char *)&cx->id)-1;
		cx->key.data = ptr;
		cx->key.size = sizeof(ID)+1;
		*ptr = DN_ONE_PREFIX;
		bdb_idl_cache_put( cx->bdb, cx->db, &cx->key, cx->tmp, cx->rc );
	}

gotit:
	if ( !BDB_IDL_IS_ZERO( cx->tmp )) {
		if ( cx->prefix == DN_SUBTREE_PREFIX ) {
			bdb_idl_append( cx->ids, cx->tmp );
			cx->need_sort = 1;
			/* Recurse into each child unless we already know none of
			 * them has kids of its own.
			 */
			if ( !(cx->ei->bei_state & CACHE_ENTRY_NO_GRANDKIDS)) {
				ID *save, idcurs;
				EntryInfo *ei = cx->ei;
				int nokids = 1;
				/* cx->tmp is clobbered by the recursion, so walk a
				 * private copy of the child IDL.
				 */
				save = cx->op->o_tmpalloc( BDB_IDL_SIZEOF( cx->tmp ),
					cx->op->o_tmpmemctx );
				BDB_IDL_CPY( save, cx->tmp );

				idcurs = 0;
				cx->depth++;
				for ( cx->id = bdb_idl_first( save, &idcurs );
					cx->id != NOID;
					cx->id = bdb_idl_next( save, &idcurs )) {
					EntryInfo *ei2;
					cx->ei = NULL;
					if ( bdb_cache_find_id( cx->op, cx->txn, cx->id, &cx->ei,
						ID_NOENTRY, NULL ))
						continue;
					if ( cx->ei ) {
						ei2 = cx->ei;
						if ( !( ei2->bei_state & CACHE_ENTRY_NO_KIDS )) {
							/* Recurse; cx->id/cx->nid/cx->ei now refer
							 * to the child for the nested call.
							 */
							BDB_ID2DISK( cx->id, &cx->nid );
							hdb_dn2idl_internal( cx );
							if ( !BDB_IDL_IS_ZERO( cx->tmp ))
								nokids = 0;
						}
						/* Release the finder reference taken by
						 * bdb_cache_find_id.
						 */
						bdb_cache_entryinfo_lock( ei2 );
						ei2->bei_finders--;
						bdb_cache_entryinfo_unlock( ei2 );
					}
				}
				cx->depth--;
				cx->op->o_tmpfree( save, cx->op->o_tmpmemctx );
				if ( nokids ) {
					bdb_cache_entryinfo_lock( ei );
					ei->bei_state |= CACHE_ENTRY_NO_GRANDKIDS;
					bdb_cache_entryinfo_unlock( ei );
				}
			}
			/* Make sure caller knows it had kids! */
			cx->tmp[0]=1;
			cx->rc = 0;
		} else {
			BDB_IDL_CPY( cx->ids, cx->tmp );
		}
	}
	return cx->rc;
}
/*
 * Fetch the IDL stored under "key" in "db" into "ids".
 *
 * get_flag selects the match mode: LDAP_FILTER_GE uses DB_SET_RANGE,
 * LDAP_FILTER_LE starts at DB_FIRST and stops once the key exceeds the
 * target, anything else is an exact DB_SET lookup.  When saved_cursor
 * is non-NULL, the cursor is kept open on success so a later call can
 * resume the scan with DB_NEXT.  Exact lookups are served from (and
 * stored into) the in-memory IDL cache when it is enabled.
 *
 * Returns 0, DB_NOTFOUND, another DB error, or -1 on a malformed IDL.
 */
int
bdb_idl_fetch_key(
	BackendDB	*be,
	DB		*db,
	DB_TXN		*txn,
	DBT		*key,
	ID		*ids,
	DBC		**saved_cursor,
	int		get_flag )
{
	struct bdb_info *bdb = (struct bdb_info *) be->be_private;
	int rc;
	DBT data, key2, *kptr;
	DBC *cursor;
	ID *i;
	void *ptr;
	size_t len;
	int rc2;
	int flags = bdb->bi_db_opflags | DB_MULTIPLE;
	int opflag;

	/* If using BerkeleyDB 4.0, the buf must be large enough to
	 * grab the entire IDL in one get(), otherwise BDB will leak
	 * resources on subsequent get's.  We can safely call get()
	 * twice - once for the data, and once to get the DB_NOTFOUND
	 * result meaning there's no more data. See ITS#2040 for details.
	 * This bug is fixed in BDB 4.1 so a smaller buffer will work if
	 * stack space is too limited.
	 *
	 * configure now requires Berkeley DB 4.1.
	 */
#if DB_VERSION_FULL < 0x04010000
# define BDB_ENOUGH 5
#else
	/* We sometimes test with tiny IDLs, and BDB always wants buffers
	 * that are at least one page in size.
	 */
# if BDB_IDL_DB_SIZE < 4096
# define BDB_ENOUGH 2048
# else
# define BDB_ENOUGH 1
# endif
#endif
	ID buf[BDB_IDL_DB_SIZE*BDB_ENOUGH];

	char keybuf[16];

	Debug( LDAP_DEBUG_ARGS,
		"bdb_idl_fetch_key: %s\n",
		bdb_show_key( key, keybuf ), 0, 0 );

	assert( ids != NULL );

	/* Pick the cursor positioning flag for the requested match mode. */
	if ( saved_cursor && *saved_cursor ) {
		opflag = DB_NEXT;
	} else if ( get_flag == LDAP_FILTER_GE ) {
		opflag = DB_SET_RANGE;
	} else if ( get_flag == LDAP_FILTER_LE ) {
		opflag = DB_FIRST;
	} else {
		opflag = DB_SET;
	}

	/* only non-range lookups can use the IDL cache */
	if ( bdb->bi_idl_cache_size && opflag == DB_SET ) {
		rc = bdb_idl_cache_get( bdb, db, key, ids );
		if ( rc != LDAP_NO_SUCH_OBJECT ) return rc;
	}

	DBTzero( &data );

	data.data = buf;
	data.ulen = sizeof(buf);
	data.flags = DB_DBT_USERMEM;

	/* If we're not reusing an existing cursor, get a new one */
	if( opflag != DB_NEXT ) {
		rc = db->cursor( db, txn, &cursor, bdb->bi_db_opflags );
		if( rc != 0 ) {
			Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
				"cursor failed: %s (%d)\n", db_strerror(rc), rc, 0 );
			return rc;
		}
	} else {
		cursor = *saved_cursor;
	}

	/* If this is a LE lookup, save original key so we can determine
	 * when to stop. If this is a GE lookup, save the key since it
	 * will be overwritten.
	 */
	if ( get_flag == LDAP_FILTER_LE || get_flag == LDAP_FILTER_GE ) {
		DBTzero( &key2 );
		key2.flags = DB_DBT_USERMEM;
		key2.ulen = sizeof(keybuf);
		key2.data = keybuf;
		key2.size = key->size;
		AC_MEMCPY( keybuf, key->data, key->size );
		kptr = &key2;
	} else {
		kptr = key;
	}
	len = key->size;
	rc = cursor->c_get( cursor, kptr, &data, flags | opflag );

	/* skip presence key on range inequality lookups */
	while (rc == 0 && kptr->size != len) {
		rc = cursor->c_get( cursor, kptr, &data, flags | DB_NEXT_NODUP );
	}
	/* If we're doing a LE compare and the new key is greater than
	 * our search key, we're done
	 */
	if (rc == 0 && get_flag == LDAP_FILTER_LE && memcmp( kptr->data,
		key->data, key->size ) > 0 ) {
		rc = DB_NOTFOUND;
	}
	if (rc == 0) {
		i = ids;
		/* Decode the DB_MULTIPLE buffer(s): each record is one ID.
		 * i points at the last slot written; ids[0] becomes the count.
		 */
		while (rc == 0) {
			u_int8_t *j;

			DB_MULTIPLE_INIT( ptr, &data );
			while (ptr) {
				DB_MULTIPLE_NEXT(ptr, &data, j, len);
				if (j) {
					++i;
					BDB_DISK2ID( j, i );
				}
			}
			rc = cursor->c_get( cursor, key, &data, flags | DB_NEXT_DUP );
		}
		if ( rc == DB_NOTFOUND ) rc = 0;
		ids[0] = i - ids;
		/* On disk, a range is denoted by 0 in the first element */
		if (ids[1] == 0) {
			if (ids[0] != BDB_IDL_RANGE_SIZE) {
				Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
					"range size mismatch: expected %d, got %ld\n",
					BDB_IDL_RANGE_SIZE, ids[0], 0 );
				cursor->c_close( cursor );
				return -1;
			}
			BDB_IDL_RANGE( ids, ids[2], ids[3] );
		}
		/* The bulk-get loop clobbered data.size; restore it so the
		 * sanity checks below compare against the decoded IDL size.
		 */
		data.size = BDB_IDL_SIZEOF(ids);
	}

	/* Keep the cursor open for the caller to resume, else close it. */
	if ( saved_cursor && rc == 0 ) {
		if ( !*saved_cursor )
			*saved_cursor = cursor;
		rc2 = 0;
	}
	else
		rc2 = cursor->c_close( cursor );
	if (rc2) {
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
			"close failed: %s (%d)\n", db_strerror(rc2), rc2, 0 );
		return rc2;
	}

	if( rc == DB_NOTFOUND ) {
		return rc;

	} else if( rc != 0 ) {
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
			"get failed: %s (%d)\n",
			db_strerror(rc), rc, 0 );
		return rc;

	} else if ( data.size == 0 || data.size % sizeof( ID ) ) {
		/* size not multiple of ID size */
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
			"odd size: expected %ld multiple, got %ld\n",
			(long) sizeof( ID ), (long) data.size, 0 );
		return -1;

	} else if ( data.size != BDB_IDL_SIZEOF(ids) ) {
		/* size mismatch */
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
			"get size mismatch: expected %ld, got %ld\n",
			(long) ((1 + ids[0]) * sizeof( ID )), (long) data.size, 0 );
		return -1;
	}

	if ( bdb->bi_idl_cache_max_size ) {
		bdb_idl_cache_put( bdb, db, key, ids, rc );
	}

	return rc;
}
/*
 * Build the IDL of entries in scope under ndn for a onelevel or subtree
 * search.  The result is collected into ids[]; stack[] supplies the two
 * scratch IDLs (tmp and buf) used during the traversal.  Returns
 * LDAP_SUCCESS or a database error code.
 */
int
hdb_dn2idl(
	Operation	*op,
	DB_TXN		*txn,
	struct berval	*ndn,
	EntryInfo	*ei,
	ID		*ids,
	ID		*stack )
{
	struct bdb_info *bdb = (struct bdb_info *)op->o_bd->be_private;
	struct dn2id_cookie cx;

	Debug( LDAP_DEBUG_TRACE, "=> hdb_dn2idl(\"%s\")\n",
		ndn->bv_val, 0, 0 );

#ifndef BDB_MULTIPLE_SUFFIXES
	/* With a single suffix, a non-onelevel search rooted at (or just
	 * below) the database root spans every entry: answer with the full
	 * range IDL instead of walking the tree.
	 */
	if ( op->ors_scope != LDAP_SCOPE_ONELEVEL &&
		( ei->bei_id == 0 ||
		( ei->bei_parent->bei_id == 0 && op->o_bd->be_suffix[0].bv_len )))
	{
		BDB_IDL_ALL( bdb, ids );
		return 0;
	}
#endif

	/* Populate the traversal cookie shared with hdb_dn2idl_internal. */
	cx.op = op;
	cx.txn = txn;
	cx.bdb = bdb;
	cx.db = bdb->bi_dn2id->bdi_db;
	cx.ei = ei;
	cx.id = ei->bei_id;
	BDB_ID2DISK( cx.id, &cx.nid );
	if ( op->ors_scope == LDAP_SCOPE_ONELEVEL ) {
		cx.prefix = DN_ONE_PREFIX;
	} else {
		cx.prefix = DN_SUBTREE_PREFIX;
	}
	cx.ids = ids;
	cx.tmp = stack;
	cx.buf = stack + BDB_IDL_UM_SIZE;
	cx.need_sort = 0;
	cx.depth = 0;

	/* A subtree result always contains the base entry itself. */
	if ( cx.prefix == DN_SUBTREE_PREFIX ) {
		ids[0] = 1;
		ids[1] = cx.id;
	} else {
		BDB_IDL_ZERO( ids );
	}

	/* Known to be childless: nothing more to collect. */
	if ( ei->bei_state & CACHE_ENTRY_NO_KIDS )
		return LDAP_SUCCESS;

	DBTzero( &cx.key );
	cx.key.ulen = sizeof(ID);
	cx.key.size = sizeof(ID);
	cx.key.flags = DB_DBT_USERMEM;
	DBTzero( &cx.data );

	hdb_dn2idl_internal( &cx );

	if ( cx.need_sort ) {
		/* Key is one prefix byte followed by the base entry's ID. */
		char *kbuf = ((char *)&cx.id) - 1;

		if ( !BDB_IDL_IS_RANGE( cx.ids ) && cx.ids[0] > 3 )
			bdb_idl_sort( cx.ids, cx.tmp );
		cx.key.data = kbuf;
		cx.key.size = sizeof(ID)+1;
		*kbuf = cx.prefix;
		cx.id = ei->bei_id;
		if ( bdb->bi_idl_cache_max_size )
			bdb_idl_cache_put( bdb, cx.db, &cx.key, cx.ids, cx.rc );
	}

	return ( cx.rc == DB_NOTFOUND ) ? LDAP_SUCCESS : cx.rc;
}