/* Fetch the IDL (ID list) stored under `key` in index database `db` into
 * the caller-supplied `ids` array, using BerkeleyDB bulk retrieval.
 *
 * be           - backend whose private data holds the bdb_info
 * db           - the index DB to read
 * txn          - transaction context (may be NULL), passed to db->cursor()
 * key          - index key to look up
 * ids          - output IDL; ids[0] receives the count, or the whole array
 *                is rewritten as a range via BDB_IDL_RANGE()
 * saved_cursor - optional in/out cursor: if *saved_cursor is set, the scan
 *                resumes from it with DB_NEXT; on success with saved_cursor
 *                non-NULL the cursor is stored back instead of being closed,
 *                so the caller can continue a paged/range walk
 * get_flag     - LDAP_FILTER_GE / LDAP_FILTER_LE select a range scan;
 *                anything else is an exact DB_SET lookup
 *
 * Returns 0 on success, DB_NOTFOUND when no matching key exists, a BDB
 * error code on cursor/get/close failure, or -1 on IDL size corruption.
 */
int bdb_idl_fetch_key( BackendDB *be, DB *db, DB_TXN *txn, DBT *key, ID *ids, DBC **saved_cursor, int get_flag )
{
	struct bdb_info *bdb = (struct bdb_info *) be->be_private;
	int rc;
	DBT data, key2, *kptr;
	DBC *cursor;
	ID *i;
	void *ptr;
	size_t len;
	int rc2;
	int flags = bdb->bi_db_opflags | DB_MULTIPLE;	/* always bulk-fetch */
	int opflag;	/* initial c_get positioning flag, chosen below */

	/* If using BerkeleyDB 4.0, the buf must be large enough to
	 * grab the entire IDL in one get(), otherwise BDB will leak
	 * resources on subsequent get's.  We can safely call get()
	 * twice - once for the data, and once to get the DB_NOTFOUND
	 * result meaning there's no more data. See ITS#2040 for details.
	 * This bug is fixed in BDB 4.1 so a smaller buffer will work if
	 * stack space is too limited.
	 *
	 * configure now requires Berkeley DB 4.1.
	 */
#if DB_VERSION_FULL < 0x04010000
#	define BDB_ENOUGH 5
#else
	/* We sometimes test with tiny IDLs, and BDB always wants buffers
	 * that are at least one page in size.
	 */
#	if BDB_IDL_DB_SIZE < 4096
#		define BDB_ENOUGH 2048
#	else
#		define BDB_ENOUGH 1
#	endif
#endif
	/* stack buffer for DB_MULTIPLE bulk gets */
	ID buf[BDB_IDL_DB_SIZE*BDB_ENOUGH];

	char keybuf[16];	/* also reused as key2's backing store below */

	Debug( LDAP_DEBUG_ARGS,
		"bdb_idl_fetch_key: %s\n",
		bdb_show_key( key, keybuf ), 0, 0 );

	assert( ids != NULL );

	/* Choose how to position the cursor:
	 * - resuming a saved cursor: just step forward (DB_NEXT)
	 * - GE filter: seek to the first key >= search key (DB_SET_RANGE)
	 * - LE filter: start from the beginning and stop when past the key
	 * - otherwise: exact-match lookup (DB_SET)
	 */
	if ( saved_cursor && *saved_cursor ) {
		opflag = DB_NEXT;
	} else if ( get_flag == LDAP_FILTER_GE ) {
		opflag = DB_SET_RANGE;
	} else if ( get_flag == LDAP_FILTER_LE ) {
		opflag = DB_FIRST;
	} else {
		opflag = DB_SET;
	}

	/* only non-range lookups can use the IDL cache */
	if ( bdb->bi_idl_cache_size && opflag == DB_SET ) {
		rc = bdb_idl_cache_get( bdb, db, key, ids );
		/* LDAP_NO_SUCH_OBJECT means "not cached" - fall through to disk */
		if ( rc != LDAP_NO_SUCH_OBJECT ) return rc;
	}

	DBTzero( &data );

	data.data = buf;
	data.ulen = sizeof(buf);
	data.flags = DB_DBT_USERMEM;	/* fill our stack buffer, no malloc */

	/* If we're not reusing an existing cursor, get a new one */
	if( opflag != DB_NEXT ) {
		rc = db->cursor( db, txn, &cursor, bdb->bi_db_opflags );
		if( rc != 0 ) {
			Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
				"cursor failed: %s (%d)\n", db_strerror(rc), rc, 0 );
			return rc;
		}
	} else {
		cursor = *saved_cursor;
	}

	/* If this is a LE lookup, save original key so we can determine
	 * when to stop. If this is a GE lookup, save the key since it
	 * will be overwritten.
	 */
	if ( get_flag == LDAP_FILTER_LE || get_flag == LDAP_FILTER_GE ) {
		/* kptr points at a private copy (key2/keybuf) so c_get can
		 * overwrite it while `key` keeps the original search value.
		 * NOTE(review): assumes key->size <= sizeof(keybuf) (16) for
		 * these index keys - confirm against the key construction.
		 */
		DBTzero( &key2 );
		key2.flags = DB_DBT_USERMEM;
		key2.ulen = sizeof(keybuf);
		key2.data = keybuf;
		key2.size = key->size;
		AC_MEMCPY( keybuf, key->data, key->size );
		kptr = &key2;
	} else {
		kptr = key;
	}
	len = key->size;
	rc = cursor->c_get( cursor, kptr, &data, flags | opflag );

	/* skip presence key on range inequality lookups */
	while (rc == 0 && kptr->size != len) {
		rc = cursor->c_get( cursor, kptr, &data, flags | DB_NEXT_NODUP );
	}
	/* If we're doing a LE compare and the new key is greater than
	 * our search key, we're done
	 */
	if (rc == 0 && get_flag == LDAP_FILTER_LE && memcmp( kptr->data, key->data, key->size ) > 0 ) {
		rc = DB_NOTFOUND;
	}
	if (rc == 0) {
		/* Drain all duplicates for this key, unpacking each bulk
		 * buffer with the DB_MULTIPLE macros into ids[1..].
		 */
		i = ids;
		while (rc == 0) {
			u_int8_t *j;

			DB_MULTIPLE_INIT( ptr, &data );
			while (ptr) {
				DB_MULTIPLE_NEXT(ptr, &data, j, len);
				if (j) {
					++i;
					/* convert on-disk byte order to native ID */
					BDB_DISK2ID( j, i );
				}
			}
			rc = cursor->c_get( cursor, key, &data, flags | DB_NEXT_DUP );
		}
		/* running out of duplicates is the normal termination */
		if ( rc == DB_NOTFOUND ) rc = 0;
		ids[0] = i - ids;	/* element count goes in slot 0 */
		/* On disk, a range is denoted by 0 in the first element */
		if (ids[1] == 0) {
			if (ids[0] != BDB_IDL_RANGE_SIZE) {
				Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
					"range size mismatch: expected %d, got %ld\n",
					BDB_IDL_RANGE_SIZE, ids[0], 0 );
				cursor->c_close( cursor );
				return -1;
			}
			/* rewrite ids as an in-memory range [lo, hi] */
			BDB_IDL_RANGE( ids, ids[2], ids[3] );
		}
		/* record total size for the sanity checks below */
		data.size = BDB_IDL_SIZEOF(ids);
	}

	/* Keep the cursor open for the caller on success, else close it */
	if ( saved_cursor && rc == 0 ) {
		if ( !*saved_cursor )
			*saved_cursor = cursor;
		rc2 = 0;
	}
	else
		rc2 = cursor->c_close( cursor );
	if (rc2) {
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
			"close failed: %s (%d)\n", db_strerror(rc2), rc2, 0 );
		return rc2;
	}

	if( rc == DB_NOTFOUND ) {
		return rc;
	} else if( rc != 0 ) {
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
			"get failed: %s (%d)\n",
			db_strerror(rc), rc, 0 );
		return rc;
	} else if ( data.size == 0 || data.size % sizeof( ID ) ) {
		/* size not multiple of ID size */
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
			"odd size: expected %ld multiple, got %ld\n",
			(long) sizeof( ID ), (long) data.size, 0 );
		return -1;
	} else if ( data.size != BDB_IDL_SIZEOF(ids) ) {
		/* size mismatch */
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
			"get size mismatch: expected %ld, got %ld\n",
			(long) ((1 + ids[0]) * sizeof( ID )), (long) data.size, 0 );
		return -1;
	}

	/* NOTE(review): the cache-get above is gated on bi_idl_cache_size &&
	 * opflag == DB_SET, but this put is gated only on
	 * bi_idl_cache_max_size - presumably intentional (cache_put may
	 * handle range results itself); confirm against bdb_idl_cache_put.
	 */
	if ( bdb->bi_idl_cache_max_size ) {
		bdb_idl_cache_put( bdb, db, key, ids, rc );
	}

	return rc;
}
/* Fetch the IDL stored under `key` in index database `db` into `ids`
 * using a DB_SET exact-match cursor and DB_MULTIPLE bulk retrieval.
 *
 * NOTE(review): this is a second definition of bdb_idl_fetch_key in this
 * file - apparently an older revision (no saved_cursor/get_flag range
 * support, SLAP_IDL_CACHE/NEW_LOGGING conditionals, raw AC_MEMCPY instead
 * of the BDB_DISK2ID conversion used above). Both cannot be compiled into
 * one translation unit; confirm which revision is intended to be live.
 *
 * be  - backend whose private data holds the bdb_info
 * db  - the index DB to read
 * tid - transaction (may be NULL); when set, reads take DB_RMW locks
 * key - index key to look up
 * ids - output IDL; ids[0] receives the count, or the array is rewritten
 *       as a range via BDB_IDL_RANGE()
 *
 * Returns LDAP_SUCCESS/0 on success (including a cache hit), DB_NOTFOUND
 * when the key is absent, a BDB error code on cursor/get/close failure,
 * or -1 on IDL size corruption.
 */
int bdb_idl_fetch_key( BackendDB *be, DB *db, DB_TXN *tid, DBT *key, ID *ids )
{
	struct bdb_info *bdb = (struct bdb_info *) be->be_private;
	int rc;
	DBT data;
	DBC *cursor;
	ID *i;
	void *ptr;
	size_t len;
	int rc2;
	int flags = bdb->bi_db_opflags | DB_MULTIPLE;	/* always bulk-fetch */
#ifdef SLAP_IDL_CACHE
	bdb_idl_cache_entry_t idl_tmp;	/* probe key for the AVL cache lookup */
#endif

	/* If using BerkeleyDB 4.0, the buf must be large enough to
	 * grab the entire IDL in one get(), otherwise BDB will leak
	 * resources on subsequent get's.  We can safely call get()
	 * twice - once for the data, and once to get the DB_NOTFOUND
	 * result meaning there's no more data. See ITS#2040 for details.
	 * This bug is fixed in BDB 4.1 so a smaller buffer will work if
	 * stack space is too limited.
	 *
	 * configure now requires Berkeley DB 4.1.
	 */
#if (DB_VERSION_MAJOR == 4) && (DB_VERSION_MINOR == 0)
#	define BDB_ENOUGH 5
#else
#	define BDB_ENOUGH 1
#endif
	/* stack buffer for DB_MULTIPLE bulk gets */
	ID buf[BDB_IDL_DB_SIZE*BDB_ENOUGH];

	char keybuf[16];	/* scratch for bdb_show_key() */

#ifdef NEW_LOGGING
	LDAP_LOG( INDEX, ARGS,
		"bdb_idl_fetch_key: %s\n",
		bdb_show_key( key, keybuf ), 0, 0 );
#else
	Debug( LDAP_DEBUG_ARGS,
		"bdb_idl_fetch_key: %s\n",
		bdb_show_key( key, keybuf ), 0, 0 );
#endif

	assert( ids != NULL );

#ifdef SLAP_IDL_CACHE
	/* Check the in-memory IDL cache first; a hit is copied out and
	 * promoted to the front of the LRU under the tree mutex.
	 */
	if ( bdb->bi_idl_cache_max_size ) {
		bdb_idl_cache_entry_t *matched_idl_entry;
		DBT2bv( key, &idl_tmp.kstr );
		idl_tmp.db = db;
		ldap_pvt_thread_mutex_lock( &bdb->bi_idl_tree_mutex );
		matched_idl_entry = avl_find( bdb->bi_idl_tree, &idl_tmp,
			bdb_idl_entry_cmp );
		if ( matched_idl_entry != NULL ) {
			BDB_IDL_CPY( ids, matched_idl_entry->idl );
			/* delete + re-add moves the entry to the LRU head */
			IDL_LRU_DELETE( bdb, matched_idl_entry );
			IDL_LRU_ADD( bdb, matched_idl_entry );
			ldap_pvt_thread_mutex_unlock( &bdb->bi_idl_tree_mutex );
			return LDAP_SUCCESS;
		}
		ldap_pvt_thread_mutex_unlock( &bdb->bi_idl_tree_mutex );
	}
#endif

	DBTzero( &data );

	data.data = buf;
	data.ulen = sizeof(buf);
	data.flags = DB_DBT_USERMEM;	/* fill our stack buffer, no malloc */

	/* inside a transaction, take write locks up front to avoid deadlock */
	if ( tid ) flags |= DB_RMW;

	rc = db->cursor( db, tid, &cursor, bdb->bi_db_opflags );
	if( rc != 0 ) {
#ifdef NEW_LOGGING
		LDAP_LOG( INDEX, ERR,
			"bdb_idl_fetch_key: cursor failed: %s (%d)\n",
			db_strerror(rc), rc, 0 );
#else
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
			"cursor failed: %s (%d)\n", db_strerror(rc), rc, 0 );
#endif
		return rc;
	}

	rc = cursor->c_get( cursor, key, &data, flags | DB_SET );
	if (rc == 0) {
		/* Drain all duplicates for this key, unpacking each bulk
		 * buffer with the DB_MULTIPLE macros into ids[1..].
		 */
		i = ids;
		while (rc == 0) {
			u_int8_t *j;

			DB_MULTIPLE_INIT( ptr, &data );
			while (ptr) {
				DB_MULTIPLE_NEXT(ptr, &data, j, len);
				if (j) {
					++i;
					/* NOTE(review): raw byte copy - no disk-to-host
					 * conversion here, unlike the BDB_DISK2ID used by
					 * the other revision of this function; confirm the
					 * on-disk ID format for this revision.
					 */
					AC_MEMCPY( i, j, sizeof(ID) );
				}
			}
			rc = cursor->c_get( cursor, key, &data, flags | DB_NEXT_DUP );
		}
		/* running out of duplicates is the normal termination */
		if ( rc == DB_NOTFOUND ) rc = 0;
		ids[0] = i - ids;	/* element count goes in slot 0 */
		/* On disk, a range is denoted by 0 in the first element */
		if (ids[1] == 0) {
			if (ids[0] != BDB_IDL_RANGE_SIZE) {
#ifdef NEW_LOGGING
				LDAP_LOG( INDEX, ERR,
					"=> bdb_idl_fetch_key: range size mismatch: "
					"expected %ld, got %ld\n",
					BDB_IDL_RANGE_SIZE, ids[0], 0 );
#else
				Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
					"range size mismatch: expected %d, got %ld\n",
					BDB_IDL_RANGE_SIZE, ids[0], 0 );
#endif
				cursor->c_close( cursor );
				return -1;
			}
			/* rewrite ids as an in-memory range [lo, hi] */
			BDB_IDL_RANGE( ids, ids[2], ids[3] );
		}
		/* record total size for the sanity checks below */
		data.size = BDB_IDL_SIZEOF(ids);
	}

	rc2 = cursor->c_close( cursor );
	if (rc2) {
#ifdef NEW_LOGGING
		LDAP_LOG( INDEX, ERR,
			"bdb_idl_fetch_key: close failed: %s (%d)\n",
			db_strerror(rc2), rc2, 0 );
#else
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
			"close failed: %s (%d)\n", db_strerror(rc2), rc2, 0 );
#endif
		return rc2;
	}

	if( rc == DB_NOTFOUND ) {
		return rc;
	} else if( rc != 0 ) {
#ifdef NEW_LOGGING
		LDAP_LOG( INDEX, ERR,
			"bdb_idl_fetch_key: get failed: %s (%d)\n",
			db_strerror(rc), rc, 0 );
#else
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
			"get failed: %s (%d)\n",
			db_strerror(rc), rc, 0 );
#endif
		return rc;
	} else if ( data.size == 0 || data.size % sizeof( ID ) ) {
		/* size not multiple of ID size */
#ifdef NEW_LOGGING
		LDAP_LOG( INDEX, ERR,
			"bdb_idl_fetch_key: odd size: expected %ld multiple, got %ld\n",
			(long) sizeof( ID ), (long) data.size, 0 );
#else
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
			"odd size: expected %ld multiple, got %ld\n",
			(long) sizeof( ID ), (long) data.size, 0 );
#endif
		return -1;
	} else if ( data.size != BDB_IDL_SIZEOF(ids) ) {
		/* size mismatch */
#ifdef NEW_LOGGING
		LDAP_LOG( INDEX, ERR,
			"bdb_idl_fetch_key: get size mismatch: expected %ld, got %ld\n",
			(long) ((1 + ids[0]) * sizeof( ID )), (long) data.size, 0 );
#else
		Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
			"get size mismatch: expected %ld, got %ld\n",
			(long) ((1 + ids[0]) * sizeof( ID )), (long) data.size, 0 );
#endif
		return -1;
	}

#ifdef SLAP_IDL_CACHE
	/* Insert the freshly-read IDL into the cache; on overflow, evict up
	 * to 10 entries from the LRU tail.  idl_tmp.kstr was set by the
	 * lookup above (both paths are under the same #ifdef and gate).
	 */
	if ( bdb->bi_idl_cache_max_size ) {
		bdb_idl_cache_entry_t *ee;
		ee = (bdb_idl_cache_entry_t *) ch_malloc( sizeof( bdb_idl_cache_entry_t ) );
		ee->db = db;
		ee->idl = (ID*) ch_malloc( BDB_IDL_SIZEOF ( ids ) );
		ee->idl_lru_prev = NULL;
		ee->idl_lru_next = NULL;
		BDB_IDL_CPY( ee->idl, ids );
		ber_dupbv( &ee->kstr, &idl_tmp.kstr );
		ldap_pvt_thread_mutex_lock( &bdb->bi_idl_tree_mutex );
		/* avl_insert fails on duplicate key: another thread beat us,
		 * so discard our copy instead of inserting it */
		if ( avl_insert( &bdb->bi_idl_tree, (caddr_t) ee,
			bdb_idl_entry_cmp, avl_dup_error ))
		{
			ch_free( ee->kstr.bv_val );
			ch_free( ee->idl );
			ch_free( ee );
		} else {
			IDL_LRU_ADD( bdb, ee );
			if ( ++bdb->bi_idl_cache_size > bdb->bi_idl_cache_max_size ) {
				int i = 0;	/* shadows the outer ID *i; bounds eviction */
				while ( bdb->bi_idl_lru_tail != NULL && i < 10 ) {
					ee = bdb->bi_idl_lru_tail;
					if ( avl_delete( &bdb->bi_idl_tree, (caddr_t) ee,
						bdb_idl_entry_cmp ) == NULL )
					{
#ifdef NEW_LOGGING
						LDAP_LOG( INDEX, ERR,
							"bdb_idl_fetch_key: AVL delete failed\n",
							0, 0, 0 );
#else
						Debug( LDAP_DEBUG_ANY, "=> bdb_idl_fetch_key: "
							"AVL delete failed\n",
							0, 0, 0 );
#endif
					}
					IDL_LRU_DELETE( bdb, ee );
					i++;
					--bdb->bi_idl_cache_size;
					ch_free( ee->kstr.bv_val );
					ch_free( ee->idl );
					ch_free( ee );
				}
			}
		}
		ldap_pvt_thread_mutex_unlock( &bdb->bi_idl_tree_mutex );
	}
#endif
	return rc;
}