/**
 * Flush the backend: persist the btree's in-memory index metadata
 * into the environment's index-data slot.
 *
 * @remark this function is called during ham_flush
 * @note This is a B+-tree 'backend' method.
 */
static ham_status_t
my_fun_flush(ham_btree_t *be)
{
    ham_db_t *db = be_get_db(be);
    db_indexdata_t *idx = env_get_indexdata_ptr(db_get_env(db),
                    db_get_indexdata_offset(db));

    /* a clean backend was not touched - nothing to write back */
    if (!be_is_dirty(be))
        return (0);

    /* copy the volatile backend state into the persistent index slot */
    index_set_max_keys(idx, btree_get_maxkeys(be));
    index_set_keysize(idx, be_get_keysize(be));
    index_set_self(idx, btree_get_rootpage(be));
    index_set_flags(idx, be_get_flags(be));
    index_set_recno(idx, be_get_recno(be));
    index_clear_reserved(idx);

    /* the environment now holds modified metadata; the backend is clean */
    env_set_dirty(db_get_env(db));
    be_set_dirty(be, HAM_FALSE);

    return (0);
}
/**
 * Estimate the number of keys per page, given the keysize.
 *
 * @remark this function is only available when
 * hamsterdb is compiled with HAM_ENABLE_INTERNAL turned on.
 *
 * @note This is a B+-tree 'backend' method.
 */
static ham_status_t
my_fun_calc_keycount_per_page(ham_btree_t *be, ham_size_t *maxkeys,
                ham_u16_t keysize)
{
    ham_db_t *db = be_get_db(be);

    /* keysize 0: caller wants the backend's current capacity */
    if (keysize == 0) {
        *maxkeys = btree_get_maxkeys(be);
        return (0);
    }

    /*
     * the maxkeys field is only 16 bits wide - make sure the
     * computed count does not overflow it
     */
    *maxkeys = btree_calc_maxkeys(env_get_pagesize(db_get_env(db)), keysize);
    if (*maxkeys > MAX_KEYS_PER_NODE) {
        ham_trace(("keysize/pagesize ratio too high"));
        return (HAM_INV_KEYSIZE);
    }
    if (*maxkeys == 0) {
        ham_trace(("keysize too large for the current pagesize"));
        return (HAM_INV_KEYSIZE);
    }

    return (0);
}
/**
 * Print metadata of one database; if @a full is non-zero, additionally
 * walk every item with a cursor and print key/record size statistics.
 *
 * @param db      the open database handle
 * @param dbname  the database name (printed for identification only)
 * @param full    non-zero to traverse all items and gather statistics
 *
 * Bug fix: the "maximum record size" line previously printed
 * @c min_rec_size instead of @c max_rec_size (copy-paste defect).
 */
static void
print_database(ham_db_t *db, ham_u16_t dbname, int full)
{
    ham_btree_t *be;
    ham_cursor_t *cursor;
    ham_status_t st;
    ham_key_t key;
    ham_record_t rec;

    /* min_* start at UINT32_MAX so the first item always lowers them */
    unsigned num_items=0, ext_keys=0, min_key_size=0xffffffff,
        max_key_size=0, min_rec_size=0xffffffff, max_rec_size=0,
        total_key_size=0, total_rec_size=0;

    be=(ham_btree_t *)db_get_backend(db);

    memset(&key, 0, sizeof(key));
    memset(&rec, 0, sizeof(rec));

    printf("\n");
    printf(" database %d (0x%x)\n", (int)dbname, (int)dbname);
    printf(" max key size: %u\n", be_get_keysize(be));
    printf(" max keys per page: %u\n", btree_get_maxkeys(be));
    printf(" address of root page: %llu\n",
            (long long unsigned int)btree_get_rootpage(be));
    printf(" flags: 0x%04x\n", db_get_rt_flags(db));

    if (!full)
        return;

    st=ham_cursor_create(db, 0, 0, &cursor);
    if (st!=HAM_SUCCESS)
        error("ham_cursor_create", st);

    /* iterate all items and accumulate the statistics */
    while (1) {
        st=ham_cursor_move(cursor, &key, &rec, HAM_CURSOR_NEXT);
        if (st!=HAM_SUCCESS) {
            /* reached end of the database? */
            if (st==HAM_KEY_NOT_FOUND)
                break;
            else
                error("ham_cursor_next", st);
        }

        num_items++;

        if (key.size<min_key_size)
            min_key_size=key.size;
        if (key.size>max_key_size)
            max_key_size=key.size;
        if (rec.size<min_rec_size)
            min_rec_size=rec.size;
        if (rec.size>max_rec_size)
            max_rec_size=rec.size;

        /* keys larger than the configured keysize are stored extended */
        if (key.size>db_get_keysize(db))
            ext_keys++;

        total_key_size+=key.size;
        total_rec_size+=rec.size;
    }

    ham_cursor_close(cursor);

    printf(" number of items: %u\n", num_items);
    if (num_items==0)
        return;
    printf(" average key size: %u\n", total_key_size/num_items);
    printf(" minimum key size: %u\n", min_key_size);
    printf(" maximum key size: %u\n", max_key_size);
    printf(" number of extended keys:%u\n", ext_keys);
    printf(" total keys (bytes): %u\n", total_key_size);
    printf(" average record size: %u\n", total_rec_size/num_items);
    printf(" minimum record size: %u\n", min_rec_size);
    /* fixed: previously printed min_rec_size here */
    printf(" maximum record size: %u\n", max_rec_size);
    printf(" total records (bytes): %u\n", total_rec_size);
}
static ham_status_t __insert_in_page(ham_page_t *page, ham_key_t *key, ham_offset_t rid, insert_scratchpad_t *scratchpad, insert_hints_t *hints) { ham_status_t st; ham_size_t maxkeys=btree_get_maxkeys(scratchpad->be); btree_node_t *node=ham_page_get_btree_node(page); ham_assert(maxkeys>1, ("invalid result of db_get_maxkeys(): %d", maxkeys)); ham_assert(hints->force_append == HAM_FALSE, (0)); ham_assert(hints->force_prepend == HAM_FALSE, (0)); /* * prepare the page for modifications */ st=ham_log_add_page_before(page); if (st) return (st); /* * if we can insert the new key without splitting the page: * __insert_nosplit() will do the work for us */ if (btree_node_get_count(node)<maxkeys) { st=__insert_nosplit(page, key, rid, scratchpad->record, scratchpad->cursor, hints); scratchpad->cursor=0; /* don't overwrite cursor if __insert_nosplit is called again */ return (st); } /* * otherwise, we have to split the page. * but BEFORE we split, we check if the key already exists! */ if (btree_node_is_leaf(node)) { ham_s32_t idx; hints->cost++; idx = btree_node_search_by_key(page_get_owner(page), page, key, HAM_FIND_EXACT_MATCH); /* key exists! */ if (idx>=0) { ham_assert((hints->flags & (HAM_DUPLICATE_INSERT_BEFORE |HAM_DUPLICATE_INSERT_AFTER |HAM_DUPLICATE_INSERT_FIRST |HAM_DUPLICATE_INSERT_LAST)) ? (hints->flags & HAM_DUPLICATE) : 1, (0)); if (!(hints->flags & (HAM_OVERWRITE | HAM_DUPLICATE))) return (HAM_DUPLICATE_KEY); st=__insert_nosplit(page, key, rid, scratchpad->record, scratchpad->cursor, hints); /* don't overwrite cursor if __insert_nosplit is called again */ scratchpad->cursor=0; return (st); } } return (__insert_split(page, key, rid, scratchpad, hints)); }
/*
 * Fast-track insert for (mostly) sequential workloads: try to append or
 * prepend the key directly into the hinted leaf page instead of running
 * a full top-down btree descent.
 *
 * Falls back to __insert_cursor() (the regular insert path) whenever the
 * hinted page is gone from the cache, is full, is not the outermost leaf,
 * or the key does not actually belong at the edge being hinted.
 */
static ham_status_t
__append_key(ham_btree_t *be, ham_key_t *key, ham_record_t *record,
                ham_bt_cursor_t *cursor, insert_hints_t *hints)
{
    ham_status_t st=0;
    ham_page_t *page;
    btree_node_t *node;
    ham_db_t *db;

#ifdef HAM_DEBUG
    if (cursor && !bt_cursor_is_nil(cursor)) {
        ham_assert(be_get_db(be) == bt_cursor_get_db(cursor), (0));
    }
#endif

    db = be_get_db(be);

    /*
     * see if we get this btree leaf; if not, revert to regular scan
     *
     * As this is a speed-improvement hint re-using recent material, the page
     * should still sit in the cache, or we're using old info, which should be
     * discarded.
     */
    st = db_fetch_page(&page, db, hints->leaf_page_addr, DB_ONLY_FROM_CACHE);
    if (st)
        return st;
    if (!page) {
        /* hinted page is no longer cached - clear the stale hints and
         * take the regular insert path */
        hints->force_append = HAM_FALSE;
        hints->force_prepend = HAM_FALSE;
        return (__insert_cursor(be, key, record, cursor, hints));
    }

    /* keep the page pinned while we work on it; every exit below must
     * release this reference */
    page_add_ref(page);
    node=ham_page_get_btree_node(page);
    ham_assert(btree_node_is_leaf(node), ("iterator points to internal node"));

    /*
     * if the page is already full OR this page is not the right-most page
     * when we APPEND or the left-most node when we PREPEND
     * OR the new key is not the highest key: perform a normal insert
     */
    if ((hints->force_append && btree_node_get_right(node))
            || (hints->force_prepend && btree_node_get_left(node))
            || btree_node_get_count(node) >= btree_get_maxkeys(be)) {
        page_release_ref(page);
        hints->force_append = HAM_FALSE;
        hints->force_prepend = HAM_FALSE;
        return (__insert_cursor(be, key, record, cursor, hints));
    }

    /*
     * if the page is not empty: check if we append the key at the end / start
     * (depending on force_append/force_prepend),
     * or if it's actually inserted in the middle (when neither force_append
     * or force_prepend is specified: that'd be SEQUENTIAL insertion
     * hinting somewhere in the middle of the total key range.
     */
    if (btree_node_get_count(node)!=0) {
        int cmp_hi;     /* key vs. highest key in this page */
        int cmp_lo;     /* key vs. lowest key in this page */

        hints->cost++;
        if (!hints->force_prepend) {
            cmp_hi = key_compare_pub_to_int(db, page, key,
                                btree_node_get_count(node)-1);
            /* comparison values < -1 are error codes, not orderings */
            if (cmp_hi < -1) {
                page_release_ref(page);
                return (ham_status_t)cmp_hi;
            }
            /* key is at the end */
            if (cmp_hi > 0) {
                if (btree_node_get_right(node)) {
                    /* not at top end of the btree, so we can't do the
                     * fast track */
                    page_release_ref(page);
                    //hints->flags &= ~HAM_HINT_APPEND;
                    hints->force_append = HAM_FALSE;
                    hints->force_prepend = HAM_FALSE;
                    return (__insert_cursor(be, key, record, cursor, hints));
                }
                hints->force_append = HAM_TRUE;
                hints->force_prepend = HAM_FALSE;
            }
        }
        else { /* hints->force_prepend is true */
            /* skip the high-end compare: a forced PREPEND key is treated
             * as not bigger than the right-most key of this page */
            cmp_hi = -1;
        }

        if (!hints->force_append) {
            cmp_lo = key_compare_pub_to_int(db, page, key, 0);
            /* comparison values < -1 are error codes, not orderings */
            if (cmp_lo < -1) {
                page_release_ref(page);
                return ((ham_status_t)cmp_lo);
            }
            /* key is at the start of page */
            if (cmp_lo < 0) {
                if (btree_node_get_left(node)) {
                    /* not at bottom end of the btree, so we can't
                     * do the fast track */
                    page_release_ref(page);
                    //hints->flags &= ~HAM_HINT_PREPEND;
                    hints->force_append = HAM_FALSE;
                    hints->force_prepend = HAM_FALSE;
                    return (__insert_cursor(be, key, record, cursor, hints));
                }
                hints->force_append = HAM_FALSE;
                hints->force_prepend = HAM_TRUE;
            }
        }
        else { /* hints->force_append is true (comment fixed; the original
                * said force_prepend here, a copy-paste slip) */
            /* skip the low-end compare: a forced APPEND key is treated
             * as not smaller than the left-most key of this page */
            cmp_lo = +1;
        }

        /* handle inserts in the middle range */
        if (cmp_lo >= 0 && cmp_hi <= 0) {
            /*
             * Depending on where we are in the btree, the current key either
             * is going to end up in the middle of the given node/page,
             * OR the given key is out of range of the given leaf node.
             */
            if (hints->force_append || hints->force_prepend) {
                /*
                 * when prepend or append is FORCED, we are expected to
                 * add keys ONLY at the beginning or end of the btree
                 * key range. Clearly the current key does not fit that
                 * criterium.
                 */
                page_release_ref(page);
                //hints->flags &= ~HAM_HINT_PREPEND;
                hints->force_append = HAM_FALSE;
                hints->force_prepend = HAM_FALSE;
                return (__insert_cursor(be, key, record, cursor, hints));
            }

            /*
             * we discovered that the key must be inserted in the middle
             * of the current leaf.
             *
             * It does not matter whether the current leaf is at the start or
             * end of the btree range; as we need to add the key in the middle
             * of the current leaf, that info alone is enough to continue with
             * the fast track insert operation.
             */
            ham_assert(!hints->force_prepend && !hints->force_append, (0));
        }

        ham_assert((hints->force_prepend + hints->force_append) < 2,
                ("Either APPEND or PREPEND flag MAY be set, but not both"));
    }
    else { /* empty page: force insertion in slot 0 */
        hints->force_append = HAM_FALSE;
        hints->force_prepend = HAM_TRUE;
    }

    /*
     * the page will be changed - write it to the log (if a log exists)
     */
    st=ham_log_add_page_before(page);
    if (st) {
        page_release_ref(page);
        return (st);
    }

    /*
     * OK - we're really appending/prepending the new key.
     */
    ham_assert(hints->force_append || hints->force_prepend, (0));
    st=__insert_nosplit(page, key, 0, record, cursor, hints);

    page_release_ref(page);

    return (st);
}