/*
 * BT_BROOT -- Fix up the btree root page after it has been split.
 *
 * Parameters:
 *	t:	tree
 *	h:	root page
 *	l:	left page
 *	r:	right page
 *
 * Returns:
 *	RET_ERROR, RET_SUCCESS
 */
static int
bt_broot(BTREE *t, PAGE *h, PAGE *l, PAGE *r)
{
	BINTERNAL *bi;
	BLEAF *bl;
	u_int32_t nbytes;
	char *dest;

	/*
	 * If the root page was a leaf page, change it into an internal page.
	 * We copy the key we split on (but not the key's data, in the case of
	 * a leaf page) to the new root page.
	 *
	 * The btree comparison code guarantees that the left-most key on any
	 * level of the tree is never used, so it doesn't need to be filled in.
	 */
	nbytes = NBINTERNAL(0);
	h->linp[0] = h->upper = t->bt_psize - nbytes;
	dest = (char *)h + h->upper;
	WR_BINTERNAL(dest, 0, l->pgno, 0);

	switch (h->flags & P_TYPE) {
	case P_BLEAF:
		bl = GETBLEAF(r, 0);
		nbytes = NBINTERNAL(bl->ksize);
		__PAST_END(h->linp, 1) = h->upper -= nbytes;
		dest = (char *)h + h->upper;
		WR_BINTERNAL(dest, bl->ksize, r->pgno, 0);
		memmove(dest, bl->bytes, bl->ksize);

		/*
		 * If the key is on an overflow page, mark the overflow chain
		 * so it isn't deleted when the leaf copy of the key is deleted.
		 */
		if (bl->flags & P_BIGKEY &&
		    bt_preserve(t, *(pgno_t *)bl->bytes) == RET_ERROR)
			return (RET_ERROR);
		break;
	case P_BINTERNAL:
		bi = GETBINTERNAL(r, 0);
		nbytes = NBINTERNAL(bi->ksize);
		__PAST_END(h->linp, 1) = h->upper -= nbytes;
		dest = (char *)h + h->upper;
		memmove(dest, bi, nbytes);
		((BINTERNAL *)dest)->pgno = r->pgno;
		break;
	default:
		abort();
	}

	/* There are two keys on the page. */
	h->lower = BTDATAOFF + 2 * sizeof(indx_t);

	/* Unpin the root page, set to btree internal page. */
	h->flags &= ~P_TYPE;
	h->flags |= P_BINTERNAL;
	mpool_put(t->bt_mp, h, MPOOL_DIRTY);

	return (RET_SUCCESS);
}
/*
 * __BT_CMP -- Compare a key to a given record.
 *
 * Parameters:
 *	t:	tree
 *	k1:	DBT pointer of first arg to comparison
 *	e:	pointer to EPG for comparison
 *
 * Returns:
 *	< 0 if k1 is < record
 *	= 0 if k1 is = record
 *	> 0 if k1 is > record
 */
int
__bt_cmp(BTREE *t, const DBT *k1, EPG *e)
{
	BINTERNAL *bi;
	BLEAF *bl;
	DBT k2;
	PAGE *h;
	void *bigkey;

	/*
	 * The left-most key on internal pages, at any level of the tree, is
	 * guaranteed by the following code to be less than any user key.
	 * This saves us from having to update the leftmost key on an internal
	 * page when the user inserts a new key in the tree smaller than
	 * anything we've yet seen.
	 */
	h = e->page;
	if (e->index == 0 && h->prevpg == P_INVALID && !(h->flags & P_BLEAF))
		return (1);

	bigkey = NULL;
	if (h->flags & P_BLEAF) {
		bl = GETBLEAF(h, e->index);
		if (bl->flags & P_BIGKEY)
			bigkey = bl->bytes;
		else {
			k2.data = bl->bytes;
			k2.size = bl->ksize;
		}
	} else {
		bi = GETBINTERNAL(h, e->index);
		if (bi->flags & P_BIGKEY)
			bigkey = bi->bytes;
		else {
			k2.data = bi->bytes;
			k2.size = bi->ksize;
		}
	}

	if (bigkey) {
		if (__ovfl_get(t, bigkey,
		    &k2.size, &t->bt_rdata.data, &t->bt_rdata.size))
			return (RET_ERROR);
		k2.data = t->bt_rdata.data;
	}
	return ((*t->bt_cmp)(k1, &k2));
}
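/*
 * Illustrative sketch (not part of the original source): a user-supplied
 * comparison callback compatible with the (*t->bt_cmp)(k1, &k2) call made
 * above.  It assumes only the DBT fields already used in this file (data,
 * size) and the standard memcmp(), and orders keys as unsigned byte strings
 * with the shorter key sorting first on a tie.  The function name is
 * hypothetical.
 */
static int
example_bytecmp(const DBT *a, const DBT *b)
{
	size_t len;
	int r;

	/* Compare the common prefix first. */
	len = a->size < b->size ? a->size : b->size;
	if (len > 0 && (r = memcmp(a->data, b->data, len)) != 0)
		return (r);
	/* Equal prefixes: the shorter key is the smaller one. */
	return (a->size < b->size ? -1 : a->size > b->size ? 1 : 0);
}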
/*
 * __bt_dleaf --
 *	Delete a single record from a leaf page.
 *
 * Parameters:
 *	t:	tree
 *	key:	referenced key
 *	h:	page
 *	idx:	index on page to delete
 *
 * Returns:
 *	RET_SUCCESS, RET_ERROR.
 */
int
__bt_dleaf(BTREE *t, const DBT *key, PAGE *h, u_int idx)
{
	BLEAF *bl;
	indx_t cnt, *ip, offset;
	u_int32_t nbytes;
	void *to;
	char *from;

	/* If this record is referenced by the cursor, delete the cursor. */
	if (F_ISSET(&t->bt_cursor, CURS_INIT) &&
	    !F_ISSET(&t->bt_cursor, CURS_ACQUIRE) &&
	    t->bt_cursor.pg.pgno == h->pgno && t->bt_cursor.pg.index == idx &&
	    __bt_curdel(t, key, h, idx))
		return (RET_ERROR);

	/* If the entry uses overflow pages, make them available for reuse. */
	to = bl = GETBLEAF(h, idx);
	if (bl->flags & P_BIGKEY && __ovfl_delete(t, bl->bytes) == RET_ERROR)
		return (RET_ERROR);
	if (bl->flags & P_BIGDATA &&
	    __ovfl_delete(t, bl->bytes + bl->ksize) == RET_ERROR)
		return (RET_ERROR);

	/* Pack the remaining key/data items at the end of the page. */
	nbytes = NBLEAF(bl);
	from = (char *)h + h->upper;
	memmove(from + nbytes, from, (char *)to - from);
	h->upper += nbytes;

	/* Adjust the indices' offsets, shift the indices down. */
	offset = h->linp[idx];
	for (cnt = idx, ip = &h->linp[0]; cnt--; ++ip)
		if (ip[0] < offset)
			ip[0] += nbytes;
	for (cnt = NEXTINDEX(h) - idx; --cnt; ++ip)
		ip[0] = ip[1] < offset ? ip[1] + nbytes : ip[1];
	h->lower -= sizeof(indx_t);

	/* If the cursor is on this page, adjust it as necessary. */
	if (F_ISSET(&t->bt_cursor, CURS_INIT) &&
	    !F_ISSET(&t->bt_cursor, CURS_ACQUIRE) &&
	    t->bt_cursor.pg.pgno == h->pgno && t->bt_cursor.pg.index > idx)
		--t->bt_cursor.pg.index;

	return (RET_SUCCESS);
}
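/*
 * Illustrative sketch (not part of the original source): the slotted-page
 * compaction performed by __bt_dleaf above, restated on a hypothetical,
 * simplified page so the pointer arithmetic is easier to follow.  Entries are
 * packed downward from the end of "buf", "upper" is the offset of the
 * lowest-addressed entry, and "linp"/"len" track each entry's offset and
 * length.  All names here are made up for the example.
 */
struct toy_page {
	char	buf[256];	/* page image; entries packed at the end */
	u_int	upper;		/* offset of the lowest-addressed entry */
	u_int	nent;		/* number of live entries */
	u_int	linp[16];	/* per-entry offsets into buf */
	u_int	len[16];	/* per-entry lengths */
};

static void
toy_delete(struct toy_page *p, u_int idx)
{
	u_int i, nbytes = p->len[idx], offset = p->linp[idx];

	/* Slide the packed region [upper, offset) up over the deleted entry. */
	memmove(p->buf + p->upper + nbytes, p->buf + p->upper,
	    offset - p->upper);
	p->upper += nbytes;

	/* Every entry stored below the deleted one moved up by nbytes. */
	for (i = 0; i < p->nent; i++)
		if (p->linp[i] < offset)
			p->linp[i] += nbytes;

	/* Shift the index array down over the vacated slot. */
	for (i = idx; i + 1 < p->nent; i++) {
		p->linp[i] = p->linp[i + 1];
		p->len[i] = p->len[i + 1];
	}
	p->nent--;
}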
/*
 * __bt_ret --
 *	Build return key/data pair.
 *
 * Parameters:
 *	t:	tree
 *	e:	key/data pair to be returned
 *	key:	user's key structure (NULL if not to be filled in)
 *	rkey:	memory area to hold key
 *	data:	user's data structure (NULL if not to be filled in)
 *	rdata:	memory area to hold data
 *	copy:	always copy the key/data item
 *
 * Returns:
 *	RET_SUCCESS, RET_ERROR.
 */
int
__bt_ret(BTREE *t, EPG *e, DBT *key, DBT *rkey, DBT *data, DBT *rdata,
    int copy)
{
	BLEAF *bl;
	void *p;

	bl = GETBLEAF(e->page, e->index);

	/*
	 * We must copy big keys/data to make them contiguous.  Otherwise,
	 * leave the page pinned and don't copy unless the user specified
	 * concurrent access.
	 */
	if (key == NULL)
		goto dataonly;

	if (bl->flags & P_BIGKEY) {
		if (__ovfl_get(t, bl->bytes,
		    &key->size, &rkey->data, &rkey->size))
			return (RET_ERROR);
		key->data = rkey->data;
	} else if (copy || F_ISSET(t, B_DB_LOCK)) {
		if (bl->ksize > rkey->size) {
			p = (void *)(rkey->data == NULL ?
			    malloc(bl->ksize) :
			    realloc(rkey->data, bl->ksize));
			if (p == NULL)
				return (RET_ERROR);
			rkey->data = p;
			rkey->size = bl->ksize;
		}
		memmove(rkey->data, bl->bytes, bl->ksize);
		key->size = bl->ksize;
		key->data = rkey->data;
	} else {
		key->size = bl->ksize;
		key->data = bl->bytes;
	}

dataonly:
	if (data == NULL)
		return (RET_SUCCESS);

	if (bl->flags & P_BIGDATA) {
		if (__ovfl_get(t, bl->bytes + bl->ksize,
		    &data->size, &rdata->data, &rdata->size))
			return (RET_ERROR);
		data->data = rdata->data;
	} else if (copy || F_ISSET(t, B_DB_LOCK)) {
		/* Use +1 in case the first record retrieved is 0 length. */
		if (bl->dsize + 1 > rdata->size) {
			p = (void *)(rdata->data == NULL ?
			    malloc(bl->dsize + 1) :
			    realloc(rdata->data, bl->dsize + 1));
			if (p == NULL)
				return (RET_ERROR);
			rdata->data = p;
			rdata->size = bl->dsize + 1;
		}
		memmove(rdata->data, bl->bytes + bl->ksize, bl->dsize);
		data->size = bl->dsize;
		data->data = rdata->data;
	} else {
		data->size = bl->dsize;
		data->data = bl->bytes + bl->ksize;
	}

	return (RET_SUCCESS);
}
/*
 * __BT_PGIN, __BT_PGOUT --
 *	Convert host-specific number layout to/from the host-independent
 *	format stored on disk.
 *
 * Parameters:
 *	t:	tree
 *	pg:	page number
 *	h:	page to convert
 */
void
__bt_pgin(void *t, pgno_t pg, void *pp)
{
	PAGE *h;
	indx_t i, top;
	u_char flags;
	char *p;

	if (!F_ISSET(((BTREE *)t), B_NEEDSWAP))
		return;
	if (pg == P_META) {
		mswap(pp);
		return;
	}

	h = pp;
	M_32_SWAP(h->pgno);
	M_32_SWAP(h->prevpg);
	M_32_SWAP(h->nextpg);
	M_32_SWAP(h->flags);
	M_16_SWAP(h->lower);
	M_16_SWAP(h->upper);

	top = NEXTINDEX(h);
	if ((h->flags & P_TYPE) == P_BINTERNAL)
		for (i = 0; i < top; i++) {
			M_16_SWAP(h->linp[i]);
			p = (char *)GETBINTERNAL(h, i);
			P_32_SWAP(p);
			p += sizeof(u_int32_t);
			P_32_SWAP(p);
			p += sizeof(pgno_t);
			if (*(u_char *)p & P_BIGKEY) {
				p += sizeof(u_char);
				P_32_SWAP(p);
				p += sizeof(pgno_t);
				P_32_SWAP(p);
			}
		}
	else if ((h->flags & P_TYPE) == P_BLEAF)
		for (i = 0; i < top; i++) {
			M_16_SWAP(h->linp[i]);
			p = (char *)GETBLEAF(h, i);
			P_32_SWAP(p);
			p += sizeof(u_int32_t);
			P_32_SWAP(p);
			p += sizeof(u_int32_t);
			flags = *(u_char *)p;
			if (flags & (P_BIGKEY | P_BIGDATA)) {
				p += sizeof(u_char);
				if (flags & P_BIGKEY) {
					P_32_SWAP(p);
					p += sizeof(pgno_t);
					P_32_SWAP(p);
				}
				if (flags & P_BIGDATA) {
					p += sizeof(u_int32_t);
					P_32_SWAP(p);
					p += sizeof(pgno_t);
					P_32_SWAP(p);
				}
			}
		}
}
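/*
 * Illustrative sketch (not part of the original source): what the
 * M_32_SWAP/P_32_SWAP macros used above accomplish, written out as a plain
 * in-place 32-bit byte reversal.  A byte pointer is used so alignment does
 * not matter, which is the same reason the pointer variants exist.  The
 * function name is hypothetical.
 */
static void
example_swap32(void *vp)
{
	u_char *p = vp, tmp;

	/* Reverse the four bytes of a 32-bit field in place. */
	tmp = p[0]; p[0] = p[3]; p[3] = tmp;
	tmp = p[1]; p[1] = p[2]; p[2] = tmp;
}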
/*
 * __BT_SPLIT -- Split the tree.
 *
 * Parameters:
 *	t:	tree
 *	sp:	page to split
 *	key:	key to insert
 *	data:	data to insert
 *	flags:	BIGKEY/BIGDATA flags
 *	ilen:	insert length
 *	argskip: index to leave open
 *
 * Returns:
 *	RET_ERROR, RET_SUCCESS
 */
int
__bt_split(BTREE *t, PAGE *sp, const DBT *key, const DBT *data, int flags,
    size_t ilen, u_int32_t argskip)
{
	BINTERNAL *bi;
	BLEAF *bl, *tbl;
	DBT a, b;
	EPGNO *parent;
	PAGE *h, *l, *r, *lchild, *rchild;
	indx_t nxtindex;
	u_int16_t skip;
	u_int32_t n, nbytes, nksize;
	int parentsplit;
	char *dest;

	/*
	 * Split the page into two pages, l and r.  The split routines return
	 * a pointer to the page into which the key should be inserted and with
	 * skip set to the offset which should be used.  Additionally, l and r
	 * are pinned.
	 */
	skip = argskip;
	h = sp->pgno == P_ROOT ?
	    bt_root(t, sp, &l, &r, &skip, ilen) :
	    bt_page(t, sp, &l, &r, &skip, ilen);
	if (h == NULL)
		return (RET_ERROR);

	/*
	 * Insert the new key/data pair into the leaf page.  (Key inserts
	 * always cause a leaf page to split first.)
	 */
	h->linp[skip] = h->upper -= ilen;
	dest = (char *)h + h->upper;
	if (F_ISSET(t, R_RECNO))
		WR_RLEAF(dest, data, flags)
	else
		WR_BLEAF(dest, key, data, flags)

	/* If the root page was split, make it look right. */
	if (sp->pgno == P_ROOT &&
	    (F_ISSET(t, R_RECNO) ?
	    bt_rroot(t, sp, l, r) : bt_broot(t, sp, l, r)) == RET_ERROR)
		goto err2;

	/*
	 * Now we walk the parent page stack -- a LIFO stack of the pages that
	 * were traversed when we searched for the page that split.  Each stack
	 * entry is a page number and a page index offset.  The offset is for
	 * the page traversed on the search.  We've just split a page, so we
	 * have to insert a new key into the parent page.
	 *
	 * If the insert into the parent page causes it to split, we may have
	 * to continue splitting all the way up the tree.  We stop if the root
	 * splits or the page inserted into didn't have to split to hold the
	 * new key.  Some algorithms replace the key for the old page as well
	 * as the new page.  We don't, as there's no reason to believe that the
	 * first key on the old page is any better than the key we have, and,
	 * in the case of a key being placed at index 0 causing the split, the
	 * key is unavailable.
	 *
	 * There are a maximum of 5 pages pinned at any time.  We keep the left
	 * and right pages pinned while working on the parent.  The 5 are the
	 * two children, left parent and right parent (when the parent splits)
	 * and the root page or the overflow key page when calling bt_preserve.
	 * This code must make sure that all pins are released other than the
	 * root page or overflow page which is unlocked elsewhere.
	 */
	while ((parent = BT_POP(t)) != NULL) {
		lchild = l;
		rchild = r;

		/* Get the parent page. */
		if ((h = mpool_get(t->bt_mp, parent->pgno, 0)) == NULL)
			goto err2;

		/*
		 * The new key goes ONE AFTER the index, because the split
		 * was to the right.
		 */
		skip = parent->index + 1;

		/*
		 * Calculate the space needed on the parent page.
		 *
		 * Prefix trees: space hack when inserting into BINTERNAL
		 * pages.  Retain only what's needed to distinguish between
		 * the new entry and the LAST entry on the page to its left.
		 * If the keys compare equal, retain the entire key.  Note,
		 * we don't touch overflow keys, and the entire key must be
		 * retained for the next-to-left most key on the leftmost
		 * page of each level, or the search will fail.  Applicable
		 * ONLY to internal pages that have leaf pages as children.
		 * Further reduction of the key between pairs of internal
		 * pages loses too much information.
		 */
		switch (rchild->flags & P_TYPE) {
		case P_BINTERNAL:
			bi = GETBINTERNAL(rchild, 0);
			nbytes = NBINTERNAL(bi->ksize);
			break;
		case P_BLEAF:
			bl = GETBLEAF(rchild, 0);
			nbytes = NBINTERNAL(bl->ksize);
			if (t->bt_pfx && !(bl->flags & P_BIGKEY) &&
			    (h->prevpg != P_INVALID || skip > 1)) {
				tbl = GETBLEAF(lchild, NEXTINDEX(lchild) - 1);
				a.size = tbl->ksize;
				a.data = tbl->bytes;
				b.size = bl->ksize;
				b.data = bl->bytes;
				nksize = t->bt_pfx(&a, &b);
				n = NBINTERNAL(nksize);
				if (n < nbytes) {
#ifdef STATISTICS
					bt_pfxsaved += nbytes - n;
#endif
					nbytes = n;
				} else
					nksize = 0;
			} else
				nksize = 0;
			break;
		case P_RINTERNAL:
		case P_RLEAF:
			nbytes = NRINTERNAL;
			break;
		default:
			abort();
		}

		/* Split the parent page if necessary or shift the indices. */
		if ((u_int32_t)(h->upper - h->lower) <
		    nbytes + sizeof(indx_t)) {
			sp = h;
			h = h->pgno == P_ROOT ?
			    bt_root(t, h, &l, &r, &skip, nbytes) :
			    bt_page(t, h, &l, &r, &skip, nbytes);
			if (h == NULL)
				goto err1;
			parentsplit = 1;
		} else {
			if (skip < (nxtindex = NEXTINDEX(h)))
				memmove(h->linp + skip + 1, h->linp + skip,
				    (nxtindex - skip) * sizeof(indx_t));
			h->lower += sizeof(indx_t);
			parentsplit = 0;
		}

		/* Insert the key into the parent page. */
		switch (rchild->flags & P_TYPE) {
		case P_BINTERNAL:
			h->linp[skip] = h->upper -= nbytes;
			dest = (char *)h + h->linp[skip];
			memmove(dest, bi, nbytes);
			((BINTERNAL *)dest)->pgno = rchild->pgno;
			break;
		case P_BLEAF:
			h->linp[skip] = h->upper -= nbytes;
			dest = (char *)h + h->linp[skip];
			WR_BINTERNAL(dest, nksize ? nksize : bl->ksize,
			    rchild->pgno, bl->flags & P_BIGKEY);
			memmove(dest, bl->bytes, nksize ? nksize : bl->ksize);
			if (bl->flags & P_BIGKEY) {
				pgno_t pgno;

				memcpy(&pgno, bl->bytes, sizeof(pgno));
				if (bt_preserve(t, pgno) == RET_ERROR)
					goto err1;
			}
			break;
		case P_RINTERNAL:
			/*
			 * Update the left page count.  If split
			 * added at index 0, fix the correct page.
			 */
			if (skip > 0)
				dest = (char *)h + h->linp[skip - 1];
			else
				dest = (char *)l + l->linp[NEXTINDEX(l) - 1];
			((RINTERNAL *)dest)->nrecs = rec_total(lchild);
			((RINTERNAL *)dest)->pgno = lchild->pgno;

			/* Update the right page count. */
			h->linp[skip] = h->upper -= nbytes;
			dest = (char *)h + h->linp[skip];
			((RINTERNAL *)dest)->nrecs = rec_total(rchild);
			((RINTERNAL *)dest)->pgno = rchild->pgno;
			break;
		case P_RLEAF:
			/*
			 * Update the left page count.  If split
			 * added at index 0, fix the correct page.
			 */
			if (skip > 0)
				dest = (char *)h + h->linp[skip - 1];
			else
				dest = (char *)l + l->linp[NEXTINDEX(l) - 1];
			((RINTERNAL *)dest)->nrecs = NEXTINDEX(lchild);
			((RINTERNAL *)dest)->pgno = lchild->pgno;

			/* Update the right page count. */
			h->linp[skip] = h->upper -= nbytes;
			dest = (char *)h + h->linp[skip];
			((RINTERNAL *)dest)->nrecs = NEXTINDEX(rchild);
			((RINTERNAL *)dest)->pgno = rchild->pgno;
			break;
		default:
			abort();
		}

		/* Unpin the held pages. */
		if (!parentsplit) {
			mpool_put(t->bt_mp, h, MPOOL_DIRTY);
			break;
		}

		/* If the root page was split, make it look right. */
		if (sp->pgno == P_ROOT &&
		    (F_ISSET(t, R_RECNO) ?
		    bt_rroot(t, sp, l, r) : bt_broot(t, sp, l, r)) == RET_ERROR)
			goto err1;

		mpool_put(t->bt_mp, lchild, MPOOL_DIRTY);
		mpool_put(t->bt_mp, rchild, MPOOL_DIRTY);
	}

	/* Unpin the held pages. */
	mpool_put(t->bt_mp, l, MPOOL_DIRTY);
	mpool_put(t->bt_mp, r, MPOOL_DIRTY);

	/* Clear any pages left on the stack. */
	return (RET_SUCCESS);

	/*
	 * If something fails in the above loop we were already walking back
	 * up the tree and the tree is now inconsistent.  Nothing much we can
	 * do about it but release any memory we're holding.
	 */
err1:	mpool_put(t->bt_mp, lchild, MPOOL_DIRTY);
	mpool_put(t->bt_mp, rchild, MPOOL_DIRTY);

err2:	mpool_put(t->bt_mp, l, 0);
	mpool_put(t->bt_mp, r, 0);
	__dbpanic(t->bt_dbp);
	return (RET_ERROR);
}
/*
 * BT_PSPLIT -- Do the real work of splitting the page.
 *
 * Parameters:
 *	t:	tree
 *	h:	page to be split
 *	l:	page to put lower half of data
 *	r:	page to put upper half of data
 *	pskip:	pointer to index to leave open
 *	ilen:	insert length
 *
 * Returns:
 *	Pointer to page in which to insert.
 */
static PAGE *
bt_psplit(BTREE *t, PAGE *h, PAGE *l, PAGE *r, indx_t *pskip, size_t ilen)
{
	BINTERNAL *bi;
	BLEAF *bl;
	CURSOR *c;
	RLEAF *rl;
	PAGE *rval;
	void *src;
	indx_t full, half, nxt, off, skip, top, used;
	u_int32_t nbytes;
	int bigkeycnt, isbigkey;

	/*
	 * Split the data to the left and right pages.  Leave the skip index
	 * open.  Additionally, make some effort not to split on an overflow
	 * key.  This makes internal page processing faster and can save
	 * space as overflow keys used by internal pages are never deleted.
	 */
	bigkeycnt = 0;
	skip = *pskip;
	full = t->bt_psize - BTDATAOFF;
	half = full / 2;
	used = 0;
	for (nxt = off = 0, top = NEXTINDEX(h); nxt < top; ++off) {
		if (skip == off) {
			nbytes = ilen;
			isbigkey = 0;		/* XXX: not really known. */
		} else
			switch (h->flags & P_TYPE) {
			case P_BINTERNAL:
				src = bi = GETBINTERNAL(h, nxt);
				nbytes = NBINTERNAL(bi->ksize);
				isbigkey = bi->flags & P_BIGKEY;
				break;
			case P_BLEAF:
				src = bl = GETBLEAF(h, nxt);
				nbytes = NBLEAF(bl);
				isbigkey = bl->flags & P_BIGKEY;
				break;
			case P_RINTERNAL:
				src = GETRINTERNAL(h, nxt);
				nbytes = NRINTERNAL;
				isbigkey = 0;
				break;
			case P_RLEAF:
				src = rl = GETRLEAF(h, nxt);
				nbytes = NRLEAF(rl);
				isbigkey = 0;
				break;
			default:
				abort();
			}

		/*
		 * If the key/data pairs are substantial fractions of the max
		 * possible size for the page, it's possible to get situations
		 * where we decide to try and copy too much onto the left page.
		 * Make sure that doesn't happen.
		 */
		if ((skip <= off && used + nbytes + sizeof(indx_t) >= full) ||
		    nxt == top - 1) {
			--off;
			break;
		}

		/* Copy the key/data pair, if not the skipped index. */
		if (skip != off) {
			++nxt;
			l->linp[off] = l->upper -= nbytes;
			memmove((char *)l + l->upper, src, nbytes);
		}

		used += nbytes + sizeof(indx_t);
		if (used >= half) {
			if (!isbigkey || bigkeycnt == 3)
				break;
			else
				++bigkeycnt;
		}
	}

	/*
	 * Off is the last offset that's valid for the left page.
	 * Nxt is the first offset to be placed on the right page.
	 */
	l->lower += (off + 1) * sizeof(indx_t);

	/*
	 * If splitting the page that the cursor was on, the cursor has to be
	 * adjusted to point to the same record as before the split.  If the
	 * cursor is at or past the skipped slot, the cursor is incremented by
	 * one.  If the cursor is on the right page, it is decremented by the
	 * number of records split to the left page.
	 */
	c = &t->bt_cursor;
	if (F_ISSET(c, CURS_INIT) && c->pg.pgno == h->pgno) {
		if (c->pg.index >= skip)
			++c->pg.index;
		if (c->pg.index < nxt)			/* Left page. */
			c->pg.pgno = l->pgno;
		else {					/* Right page. */
			c->pg.pgno = r->pgno;
			c->pg.index -= nxt;
		}
	}

	/*
	 * If the skipped index was on the left page, just return that page.
	 * Otherwise, adjust the skip index to reflect the new position on
	 * the right page.
	 */
	if (skip <= off) {
		skip = MAX_PAGE_OFFSET;
		rval = l;
	} else {
		rval = r;
		*pskip -= nxt;
	}

	for (off = 0; nxt < top; ++off) {
		if (skip == nxt) {
			++off;
			skip = MAX_PAGE_OFFSET;
		}
		switch (h->flags & P_TYPE) {
		case P_BINTERNAL:
			src = bi = GETBINTERNAL(h, nxt);
			nbytes = NBINTERNAL(bi->ksize);
			break;
		case P_BLEAF:
			src = bl = GETBLEAF(h, nxt);
			nbytes = NBLEAF(bl);
			break;
		case P_RINTERNAL:
			src = GETRINTERNAL(h, nxt);
			nbytes = NRINTERNAL;
			break;
		case P_RLEAF:
			src = rl = GETRLEAF(h, nxt);
			nbytes = NRLEAF(rl);
			break;
		default:
			abort();
		}
		++nxt;
		r->linp[off] = r->upper -= nbytes;
		memmove((char *)r + r->upper, src, nbytes);
	}
	r->lower += off * sizeof(indx_t);

	/* If the key is being appended to the page, adjust the index. */
	if (skip == top)
		r->lower += sizeof(indx_t);

	return (rval);
}
/*
 * BT_DPAGE -- Dump the page
 *
 * Parameters:
 *	h:	pointer to the PAGE
 */
void
__bt_dpage(PAGE *h)
{
	BINTERNAL *bi;
	BLEAF *bl;
	RINTERNAL *ri;
	RLEAF *rl;
	indx_t cur, top;
	char *sep;

	(void)fprintf(stderr, " page %u: (", h->pgno);
#undef X
#define	X(flag, name) \
	if (h->flags & flag) { \
		(void)fprintf(stderr, "%s%s", sep, name); \
		sep = ", "; \
	}
	sep = "";
	X(P_BINTERNAL, "BINTERNAL")		/* types */
	X(P_BLEAF, "BLEAF")
	X(P_RINTERNAL, "RINTERNAL")
	X(P_RLEAF, "RLEAF")
	X(P_OVERFLOW, "OVERFLOW")
	X(P_PRESERVE, "PRESERVE");
	(void)fprintf(stderr, ")\n");
#undef X

	(void)fprintf(stderr, "\tprev %2u next %2u", h->prevpg, h->nextpg);
	if (h->flags & P_OVERFLOW)
		return;

	top = NEXTINDEX(h);
	(void)fprintf(stderr, " lower %3d upper %3d nextind %d\n",
	    h->lower, h->upper, top);
	for (cur = 0; cur < top; cur++) {
		(void)fprintf(stderr, "\t[%03d] %4d ", cur, h->linp[cur]);
		switch (h->flags & P_TYPE) {
		case P_BINTERNAL:
			bi = GETBINTERNAL(h, cur);
			(void)fprintf(stderr,
			    "size %03d pgno %03d", bi->ksize, bi->pgno);
			if (bi->flags & P_BIGKEY)
				(void)fprintf(stderr, " (indirect)");
			else if (bi->ksize)
				(void)fprintf(stderr,
				    " {%.*s}", (int)bi->ksize, bi->bytes);
			break;
		case P_RINTERNAL:
			ri = GETRINTERNAL(h, cur);
			(void)fprintf(stderr, "entries %03d pgno %03d",
			    ri->nrecs, ri->pgno);
			break;
		case P_BLEAF:
			bl = GETBLEAF(h, cur);
			if (bl->flags & P_BIGKEY)
				(void)fprintf(stderr,
				    "big key page %u size %u/",
				    *(pgno_t *)bl->bytes,
				    *(u_int32_t *)(bl->bytes + sizeof(pgno_t)));
			else if (bl->ksize)
				(void)fprintf(stderr,
				    "%.*s/", (int)bl->ksize, bl->bytes);
			if (bl->flags & P_BIGDATA)
				(void)fprintf(stderr,
				    "big data page %u size %u",
				    *(pgno_t *)(bl->bytes + bl->ksize),
				    *(u_int32_t *)(bl->bytes + bl->ksize +
				    sizeof(pgno_t)));
			else if (bl->dsize)
				(void)fprintf(stderr, "%.*s",
				    (int)bl->dsize, bl->bytes + bl->ksize);
			break;
		case P_RLEAF:
			rl = GETRLEAF(h, cur);
			if (rl->flags & P_BIGDATA)
				(void)fprintf(stderr,
				    "big data page %u size %u",
				    *(pgno_t *)rl->bytes,
				    *(u_int32_t *)(rl->bytes + sizeof(pgno_t)));
			else if (rl->dsize)
				(void)fprintf(stderr,
				    "%.*s", (int)rl->dsize, rl->bytes);
			break;
		}
		(void)fprintf(stderr, "\n");
	}
}