/*
 * __clsm_close_bulk --
 *	WT_CURSOR->close method for LSM bulk cursors.
 */
static int
__clsm_close_bulk(WT_CURSOR *cursor)
{
	WT_CURSOR *src;
	WT_CURSOR_LSM *clsm_cursor;
	WT_LSM_TREE *tree;
	WT_SESSION_IMPL *session;

	clsm_cursor = (WT_CURSOR_LSM *)cursor;
	tree = clsm_cursor->lsm_tree;
	session = (WT_SESSION_IMPL *)clsm_cursor->iface.session;

	/* Close the bulk cursor to ensure the chunk is written to disk. */
	src = clsm_cursor->cursors[0];
	WT_RET(src->close(src));
	clsm_cursor->cursors[0] = NULL;
	clsm_cursor->nchunks = 0;

	/*
	 * Mark the single bulk-loaded chunk as on disk and flush the tree
	 * metadata; bump the disk generation so other threads notice.
	 */
	F_SET(tree->chunk[0], WT_LSM_CHUNK_ONDISK);
	WT_RET(__wt_lsm_meta_write(session, tree));
	++tree->dsk_gen;

	/* Close the LSM cursor itself. */
	WT_RET(__wt_clsm_close(cursor));

	return (0);
}
/*
 * __clsm_close_bulk --
 *	WT_CURSOR->close method for LSM bulk cursors.
 */
static int
__clsm_close_bulk(WT_CURSOR *cursor)
{
	WT_CURSOR *src;
	WT_CURSOR_LSM *clsm_cursor;
	WT_LSM_CHUNK *bulk_chunk;
	WT_LSM_TREE *tree;
	WT_SESSION_IMPL *session;
	uint64_t fanout, remaining;

	clsm_cursor = (WT_CURSOR_LSM *)cursor;
	tree = clsm_cursor->lsm_tree;
	bulk_chunk = tree->chunk[0];
	session = (WT_SESSION_IMPL *)clsm_cursor->iface.session;

	/* Close the bulk cursor to ensure the chunk is written to disk. */
	src = clsm_cursor->cursors[0];
	WT_RET(src->close(src));
	clsm_cursor->cursors[0] = NULL;
	clsm_cursor->nchunks = 0;

	/* Mark the bulk-loaded chunk as on disk. */
	F_SET(bulk_chunk, WT_LSM_CHUNK_ONDISK);

	/*
	 * Give the chunk a generation based on how many chunk_size pieces fit
	 * into a chunk of a given generation, so future LSM merges choose
	 * reasonable sets of chunks.
	 *
	 * NOTE(review): assumes merge_min + merge_max >= 4 (i.e. the average
	 * fanout is at least 2); a fanout of 0 or 1 would divide by zero or
	 * loop forever — presumably guaranteed by tree configuration, verify.
	 */
	fanout = (tree->merge_min + tree->merge_max) / 2;
	remaining = bulk_chunk->size / tree->chunk_size;
	while (remaining > 1) {
		++bulk_chunk->generation;
		remaining /= fanout;
	}

	/* Flush the metadata and bump the disk generation. */
	WT_RET(__wt_lsm_meta_write(session, tree));
	++tree->dsk_gen;

	/* Close the LSM cursor itself. */
	WT_RET(__wt_clsm_close(cursor));

	return (0);
}