/*
 * __wt_las_stats_update --
 *     Update the lookaside table statistics for return to the application.
 */
void
__wt_las_stats_update(WT_SESSION_IMPL *session)
{
    WT_CONNECTION_IMPL *conn;
    WT_CONNECTION_STATS **conn_stats;
    WT_DSRC_STATS **las_stats;

    conn = S2C(session);

    /*
     * The lookaside statistics are mirrored from the data-source
     * statistics of the underlying lookaside table; if that table was
     * never opened, leave the values at zero.
     */
    if (!F_ISSET(conn, WT_CONN_LAS_OPEN))
        return;

    /*
     * Reach the data handle through the lookaside cursor's underlying
     * btree handle; it's a little ugly, but it works.
     */
    conn_stats = conn->stats;
    las_stats = ((WT_CURSOR_BTREE *)
        conn->las_session->las_cursor)->btree->dhandle->stats;

    WT_STAT_SET(session, conn_stats,
        cache_lookaside_insert, WT_STAT_READ(las_stats, cursor_insert));
    WT_STAT_SET(session, conn_stats,
        cache_lookaside_remove, WT_STAT_READ(las_stats, cursor_remove));
}
/*
 * __stat_tree_walk --
 *     Gather btree statistics that require traversing the tree.
 */
static int
__stat_tree_walk(WT_SESSION_IMPL *session)
{
    WT_BTREE *btree;
    WT_DECL_RET;
    WT_DSRC_STATS **dstats;
    WT_REF *ref;

    btree = S2BT(session);
    dstats = btree->dhandle->stats;

    /* Reset the counters the walk is about to recompute. */
    WT_STAT_SET(session, dstats, btree_column_deleted, 0);
    WT_STAT_SET(session, dstats, btree_column_fix, 0);
    WT_STAT_SET(session, dstats, btree_column_internal, 0);
    WT_STAT_SET(session, dstats, btree_column_rle, 0);
    WT_STAT_SET(session, dstats, btree_column_variable, 0);
    WT_STAT_SET(session, dstats, btree_entries, 0);
    WT_STAT_SET(session, dstats, btree_overflow, 0);
    WT_STAT_SET(session, dstats, btree_row_internal, 0);
    WT_STAT_SET(session, dstats, btree_row_leaf, 0);

    /* Walk every in-memory page, accumulating per-page statistics. */
    ref = NULL;
    while ((ret = __wt_tree_walk(session, &ref, 0)) == 0 && ref != NULL) {
        WT_WITH_PAGE_INDEX(session,
            ret = __stat_page(session, ref->page, dstats));
        WT_RET(ret);
    }
    /* Running off the end of the tree is the expected termination. */
    return (ret == WT_NOTFOUND ? 0 : ret);
}
/*
 * __wt_btree_stat_init --
 *     Initialize the Btree statistics.
 */
int
__wt_btree_stat_init(WT_SESSION_IMPL *session, WT_CURSOR_STAT *cst)
{
    WT_BM *bm;
    WT_BTREE *btree;
    WT_DSRC_STATS **dstats;

    btree = S2BT(session);
    bm = btree->bm;
    dstats = btree->dhandle->stats;

    /* Let the block manager fill in its statistics first. */
    WT_RET(bm->stat(bm, session, dstats[0]));

    /* Copy the btree's configuration and tracking values. */
    WT_STAT_SET(session, dstats, btree_fixed_len, btree->bitcnt);
    WT_STAT_SET(session, dstats, btree_maximum_depth, btree->maximum_depth);
    WT_STAT_SET(session, dstats, btree_maxintlkey, btree->maxintlkey);
    WT_STAT_SET(session, dstats, btree_maxintlpage, btree->maxintlpage);
    WT_STAT_SET(session, dstats, btree_maxleafkey, btree->maxleafkey);
    WT_STAT_SET(session, dstats, btree_maxleafpage, btree->maxleafpage);
    WT_STAT_SET(session, dstats, btree_maxleafvalue, btree->maxleafvalue);
    WT_STAT_SET(session, dstats,
        cache_bytes_inuse, __wt_btree_bytes_inuse(session));

    /* Optionally gather the more expensive walk-based statistics. */
    if (F_ISSET(cst, WT_STAT_TYPE_CACHE_WALK))
        __wt_curstat_cache_walk(session);
    if (F_ISSET(cst, WT_STAT_TYPE_TREE_WALK))
        WT_RET(__stat_tree_walk(session));

    return (0);
}
/*
 * __wt_txn_stats_update --
 *     Update the transaction statistics for return to the application.
 */
void
__wt_txn_stats_update(WT_SESSION_IMPL *session)
{
    WT_CONNECTION_IMPL *conn;
    WT_CONNECTION_STATS **cstats;
    WT_TXN_GLOBAL *txn_global;
    uint64_t ckpt_pinned, snap_pinned;

    conn = S2C(session);
    txn_global = &conn->txn_global;
    cstats = conn->stats;
    ckpt_pinned = txn_global->checkpoint_pinned;
    snap_pinned = txn_global->nsnap_oldest_id;

    WT_STAT_SET(session, cstats, txn_pinned_range,
        txn_global->current - txn_global->oldest_id);
    /* A pinned ID of WT_TXN_NONE means nothing is pinned. */
    WT_STAT_SET(session, cstats, txn_pinned_snapshot_range,
        snap_pinned == WT_TXN_NONE ?
        0 : txn_global->current - snap_pinned);
    WT_STAT_SET(session, cstats, txn_pinned_checkpoint_range,
        ckpt_pinned == WT_TXN_NONE ?
        0 : txn_global->current - ckpt_pinned);

    /* Checkpoint timing statistics. */
    WT_STAT_SET(session,
        cstats, txn_checkpoint_time_max, conn->ckpt_time_max);
    WT_STAT_SET(session,
        cstats, txn_checkpoint_time_min, conn->ckpt_time_min);
    WT_STAT_SET(session,
        cstats, txn_checkpoint_time_recent, conn->ckpt_time_recent);
    WT_STAT_SET(session,
        cstats, txn_checkpoint_time_total, conn->ckpt_time_total);
}
/*
 * __wt_las_stats_update --
 *     Update the lookaside table statistics for return to the application.
 */
void
__wt_las_stats_update(WT_SESSION_IMPL *session)
{
    WT_CONNECTION_IMPL *conn;
    WT_CONNECTION_STATS **conn_stats;
    WT_DSRC_STATS **las_stats;
    int64_t insert_cnt, remove_cnt;

    conn = S2C(session);

    /*
     * The lookaside statistics mirror the data-source statistics of the
     * underlying lookaside table; if there's no lookaside table, the
     * values stay zero.
     */
    if (!F_ISSET(conn, WT_CONN_LAS_OPEN))
        return;

    /*
     * Get to the underlying data handle by way of the lookaside cursor's
     * btree handle; it's a little ugly, but it works.
     */
    conn_stats = conn->stats;
    las_stats = ((WT_CURSOR_BTREE *)
        conn->las_session->las_cursor)->btree->dhandle->stats;

    insert_cnt = WT_STAT_READ(las_stats, cursor_insert);
    WT_STAT_SET(session, conn_stats, cache_lookaside_insert, insert_cnt);
    remove_cnt = WT_STAT_READ(las_stats, cursor_remove);
    WT_STAT_SET(session, conn_stats, cache_lookaside_remove, remove_cnt);

    /*
     * When clearing statistics, reset the cursor values we just read.
     * That leaves the rest of the lookaside data-source statistics
     * untouched, but we own that namespace, so users can't see
     * inconsistent data-source information.
     */
    if (FLD_ISSET(conn->stat_flags, WT_CONN_STAT_CLEAR)) {
        WT_STAT_SET(session, las_stats, cursor_insert, 0);
        WT_STAT_SET(session, las_stats, cursor_remove, 0);
    }
}
/*
 * __wt_txn_stats_update --
 *     Update the transaction statistics for return to the application.
 */
void
__wt_txn_stats_update(WT_SESSION_IMPL *session)
{
    WT_CONNECTION_IMPL *conn;
    WT_CONNECTION_STATS *stats;
    WT_TXN_GLOBAL *txn_global;
    uint64_t pinned;

    conn = S2C(session);
    txn_global = &conn->txn_global;
    stats = &conn->stats;
    pinned = txn_global->checkpoint_pinned;

    WT_STAT_SET(stats, txn_pinned_range,
        txn_global->current - txn_global->oldest_id);
    /* WT_TXN_NONE means no checkpoint is pinning a transaction ID. */
    WT_STAT_SET(stats, txn_pinned_checkpoint_range,
        pinned == WT_TXN_NONE ? 0 : txn_global->current - pinned);
}
/*
 * __wt_btree_stat_init --
 *     Initialize the Btree statistics.
 */
int
__wt_btree_stat_init(WT_SESSION_IMPL *session, uint32_t flags)
{
    WT_BM *bm;
    WT_BTREE *btree;
    WT_DECL_RET;
    WT_DSRC_STATS *stats;
    WT_PAGE *next;

    btree = S2BT(session);
    bm = btree->bm;
    stats = &btree->dhandle->stats;

    /* Block-manager statistics first, then btree configuration values. */
    WT_RET(bm->stat(bm, session, stats));

    WT_STAT_SET(stats, btree_fixed_len, btree->bitcnt);
    WT_STAT_SET(stats, btree_maximum_depth, btree->maximum_depth);
    WT_STAT_SET(stats, btree_maxintlitem, btree->maxintlitem);
    WT_STAT_SET(stats, btree_maxintlpage, btree->maxintlpage);
    WT_STAT_SET(stats, btree_maxleafitem, btree->maxleafitem);
    WT_STAT_SET(stats, btree_maxleafpage, btree->maxleafpage);

    /* The tree walk is expensive; skip it for fast statistics. */
    if (LF_ISSET(WT_STATISTICS_FAST))
        return (0);

    next = NULL;
    while ((ret = __wt_tree_walk(session, &next, 0)) == 0 && next != NULL)
        WT_RET(__stat_page(session, next, stats));
    /* Walking off the end of the tree is the normal termination. */
    return (ret == WT_NOTFOUND ? 0 : ret);
}
/*
 * __wt_cache_stats_update --
 *     Update the cache statistics for return to the application.
 */
void
__wt_cache_stats_update(WT_SESSION_IMPL *session)
{
    WT_CACHE *cache;
    WT_CONNECTION_IMPL *conn;
    WT_CONNECTION_STATS **cstats;
    uint64_t bytes_inuse, bytes_leaf, bytes_nonleaf;

    conn = S2C(session);
    cache = conn->cache;
    cstats = conn->stats;

    bytes_inuse = __wt_cache_bytes_inuse(cache);

    /*
     * The individual cache tracking counters are updated racily, so be
     * paranoid when deriving the leaf byte count: never underflow.
     */
    bytes_nonleaf = cache->bytes_overflow + cache->bytes_internal;
    bytes_leaf =
        bytes_inuse > bytes_nonleaf ? bytes_inuse - bytes_nonleaf : 0;

    WT_STAT_SET(session, cstats, cache_bytes_max, conn->cache_size);
    WT_STAT_SET(session, cstats, cache_bytes_inuse, bytes_inuse);
    WT_STAT_SET(session, cstats, cache_overhead, cache->overhead_pct);
    WT_STAT_SET(session,
        cstats, cache_pages_inuse, __wt_cache_pages_inuse(cache));
    WT_STAT_SET(session,
        cstats, cache_bytes_dirty, __wt_cache_dirty_inuse(cache));
    WT_STAT_SET(session, cstats,
        cache_eviction_maximum_page_size, cache->evict_max_page_size);
    WT_STAT_SET(session, cstats, cache_pages_dirty, cache->pages_dirty);
    WT_STAT_SET(session,
        cstats, cache_bytes_internal, cache->bytes_internal);
    WT_STAT_SET(session,
        cstats, cache_bytes_overflow, cache->bytes_overflow);
    WT_STAT_SET(session, cstats, cache_bytes_leaf, bytes_leaf);
}
/*
 * __wt_cache_stats_update --
 *     Update the cache statistics for return to the application.
 */
void
__wt_cache_stats_update(WT_SESSION_IMPL *session)
{
    WT_CACHE *cache;
    WT_CONNECTION_IMPL *conn;
    WT_CONNECTION_STATS *stats;
    uint64_t inuse, used;

    conn = S2C(session);
    cache = conn->cache;
    stats = &conn->stats;

    inuse = __wt_cache_bytes_inuse(cache);

    WT_STAT_SET(stats, cache_bytes_max, conn->cache_size);
    WT_STAT_SET(stats, cache_bytes_inuse, inuse);
    WT_STAT_SET(stats, cache_overhead, cache->overhead_pct);
    WT_STAT_SET(stats, cache_pages_inuse, __wt_cache_pages_inuse(cache));
    WT_STAT_SET(stats, cache_bytes_dirty, __wt_cache_dirty_inuse(cache));
    WT_STAT_SET(stats,
        cache_eviction_maximum_page_size, cache->evict_max_page_size);
    WT_STAT_SET(stats, cache_pages_dirty, cache->pages_dirty);

    /*
     * Figure out internal, leaf and overflow stats.
     *
     * Leaf bytes are derived from the bytes actually in use, not the
     * configured cache maximum: subtracting from conn->cache_size both
     * reported the wrong quantity and could wrap to a huge unsigned
     * value when internal + overflow bytes exceeded the configured
     * size. The tracking counters are updated racily, so guard against
     * underflow as well.
     */
    used = cache->bytes_internal + cache->bytes_overflow;
    WT_STAT_SET(stats, cache_bytes_internal, cache->bytes_internal);
    WT_STAT_SET(stats, cache_bytes_leaf, inuse > used ? inuse - used : 0);
    WT_STAT_SET(stats, cache_bytes_overflow, cache->bytes_overflow);
}
/*
 * __wt_block_stat --
 *     Block statistics
 */
void
__wt_block_stat(WT_SESSION_IMPL *session, WT_BLOCK *block, WT_DSRC_STATS *stats)
{
    /*
     * Reading the live system's structure normally requires locking;
     * the odds of a torn read are close to nil and it's only statistics
     * information anyway, but this isn't a hot path for applications,
     * so take the lock and be safe.
     */
    __wt_spin_lock(session, &block->live_lock);
    WT_STAT_SET(stats, block_magic, WT_BLOCK_MAGIC);
    WT_STAT_SET(stats, block_major, WT_BLOCK_MAJOR_VERSION);
    WT_STAT_SET(stats, block_minor, WT_BLOCK_MINOR_VERSION);
    WT_STAT_SET(stats, allocation_size, block->allocsize);
    WT_STAT_SET(stats, block_checkpoint_size, block->live.ckpt_size);
    WT_STAT_SET(stats, block_reuse_bytes, block->live.avail.bytes);
    WT_STAT_SET(stats, block_size, block->fh->size);
    __wt_spin_unlock(session, &block->live_lock);
}
/*
 * __wt_cache_stats_update --
 *     Update the cache statistics for return to the application.
 */
void
__wt_cache_stats_update(WT_SESSION_IMPL *session)
{
    WT_CACHE *cache;
    WT_CONNECTION_IMPL *conn;
    WT_CONNECTION_STATS *cstats;

    conn = S2C(session);
    cache = conn->cache;
    cstats = &conn->stats;

    /* Copy the current cache tracking values into the statistics. */
    WT_STAT_SET(cstats, cache_bytes_max, conn->cache_size);
    WT_STAT_SET(cstats, cache_bytes_inuse, __wt_cache_bytes_inuse(cache));
    WT_STAT_SET(cstats, cache_bytes_dirty, __wt_cache_dirty_inuse(cache));
    WT_STAT_SET(cstats, cache_pages_inuse, __wt_cache_pages_inuse(cache));
    WT_STAT_SET(cstats, cache_pages_dirty, cache->pages_dirty);
    WT_STAT_SET(cstats, cache_overhead, cache->overhead_pct);
    WT_STAT_SET(cstats,
        cache_eviction_maximum_page_size, cache->evict_max_page_size);
}
/*
 * __wt_btree_stat_init --
 *     Initialize the Btree statistics.
 */
int
__wt_btree_stat_init(WT_SESSION_IMPL *session, WT_CURSOR_STAT *cst)
{
    WT_BM *bm;
    WT_BTREE *btree;
    WT_DECL_RET;
    WT_DSRC_STATS **dstats;
    WT_REF *ref;

    btree = S2BT(session);
    bm = btree->bm;
    dstats = btree->dhandle->stats;

    /* The block manager fills in its statistics first. */
    WT_RET(bm->stat(bm, session, dstats[0]));

    WT_STAT_SET(session, dstats, btree_fixed_len, btree->bitcnt);
    WT_STAT_SET(session, dstats, btree_maximum_depth, btree->maximum_depth);
    WT_STAT_SET(session, dstats, btree_maxintlkey, btree->maxintlkey);
    WT_STAT_SET(session, dstats, btree_maxintlpage, btree->maxintlpage);
    WT_STAT_SET(session, dstats, btree_maxleafkey, btree->maxleafkey);
    WT_STAT_SET(session, dstats, btree_maxleafpage, btree->maxleafpage);
    WT_STAT_SET(session, dstats, btree_maxleafvalue, btree->maxleafvalue);

    /* Everything else is really, really expensive. */
    if (!F_ISSET(cst, WT_CONN_STAT_ALL))
        return (0);

    /* Reset the counters the tree walk recomputes. */
    WT_STAT_SET(session, dstats, btree_column_deleted, 0);
    WT_STAT_SET(session, dstats, btree_column_fix, 0);
    WT_STAT_SET(session, dstats, btree_column_internal, 0);
    WT_STAT_SET(session, dstats, btree_column_rle, 0);
    WT_STAT_SET(session, dstats, btree_column_variable, 0);
    WT_STAT_SET(session, dstats, btree_entries, 0);
    WT_STAT_SET(session, dstats, btree_overflow, 0);
    WT_STAT_SET(session, dstats, btree_row_internal, 0);
    WT_STAT_SET(session, dstats, btree_row_leaf, 0);

    /* Walk every in-memory page, accumulating per-page statistics. */
    ref = NULL;
    while ((ret = __wt_tree_walk(session, &ref, 0)) == 0 && ref != NULL) {
        WT_WITH_PAGE_INDEX(session,
            ret = __stat_page(session, ref->page, dstats));
        WT_RET(ret);
    }
    return (ret == WT_NOTFOUND ? 0 : ret);
}
/*
 * __wt_txn_stats_update --
 *     Update the transaction statistics for return to the application.
 */
void
__wt_txn_stats_update(WT_SESSION_IMPL *session)
{
    WT_CONNECTION_IMPL *conn;
    WT_CONNECTION_STATS **cstats;
    WT_TXN_GLOBAL *txn_global;
    uint64_t ckpt_pinned, snap_pinned;

    conn = S2C(session);
    txn_global = &conn->txn_global;
    cstats = conn->stats;
    ckpt_pinned = txn_global->checkpoint_state.pinned_id;
    snap_pinned = txn_global->nsnap_oldest_id;

    WT_STAT_SET(session, cstats, txn_pinned_range,
        txn_global->current - txn_global->oldest_id);
#if WT_TIMESTAMP_SIZE == 8
    /* Timestamp ranges are only available for 8-byte timestamps. */
    WT_STAT_SET(session, cstats, txn_pinned_timestamp,
        txn_global->commit_timestamp.val -
        txn_global->pinned_timestamp.val);
    WT_STAT_SET(session, cstats, txn_pinned_timestamp_oldest,
        txn_global->commit_timestamp.val -
        txn_global->oldest_timestamp.val);
#endif
    /* A pinned ID of WT_TXN_NONE means nothing is pinned. */
    WT_STAT_SET(session, cstats, txn_pinned_snapshot_range,
        snap_pinned == WT_TXN_NONE ?
        0 : txn_global->current - snap_pinned);
    WT_STAT_SET(session, cstats, txn_pinned_checkpoint_range,
        ckpt_pinned == WT_TXN_NONE ?
        0 : txn_global->current - ckpt_pinned);

    /* Checkpoint timing statistics. */
    WT_STAT_SET(session,
        cstats, txn_checkpoint_time_max, conn->ckpt_time_max);
    WT_STAT_SET(session,
        cstats, txn_checkpoint_time_min, conn->ckpt_time_min);
    WT_STAT_SET(session,
        cstats, txn_checkpoint_time_recent, conn->ckpt_time_recent);
    WT_STAT_SET(session,
        cstats, txn_checkpoint_time_total, conn->ckpt_time_total);

    /* Timestamp queue lengths. */
    WT_STAT_SET(session, cstats,
        txn_commit_queue_len, txn_global->commit_timestampq_len);
    WT_STAT_SET(session, cstats,
        txn_read_queue_len, txn_global->read_timestampq_len);
}
/*
 * __wt_lsm_stat_init --
 *	Initialize a LSM statistics structure.
 *
 * Fills in the statistics for an LSM tree by starting from the tree's
 * cached statistics and then accumulating per-chunk statistics (and
 * per-bloom-filter cache statistics) through statistics cursors opened on
 * each chunk. The "flags" parameter is currently unused.
 */
int
__wt_lsm_stat_init(WT_SESSION_IMPL *session,
    WT_LSM_TREE *lsm_tree, WT_CURSOR_STAT *cst, uint32_t flags)
{
    WT_CURSOR *stat_cursor;
    WT_DECL_ITEM(uribuf);
    WT_DECL_RET;
    WT_DSRC_STATS *stats;
    WT_LSM_CHUNK *chunk;
    const char *cfg[] = API_CONF_DEFAULTS(
        session, open_cursor, "statistics_fast=on");
    const char *disk_cfg[] = API_CONF_DEFAULTS(session,
        open_cursor, "checkpoint=WiredTigerCheckpoint,statistics_fast=on");
    const char *desc, *pvalue;
    uint64_t value;
    u_int i;
    int locked, stat_key;

    WT_UNUSED(flags);
    locked = 0;
    /* Scratch buffer used to format per-chunk "statistics:" URIs. */
    WT_ERR(__wt_scr_alloc(session, 0, &uribuf));

    /* Clear the statistics we are about to recalculate. */
    if (cst->stats != NULL)
        stats = (WT_DSRC_STATS *)cst->stats;
    else {
        /* First call: allocate and initialize the statistics block. */
        WT_ERR(__wt_calloc_def(session, 1, &stats));
        __wt_stat_init_dsrc_stats(stats);
        cst->stats_first = cst->stats = (WT_STATS *)stats;
        cst->stats_count = sizeof(*stats) / sizeof(WT_STATS);
    }
    /* Start from the tree-level statistics (struct copy). */
    *stats = lsm_tree->stats;

    if (LF_ISSET(WT_STATISTICS_CLEAR))
        __wt_stat_clear_dsrc_stats(&lsm_tree->stats);

    /* Hold the LSM lock so that we can safely walk through the chunks. */
    WT_ERR(__wt_readlock(session, lsm_tree->rwlock));
    locked = 1;

    /* Set the stats for this run. */
    WT_STAT_SET(stats, lsm_chunk_count, lsm_tree->nchunks);
    for (i = 0; i < lsm_tree->nchunks; i++) {
        chunk = lsm_tree->chunk[i];
        /* Track the deepest chunk generation seen. */
        if (chunk->generation >
            (uint32_t)WT_STAT(stats, lsm_generation_max))
            WT_STAT_SET(stats,
                lsm_generation_max, chunk->generation);

        /*
         * LSM chunk reads happen from a checkpoint, so get the
         * statistics for a checkpoint if one exists.
         */
        WT_ERR(__wt_buf_fmt(
            session, uribuf, "statistics:%s", chunk->uri));
        ret = __wt_curstat_open(session, uribuf->data,
            F_ISSET(chunk, WT_LSM_CHUNK_ONDISK) ? disk_cfg : cfg,
            &stat_cursor);
        /*
         * XXX kludge: we may have an empty chunk where no checkpoint
         * was written. If so, try to open the ordinary handle on that
         * chunk instead.
         */
        if (ret == WT_NOTFOUND && F_ISSET(chunk, WT_LSM_CHUNK_ONDISK))
            ret = __wt_curstat_open(
                session, uribuf->data, cfg, &stat_cursor);
        WT_ERR(ret);

        /* Fold every statistic from the chunk into our block. */
        while ((ret = stat_cursor->next(stat_cursor)) == 0) {
            WT_ERR(stat_cursor->get_key(stat_cursor, &stat_key));
            WT_ERR(stat_cursor->get_value(
                stat_cursor, &desc, &pvalue, &value));
            WT_STAT_INCRKV(stats, stat_key, value);
        }
        WT_ERR_NOTFOUND_OK(ret);
        WT_ERR(stat_cursor->close(stat_cursor));

        if (!F_ISSET(chunk, WT_LSM_CHUNK_BLOOM))
            continue;
        /* Account for the chunk's bloom filter. */
        WT_STAT_INCR(stats, bloom_count);
        WT_STAT_INCRV(stats, bloom_size,
            (chunk->count * lsm_tree->bloom_bit_count) / 8);

        /*
         * Look up the bloom filter's cache activity: evictions count
         * toward bloom_page_evict, reads toward bloom_page_read.
         */
        WT_ERR(__wt_buf_fmt(
            session, uribuf, "statistics:%s", chunk->bloom_uri));
        WT_ERR(__wt_curstat_open(session, uribuf->data, cfg, &stat_cursor));

        stat_cursor->set_key(
            stat_cursor, WT_STAT_DSRC_CACHE_EVICTION_CLEAN);
        WT_ERR(stat_cursor->search(stat_cursor));
        WT_ERR(stat_cursor->get_value(
            stat_cursor, &desc, &pvalue, &value));
        WT_STAT_INCRV(stats, cache_eviction_clean, value);
        WT_STAT_INCRV(stats, bloom_page_evict, value);

        stat_cursor->set_key(
            stat_cursor, WT_STAT_DSRC_CACHE_EVICTION_DIRTY);
        WT_ERR(stat_cursor->search(stat_cursor));
        WT_ERR(stat_cursor->get_value(
            stat_cursor, &desc, &pvalue, &value));
        WT_STAT_INCRV(stats, cache_eviction_dirty, value);
        WT_STAT_INCRV(stats, bloom_page_evict, value);

        stat_cursor->set_key(
            stat_cursor, WT_STAT_DSRC_CACHE_EVICTION_FAIL);
        WT_ERR(stat_cursor->search(stat_cursor));
        WT_ERR(stat_cursor->get_value(
            stat_cursor, &desc, &pvalue, &value));
        WT_STAT_INCRV(stats, cache_eviction_fail, value);

        stat_cursor->set_key(stat_cursor, WT_STAT_DSRC_CACHE_READ);
        WT_ERR(stat_cursor->search(stat_cursor));
        WT_ERR(stat_cursor->get_value(
            stat_cursor, &desc, &pvalue, &value));
        WT_STAT_INCRV(stats, cache_read, value);
        WT_STAT_INCRV(stats, bloom_page_read, value);

        stat_cursor->set_key(stat_cursor, WT_STAT_DSRC_CACHE_WRITE);
        WT_ERR(stat_cursor->search(stat_cursor));
        WT_ERR(stat_cursor->get_value(
            stat_cursor, &desc, &pvalue, &value));
        WT_STAT_INCRV(stats, cache_write, value);
        WT_ERR(stat_cursor->close(stat_cursor));
    }

    /* NOTE(review): a stat cursor open at the time of a WT_ERR jump is
     * not closed here — presumably acceptable for error paths; verify. */
err:	if (locked)
        WT_TRET(__wt_rwunlock(session, lsm_tree->rwlock));
    __wt_scr_free(&uribuf);
    return (ret);
}
/*
 * __wt_cache_stats_update --
 *     Update the cache statistics for return to the application.
 */
void
__wt_cache_stats_update(WT_SESSION_IMPL *session)
{
    WT_CACHE *cache;
    WT_CONNECTION_IMPL *conn;
    WT_CONNECTION_STATS **cstats;
    uint64_t bytes_inuse, bytes_leaf;

    conn = S2C(session);
    cache = conn->cache;
    cstats = conn->stats;

    bytes_inuse = __wt_cache_bytes_inuse(cache);

    /*
     * The cache tracking counters are updated racily; be paranoid when
     * deriving the leaf byte count and never underflow.
     */
    bytes_leaf = bytes_inuse > cache->bytes_internal ?
        bytes_inuse - cache->bytes_internal : 0;

    WT_STAT_SET(session, cstats, cache_bytes_max, conn->cache_size);
    WT_STAT_SET(session, cstats, cache_bytes_inuse, bytes_inuse);
    WT_STAT_SET(session, cstats, cache_overhead, cache->overhead_pct);
    WT_STAT_SET(session,
        cstats, cache_bytes_dirty, __wt_cache_dirty_inuse(cache));
    WT_STAT_SET(session,
        cstats, cache_bytes_image, __wt_cache_bytes_image(cache));
    WT_STAT_SET(session,
        cstats, cache_pages_inuse, __wt_cache_pages_inuse(cache));
    WT_STAT_SET(session,
        cstats, cache_bytes_internal, cache->bytes_internal);
    WT_STAT_SET(session, cstats, cache_bytes_leaf, bytes_leaf);
    WT_STAT_SET(session,
        cstats, cache_bytes_other, __wt_cache_bytes_other(cache));
    WT_STAT_SET(session, cstats,
        cache_eviction_maximum_page_size, cache->evict_max_page_size);
    WT_STAT_SET(session, cstats, cache_pages_dirty,
        cache->pages_dirty_intl + cache->pages_dirty_leaf);

    /*
     * The number of files with active walks ~= number of hazard pointers
     * in the walk session. Note: reading without locking.
     */
    if (conn->evict_server_running)
        WT_STAT_SET(session, cstats,
            cache_eviction_walks_active, cache->walk_session->nhazard);
}