/*
 * __thread_group_grow --
 *	Increase the number of running threads in the group.
 */
static int
__thread_group_grow(
    WT_SESSION_IMPL *session, WT_THREAD_GROUP *group, uint32_t new_count)
{
	WT_THREAD *to_start;

	WT_ASSERT(session, __wt_rwlock_islocked(session, group->lock));

	/*
	 * The caller does all bounds checking, so every slot below new_count
	 * is known to hold an allocated thread structure.
	 */
	while (group->current_threads < new_count) {
		to_start = group->threads[group->current_threads];
		group->current_threads++;

		__wt_verbose(session, WT_VERB_THREAD_GROUP,
		    "Starting utility thread: %p:%" PRIu32,
		    (void *)group, to_start->id);

		/* Mark the thread runnable before creating it. */
		F_SET(to_start, WT_THREAD_RUN);
		WT_ASSERT(session, to_start->session != NULL);
		WT_RET(__wt_thread_create(to_start->session,
		    &to_start->tid, __wt_thread_run, to_start));
	}
	return (0);
}
/*
 * __wt_thread_group_destroy --
 *	Shut down a thread group.  Our caller must hold the lock.
 */
int
__wt_thread_group_destroy(WT_SESSION_IMPL *session, WT_THREAD_GROUP *group)
{
	WT_DECL_RET;

	__wt_verbose(session, WT_VERB_THREAD_GROUP,
	    "Destroying thread group: %p", (void *)group);

	WT_ASSERT(session, __wt_rwlock_islocked(session, group->lock));

	/*
	 * Shut down all threads and free associated resources: shrinking to
	 * zero stops, joins and frees every thread in the group.  Errors are
	 * accumulated so later cleanup still runs.
	 */
	WT_TRET(__thread_group_shrink(session, group, 0));
	__wt_free(session, group->threads);
	WT_TRET(__wt_cond_destroy(session, &group->wait_cond));
	__wt_rwlock_destroy(session, &group->lock);

	/*
	 * Clear out any settings from the group, some structures are reused
	 * for different thread groups - in particular the eviction thread
	 * group for recovery and then normal runtime.
	 */
	memset(group, 0, sizeof(*group));

	return (ret);
}
/*
 * __thread_group_shrink --
 *	Decrease the number of running threads in the group, and free any
 *	memory associated with slots larger than the new count.
 */
static int
__thread_group_shrink(WT_SESSION_IMPL *session,
    WT_THREAD_GROUP *group, uint32_t new_count)
{
	WT_DECL_RET;
	WT_SESSION *wt_session;
	WT_THREAD *t;
	uint32_t slot;

	WT_ASSERT(session, __wt_rwlock_islocked(session, group->lock));

	/*
	 * Walk the allocated slots from the top down to the new count.  The
	 * slot value is a count, not an array index, so it is decremented
	 * before each access.
	 */
	slot = group->alloc;
	while (slot > new_count) {
		t = group->threads[--slot];
		if (t == NULL)
			continue;

		if (t->tid != 0) {
			/*
			 * Clear the run flag, then wake the thread so it
			 * notices the state change before we join it.
			 */
			__wt_verbose(session, WT_VERB_THREAD_GROUP,
			    "Stopping utility thread: %p:%" PRIu32,
			    (void *)group, t->id);
			F_CLR(t, WT_THREAD_RUN);
			__wt_cond_signal(session, group->wait_cond);
			WT_TRET(__wt_thread_join(session, t->tid));
			t->tid = 0;
		}

		/* Release the thread's session, if it has one. */
		if (t->session != NULL) {
			wt_session = (WT_SESSION *)t->session;
			WT_TRET(wt_session->close(wt_session, NULL));
			t->session = NULL;
		}

		__wt_free(session, t);
		group->threads[slot] = NULL;
	}

	/* Update the thread group state to match our changes. */
	group->current_threads = slot;

	return (ret);
}
/*
 * __thread_group_resize --
 *	Resize an array of utility threads already holding the lock.
 */
static int
__thread_group_resize(
    WT_SESSION_IMPL *session, WT_THREAD_GROUP *group,
    uint32_t new_min, uint32_t new_max, uint32_t flags)
{
	WT_CONNECTION_IMPL *conn;
	WT_DECL_RET;
	WT_THREAD *thread;
	size_t alloc;
	uint32_t i, session_flags;

	conn = S2C(session);
	session_flags = 0;

	WT_ASSERT(session,
	    group->current_threads <= group->alloc &&
	    __wt_rwlock_islocked(session, group->lock));

	/* Nothing to do if the group bounds are unchanged. */
	if (new_min == group->min && new_max == group->max)
		return (0);

	/*
	 * Call shrink to reduce the number of thread structures and running
	 * threads if required by the change in group size.
	 */
	WT_RET(__thread_group_shrink(session, group, new_max));

	/*
	 * Only reallocate the thread array if it is the largest ever, since
	 * our realloc doesn't support shrinking the allocated size.
	 */
	if (group->alloc < new_max) {
		alloc = group->alloc * sizeof(*group->threads);
		WT_RET(__wt_realloc(session, &alloc,
		    new_max * sizeof(*group->threads), &group->threads));
		group->alloc = new_max;
	}

	/*
	 * Initialize the structures based on the previous group size, not
	 * the previous allocated size.
	 */
	for (i = group->max; i < new_max; i++) {
		WT_ERR(__wt_calloc_one(session, &thread));
		/*
		 * Threads get their own session and lookaside table cursor
		 * if the lookaside table is open. Note that threads are
		 * started during recovery, before the lookaside table is
		 * created.
		 *
		 * NOTE(review): if a WT_ERR below fires after this calloc
		 * but before the thread is stored into group->threads[i],
		 * the structure appears to leak -- the error path destroys
		 * the group via the array only.  The error is treated as
		 * fatal (panic) below, so this may be deliberate; confirm.
		 */
		if (LF_ISSET(WT_THREAD_CAN_WAIT))
			session_flags = WT_SESSION_CAN_WAIT;
		if (F_ISSET(conn, WT_CONN_LAS_OPEN))
			FLD_SET(session_flags, WT_SESSION_LOOKASIDE_CURSOR);
		WT_ERR(__wt_open_internal_session(conn, group->name,
		    false, session_flags, &thread->session));
		if (LF_ISSET(WT_THREAD_PANIC_FAIL))
			F_SET(thread, WT_THREAD_PANIC_FAIL);
		thread->id = i;
		thread->run_func = group->run_func;
		WT_ASSERT(session, group->threads[i] == NULL);
		group->threads[i] = thread;
	}

	/* Start any additional threads needed to reach the new minimum. */
	if (group->current_threads < new_min)
		WT_ERR(__thread_group_grow(session, group, new_min));

err:	/*
	 * Update the thread group information even on failure to improve our
	 * chances of cleaning up properly.
	 */
	group->max = new_max;
	group->min = new_min;

	/*
	 * An error resizing a thread array is fatal, it should only happen
	 * in an out of memory situation.
	 */
	if (ret != 0) {
		WT_TRET(__wt_thread_group_destroy(session, group));
		WT_PANIC_RET(session, ret, "Error while resizing thread group");
	}
	return (ret);
}
/*
 * __wt_page_out --
 *	Discard an in-memory page, freeing all memory associated with it.
 */
void
__wt_page_out(WT_SESSION_IMPL *session, WT_PAGE **pagep)
{
	WT_PAGE *page;
	WT_PAGE_HEADER *dsk;
	WT_PAGE_MODIFY *mod;

	/*
	 * Kill our caller's reference, do our best to catch races.
	 */
	page = *pagep;
	*pagep = NULL;

	/* A dead handle's pages may still be dirty; clear that state first. */
	if (F_ISSET(session->dhandle, WT_DHANDLE_DEAD))
		__wt_page_modify_clear(session, page);

	/*
	 * We should never discard:
	 * - a dirty page,
	 * - a page queued for eviction, or
	 * - a locked page.
	 */
	WT_ASSERT(session, !__wt_page_is_modified(page));
	WT_ASSERT(session, !F_ISSET_ATOMIC(page, WT_PAGE_EVICT_LRU));
	WT_ASSERT(session, !__wt_rwlock_islocked(session, &page->page_lock));

	/*
	 * If a root page split, there may be one or more pages linked from
	 * the page; walk the list, discarding pages.  The recursive call
	 * handles each linked page in turn.
	 */
	switch (page->type) {
	case WT_PAGE_COL_INT:
	case WT_PAGE_ROW_INT:
		mod = page->modify;
		if (mod != NULL && mod->mod_root_split != NULL)
			__wt_page_out(session, &mod->mod_root_split);
		break;
	}

	/* Update the cache's information. */
	__wt_cache_page_evict(session, page);

	dsk = (WT_PAGE_HEADER *)page->dsk;
	if (F_ISSET_ATOMIC(page, WT_PAGE_DISK_ALLOC))
		__wt_cache_page_image_decr(session, dsk->mem_size);

	/* Discard any mapped image. */
	if (F_ISSET_ATOMIC(page, WT_PAGE_DISK_MAPPED))
		(void)S2BT(session)->bm->map_discard(
		    S2BT(session)->bm, session, dsk, (size_t)dsk->mem_size);

	/*
	 * If discarding the page as part of process exit, the application may
	 * configure to leak the memory rather than do the work.
	 */
	if (F_ISSET(S2C(session), WT_CONN_LEAK_MEMORY))
		return;

	/* Free the page modification information. */
	if (page->modify != NULL)
		__free_page_modify(session, page);

	/* Free the per-type page structures (fixed-len column needs none). */
	switch (page->type) {
	case WT_PAGE_COL_FIX:
		break;
	case WT_PAGE_COL_INT:
	case WT_PAGE_ROW_INT:
		__free_page_int(session, page);
		break;
	case WT_PAGE_COL_VAR:
		__free_page_col_var(session, page);
		break;
	case WT_PAGE_ROW_LEAF:
		__free_page_row_leaf(session, page);
		break;
	}

	/*
	 * Discard any allocated disk image, then the page itself.  The page
	 * is overwritten before being freed to catch use-after-free.
	 */
	if (F_ISSET_ATOMIC(page, WT_PAGE_DISK_ALLOC))
		__wt_overwrite_and_free_len(session, dsk, dsk->mem_size);
	__wt_overwrite_and_free(session, page);
}