/*
 * __wt_async_op_enqueue --
 *     Enqueue an operation onto the work queue.
 */
int
__wt_async_op_enqueue(WT_SESSION_IMPL *session, WT_ASYNC_OP_IMPL *op)
{
    WT_ASYNC *async;
    WT_CONNECTION_IMPL *conn;
    WT_DECL_RET;
    uint64_t cur_head, cur_tail, my_alloc, my_slot;
#ifdef HAVE_DIAGNOSTIC
    WT_ASYNC_OP_IMPL *my_op;
#endif

    conn = S2C(session);
    async = conn->async;

    /*
     * If an application re-uses a WT_ASYNC_OP, we end up here with an
     * invalid object.
     */
    if (op->state != WT_ASYNCOP_READY)
        WT_RET_MSG(session, EINVAL,
            "application error: WT_ASYNC_OP already in use");

    /*
     * Enqueue op at the tail of the work queue.  We get our slot in the
     * ring buffer to use.
     */
    my_alloc = WT_ATOMIC_ADD8(async->alloc_head, 1);
    my_slot = my_alloc % async->async_qsize;

    /*
     * Make sure we haven't wrapped around the queue.  If so, wait for the
     * tail to advance off this slot.
     */
    WT_ORDERED_READ(cur_tail, async->tail_slot);
    while (cur_tail == my_slot) {
        __wt_yield();
        WT_ORDERED_READ(cur_tail, async->tail_slot);
    }

#ifdef HAVE_DIAGNOSTIC
    WT_ORDERED_READ(my_op, async->async_queue[my_slot]);
    if (my_op != NULL)
        return (__wt_panic(session));
#endif
    WT_PUBLISH(async->async_queue[my_slot], op);
    op->state = WT_ASYNCOP_ENQUEUED;
    if (WT_ATOMIC_ADD4(async->cur_queue, 1) > async->max_queue)
        WT_PUBLISH(async->max_queue, async->cur_queue);

    /*
     * Multiple threads may be adding ops to the queue.  We need to wait
     * our turn to make our slot visible to workers.
     */
    WT_ORDERED_READ(cur_head, async->head);
    while (cur_head != (my_alloc - 1)) {
        __wt_yield();
        WT_ORDERED_READ(cur_head, async->head);
    }
    WT_PUBLISH(async->head, my_alloc);
    return (ret);
}
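/*
 * The enqueue above is a multi-producer ring buffer: each producer claims a
 * unique slot with an atomic increment, publishes its payload into that
 * slot, then waits its turn to advance the shared head so consumers observe
 * slots in allocation order.  Below is a minimal standalone sketch of the
 * same producer-side technique using C11 atomics instead of the WiredTiger
 * wrappers; struct ring, ring_enqueue() and QSIZE are hypothetical names,
 * not WiredTiger API, and the consumer side and initialization are omitted.
 */
#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>

#define QSIZE 64                        /* Hypothetical ring size. */

struct ring {
    _Atomic uint64_t alloc_head;        /* Next ticket to hand out. */
    _Atomic uint64_t head;              /* Last ticket made visible. */
    _Atomic uint64_t tail_slot;         /* Slot consumers take next. */
    void *_Atomic queue[QSIZE];         /* Payload slots. */
};

static void
ring_enqueue(struct ring *r, void *op)
{
    uint64_t my_alloc, my_slot;

    /* Claim a unique ticket/slot, as WT_ATOMIC_ADD8 does above. */
    my_alloc = atomic_fetch_add(&r->alloc_head, 1) + 1;
    my_slot = my_alloc % QSIZE;

    /* If the ring has wrapped onto an unconsumed slot, wait it out. */
    while (atomic_load(&r->tail_slot) == my_slot)
        sched_yield();

    /* Publish the payload; release pairs with the consumer's acquire. */
    atomic_store_explicit(&r->queue[my_slot], op, memory_order_release);

    /* Wait for our turn, then advance the shared head for consumers. */
    while (atomic_load(&r->head) != my_alloc - 1)
        sched_yield();
    atomic_store_explicit(&r->head, my_alloc, memory_order_release);
}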
/*
 * __lsm_tree_close --
 *     Close an LSM tree structure.
 */
static int
__lsm_tree_close(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
{
    WT_DECL_RET;
    WT_SESSION *wt_session;
    WT_SESSION_IMPL *s;
    uint32_t i;

    if (F_ISSET(lsm_tree, WT_LSM_TREE_WORKING)) {
        F_CLR(lsm_tree, WT_LSM_TREE_WORKING);
        if (F_ISSET(S2C(session), WT_CONN_LSM_MERGE))
            for (i = 0; i < lsm_tree->merge_threads; i++)
                WT_TRET(__wt_thread_join(
                    session, lsm_tree->worker_tids[i]));
        WT_TRET(__wt_thread_join(session, lsm_tree->ckpt_tid));
        if (FLD_ISSET(lsm_tree->bloom, WT_LSM_BLOOM_NEWEST))
            WT_TRET(__wt_thread_join(session, lsm_tree->bloom_tid));
    }

    /*
     * Close the worker thread sessions and free their hazard arrays
     * (necessary because we set WT_SESSION_INTERNAL to simplify shutdown
     * ordering).
     *
     * Do this in the main thread to avoid deadlocks.
     */
    for (i = 0; i < lsm_tree->merge_threads; i++) {
        if ((s = lsm_tree->worker_sessions[i]) == NULL)
            continue;
        lsm_tree->worker_sessions[i] = NULL;

        F_SET(s, F_ISSET(session, WT_SESSION_SCHEMA_LOCKED));
        wt_session = &s->iface;
        WT_TRET(wt_session->close(wt_session, NULL));

        /*
         * This is safe after the close because session handles are
         * not freed, but are managed by the connection.
         */
        __wt_free(NULL, s->hazard);
    }

    if (lsm_tree->bloom_session != NULL) {
        F_SET(lsm_tree->bloom_session,
            F_ISSET(session, WT_SESSION_SCHEMA_LOCKED));
        wt_session = &lsm_tree->bloom_session->iface;
        WT_TRET(wt_session->close(wt_session, NULL));

        /*
         * This is safe after the close because session handles are
         * not freed, but are managed by the connection.
         */
        __wt_free(NULL, lsm_tree->bloom_session->hazard);
    }

    if (lsm_tree->ckpt_session != NULL) {
        F_SET(lsm_tree->ckpt_session,
            F_ISSET(session, WT_SESSION_SCHEMA_LOCKED));
        wt_session = &lsm_tree->ckpt_session->iface;
        WT_TRET(wt_session->close(wt_session, NULL));

        /*
         * This is safe after the close because session handles are
         * not freed, but are managed by the connection.
         */
        __wt_free(NULL, lsm_tree->ckpt_session->hazard);
    }

    if (ret != 0) {
        __wt_err(session, ret, "shutdown error while cleaning up LSM");
        (void)__wt_panic(session);
    }

    return (ret);
}
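/*
 * The close above follows a strict shutdown ordering: join every service
 * thread first, then tear down each thread's session and per-thread state
 * (the hazard arrays) from the main thread, so no worker ever has to take a
 * lock during its own teardown.  A minimal sketch of that ordering with
 * plain pthreads, in place of the WiredTiger thread wrappers, follows;
 * struct worker, workers_close() and N_WORKERS are illustrative names, not
 * WiredTiger API.
 */
#include <pthread.h>
#include <stdlib.h>

#define N_WORKERS 4

struct worker {
    pthread_t tid;
    void *scratch;          /* Per-thread state, like the hazard array. */
};

static int
workers_close(struct worker *workers)
{
    int i, ret, tret;

    ret = 0;

    /* First, wait for every worker thread to exit. */
    for (i = 0; i < N_WORKERS; i++)
        if ((tret = pthread_join(workers[i].tid, NULL)) != 0 && ret == 0)
            ret = tret;

    /*
     * Then free per-thread state from this thread: safe because the
     * owners are gone, and deadlock-free because no worker is asked to
     * acquire anything while shutting down.
     */
    for (i = 0; i < N_WORKERS; i++) {
        free(workers[i].scratch);
        workers[i].scratch = NULL;
    }
    return (ret);
}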
/*
 * __lsm_tree_close --
 *     Close an LSM tree structure.
 */
static int
__lsm_tree_close(WT_SESSION_IMPL *session, WT_LSM_TREE *lsm_tree)
{
    WT_DECL_RET;
    WT_SESSION *wt_session;
    WT_SESSION_IMPL *s;
    uint32_t i;

    if (F_ISSET(lsm_tree, WT_LSM_TREE_WORKING)) {
        F_CLR(lsm_tree, WT_LSM_TREE_WORKING);

        /*
         * Signal all threads to wake them up, then wait for them to
         * exit.
         *
         * !!!
         * If we have the schema lock, have the LSM worker sessions
         * inherit the flag before we do anything.  The thread may
         * already be waiting for the schema lock, but the loop in the
         * WT_WITH_SCHEMA_LOCK macro takes care of that.
         */
        if (F_ISSET(S2C(session), WT_CONN_LSM_MERGE))
            for (i = 0; i < lsm_tree->merge_threads; i++) {
                if ((s = lsm_tree->worker_sessions[i]) == NULL)
                    continue;
                if (F_ISSET(session, WT_SESSION_SCHEMA_LOCKED))
                    s->skip_schema_lock = 1;
                WT_TRET(__wt_cond_signal(
                    session, lsm_tree->work_cond));
                WT_TRET(__wt_thread_join(
                    session, lsm_tree->worker_tids[i]));
            }
        if (F_ISSET(session, WT_SESSION_SCHEMA_LOCKED))
            lsm_tree->ckpt_session->skip_schema_lock = 1;
        WT_TRET(__wt_cond_signal(session, lsm_tree->work_cond));
        WT_TRET(__wt_thread_join(session, lsm_tree->ckpt_tid));
    }

    /*
     * Close the worker thread sessions.  Do this in the main thread to
     * avoid deadlocks.
     */
    for (i = 0; i < lsm_tree->merge_threads; i++) {
        if ((s = lsm_tree->worker_sessions[i]) == NULL)
            continue;
        lsm_tree->worker_sessions[i] = NULL;
        wt_session = &s->iface;
        WT_TRET(wt_session->close(wt_session, NULL));
    }

    if (lsm_tree->ckpt_session != NULL) {
        wt_session = &lsm_tree->ckpt_session->iface;
        WT_TRET(wt_session->close(wt_session, NULL));
    }

    if (ret != 0) {
        __wt_err(session, ret, "shutdown error while cleaning up LSM");
        (void)__wt_panic(session);
    }

    return (ret);
}
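/*
 * This variant of the close wakes each worker before joining it, so a
 * thread blocked on the work condition variable can observe the cleared
 * WT_LSM_TREE_WORKING flag and exit rather than sleeping forever.  A
 * minimal sketch of that signal-then-join handshake with pthreads follows;
 * struct pool, stop_pool() and the field names are hypothetical, not
 * WiredTiger API.
 */
#include <pthread.h>

struct pool {
    pthread_mutex_t lock;
    pthread_cond_t work_cond;
    int running;                /* Cleared to ask workers to exit. */
    pthread_t tids[4];
    int nthreads;
};

static int
stop_pool(struct pool *p)
{
    int i, ret, tret;

    ret = 0;

    /* Clear the run flag under the lock so a sleeping worker can't miss it. */
    pthread_mutex_lock(&p->lock);
    p->running = 0;
    pthread_mutex_unlock(&p->lock);

    for (i = 0; i < p->nthreads; i++) {
        /*
         * Wake all waiters before each join: broadcast is used rather
         * than a single signal so a wakeup can't be consumed by a
         * thread that is already on its way out.
         */
        pthread_cond_broadcast(&p->work_cond);
        if ((tret = pthread_join(p->tids[i], NULL)) != 0 && ret == 0)
            ret = tret;
    }
    return (ret);
}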