/*
 * __log_slot_close --
 *	Close out the slot the caller is using. The slot may already be
 *	closed or freed by another thread.
 *
 *	Returns 0 only when this thread actually closed the slot (the end
 *	LSN is then set and log->alloc_lsn advanced); WT_NOTFOUND when the
 *	slot was NULL or was already closed/processed by another thread;
 *	EBUSY for a forced close while a write is still in progress.
 *	*releasep is set to true only when the closed slot is also done
 *	and ready for the caller to release. Caller must hold the slot
 *	lock (asserted below).
 */
static int
__log_slot_close(
    WT_SESSION_IMPL *session, WT_LOGSLOT *slot, bool *releasep, bool forced)
{
	WT_CONNECTION_IMPL *conn;
	WT_LOG *log;
	int64_t end_offset, new_state, old_state;
#ifdef HAVE_DIAGNOSTIC
	uint64_t time_start, time_stop;
	int count;
#endif

	/* Default to "do not release"; only a done slot flips this. */
	*releasep = false;

	WT_ASSERT(session, F_ISSET(session, WT_SESSION_LOCKED_SLOT));
	conn = S2C(session);
	log = conn->log;
	if (slot == NULL)
		return (WT_NOTFOUND);
retry:
	/* Snapshot the packed slot state; the CAS below validates it. */
	old_state = slot->slot_state;
	/*
	 * If this close is coming from a forced close and a thread is in
	 * the middle of using the slot, return EBUSY. The caller can
	 * decide if retrying is necessary or not.
	 */
	if (forced && WT_LOG_SLOT_INPROGRESS(old_state))
		return (__wt_set_return(session, EBUSY));
	/*
	 * If someone else is switching out this slot we lost. Nothing to
	 * do but return. Return WT_NOTFOUND anytime the given slot was
	 * processed by another closing thread. Only return 0 when we
	 * actually closed the slot.
	 */
	if (WT_LOG_SLOT_CLOSED(old_state)) {
		WT_STAT_CONN_INCR(session, log_slot_close_race);
		return (WT_NOTFOUND);
	}
	/*
	 * If someone completely processed this slot, we're done.
	 * NOTE(review): this deliberately re-reads slot->slot_state rather
	 * than testing old_state, so it sees the most recent flags.
	 */
	if (FLD_LOG_SLOT_ISSET(
	    (uint64_t)slot->slot_state, WT_LOG_SLOT_RESERVED)) {
		WT_STAT_CONN_INCR(session, log_slot_close_race);
		return (WT_NOTFOUND);
	}
	new_state = (old_state | WT_LOG_SLOT_CLOSE);
	/*
	 * Close this slot. If we lose the race retry.
	 */
	if (!__wt_atomic_casiv64(&slot->slot_state, old_state, new_state))
		goto retry;
	/*
	 * We own the slot now. No one else can join.
	 * Set the end LSN.
	 */
	WT_STAT_CONN_INCR(session, log_slot_closes);
	if (WT_LOG_SLOT_DONE(new_state))
		*releasep = true;
	/* Start from the slot's start LSN; the offset is extended below. */
	slot->slot_end_lsn = slot->slot_start_lsn;
	/*
	 * A thread setting the unbuffered flag sets the unbuffered size after
	 * setting the flag. There could be a delay between a thread setting
	 * the flag, a thread closing the slot, and the original thread setting
	 * that value. If the state is unbuffered, wait for the unbuffered
	 * size to be set.
	 */
#ifdef HAVE_DIAGNOSTIC
	count = 0;
	time_start = __wt_clock(session);
#endif
	if (WT_LOG_SLOT_UNBUFFERED_ISSET(old_state)) {
		/* Spin until the joining thread publishes the size. */
		while (slot->slot_unbuffered == 0) {
			WT_STAT_CONN_INCR(session, log_slot_close_unbuf);
			__wt_yield();
#ifdef HAVE_DIAGNOSTIC
			/*
			 * Diagnostic builds: after roughly a million spins,
			 * check the clock; if we have waited more than ten
			 * seconds, dump the slots and abort — the publishing
			 * thread is presumed stuck.
			 */
			++count;
			if (count > WT_MILLION) {
				time_stop = __wt_clock(session);
				if (WT_CLOCKDIFF_SEC(
				    time_stop, time_start) > 10) {
					__wt_errx(session,
					    "SLOT_CLOSE: Slot %" PRIu32
					    " Timeout unbuffered, state 0x%"
					    PRIx64 " unbuffered %" PRId64,
					    (uint32_t)(slot -
					    &log->slot_pool[0]),
					    (uint64_t)slot->slot_state,
					    slot->slot_unbuffered);
					__log_slot_dump(session);
					__wt_abort(session);
				}
				count = 0;
			}
#endif
		}
	}

	/*
	 * The end offset is everything joined into the buffer plus any
	 * unbuffered (direct) write size.
	 */
	end_offset =
	    WT_LOG_SLOT_JOINED_BUFFERED(old_state) + slot->slot_unbuffered;
	slot->slot_end_lsn.l.offset += (uint32_t)end_offset;
	WT_STAT_CONN_INCRV(session, log_slot_consolidated, end_offset);
	/*
	 * XXX Would like to change so one piece of code advances the LSN.
	 */
	log->alloc_lsn = slot->slot_end_lsn;
	WT_ASSERT(session, log->alloc_lsn.l.file >= log->write_lsn.l.file);
	return (0);
}
/*
 * __log_slot_close --
 *	Close out the slot the caller is using. The slot may already be
 *	closed or freed by another thread.
 *
 *	Returns 0 only when this thread actually closed the slot (the end
 *	LSN is then set and log->alloc_lsn advanced); WT_NOTFOUND when the
 *	slot was NULL or was already closed/processed by another thread;
 *	EBUSY for a forced close while a write is still in progress.
 *	*releasep is set only when the closed slot is also done and ready
 *	for the caller to release. Caller must hold the slot lock
 *	(asserted below).
 */
static int
__log_slot_close(
    WT_SESSION_IMPL *session, WT_LOGSLOT *slot, bool *releasep, bool forced)
{
	WT_CONNECTION_IMPL *conn;
	WT_LOG *log;
	int64_t end_offset, new_state, old_state;

	WT_ASSERT(session, F_ISSET(session, WT_SESSION_LOCKED_SLOT));
	WT_ASSERT(session, releasep != NULL);
	conn = S2C(session);
	log = conn->log;
	/* Default to "do not release"; only a done slot flips this. */
	*releasep = 0;
	if (slot == NULL)
		return (WT_NOTFOUND);
retry:
	/* Snapshot the packed slot state; the CAS below validates it. */
	old_state = slot->slot_state;
	/*
	 * If this close is coming from a forced close and a thread is in
	 * the middle of using the slot, return EBUSY. The caller can
	 * decide if retrying is necessary or not.
	 */
	if (forced && WT_LOG_SLOT_INPROGRESS(old_state))
		return (EBUSY);
	/*
	 * If someone else is switching out this slot we lost. Nothing to
	 * do but return. Return WT_NOTFOUND anytime the given slot was
	 * processed by another closing thread. Only return 0 when we
	 * actually closed the slot.
	 */
	if (WT_LOG_SLOT_CLOSED(old_state))
		return (WT_NOTFOUND);
	/*
	 * If someone completely processed this slot, we're done.
	 * NOTE(review): this deliberately re-reads slot->slot_state rather
	 * than testing old_state, so it sees the most recent flags.
	 */
	if (FLD64_ISSET((uint64_t)slot->slot_state, WT_LOG_SLOT_RESERVED))
		return (WT_NOTFOUND);
	new_state = (old_state | WT_LOG_SLOT_CLOSE);
	/*
	 * Close this slot. If we lose the race retry.
	 */
	if (!__wt_atomic_casiv64(&slot->slot_state, old_state, new_state))
		goto retry;
	/*
	 * We own the slot now. No one else can join.
	 * Set the end LSN.
	 */
	WT_STAT_FAST_CONN_INCR(session, log_slot_closes);
	if (WT_LOG_SLOT_DONE(new_state))
		*releasep = 1;
	slot->slot_end_lsn = slot->slot_start_lsn;
	/*
	 * NOTE(review): slot_unbuffered is read here without waiting for the
	 * joining thread to publish it; a thread that set the unbuffered
	 * flag may not have stored the size yet, which could undercount
	 * end_offset. The newer revision of this function spins until
	 * slot_unbuffered is nonzero when the unbuffered flag is set in
	 * old_state — confirm whether that fix should be applied here.
	 */
	end_offset =
	    WT_LOG_SLOT_JOINED_BUFFERED(old_state) + slot->slot_unbuffered;
	slot->slot_end_lsn.offset += (wt_off_t)end_offset;
	WT_STAT_FAST_CONN_INCRV(session, log_slot_consolidated, end_offset);
	/*
	 * XXX Would like to change so one piece of code advances the LSN.
	 */
	log->alloc_lsn = slot->slot_end_lsn;
	WT_ASSERT(session, log->alloc_lsn.file >= log->write_lsn.file);
	return (0);
}