/*
 * __wt_lsm_tree_drop --
 *	Drop an LSM tree.
 */
int
__wt_lsm_tree_drop(
    WT_SESSION_IMPL *session, const char *name, const char *cfg[])
{
	WT_DECL_RET;
	WT_LSM_CHUNK *chunk;
	WT_LSM_TREE *lsm_tree;
	u_int i;
	int locked;

	locked = 0;

	/* Get the LSM tree. */
	WT_RET(__wt_lsm_tree_get(session, name, 1, &lsm_tree));

	/* Shut down the LSM worker. */
	WT_ERR(__lsm_tree_close(session, lsm_tree));

	/* Prevent any new opens. */
	WT_ERR(__wt_try_writelock(session, lsm_tree->rwlock));
	locked = 1;

	/* Drop the chunks. */
	for (i = 0; i < lsm_tree->nchunks; i++) {
		chunk = lsm_tree->chunk[i];
		WT_ERR(__wt_schema_drop(session, chunk->uri, cfg));
		if (F_ISSET(chunk, WT_LSM_CHUNK_BLOOM))
			WT_ERR(
			    __wt_schema_drop(session, chunk->bloom_uri, cfg));
	}

	/* Drop any chunks on the obsolete list. */
	for (i = 0; i < lsm_tree->nold_chunks; i++) {
		if ((chunk = lsm_tree->old_chunks[i]) == NULL)
			continue;
		WT_ERR(__wt_schema_drop(session, chunk->uri, cfg));
		if (F_ISSET(chunk, WT_LSM_CHUNK_BLOOM))
			WT_ERR(
			    __wt_schema_drop(session, chunk->bloom_uri, cfg));
	}

	ret = __wt_rwunlock(session, lsm_tree->rwlock);
	locked = 0;
	if (ret == 0)
		ret = __wt_metadata_remove(session, name);

err:	if (locked)
		WT_TRET(__wt_rwunlock(session, lsm_tree->rwlock));
	WT_TRET(__lsm_tree_discard(session, lsm_tree));
	return (ret);
}
/*
 * __wt_lsm_tree_truncate --
 *	Truncate an LSM tree.
 */
int
__wt_lsm_tree_truncate(
    WT_SESSION_IMPL *session, const char *name, const char *cfg[])
{
	WT_DECL_RET;
	WT_LSM_CHUNK *chunk;
	WT_LSM_TREE *lsm_tree;
	int locked;

	WT_UNUSED(cfg);

	locked = 0;

	/* Get the LSM tree. */
	WT_RET(__wt_lsm_tree_get(session, name, 1, &lsm_tree));

	/* Shut down the LSM worker. */
	WT_RET(__lsm_tree_close(session, lsm_tree));

	/* Prevent any new opens. */
	WT_RET(__wt_try_writelock(session, lsm_tree->rwlock));
	locked = 1;

	/* Create the new chunk. */
	WT_ERR(__wt_calloc_def(session, 1, &chunk));
	chunk->id = WT_ATOMIC_ADD(lsm_tree->last, 1);
	WT_ERR(__wt_lsm_tree_setup_chunk(session, lsm_tree, chunk));

	/* Mark all chunks old. */
	WT_ERR(__wt_lsm_merge_update_tree(
	    session, lsm_tree, 0, lsm_tree->nchunks, chunk));

	WT_ERR(__wt_lsm_meta_write(session, lsm_tree));

	WT_ERR(__lsm_tree_start_worker(session, lsm_tree));
	ret = __wt_rwunlock(session, lsm_tree->rwlock);
	locked = 0;
	if (ret == 0)
		__wt_lsm_tree_release(session, lsm_tree);

err:	if (locked)
		WT_TRET(__wt_rwunlock(session, lsm_tree->rwlock));
	/*
	 * Don't discard the LSM tree structure unless there has been an
	 * error.  The handle remains valid for future operations.
	 */
	if (ret != 0)
		WT_TRET(__lsm_tree_discard(session, lsm_tree));
	return (ret);
}
/*
 * __log_server --
 *	The log server thread.
 */
static WT_THREAD_RET
__log_server(void *arg)
{
	WT_CONNECTION_IMPL *conn;
	WT_DECL_RET;
	WT_LOG *log;
	WT_SESSION_IMPL *session;
	u_int locked;

	session = arg;
	conn = S2C(session);
	log = conn->log;
	locked = 0;

	while (F_ISSET(conn, WT_CONN_LOG_SERVER_RUN)) {
		/*
		 * Perform log pre-allocation.
		 */
		if (conn->log_prealloc > 0)
			WT_ERR(__log_prealloc_once(session));

		/*
		 * Perform the archive.
		 */
		if (FLD_ISSET(conn->log_flags, WT_CONN_LOG_ARCHIVE)) {
			if (__wt_try_writelock(
			    session, log->log_archive_lock) == 0) {
				locked = 1;
				WT_ERR(__log_archive_once(session, 0));
				WT_ERR(__wt_writeunlock(
				    session, log->log_archive_lock));
				locked = 0;
			} else
				WT_ERR(__wt_verbose(session, WT_VERB_LOG,
				    "log_archive: Blocked due to open log "
				    "cursor holding archive lock"));
		}

		/* Wait until the next event. */
		WT_ERR(__wt_cond_wait(session, conn->log_cond, WT_MILLION));
	}

	if (0) {
err:		__wt_err(session, ret, "log server error");
	}
	if (locked)
		(void)__wt_writeunlock(session, log->log_archive_lock);
	return (WT_THREAD_RET_VALUE);
}
/*
 * __wt_thread_group_start_one --
 *	Start a new thread if possible.
 */
int
__wt_thread_group_start_one(
    WT_SESSION_IMPL *session, WT_THREAD_GROUP *group, bool wait)
{
	WT_DECL_RET;

	if (group->current_threads >= group->max)
		return (0);

	if (wait)
		__wt_writelock(session, group->lock);
	else if (__wt_try_writelock(session, group->lock) != 0)
		return (0);

	/* Recheck the bounds now that we hold the lock. */
	if (group->current_threads < group->max)
		WT_TRET(__thread_group_grow(
		    session, group, group->current_threads + 1));
	__wt_writeunlock(session, group->lock);

	return (ret);
}
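/*
 * The functions in this section share one locking pattern: opportunistically
 * try for the write lock, skip the work (or fall back) on EBUSY, and recheck
 * the guarding condition once the lock is actually held.  What follows is a
 * minimal, self-contained sketch of that pattern using plain POSIX rwlocks.
 * It is an illustrative analogue only, not WiredTiger code: the THREAD_GROUP
 * type and the thread_group_grow() helper below are hypothetical stand-ins.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>

typedef struct {
	pthread_rwlock_t lock;		/* Protects the counters below. */
	unsigned current_threads;
	unsigned max;
} THREAD_GROUP;

/* Hypothetical helper: pretend to start threads until "n" are running. */
static int
thread_group_grow(THREAD_GROUP *group, unsigned n)
{
	group->current_threads = n;
	return (0);
}

static int
thread_group_start_one(THREAD_GROUP *group, bool wait)
{
	int ret;

	/* Cheap unlocked check: don't touch the lock if the group is full. */
	if (group->current_threads >= group->max)
		return (0);

	if (wait)
		ret = pthread_rwlock_wrlock(&group->lock);
	else if ((ret = pthread_rwlock_trywrlock(&group->lock)) == EBUSY)
		return (0);	/* Another thread is resizing; skip quietly. */
	if (ret != 0)
		return (ret);

	/* Recheck the bounds now that we hold the lock. */
	ret = 0;
	if (group->current_threads < group->max)
		ret = thread_group_grow(group, group->current_threads + 1);

	(void)pthread_rwlock_unlock(&group->lock);
	return (ret);
}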
/*
 * __log_server --
 *	The log server thread.
 */
static WT_THREAD_RET
__log_server(void *arg)
{
	WT_CONNECTION_IMPL *conn;
	WT_DECL_RET;
	WT_LOG *log;
	WT_SESSION_IMPL *session;
	int freq_per_sec, signalled;

	session = arg;
	conn = S2C(session);
	log = conn->log;
	signalled = 0;

	/*
	 * Set this to the number of times per second we want to force out the
	 * log slot buffer.
	 */
#define	WT_FORCE_PER_SECOND	20
	freq_per_sec = WT_FORCE_PER_SECOND;

	/*
	 * The log server thread does a variety of work.  It forces out any
	 * buffered log writes.  It pre-allocates log files and it performs
	 * log archiving.  The reason the wrlsn thread does not force out
	 * the buffered writes is because we want to process and move the
	 * write_lsn forward as quickly as possible.  The same reason applies
	 * to why the log file server thread does not force out the writes.
	 * That thread does fsync calls which can take a long time and we
	 * don't want log records sitting in the buffer over the time it
	 * takes to sync out an earlier file.
	 */
	while (F_ISSET(conn, WT_CONN_LOG_SERVER_RUN)) {
		/*
		 * Slots depend on future activity.  Force out buffered
		 * writes in case we are idle.  This cannot be part of the
		 * wrlsn thread because of interaction advancing the write_lsn
		 * and a buffer may need to wait for the write_lsn to advance
		 * in the case of a synchronous buffer.  We end up with a hang.
		 */
		WT_ERR_BUSY_OK(__wt_log_force_write(session, 0));

		/*
		 * We don't want to archive or pre-allocate files as often as
		 * we want to force out log buffers.  Only do it once per
		 * second or if the condition was signalled.
		 */
		if (--freq_per_sec <= 0 || signalled != 0) {
			freq_per_sec = WT_FORCE_PER_SECOND;

			/*
			 * Perform log pre-allocation.
			 */
			if (conn->log_prealloc > 0)
				WT_ERR(__log_prealloc_once(session));

			/*
			 * Perform the archive.
			 */
			if (FLD_ISSET(conn->log_flags, WT_CONN_LOG_ARCHIVE)) {
				if (__wt_try_writelock(
				    session, log->log_archive_lock) == 0) {
					ret = __log_archive_once(session, 0);
					WT_TRET(__wt_writeunlock(
					    session, log->log_archive_lock));
					WT_ERR(ret);
				} else
					WT_ERR(
					    __wt_verbose(session, WT_VERB_LOG,
					    "log_archive: Blocked due to open "
					    "log cursor holding archive lock"));
			}
		}

		/* Wait until the next event. */
		WT_ERR(__wt_cond_wait_signal(session, conn->log_cond,
		    WT_MILLION / WT_FORCE_PER_SECOND, &signalled));
	}

	if (0) {
err:		__wt_err(session, ret, "log server error");
	}
	return (WT_THREAD_RET_VALUE);
}
/*
 * __wt_lsm_tree_rename --
 *	Rename an LSM tree.
 */
int
__wt_lsm_tree_rename(WT_SESSION_IMPL *session,
    const char *oldname, const char *newname, const char *cfg[])
{
	WT_DECL_RET;
	WT_ITEM buf;
	WT_LSM_CHUNK *chunk;
	WT_LSM_TREE *lsm_tree;
	const char *old;
	u_int i;
	int locked;

	old = NULL;
	WT_CLEAR(buf);
	locked = 0;

	/* Get the LSM tree. */
	WT_RET(__wt_lsm_tree_get(session, oldname, 1, &lsm_tree));

	/* Shut down the LSM worker. */
	WT_ERR(__lsm_tree_close(session, lsm_tree));

	/* Prevent any new opens. */
	WT_ERR(__wt_try_writelock(session, lsm_tree->rwlock));
	locked = 1;

	/* Set the new name. */
	WT_ERR(__lsm_tree_set_name(session, lsm_tree, newname));

	/* Rename the chunks. */
	for (i = 0; i < lsm_tree->nchunks; i++) {
		chunk = lsm_tree->chunk[i];
		old = chunk->uri;
		chunk->uri = NULL;

		WT_ERR(__wt_lsm_tree_chunk_name(
		    session, lsm_tree, chunk->id, &buf));
		chunk->uri = __wt_buf_steal(session, &buf, NULL);
		WT_ERR(__wt_schema_rename(session, old, chunk->uri, cfg));
		__wt_free(session, old);

		if (F_ISSET(chunk, WT_LSM_CHUNK_BLOOM)) {
			old = chunk->bloom_uri;
			chunk->bloom_uri = NULL;
			WT_ERR(__wt_lsm_tree_bloom_name(
			    session, lsm_tree, chunk->id, &buf));
			chunk->bloom_uri = __wt_buf_steal(session, &buf, NULL);
			F_SET(chunk, WT_LSM_CHUNK_BLOOM);
			WT_ERR(__wt_schema_rename(
			    session, old, chunk->bloom_uri, cfg));
			__wt_free(session, old);
		}
	}

	ret = __wt_rwunlock(session, lsm_tree->rwlock);
	locked = 0;
	if (ret == 0)
		ret = __wt_lsm_meta_write(session, lsm_tree);
	if (ret == 0)
		ret = __wt_metadata_remove(session, oldname);

err:	if (locked)
		WT_TRET(__wt_rwunlock(session, lsm_tree->rwlock));
	if (old != NULL)
		__wt_free(session, old);
	/*
	 * Discard this LSM tree structure.  The first operation on the
	 * renamed tree will create a new one.
	 */
	WT_TRET(__lsm_tree_discard(session, lsm_tree));
	return (ret);
}
/*
 * __log_server --
 *	The log server thread.
 */
static WT_THREAD_RET
__log_server(void *arg)
{
	struct timespec start, now;
	WT_CONNECTION_IMPL *conn;
	WT_DECL_RET;
	WT_LOG *log;
	WT_SESSION_IMPL *session;
	uint64_t timediff;
	bool did_work, locked, signalled;

	session = arg;
	conn = S2C(session);
	log = conn->log;
	locked = signalled = false;

	/*
	 * Set this to the number of milliseconds we want to run archive and
	 * pre-allocation.  Start it so that we run on the first time through.
	 */
	timediff = WT_THOUSAND;

	/*
	 * The log server thread does a variety of work.  It forces out any
	 * buffered log writes.  It pre-allocates log files and it performs
	 * log archiving.  The reason the wrlsn thread does not force out
	 * the buffered writes is because we want to process and move the
	 * write_lsn forward as quickly as possible.  The same reason applies
	 * to why the log file server thread does not force out the writes.
	 * That thread does fsync calls which can take a long time and we
	 * don't want log records sitting in the buffer over the time it
	 * takes to sync out an earlier file.
	 */
	did_work = true;
	while (F_ISSET(conn, WT_CONN_LOG_SERVER_RUN)) {
		/*
		 * Slots depend on future activity.  Force out buffered
		 * writes in case we are idle.  This cannot be part of the
		 * wrlsn thread because of interaction advancing the write_lsn
		 * and a buffer may need to wait for the write_lsn to advance
		 * in the case of a synchronous buffer.  We end up with a hang.
		 */
		WT_ERR_BUSY_OK(__wt_log_force_write(session, 0, &did_work));

		/*
		 * We don't want to archive or pre-allocate files as often as
		 * we want to force out log buffers.  Only do it once per
		 * second or if the condition was signalled.
		 */
		if (timediff >= WT_THOUSAND || signalled) {
			/*
			 * Perform log pre-allocation.
			 */
			if (conn->log_prealloc > 0) {
				/*
				 * Log file pre-allocation is disabled when a
				 * hot backup cursor is open because we have
				 * agreed not to rename or remove any files in
				 * the database directory.
				 */
				WT_ERR(__wt_readlock(
				    session, conn->hot_backup_lock));
				locked = true;
				if (!conn->hot_backup)
					WT_ERR(__log_prealloc_once(session));
				WT_ERR(__wt_readunlock(
				    session, conn->hot_backup_lock));
				locked = false;
			}

			/*
			 * Perform the archive.
			 */
			if (FLD_ISSET(conn->log_flags, WT_CONN_LOG_ARCHIVE)) {
				if (__wt_try_writelock(
				    session, log->log_archive_lock) == 0) {
					ret = __log_archive_once(session, 0);
					WT_TRET(__wt_writeunlock(
					    session, log->log_archive_lock));
					WT_ERR(ret);
				} else
					WT_ERR(
					    __wt_verbose(session, WT_VERB_LOG,
					    "log_archive: Blocked due to open "
					    "log cursor holding archive lock"));
			}
		}

		/* Wait until the next event. */
		WT_ERR(__wt_epoch(session, &start));
		WT_ERR(__wt_cond_auto_wait_signal(
		    session, conn->log_cond, did_work, &signalled));
		WT_ERR(__wt_epoch(session, &now));
		timediff = WT_TIMEDIFF_MS(now, start);
	}

	if (0) {
err:		__wt_err(session, ret, "log server error");
		if (locked)
			WT_TRET(__wt_readunlock(
			    session, conn->hot_backup_lock));
	}
	return (WT_THREAD_RET_VALUE);
}
/*
 * __sweep --
 *	Close unused dhandles on the connection dhandle list.
 */
static int
__sweep(WT_SESSION_IMPL *session)
{
	WT_CONNECTION_IMPL *conn;
	WT_DATA_HANDLE *dhandle, *dhandle_next;
	WT_DECL_RET;
	time_t now;

	conn = S2C(session);

	/*
	 * Sessions cache handles unless the session itself is closed, at
	 * which time the handle reference counts are immediately decremented.
	 * Don't discard handles that have been open recently.
	 */
	WT_RET(__wt_seconds(session, &now));

	dhandle = SLIST_FIRST(&conn->dhlh);
	for (; dhandle != NULL; dhandle = dhandle_next) {
		dhandle_next = SLIST_NEXT(dhandle, l);
		if (dhandle->session_ref != 0 ||
		    now - dhandle->timeofdeath <= WT_DHANDLE_SWEEP_WAIT)
			continue;

		/*
		 * We have a candidate for closing; if it's open, acquire an
		 * exclusive lock on the handle and close it (the lock blocks
		 * threads from opening the handle).  We might be blocking an
		 * open for a fairly long time (over disk I/O), but the handle
		 * has been quiescent for awhile.
		 *
		 * The close can fail if an update cannot be written (updates
		 * in a no-longer-referenced file might not yet be globally
		 * visible if sessions have disjoint sets of files open).  If
		 * the handle is busy, skip it, we'll retry the close the next
		 * time, after the transaction state has progressed.
		 */
		if (F_ISSET(dhandle, WT_DHANDLE_OPEN)) {
			/*
			 * We don't set WT_DHANDLE_EXCLUSIVE deliberately, we
			 * want opens to block on us rather than returning an
			 * EBUSY error to the application.
			 */
			ret = __wt_try_writelock(session, dhandle->rwlock);
			if (ret == EBUSY) {
				ret = 0;
				continue;
			}
			WT_RET(ret);

			WT_WITH_DHANDLE(session, dhandle,
			    ret = __wt_conn_btree_sync_and_close(session));
			if (ret == EBUSY)
				ret = 0;

			WT_TRET(__wt_rwunlock(session, dhandle->rwlock));
			WT_RET(ret);
		}

		/*
		 * Attempt to discard the handle (the called function checks
		 * the handle-open flag after acquiring appropriate locks,
		 * which is why we don't do any special handling of EBUSY
		 * returns above, that path never cleared the handle-open
		 * flag).
		 */
		ret = __wt_conn_dhandle_discard_single(session, dhandle, 0);
		if (ret == EBUSY)
			ret = 0;
		WT_RET(ret);
	}
	return (0);
}
/*
 * __wt_txn_update_oldest --
 *	Sweep the running transactions to update the oldest ID required.
 */
int
__wt_txn_update_oldest(WT_SESSION_IMPL *session, uint32_t flags)
{
	WT_CONNECTION_IMPL *conn;
	WT_DECL_RET;
	WT_SESSION_IMPL *oldest_session;
	WT_TXN_GLOBAL *txn_global;
	uint64_t current_id, last_running, oldest_id;
	uint64_t prev_last_running, prev_oldest_id;
	bool strict, wait;

	conn = S2C(session);
	txn_global = &conn->txn_global;
	strict = LF_ISSET(WT_TXN_OLDEST_STRICT);
	wait = LF_ISSET(WT_TXN_OLDEST_WAIT);

	current_id = last_running = txn_global->current;
	prev_last_running = txn_global->last_running;
	prev_oldest_id = txn_global->oldest_id;

	/*
	 * For pure read-only workloads, or if the update isn't forced and
	 * the oldest ID isn't too far behind, avoid scanning.
	 */
	if (prev_oldest_id == current_id ||
	    (!strict && WT_TXNID_LT(current_id, prev_oldest_id + 100)))
		return (0);

	/* First do a read-only scan. */
	if (wait)
		__wt_readlock(session, txn_global->scan_rwlock);
	else if ((ret =
	    __wt_try_readlock(session, txn_global->scan_rwlock)) != 0)
		return (ret == EBUSY ? 0 : ret);
	__txn_oldest_scan(session, &oldest_id, &last_running, &oldest_session);
	__wt_readunlock(session, txn_global->scan_rwlock);

	/*
	 * If the state hasn't changed (or hasn't moved far enough for
	 * non-forced updates), give up.
	 */
	if ((oldest_id == prev_oldest_id ||
	    (!strict && WT_TXNID_LT(oldest_id, prev_oldest_id + 100))) &&
	    ((last_running == prev_last_running) ||
	    (!strict && WT_TXNID_LT(last_running, prev_last_running + 100))))
		return (0);

	/* It looks like an update is necessary, wait for exclusive access. */
	if (wait)
		__wt_writelock(session, txn_global->scan_rwlock);
	else if ((ret =
	    __wt_try_writelock(session, txn_global->scan_rwlock)) != 0)
		return (ret == EBUSY ? 0 : ret);

	/*
	 * If the oldest ID has been updated while we waited, don't bother
	 * scanning.
	 */
	if (WT_TXNID_LE(oldest_id, txn_global->oldest_id) &&
	    WT_TXNID_LE(last_running, txn_global->last_running))
		goto done;

	/*
	 * Re-scan now that we have exclusive access.  This is necessary
	 * because threads get transaction snapshots with read locks, and we
	 * have to be sure that there isn't a thread that has got a snapshot
	 * locally but not yet published its snap_min.
	 */
	__txn_oldest_scan(session, &oldest_id, &last_running, &oldest_session);

#ifdef HAVE_DIAGNOSTIC
	{
	/*
	 * Make sure the ID doesn't move past any named snapshots.
	 *
	 * Don't include the read/assignment in the assert statement.  Coverity
	 * complains if there are assignments only done in diagnostic builds,
	 * and when the read is from a volatile.
	 */
	uint64_t id = txn_global->nsnap_oldest_id;
	WT_ASSERT(session,
	    id == WT_TXN_NONE || !WT_TXNID_LT(id, oldest_id));
	}
#endif
	/* Update the oldest ID. */
	if (WT_TXNID_LT(txn_global->oldest_id, oldest_id))
		txn_global->oldest_id = oldest_id;
	if (WT_TXNID_LT(txn_global->last_running, last_running)) {
		txn_global->last_running = last_running;

#ifdef HAVE_VERBOSE
		/*
		 * Output a verbose message about long-running transactions,
		 * but only when some progress is being made.
		 */
		if (WT_VERBOSE_ISSET(session, WT_VERB_TRANSACTION) &&
		    current_id - oldest_id > 10000 && oldest_session != NULL) {
			__wt_verbose(session, WT_VERB_TRANSACTION,
			    "old snapshot %" PRIu64
			    " pinned in session %" PRIu32 " [%s]"
			    " with snap_min %" PRIu64 "\n",
			    oldest_id, oldest_session->id,
			    oldest_session->lastop,
			    oldest_session->txn.snap_min);
		}
#endif
	}

done:	__wt_writeunlock(session, txn_global->scan_rwlock);
	return (ret);
}
/*
 * __sweep --
 *	Close unused dhandles on the connection dhandle list.
 */
static int
__sweep(WT_SESSION_IMPL *session)
{
	WT_CONNECTION_IMPL *conn;
	WT_DATA_HANDLE *dhandle, *dhandle_next;
	WT_DECL_RET;
	time_t now;
	int locked;

	conn = S2C(session);

	/* Don't discard handles that have been open recently. */
	WT_RET(__wt_seconds(session, &now));

	WT_STAT_FAST_CONN_INCR(session, dh_conn_sweeps);
	dhandle = SLIST_FIRST(&conn->dhlh);
	for (; dhandle != NULL; dhandle = dhandle_next) {
		dhandle_next = SLIST_NEXT(dhandle, l);
		if (WT_IS_METADATA(dhandle))
			continue;
		if (dhandle->session_inuse != 0 ||
		    now <= dhandle->timeofdeath + WT_DHANDLE_SWEEP_WAIT)
			continue;
		if (dhandle->timeofdeath == 0) {
			dhandle->timeofdeath = now;
			WT_STAT_FAST_CONN_INCR(session, dh_conn_tod);
			continue;
		}

		/*
		 * We have a candidate for closing; if it's open, acquire an
		 * exclusive lock on the handle and close it.  We might be
		 * blocking opens for a long time (over disk I/O), but the
		 * handle was quiescent for awhile.
		 *
		 * The close can fail if an update cannot be written (updates
		 * in a no-longer-referenced file might not yet be globally
		 * visible if sessions have disjoint sets of files open).  If
		 * the handle is busy, skip it, we'll retry the close the next
		 * time, after the transaction state has progressed.
		 *
		 * We don't set WT_DHANDLE_EXCLUSIVE deliberately, we want
		 * opens to block on us rather than returning an EBUSY error
		 * to the application.
		 */
		if ((ret =
		    __wt_try_writelock(session, dhandle->rwlock)) == EBUSY)
			continue;
		WT_RET(ret);
		locked = 1;

		/* If the handle is open, try to close it. */
		if (F_ISSET(dhandle, WT_DHANDLE_OPEN)) {
			WT_WITH_DHANDLE(session, dhandle,
			    ret = __wt_conn_btree_sync_and_close(session, 0));
			if (ret != 0)
				goto unlock;

			/* We closed the btree handle, bump the statistic. */
			WT_STAT_FAST_CONN_INCR(session, dh_conn_handles);
		}

		/*
		 * If there are no longer any references to the handle in any
		 * sessions, attempt to discard it.  The called function
		 * re-checks that the handle is not in use, which is why we
		 * don't do any special handling of EBUSY returns above.
		 */
		if (dhandle->session_inuse == 0 && dhandle->session_ref == 0) {
			WT_WITH_DHANDLE(session, dhandle, ret =
			    __wt_conn_dhandle_discard_single(session, 0));
			if (ret != 0)
				goto unlock;

			/* If the handle was discarded, it isn't locked. */
			locked = 0;
		} else
			WT_STAT_FAST_CONN_INCR(session, dh_conn_ref);

unlock:		if (locked)
			WT_TRET(__wt_writeunlock(session, dhandle->rwlock));
		WT_RET_BUSY_OK(ret);
	}
	return (0);
}
/*
 * __conn_dhandle_open_lock --
 *	Spin on the current data handle until either (a) it is open, read
 *	locked; or (b) it is closed, write locked.  If exclusive access is
 *	requested and cannot be granted immediately because the handle is
 *	in use, fail with EBUSY.
 *
 *	Here is a brief summary of how different operations synchronize
 *	using either the schema lock, handle locks or handle flags:
 *
 *	open -- holds the schema lock, one thread gets the handle exclusive,
 *	    reverts to a shared handle lock and drops the schema lock
 *	    once the handle is open;
 *	bulk load -- sets bulk and exclusive;
 *	salvage, truncate, update, verify -- hold the schema lock, set a
 *	    "special" flag;
 *	sweep -- gets a write lock on the handle, doesn't set exclusive
 *
 *	The schema lock prevents a lot of potential conflicts: we should
 *	never see handles being salvaged or verified because those
 *	operations hold the schema lock.  However, it is possible to see a
 *	handle that is being bulk loaded, or that the sweep server is
 *	closing.
 *
 *	The principle here is that application operations can cause other
 *	application operations to fail (so attempting to open a cursor on a
 *	file while it is being bulk-loaded will fail), but internal or
 *	database-wide operations should not prevent application-initiated
 *	operations.  For example, attempting to verify a file should not
 *	fail because the sweep server happens to be in the process of
 *	closing that file.
 */
static int
__conn_dhandle_open_lock(
    WT_SESSION_IMPL *session, WT_DATA_HANDLE *dhandle, uint32_t flags)
{
	WT_BTREE *btree;
	WT_DECL_RET;
	int is_open, lock_busy, want_exclusive;

	btree = dhandle->handle;
	lock_busy = 0;
	want_exclusive = LF_ISSET(WT_DHANDLE_EXCLUSIVE) ? 1 : 0;

	/*
	 * Check that the handle is open.  We've already incremented
	 * the reference count, so once the handle is open it won't be
	 * closed by another thread.
	 *
	 * If we can see the WT_DHANDLE_OPEN flag set while holding a
	 * lock on the handle, then it's really open and we can start
	 * using it.  Alternatively, if we can get an exclusive lock
	 * and WT_DHANDLE_OPEN is still not set, we need to do the open.
	 */
	for (;;) {
		/*
		 * If the handle is already open for a special operation,
		 * give up.
		 */
		if (F_ISSET(btree, WT_BTREE_SPECIAL_FLAGS))
			return (EBUSY);

		/*
		 * If the handle is open, get a read lock and recheck.
		 *
		 * Wait for a read lock if we want exclusive access and failed
		 * to get it: the sweep server may be closing this handle, and
		 * we need to wait for it to complete.  If we want exclusive
		 * access and find the handle open once we get the read lock,
		 * give up: some other thread has it locked for real.
		 */
		if (F_ISSET(dhandle, WT_DHANDLE_OPEN) &&
		    (!want_exclusive || lock_busy)) {
			WT_RET(__wt_readlock(session, dhandle->rwlock));
			is_open = F_ISSET(dhandle, WT_DHANDLE_OPEN) ? 1 : 0;
			if (is_open && !want_exclusive)
				return (0);
			WT_RET(__wt_readunlock(session, dhandle->rwlock));
		} else
			is_open = 0;

		/*
		 * It isn't open or we want it exclusive: try to get an
		 * exclusive lock.  There is some subtlety here: if we race
		 * with another thread that successfully opens the file, we
		 * don't want to block waiting to get exclusive access.
		 */
		if ((ret =
		    __wt_try_writelock(session, dhandle->rwlock)) == 0) {
			/*
			 * If it was opened while we waited, drop the write
			 * lock and get a read lock instead.
			 */
			if (F_ISSET(dhandle, WT_DHANDLE_OPEN) &&
			    !want_exclusive) {
				lock_busy = 0;
				WT_RET(__wt_writeunlock(
				    session, dhandle->rwlock));
				continue;
			}

			/* We have an exclusive lock, we're done. */
			F_SET(dhandle, WT_DHANDLE_EXCLUSIVE);
			return (0);
		} else if (ret != EBUSY || (is_open && want_exclusive))
			return (ret);
		else
			lock_busy = 1;

		/* Give other threads a chance to make progress. */
		__wt_yield();
	}
}