Example #1: WiredTigerRecordStore::truncate (remove every record in a collection)
Status WiredTigerRecordStore::truncate(OperationContext* txn) {
    WiredTigerCursor startWrap(_uri, _tableId, true, txn);
    WT_CURSOR* start = startWrap.get();
    int ret = WT_OP_CHECK(start->next(start));
    // Empty collections don't have anything to truncate.
    if (ret == WT_NOTFOUND) {
        return Status::OK();
    }
    invariantWTOK(ret);

    WT_SESSION* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn)->getSession();
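    // With a NULL stop cursor, truncate removes everything from 'start'
    // through the last record in the table.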
    invariantWTOK(WT_OP_CHECK(session->truncate(session, NULL, start, NULL, NULL)));
    _changeNumRecords(txn, -numRecords(txn));
    _increaseDataSize(txn, -dataSize(txn));

    return Status::OK();
}
Example #2: WiredTigerRecordStore::cappedDeleteAsNeeded_inlock (delete the oldest documents once a capped collection exceeds its size or document limits)
int64_t WiredTigerRecordStore::cappedDeleteAsNeeded_inlock(OperationContext* txn,
                                                           const RecordId& justInserted) {
    // we do this in a side transaction in case it aborts
    WiredTigerRecoveryUnit* realRecoveryUnit =
        checked_cast<WiredTigerRecoveryUnit*>(txn->releaseRecoveryUnit());
    invariant(realRecoveryUnit);
    WiredTigerSessionCache* sc = realRecoveryUnit->getSessionCache();
    OperationContext::RecoveryUnitState const realRUstate =
        txn->setRecoveryUnit(new WiredTigerRecoveryUnit(sc), OperationContext::kNotInUnitOfWork);

    WiredTigerRecoveryUnit::get(txn)->markNoTicketRequired();  // realRecoveryUnit already has a ticket
    WT_SESSION* session = WiredTigerRecoveryUnit::get(txn)->getSession(txn)->getSession();

    int64_t dataSize = _dataSize.load();
    int64_t numRecords = _numRecords.load();

    int64_t sizeOverCap = (dataSize > _cappedMaxSize) ? dataSize - _cappedMaxSize : 0;
    int64_t sizeSaved = 0;
    int64_t docsOverCap = 0, docsRemoved = 0;
    if (_cappedMaxDocs != -1 && numRecords > _cappedMaxDocs)
        docsOverCap = numRecords - _cappedMaxDocs;

    try {
        WriteUnitOfWork wuow(txn);

        WiredTigerCursor curwrap(_uri, _tableId, true, txn);
        WT_CURSOR* c = curwrap.get();
        RecordId newestOld;
        int ret = 0;
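        // Walk forward from the oldest record, counting the documents (and
        // bytes) that must be removed to get back under the caps; do at most
        // 20000 documents per pass.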
        while ((sizeSaved < sizeOverCap || docsRemoved < docsOverCap) && (docsRemoved < 20000) &&
               (ret = WT_OP_CHECK(c->next(c))) == 0) {
            int64_t key;
            ret = c->get_key(c, &key);
            invariantWTOK(ret);

            // don't go past the record we just inserted
            newestOld = _fromKey(key);
            if (newestOld >= justInserted)  // TODO: use oldest uncommitted instead
                break;

            if (_shuttingDown)
                break;

            WT_ITEM old_value;
            invariantWTOK(c->get_value(c, &old_value));

            ++docsRemoved;
            sizeSaved += old_value.size;

            if (_cappedDeleteCallback) {
                uassertStatusOK(_cappedDeleteCallback->aboutToDeleteCapped(
                    txn,
                    newestOld,
                    RecordData(static_cast<const char*>(old_value.data), old_value.size)));
            }
        }

        if (ret != WT_NOTFOUND) {
            invariantWTOK(ret);
        }

        if (docsRemoved > 0) {
            // if we scanned to the end of the collection or past our insert, go back one
            if (ret == WT_NOTFOUND || newestOld >= justInserted) {
                ret = WT_OP_CHECK(c->prev(c));
            }
            invariantWTOK(ret);

            WiredTigerCursor startWrap(_uri, _tableId, true, txn);
            WT_CURSOR* start = startWrap.get();
            ret = WT_OP_CHECK(start->next(start));
            invariantWTOK(ret);

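            // Remove the range from the first record up to and including the
            // record 'c' is positioned on.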
            ret = session->truncate(session, NULL, start, c, NULL);
            if (ret == ENOENT || ret == WT_NOTFOUND) {
                // TODO we should remove this case once SERVER-17141 is resolved
                log() << "Soft failure truncating capped collection. Will try again later.";
                docsRemoved = 0;
            } else {
                invariantWTOK(ret);
                _changeNumRecords(txn, -docsRemoved);
                _increaseDataSize(txn, -sizeSaved);
                wuow.commit();
            }
        }
    } catch (const WriteConflictException& wce) {
        delete txn->releaseRecoveryUnit();
        txn->setRecoveryUnit(realRecoveryUnit, realRUstate);
        log() << "got conflict truncating capped, ignoring";
        return 0;
    } catch (...) {
        delete txn->releaseRecoveryUnit();
        txn->setRecoveryUnit(realRecoveryUnit, realRUstate);
        throw;
    }

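    // Restore the caller's original recovery unit before returning the number
    // of documents removed.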
    delete txn->releaseRecoveryUnit();
    txn->setRecoveryUnit(realRecoveryUnit, realRUstate);
    return docsRemoved;
}
Example #3: backup (periodically take full and incremental backups; after an incremental backup, truncate the copied log files)
/*
 * backup --
 *	Periodically do a backup and verify it.
 */
void *
backup(void *arg)
{
	WT_CONNECTION *conn;
	WT_CURSOR *backup_cursor;
	WT_DECL_RET;
	WT_SESSION *session;
	u_int incremental, period;
	bool full;
	const char *config, *key;

	(void)(arg);

	conn = g.wts_conn;

	/* Backups aren't supported for non-standard data sources. */
	if (DATASOURCE("helium") || DATASOURCE("kvsbdb"))
		return (NULL);

	/* Open a session. */
	testutil_check(conn->open_session(conn, NULL, NULL, &session));

	/*
	 * Perform a full backup at some point in the first 10 seconds (that
	 * way there's at least one), then at larger intervals; between full
	 * backups, optionally do incremental backups.
	 */
	incremental = 0;
	for (period = mmrand(NULL, 1, 10);; period = mmrand(NULL, 20, 45)) {
		/* Sleep for short periods so we don't make the run wait. */
		while (period > 0 && !g.workers_finished) {
			--period;
			sleep(1);
		}

		/*
		 * We can't drop named checkpoints while there's a backup in
		 * progress, so serialize backups with named checkpoints. Wait
		 * for any running checkpoint to complete, otherwise backups
		 * might be starved out.
		 */
		testutil_check(pthread_rwlock_wrlock(&g.backup_lock));
		if (g.workers_finished) {
			testutil_check(pthread_rwlock_unlock(&g.backup_lock));
			break;
		}

		if (incremental) {
			config = "target=(\"log:\")";
			full = false;
		} else {
			/* Re-create the backup directory. */
			testutil_checkfmt(
			    system(g.home_backup_init),
			    "%s", "backup directory creation failed");

			config = NULL;
			full = true;
		}

		/*
		 * open_cursor can return EBUSY if concurrent with a metadata
		 * operation, retry in that case.
		 */
		while ((ret = session->open_cursor(
		    session, "backup:", NULL, config, &backup_cursor)) == EBUSY)
			__wt_yield();
		if (ret != 0)
			testutil_die(ret, "session.open_cursor: backup");

		while ((ret = backup_cursor->next(backup_cursor)) == 0) {
			testutil_check(
			    backup_cursor->get_key(backup_cursor, &key));
			copy_file(session, key);
		}
		if (ret != WT_NOTFOUND)
			testutil_die(ret, "backup-cursor");

		/* After an incremental backup, truncate the log files. */
		if (incremental)
			testutil_check(session->truncate(
			    session, "log:", backup_cursor, NULL, NULL));

		testutil_check(backup_cursor->close(backup_cursor));
		testutil_check(pthread_rwlock_unlock(&g.backup_lock));

		/*
		 * If automatic log archival isn't configured, optionally do
		 * incremental backups after each full backup. If we're not
		 * doing any more incrementals, verify the backup (we can't
		 * verify intermediate states: once we perform recovery on the
		 * backup database, we can't take any more incremental
		 * backups).
		 */
		if (full)
			incremental =
			    g.c_logging_archive ? 1 : mmrand(NULL, 1, 5);
		if (--incremental == 0)
			check_copy();
	}

	if (incremental != 0)
		check_copy();

	testutil_check(session->close(session, NULL));

	return (NULL);
}
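
For reference, below is a minimal, self-contained sketch of the two WT_SESSION::truncate call shapes the examples above rely on: a cursor-bounded range truncate (Examples #1 and #2) and a URI-based truncate (Example #3 combines the "log:" URI with a backup cursor). It is not taken from either code base; the database home "WT_HOME", the table "table:demo", and its key/value formats are assumptions made for illustration.

/*
 * truncate_sketch --
 *	Hypothetical, minimal demonstration of WT_SESSION::truncate.
 */
#include <stdlib.h>
#include <wiredtiger.h>

int
main(void)
{
	WT_CONNECTION *conn;
	WT_CURSOR *start;
	WT_SESSION *session;
	int ret;

	/* "WT_HOME" must exist; "table:demo" is created here for the demo. */
	if (wiredtiger_open("WT_HOME", NULL, "create", &conn) != 0)
		return (EXIT_FAILURE);
	if (conn->open_session(conn, NULL, NULL, &session) != 0)
		return (EXIT_FAILURE);
	if (session->create(session,
	    "table:demo", "key_format=q,value_format=S") != 0)
		return (EXIT_FAILURE);

	/*
	 * Cursor-bounded truncate, as in Example #1: position a cursor on the
	 * first record and pass NULL for the stop cursor, meaning "through
	 * the last record in the object".
	 */
	if (session->open_cursor(session,
	    "table:demo", NULL, NULL, &start) != 0)
		return (EXIT_FAILURE);
	if ((ret = start->next(start)) == 0)
		ret = session->truncate(session, NULL, start, NULL, NULL);
	else if (ret == WT_NOTFOUND)
		ret = 0;	/* Empty table: nothing to truncate. */
	if (ret != 0 || start->close(start) != 0)
		return (EXIT_FAILURE);

	/*
	 * URI-based truncate: name the object and pass NULL cursors to remove
	 * all of its content. (Example #3 truncates the "log:" URI, bounded
	 * by a backup cursor, to discard log files already copied.)
	 */
	if (session->truncate(session, "table:demo", NULL, NULL, NULL) != 0)
		return (EXIT_FAILURE);

	return (conn->close(conn, NULL) == 0 ? EXIT_SUCCESS : EXIT_FAILURE);
}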