Example #1
int move_range(const std::string &min_key, const std::string &max_key, int limit, std::string *moved_max_key){
	// get key range
	std::vector<std::string> keys;
	ssdb::Status s = src->keys(min_key, max_key, limit, &keys);
	if(!s.ok()){
		log_error("response error: %s", s.code().c_str());
		return -1;
	}
	if(keys.empty()){
		return 0;
	}
	if(moved_max_key){
		*moved_max_key = keys[keys.size() - 1];

		// lock key range
		log_info("lock range %s", KeyRange(min_key, *moved_max_key).str().c_str());
		const std::vector<std::string> *resp = src->request("set_kv_range", *moved_max_key, max_key);
		if(!resp || resp->empty() || resp->at(0) != "ok"){
			log_error("src server set_kv_range error!");
			return -1;
		}
	}

	// move key range
	for(int i=0; i<(int)keys.size(); i++){
		const std::string &key = keys[i];
		if(move_key(key) == -1){
			// `s` still holds the status of the earlier keys() call, so it
			// says nothing about why move_key() failed; don't log it here.
			log_fatal("move key %s error!", key.c_str());
			exit(1);
		}
	}
	
	return (int)keys.size();
}
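
A minimal driver sketch for move_range(), assuming the ssdb-cpp client, the global src connection, and the move_key() helper used above; the batch size and empty-string bounds are illustrative. SSDB's keys() is exclusive on its start bound, so feeding moved_max_key back in as the next min_key neither skips nor repeats keys.

// Drain the whole keyspace from src in batches by calling move_range()
// until it reports an empty range. Illustrative sketch, not part of the
// original tool.
int migrate_all(){
	std::string min_key = "";       // start below the lowest key
	const std::string max_key = ""; // empty means "no upper bound"
	const int batch = 1000;         // keys per round trip (illustrative)
	int total = 0;
	while(1){
		std::string moved_max_key;
		int n = move_range(min_key, max_key, batch, &moved_max_key);
		if(n == -1){
			return -1;              // request failed; caller decides whether to retry
		}
		if(n == 0){
			break;                  // range drained
		}
		total += n;
		min_key = moved_max_key;    // resume just after the last moved key
	}
	return total;
}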
Example #2
    /**
     * Outline of the delete process:
     * 1. Initialize the client for this thread if there is no client. This is for the worker
     *    threads that are not attached to any of the threads servicing client requests.
     * 2. Grant this thread authorization to perform deletes.
     * 3. Temporarily enable mode to bypass shard version checks. TODO: Replace this hack.
     * 4. Set up the callback to save deletes to the moveChunk directory (only if moveParanoia is true).
     * 5. Delete range.
     * 6. Wait until the majority of the secondaries catch up.
     */
    bool RangeDeleterDBEnv::deleteRange(OperationContext* txn,
                                        const RangeDeleteEntry& taskDetails,
                                        long long int* deletedDocs,
                                        std::string* errMsg) {
        const string ns(taskDetails.options.range.ns);
        const BSONObj inclusiveLower(taskDetails.options.range.minKey);
        const BSONObj exclusiveUpper(taskDetails.options.range.maxKey);
        const BSONObj keyPattern(taskDetails.options.range.keyPattern);
        const WriteConcernOptions writeConcern(taskDetails.options.writeConcern);
        const bool fromMigrate = taskDetails.options.fromMigrate;
        const bool onlyRemoveOrphans = taskDetails.options.onlyRemoveOrphanedDocs;

        const bool initiallyHaveClient = haveClient();

        if (!initiallyHaveClient) {
            Client::initThread("RangeDeleter");
        }

        *deletedDocs = 0;
        ShardForceVersionOkModeBlock forceVersion;
        {
            Helpers::RemoveSaver removeSaver("moveChunk",
                                             ns,
                                             taskDetails.options.removeSaverReason);
            Helpers::RemoveSaver* removeSaverPtr = NULL;
            if (serverGlobalParams.moveParanoia &&
                    !taskDetails.options.removeSaverReason.empty()) {
                removeSaverPtr = &removeSaver;
            }

            // log the opId so the user can use it to cancel the delete using killOp.
            unsigned int opId = txn->getCurOp()->opNum();
            log() << "Deleter starting delete for: " << ns
                  << " from " << inclusiveLower
                  << " -> " << exclusiveUpper
                  << ", with opId: " << opId
                  << endl;

            try {
                *deletedDocs =
                        Helpers::removeRange(txn,
                                             KeyRange(ns,
                                                      inclusiveLower,
                                                      exclusiveUpper,
                                                      keyPattern),
                                             false, /*maxInclusive*/
                                             writeConcern,
                                             removeSaverPtr,
                                             fromMigrate,
                                             onlyRemoveOrphans);

                if (*deletedDocs < 0) {
                    *errMsg = "collection or index dropped before data could be cleaned";
                    warning() << *errMsg << endl;

                    if (!initiallyHaveClient) {
                        txn->getClient()->shutdown();
                    }

                    return false;
                }

                log() << "rangeDeleter deleted " << *deletedDocs
                      << " documents for " << ns
                      << " from " << inclusiveLower
                      << " -> " << exclusiveUpper
                      << endl;
            }
            catch (const DBException& ex) {
                *errMsg = str::stream() << "Error encountered while deleting range: "
                                        << "ns: " << ns
                                        << " from " << inclusiveLower
                                        << " -> " << exclusiveUpper
                                        << ", caused by: " << causedBy(ex);

                if (!initiallyHaveClient) {
                    txn->getClient()->shutdown();
                }

                return false;
            }
        }

        if (!initiallyHaveClient) {
            txn->getClient()->shutdown();
        }

        return true;
    }
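
One design note: the conditional client shutdown is repeated on all three exit paths above, which is easy to miss when another early return is added. A generic RAII scope-guard sketch (illustrative, not MongoDB's API) would collapse the three copies into one declaration:

#include <functional>
#include <utility>

// Runs the stored callable when the enclosing scope exits, on every
// return path. ScopeGuard is an illustrative name, not a MongoDB type.
class ScopeGuard {
public:
    explicit ScopeGuard(std::function<void()> fn) : fn_(std::move(fn)) {}
    ~ScopeGuard() { if (fn_) fn_(); }
    ScopeGuard(const ScopeGuard&) = delete;
    ScopeGuard& operator=(const ScopeGuard&) = delete;
private:
    std::function<void()> fn_;
};

// Inside deleteRange() a single guard would replace every conditional
// shutdown:
//     ScopeGuard cleanup([&] {
//         if (!initiallyHaveClient) {
//             txn->getClient()->shutdown();
//         }
//     });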
Example #3
    /**
     * Outline of the delete process:
     * 1. Initialize the client for this thread if there is no client. This is for the worker
     *    threads that are not attached to any of the threads servicing client requests.
     * 2. Grant this thread authorization to perform deletes.
     * 3. Temporarily enable mode to bypass shard version checks. TODO: Replace this hack.
     * 4. Set up the callback to save deletes to the moveChunk directory (only if moveParanoia is true).
     * 5. Delete range.
     * 6. Wait until the majority of the secondaries catch up.
     */
    bool RangeDeleterDBEnv::deleteRange(const StringData& ns,
                                        const BSONObj& inclusiveLower,
                                        const BSONObj& exclusiveUpper,
                                        const BSONObj& keyPattern,
                                        bool secondaryThrottle,
                                        std::string* errMsg) {
        const bool initiallyHaveClient = haveClient();

        if (!initiallyHaveClient) {
            Client::initThread("RangeDeleter");
        }

        ShardForceVersionOkModeBlock forceVersion;
        {
            Helpers::RemoveSaver removeSaver("moveChunk", ns.toString(), "post-cleanup");

            // log the opId so the user can use it to cancel the delete using killOp.
            unsigned int opId = cc().curop()->opNum();
            log() << "Deleter starting delete for: " << ns
                  << " from " << inclusiveLower
                  << " -> " << exclusiveUpper
                  << ", with opId: " << opId
                  << endl;

            try {
                long long numDeleted =
                        Helpers::removeRange(KeyRange(ns.toString(),
                                                      inclusiveLower,
                                                      exclusiveUpper,
                                                      keyPattern),
                                             false, /*maxInclusive*/
                                             replSet ? secondaryThrottle : false,
                                             serverGlobalParams.moveParanoia ? &removeSaver : NULL,
                                             true, /*fromMigrate*/
                                             true); /*onlyRemoveOrphans*/

                if (numDeleted < 0) {
                    warning() << "collection or index dropped "
                              << "before data could be cleaned" << endl;

                    if (!initiallyHaveClient) {
                        cc().shutdown();
                    }

                    return false;
                }

                log() << "rangeDeleter deleted " << numDeleted
                      << " documents for " << ns
                      << " from " << inclusiveLower
                      << " -> " << exclusiveUpper
                      << endl;
            }
            catch (const DBException& ex) {
                *errMsg = str::stream() << "Error encountered while deleting range: "
                                        << "ns: " << ns
                                        << " from " << inclusiveLower
                                        << " -> " << exclusiveUpper
                                        << ", caused by: " << causedBy(ex);

                if (!initiallyHaveClient) {
                    cc().shutdown();
                }

                return false;
            }
        }

        if (replSet) {
            Timer elapsedTime;
            ReplTime lastOpApplied = cc().getLastOp().asDate();
            while (!opReplicatedEnough(lastOpApplied,
                                       BSON("w" << "majority").firstElement())) {
                if (elapsedTime.seconds() >= 3600) {
                    *errMsg = str::stream() << "moveChunk repl sync timed out after "
                                            << elapsedTime.seconds() << " seconds";

                    if (!initiallyHaveClient) {
                        cc().shutdown();
                    }

                    return false;
                }

                sleepsecs(1);
            }

            LOG(elapsedTime.seconds() < 30 ? 1 : 0)
                << "moveChunk repl sync took "
                << elapsedTime.seconds() << " seconds" << endl;
        }

        if (!initiallyHaveClient) {
            cc().shutdown();
        }

        return true;
    }
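
The replication wait in step 6 is a plain poll-with-timeout loop: test a condition, sleep one second, give up after 3600 seconds. A standalone sketch of the same pattern in portable C++ (waitUntil() is an illustrative helper, not MongoDB API):

#include <chrono>
#include <functional>
#include <thread>

// Polls done() at a fixed interval until it returns true or the deadline
// passes. Returns false on timeout.
bool waitUntil(const std::function<bool()>& done,
               std::chrono::seconds timeout,
               std::chrono::seconds pollInterval = std::chrono::seconds(1)) {
    const auto deadline = std::chrono::steady_clock::now() + timeout;
    while (!done()) {
        if (std::chrono::steady_clock::now() >= deadline) {
            return false;   // timed out before the condition held
        }
        std::this_thread::sleep_for(pollInterval);
    }
    return true;
}

// waitUntil(isMajorityReplicated, std::chrono::hours(1)) mirrors the
// 3600-second loop above; isMajorityReplicated is a hypothetical predicate.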