Example #1
Network::TrainingData getDataSet(std::string const& path, size_t nbY, size_t maxPicsPerClass)
{
    std::cout << "Opening " << path << " dataset" << std::endl;
    DIR *datasetDir = opendir(path.c_str());
    if (!datasetDir) {
        std::cerr << "Error while opening dataset directory (" << path << "):" << strerror(errno) << std::endl;
        throw std::runtime_error("Error while loading datas");
    }

    Network::TrainingData data;
    struct dirent *classEntry;
    while ((classEntry = readdir(datasetDir)))
    {
        if (std::string("..") == classEntry->d_name || std::string(".") == classEntry->d_name) {
//            std::cout << "Omitting " << classEntry->d_name << " directory" << std::endl;
            continue;
        }

        std::cout << classEntry->d_name << ": " << std::flush;
        DIR *classDir = opendir((path + "/" + classEntry->d_name).c_str());
        if (!classDir) {
            std::cerr << "Error while opening class directory (" << classEntry->d_name << "):" << strerror(errno) << std::endl;
            throw std::runtime_error("Error while loading datas");
        }
        int y = getYByClass(classEntry->d_name);
        if (y == -1) {
            std::cout << "Bad class name, skipping" << std::endl;
            closedir(classDir);  // don't leak the directory handle when skipping this class
            continue;
        }
        std::cout << "class " << classEntry->d_name << ", y[" << y << "], " << std::flush;
        struct dirent *picEntry;
        size_t i = 0;
        while ((picEntry = readdir(classDir)))
        {
            if (std::string("..") == picEntry->d_name || std::string(".") == picEntry->d_name)
                continue;
            try {
                data.emplace_back(getBatch((path + "/" + classEntry->d_name + "/" + picEntry->d_name).c_str(), y, nbY));
            } catch (std::exception const& e) {
                std::cout << e.what() << std::endl;
                continue;
            }
            ++i;
            if (i == maxPicsPerClass)
                break;
        }
        std::cout << i << " pictures opened" << std::endl;
        closedir(classDir);
    }
    closedir(datasetDir);  // close the top-level dataset directory
    return data;
}
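
A minimal usage sketch for the loader above, assuming the directory layout it expects (one subdirectory per class) and the surrounding codebase's Network::TrainingData type; the path and parameters here are hypothetical:

// Hypothetical caller: load up to 1000 pictures per class for a 10-class problem.
int main()
{
    try {
        Network::TrainingData trainingData = getDataSet("./dataset", 10, 1000);
        std::cout << trainingData.size() << " samples loaded" << std::endl;
    } catch (std::exception const& e) {
        std::cerr << e.what() << std::endl;
        return 1;
    }
    return 0;
}
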
Example #2
// Returns true when 'pos' lies inside another visible tank's 24x24 hit box.
bool Entity::checkTankCollision(cocos2d::Point pos){
    auto gameScene = GameScene::getCurrentGameScene();
    auto player = gameScene->getDefaultPlayer();
    auto enemyCache = gameScene->getEnemyCache();
    // Check every visible enemy tank other than ourselves.
    for (auto enemy : enemyCache->getBatch()->getChildren()) {
        if (enemy->isVisible() && this != enemy) {
            Vec2 point = enemy->getPosition();
            // 24x24 bounding box centred on the enemy's position.
            Rect rect = Rect(point.x - 12, point.y - 12, 24, 24);
            if (rect.containsPoint(pos)) {
                return true;
            }
        }
    }
    // The player tank gets the same 24x24 hit test.
    if (player->isVisible() && this != player) {
        Vec2 point = player->getPosition();
        Rect rect = Rect(point.x - 12, point.y - 12, 24, 24);
        if (rect.containsPoint(pos)) {
            return true;
        }
    }
    return false;
}
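
The 24x24 hit box is computed twice above, once for enemies and once for the player. A hedged refactoring sketch that centralizes the test (helper name hypothetical; assumes the cocos2d Rect/Vec2 APIs already used above):

// Hypothetical helper: true if 'pos' falls inside a 24x24 box centred on 'node'.
static bool hitsTankBox(const cocos2d::Node* node, const cocos2d::Point& pos)
{
    cocos2d::Vec2 point = node->getPosition();
    return cocos2d::Rect(point.x - 12, point.y - 12, 24, 24).containsPoint(pos);
}
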
Example #3
void AsyncResultsMerger::handleBatchResponse(
    const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData, size_t remoteIndex) {
    stdx::lock_guard<stdx::mutex> lk(_mutex);

    auto& remote = _remotes[remoteIndex];

    // Clear the callback handle. This indicates that we are no longer waiting on a response from
    // 'remote'.
    remote.cbHandle = executor::TaskExecutor::CallbackHandle();

    // If we're in the process of shutting down then there's no need to process the batch.
    if (_lifecycleState != kAlive) {
        invariant(_lifecycleState == kKillStarted);

        // Make sure to wake up anyone waiting on '_currentEvent' if we're shutting down.
        signalCurrentEventIfReady_inlock();

        // Make a best effort to parse the response and retrieve the cursor id. We need the cursor
        // id in order to issue a killCursors command against it.
        if (cbData.response.isOK()) {
            auto cursorResponse = parseCursorResponse(cbData.response.getValue().data, remote);
            if (cursorResponse.isOK()) {
                remote.cursorId = cursorResponse.getValue().getCursorId();
            }
        }

        // If we're killed and we're not waiting on any more batches to come back, then we are ready
        // to kill the cursors on the remote hosts and clean up this cursor. Schedule the
        // killCursors command and signal that this cursor is now safe to destroy. We have to
        // promise not to touch any members of this class because 'this' could become invalid as
        // soon as we signal the event.
        if (!haveOutstandingBatchRequests_inlock()) {
            // If the event handle is invalid, then the executor is in the middle of shutting down,
            // and we can't schedule any more work for it to complete.
            if (_killCursorsScheduledEvent.isValid()) {
                scheduleKillCursors_inlock();
                _executor->signalEvent(_killCursorsScheduledEvent);
            }

            _lifecycleState = kKillComplete;
        }

        return;
    }

    // Early returns from this point onwards will signal anyone waiting on an event, if ready()
    // is true.
    ScopeGuard signaller = MakeGuard(&AsyncResultsMerger::signalCurrentEventIfReady_inlock, this);

    StatusWith<CursorResponse> cursorResponseStatus(
        cbData.response.isOK() ? parseCursorResponse(cbData.response.getValue().data, remote)
                               : cbData.response.getStatus());

    if (!cursorResponseStatus.isOK()) {
        auto shard = remote.getShard();
        if (!shard) {
            remote.status = Status(cursorResponseStatus.getStatus().code(),
                                   str::stream() << "Could not find shard " << *remote.shardId
                                                 << " containing host "
                                                 << remote.getTargetHost().toString());
        } else {
            shard->updateReplSetMonitor(remote.getTargetHost(), cursorResponseStatus.getStatus());

            // Retry initial cursor establishment if possible.  Never retry getMores to avoid
            // accidentally skipping results.
            if (!remote.cursorId && remote.retryCount < kMaxNumFailedHostRetryAttempts &&
                shard->isRetriableError(cursorResponseStatus.getStatus().code(),
                                        Shard::RetryPolicy::kIdempotent)) {
                invariant(remote.shardId);
                LOG(1) << "Initial cursor establishment failed with retriable error and will be "
                          "retried"
                       << causedBy(redact(cursorResponseStatus.getStatus()));

                ++remote.retryCount;

                // Since we potentially updated the targeter that the last host it chose might be
                // faulty, the call below may end up getting a different host.
                remote.status = askForNextBatch_inlock(remoteIndex);
                if (remote.status.isOK()) {
                    return;
                }

                // If we end up here, it means we failed to schedule the retry request, which is a
                // more severe error that should not be retried. Just pass through to the error
                // handling logic below.
            } else {
                remote.status = cursorResponseStatus.getStatus();
            }
        }

        // Unreachable host errors are swallowed if the 'allowPartialResults' option is set. We
        // remove the unreachable host entirely from consideration by marking it as exhausted.
        if (_params.isAllowPartialResults) {
            remote.status = Status::OK();

            // Clear the results buffer and cursor id.
            std::queue<BSONObj> emptyBuffer;
            std::swap(remote.docBuffer, emptyBuffer);
            remote.cursorId = 0;
        }

        return;
    }

    // Cursor id successfully established.
    auto cursorResponse = std::move(cursorResponseStatus.getValue());
    remote.cursorId = cursorResponse.getCursorId();
    remote.initialCmdObj = boost::none;

    for (const auto& obj : cursorResponse.getBatch()) {
        // If there's a sort, we're expecting the remote node to give us back a sort key.
        if (!_params.sort.isEmpty() &&
            obj[ClusterClientCursorParams::kSortKeyField].type() != BSONType::Object) {
            remote.status = Status(ErrorCodes::InternalError,
                                   str::stream() << "Missing field '"
                                                 << ClusterClientCursorParams::kSortKeyField
                                                 << "' in document: "
                                                 << obj);
            return;
        }

        remote.docBuffer.push(obj);
        ++remote.fetchedCount;
    }

    // If we're doing a sorted merge, then we have to make sure to put this remote onto the
    // merge queue.
    if (!_params.sort.isEmpty() && !cursorResponse.getBatch().empty()) {
        _mergeQueue.push(remoteIndex);
    }

    // If the cursor is tailable and we just received an empty batch, the next return value should
    // be boost::none in order to indicate the end of the batch.
    if (_params.isTailable && !remote.hasNext()) {
        _eofNext = true;
    }

    // If even after receiving this batch we still don't have anything buffered (i.e. the batchSize
    // was zero), then we can schedule work to retrieve the next batch right away.
    //
    // We do not ask for the next batch if the cursor is tailable, as batches received from remote
    // tailable cursors should be passed through to the client without asking for more batches.
    if (!_params.isTailable && !remote.hasNext() && !remote.exhausted()) {
        remote.status = askForNextBatch_inlock(remoteIndex);
        if (!remote.status.isOK()) {
            return;
        }
    }

    // The ScopeGuard must be dismissed on the success path, but we want the waiter to be
    // signalled on success as well as on failure.
    signaller.Dismiss();
    signalCurrentEventIfReady_inlock();
}
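
The MakeGuard/Dismiss dance above exists because early returns must still wake waiters, while the success path signals explicitly after dismissing the guard. A minimal standalone sketch of that idiom (class and names hypothetical, not MongoDB's actual ScopeGuard):

#include <functional>
#include <iostream>

// Hypothetical minimal scope guard: runs 'fn' on scope exit unless dismissed.
class ScopeGuardSketch {
public:
    explicit ScopeGuardSketch(std::function<void()> fn) : _fn(std::move(fn)) {}
    ~ScopeGuardSketch() { if (_armed) _fn(); }
    void dismiss() { _armed = false; }
private:
    std::function<void()> _fn;
    bool _armed = true;
};

void handleResponse(bool errorPath)
{
    ScopeGuardSketch signaller([] { std::cout << "signal waiters" << std::endl; });
    if (errorPath)
        return;           // early return: the guard fires and waiters are signalled
    signaller.dismiss();  // success path: dismiss the guard, then signal explicitly
    std::cout << "signal waiters" << std::endl;
}
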
Example #4
bool CurrentOpCommandBase::run(OperationContext* opCtx,
                               const std::string& dbName,
                               const BSONObj& cmdObj,
                               BSONObjBuilder& result) {
    // Convert the currentOp command spec into an equivalent aggregation command. This will be
    // of the form {aggregate: 1, pipeline: [{$currentOp: {idleConnections: $all, allUsers:
    // !$ownOps, truncateOps: true}}, {$match: {<filter>}}, {$group: {_id: 0, inprog: {$push:
    // "$$ROOT"}}}], cursor: {}}
    std::vector<BSONObj> pipeline;

    // {$currentOp: {idleConnections: $all, allUsers: !$ownOps, truncateOps: true}}
    BSONObjBuilder currentOpBuilder;
    BSONObjBuilder currentOpSpecBuilder(currentOpBuilder.subobjStart("$currentOp"));

    // If test commands are enabled, then we allow the currentOp commands to specify whether or not
    // to truncate long operations via the '$truncateOps' parameter. Otherwise, we always truncate
    // operations to match the behaviour of the legacy currentOp command.
    const bool truncateOps =
        !getTestCommandsEnabled() || !cmdObj[kTruncateOps] || cmdObj[kTruncateOps].trueValue();

    currentOpSpecBuilder.append("idleConnections", cmdObj[kAll].trueValue());
    currentOpSpecBuilder.append("allUsers", !cmdObj[kOwnOps].trueValue());
    currentOpSpecBuilder.append("truncateOps", truncateOps);
    currentOpSpecBuilder.doneFast();

    pipeline.push_back(currentOpBuilder.obj());

    // {$match: {<user-defined filter>}}
    BSONObjBuilder matchBuilder;
    BSONObjBuilder matchSpecBuilder(matchBuilder.subobjStart("$match"));

    size_t idx = 0;
    for (const auto& elt : cmdObj) {
        const auto fieldName = elt.fieldNameStringData();

        // Skip the command name (the first element) along with known currentOp parameters and
        // generic command arguments; every remaining field is treated as part of the filter.
        if (0 == idx++ || kCurOpCmdParams.count(fieldName) || isGenericArgument(fieldName)) {
            continue;
        }

        matchSpecBuilder.append(elt);
    }

    matchSpecBuilder.doneFast();

    pipeline.push_back(matchBuilder.obj());

    // Perform any required modifications to the pipeline before adding the final $group stage.
    modifyPipeline(&pipeline);

    // {$group: {_id: 0, inprog: {$push: "$$ROOT"}}}
    BSONObjBuilder groupBuilder;
    BSONObjBuilder groupSpecBuilder(groupBuilder.subobjStart("$group"));

    groupSpecBuilder.append("_id", 0);

    BSONObjBuilder inprogSpecBuilder(groupSpecBuilder.subobjStart("inprog"));

    inprogSpecBuilder.append("$push", "$$ROOT");

    inprogSpecBuilder.doneFast();
    groupSpecBuilder.doneFast();

    pipeline.push_back(groupBuilder.obj());

    // Pipeline is complete; create an AggregationRequest for $currentOp.
    const AggregationRequest request(NamespaceString::makeCollectionlessAggregateNSS("admin"),
                                     std::move(pipeline));

    // Run the pipeline and obtain a CursorResponse.
    auto aggResults = uassertStatusOK(runAggregation(opCtx, request));

    if (aggResults.getBatch().empty()) {
        result.append("inprog", BSONArray());
    } else {
        result.append(aggResults.getBatch().front()["inprog"]);
    }

    // Make any final custom additions to the response object.
    appendToResponse(&result);

    return true;
}
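
For reference, the command assembled above has the shape promised by the opening comment; written out in shell syntax (illustrative only — the $match contents depend on the caller's filter, and the $currentOp flags follow the cmdObj values computed above):

// db.adminCommand({
//     aggregate: 1,
//     pipeline: [
//         {$currentOp: {idleConnections: <$all>, allUsers: <!$ownOps>, truncateOps: <bool>}},
//         {$match: {/* user-supplied filter */}},
//         {$group: {_id: 0, inprog: {$push: "$$ROOT"}}}
//     ],
//     cursor: {}
// })
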
Example #5
void AsyncResultsMerger::handleBatchResponse(
    const executor::TaskExecutor::RemoteCommandCallbackArgs& cbData,
    OperationContext* opCtx,
    size_t remoteIndex) {
    stdx::lock_guard<stdx::mutex> lk(_mutex);

    auto& remote = _remotes[remoteIndex];

    // Clear the callback handle. This indicates that we are no longer waiting on a response from
    // 'remote'.
    remote.cbHandle = executor::TaskExecutor::CallbackHandle();

    // If we're in the process of shutting down then there's no need to process the batch.
    if (_lifecycleState != kAlive) {
        invariant(_lifecycleState == kKillStarted);

        // Make sure to wake up anyone waiting on '_currentEvent' if we're shutting down.
        signalCurrentEventIfReady_inlock();

        // If we're killed and we're not waiting on any more batches to come back, then we are ready
        // to kill the cursors on the remote hosts and clean up this cursor. Schedule the
        // killCursors command and signal that this cursor is now safe to destroy. We have to
        // promise not to touch any members of this class because 'this' could become invalid as
        // soon as we signal the event.
        if (!haveOutstandingBatchRequests_inlock()) {
            // If the event handle is invalid, then the executor is in the middle of shutting down,
            // and we can't schedule any more work for it to complete.
            if (_killCursorsScheduledEvent.isValid()) {
                scheduleKillCursors_inlock(opCtx);
                _executor->signalEvent(_killCursorsScheduledEvent);
            }

            _lifecycleState = kKillComplete;
        }

        return;
    }

    // Early returns from this point onwards will signal anyone waiting on an event, if ready()
    // is true.
    ScopeGuard signaller = MakeGuard(&AsyncResultsMerger::signalCurrentEventIfReady_inlock, this);

    StatusWith<CursorResponse> cursorResponseStatus(
        cbData.response.isOK() ? parseCursorResponse(cbData.response.data, remote)
                               : cbData.response.status);

    if (!cursorResponseStatus.isOK()) {
        auto shard = remote.getShard();
        if (!shard) {
            remote.status = Status(cursorResponseStatus.getStatus().code(),
                                   str::stream() << "Could not find shard containing host "
                                                 << remote.getTargetHost().toString());
        } else {
            shard->updateReplSetMonitor(remote.getTargetHost(), cursorResponseStatus.getStatus());
            remote.status = cursorResponseStatus.getStatus();
        }

        // Unreachable host errors are swallowed if the 'allowPartialResults' option is set. We
        // remove the unreachable host entirely from consideration by marking it as exhausted.
        if (_params->isAllowPartialResults) {
            remote.status = Status::OK();

            // Clear the results buffer and cursor id.
            std::queue<ClusterQueryResult> emptyBuffer;
            std::swap(remote.docBuffer, emptyBuffer);
            remote.cursorId = 0;
        }

        return;
    }

    // Response successfully received.

    auto cursorResponse = std::move(cursorResponseStatus.getValue());

    // Update the cursorId; it is sent as '0' when the cursor has been exhausted on the shard.
    remote.cursorId = cursorResponse.getCursorId();

    // Save the batch in the remote's buffer.
    if (!addBatchToBuffer(remoteIndex, cursorResponse.getBatch())) {
        return;
    }

    // If the cursor is tailable and we just received an empty batch, the next return value should
    // be boost::none in order to indicate the end of the batch.
    // (Note: tailable cursors are only valid on unsharded collections, so the end of the batch from
    // one shard means the end of the overall batch).
    if (_params->isTailable && !remote.hasNext()) {
        _eofNext = true;
    }

    // If even after receiving this batch we still don't have anything buffered (i.e. the batchSize
    // was zero), then we can schedule work to retrieve the next batch right away.
    //
    // We do not ask for the next batch if the cursor is tailable, as batches received from remote
    // tailable cursors should be passed through to the client without asking for more batches.
    if (!_params->isTailable && !remote.hasNext() && !remote.exhausted()) {
        remote.status = askForNextBatch_inlock(opCtx, remoteIndex);
        if (!remote.status.isOK()) {
            return;
        }
    }

    // The ScopeGuard must be dismissed on the success path, but we want the waiter to be
    // signalled on success as well as on failure.
    signaller.Dismiss();
    signalCurrentEventIfReady_inlock();
}
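
Example #5 factors Example #3's inline per-document loop into addBatchToBuffer. A hedged reconstruction of what that helper plausibly does, based on Example #3's logic rather than the actual implementation:

// Hypothetical reconstruction: buffer one remote's batch, validating sort keys.
// Returns false after recording an error status on the remote.
bool AsyncResultsMerger::addBatchToBuffer(size_t remoteIndex,
                                          const std::vector<BSONObj>& batch) {
    auto& remote = _remotes[remoteIndex];
    for (const auto& obj : batch) {
        // With a sorted merge, every remote document must carry a sort key.
        if (!_params->sort.isEmpty() &&
            obj[ClusterClientCursorParams::kSortKeyField].type() != BSONType::Object) {
            remote.status = Status(ErrorCodes::InternalError,
                                   str::stream() << "Missing field '"
                                                 << ClusterClientCursorParams::kSortKeyField
                                                 << "' in document: " << obj);
            return false;
        }
        remote.docBuffer.push(obj);
        ++remote.fetchedCount;
    }
    // A sorted merge also needs this remote queued for the merge step.
    if (!_params->sort.isEmpty() && !batch.empty()) {
        _mergeQueue.push(remoteIndex);
    }
    return true;
}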