/**
 * Converts a list of nodes into a set of nodes.
 *
 * @param list Source node list.
 * @return Set containing every node from the list (duplicates collapsed).
 */
TNodesSet nodesListToSet(const TNodesList& list) {
    // Range construction replaces the hand-rolled element-by-element loop.
    TNodesSet ret(list.begin(), list.end());

    return ret;
}
/**
 * Looks up the node with the given identifier in a node set.
 *
 * The node is expected to be present; absence is a programming error
 * (checked via assert).
 *
 * @param id Identifier of the node to find.
 * @param nodes Set of nodes to search.
 * @return Pointer to a copy of the matching node.
 */
TGridClientNodePtr GridClientPartitionAffinity::findNode(const GridUuid& id, const TNodesSet& nodes) const {
    auto idMatches = [&id](const GridClientNode& node) {
        return node.getNodeId() == id;
    };

    auto iter = find_if(nodes.begin(), nodes.end(), idMatches);

    assert(iter != nodes.end());

    return TGridClientNodePtr(new GridClientNode(*iter));
}
/**
 * Builds a node set from an array of node UUID strings.
 *
 * @param ids Array of node UUID strings.
 * @param cnt Number of entries in the array.
 * @return Set of nodes, one per given identifier.
 */
TNodesSet buildNodeSet(const char* ids[], int cnt) {
    TNodesSet nodes;

    for (int i = 0; i < cnt; ++i) {
        GridClientNode node;
        GridClientNodeMarshallerHelper helper(node);

        // Bug fix: the loop previously indexed an unrelated name 'nodeUuids'
        // instead of the 'ids' parameter, which was left entirely unused.
        helper.setNodeId(GridClientUuid(ids[i]));

        nodes.insert(node);
    }

    return nodes;
}
/**
 * Returns the projection nodes accepted by the given filter.
 *
 * @param filter Predicate applied to each node of this projection.
 * @return List of node pointers for which the filter returned true.
 */
TGridClientNodeList GridClientComputeProjectionImpl::nodes(std::function<bool(const GridClientNode&)> filter) const {
    TGridClientNodeList res;

    TNodesSet projNodes;

    subProjectionNodes(projNodes);

    for (const auto& node : projNodes) {
        TGridClientNodePtr nodePtr(new GridClientNode(node));

        if (filter(*nodePtr))
            res.push_back(nodePtr);
    }

    return res;
}
/**
 * Gets the nodes whose identifiers appear in the given collection. If this
 * compute instance is a projection, only nodes that pass the projection
 * criteria are considered.
 *
 * (Doc fix: the previous comment described the filter-based overload, not
 * this ID-based one.)
 *
 * @param ids Identifiers of the nodes to return.
 * @return Collection of projection nodes whose IDs are contained in ids.
 */
TGridClientNodeList GridClientComputeProjectionImpl::nodes(const std::vector<GridUuid>& ids) const {
    TGridClientNodeList nodes;

    // Set gives a logarithmic membership test per projection node.
    std::set<GridUuid> nodeIds(ids.begin(), ids.end());

    TNodesSet ns;

    subProjectionNodes(ns);

    for (auto it = ns.begin(); it != ns.end(); ++it) {
        // Skip nodes the caller did not ask for.
        if (nodeIds.count(it->getNodeId()) == 0)
            continue;

        nodes.push_back(TGridClientNodePtr(new GridClientNode(*it)));
    }

    return nodes;
}
/**
 * Maps an affinity key to the node responsible for it using a consistent
 * hash ring built over the supplied topology snapshot.
 *
 * The ring ('nodeHash'/'addedNodes') is rebuilt under 'mux' whenever the
 * set of node IDs differs from the one seen on the previous call.
 *
 * @param nodes Current set of topology nodes.
 * @param key Affinity key to map onto a partition/node.
 * @return Pointer to a copy of the node that owns the key's partition.
 */
TGridClientNodePtr GridClientPartitionAffinity::getNode(const TNodesSet& nodes, const GridHasheableObject& key) {
    // Collect just the node IDs to compare against the cached topology.
    set<GridUuid> newNodes;

    for (auto iter = nodes.begin(); iter != nodes.end(); ++iter)
        newNodes.insert(iter->getNodeId());

    GridUuid nodeId;

    {
        // Guards addedNodes and nodeHash; released before findNode below.
        boost::lock_guard<boost::mutex> lock(mux);

        if (addedNodes != newNodes) {
            // Recreate the consistent hash ring.
            addedNodes.clear();
            nodeHash.clear();

            for (auto iter = nodes.begin(); iter != nodes.end(); ++iter) {
                GridClientNode n = *iter;

                add(n, getReplicas(n));
            }
        }

        // Partition number for the key; abs() keeps it in [0, parts).
        int32_t part = abs(key.hashCode() % parts);

        // Candidate node infos (ID + hash-ID resolver value) built fresh on
        // every call, even when the ring itself was not rebuilt.
        std::set<NodeInfo> nInfos;

        for (TNodesSet::const_iterator i = nodes.begin(); i != nodes.end(); i++)
            nInfos.insert(NodeInfo(i->getNodeId(),
                std::shared_ptr<GridHasheableObject>(new GridClientVariantHasheableObject(hashIdResolver(*i)))));

        // Resolve the partition to a node ID on the ring.
        nodeId = nodeHash.node(GridInt32Hasheable(part), nInfos).id();
    }

    return findNode(nodeId, nodes);
}
// Example #7
void GridClientImpl::fireTopologyEvents(const TNodesSet& updNodes, const TNodesSet& prevNodes) {
    ClientNodeComparator nodeComp;

    TNodesSet newNodes;

    // Calculate new nodes.
    std::set_difference(updNodes.begin(), updNodes.end(), prevNodes.begin(), prevNodes.end(),
            std::inserter(newNodes, newNodes.begin()), nodeComp);

    // File onNodeAdded() events.
    for (auto i = newNodes.begin(); i != newNodes.end(); i++)
        fireNodeAdded(*i);

    TNodesSet leftNodes;

    // Calculate left nodes.
    std::set_difference(prevNodes.begin(), prevNodes.end(), updNodes.begin(), updNodes.end(),
            std::inserter(leftNodes, leftNodes.begin()), nodeComp);

    // Fire onNodeRemoved() events.
    for (auto i = leftNodes.begin(); i != leftNodes.end(); i++)
        fireNodeRemoved(*i);
}
// Example #8
/**
 * Refreshes the client's view of the cluster topology.
 *
 * Tries each configured address (routers take precedence over servers) in
 * turn until one topology request succeeds; on success updates the shared
 * topology and fires node added/removed events. If every address fails,
 * logs the last caught GridClientException.
 */
void GridClientImpl::refreshTopology() {
    const GridClientConfiguration& clientCfg = sharedData->clientConfiguration();
    // Routers, when configured, are preferred over direct server addresses.
    TGridClientSocketAddressList addrs = clientCfg.routers().size() > 0
            ? clientCfg.routers()
            : clientCfg.servers();

    if (addrs.empty()) {
        GG_LOG_DEBUG0("Skipping topology refresh (address list is empty).");

        return;
    }

    TGridClientCommandExecutorPtr exec = sharedData->executor();
    bool updated = false;

    GG_LOG_DEBUG0("Started refreshing the topology.");

    // Holds the most recent failure; reported only if no address succeeds.
    GridClientException last;

    // Stop at the first address that yields a successful topology update.
    for (auto it = addrs.begin(); !updated && it != addrs.end(); ++it) {
        try {
            GG_LOG_DEBUG("Refresh address: %s", it->host().c_str());

            GridTopologyRequestCommand topRqst;
            GridClientMessageTopologyResult rslt;
            TNodesSet nodes;

            // Fills the command by the default value.
            topRqst.setIncludeAttributes(false);
            topRqst.setIncludeMetrics(false);
            topRqst.setClientId(id().uuid());

            topRqst.setRequestId(topRqst.generateNewId());

            // Executes the topology command.
            exec->executeTopologyCmd(*it, topRqst, rslt);

            TNodesList nbns = rslt.getNodes();

            // Extract the actual list of nodes.
            nodes.insert(nbns.begin(), nbns.end());

            // Snapshot the old topology before overwriting it, so the
            // added/removed diff below sees both states.
            TNodesSet prevNodes = sharedData->topology()->nodes();

            // Update the topology.
            sharedData->topology()->update(nodes);

            fireTopologyEvents(nodes, prevNodes);

            GG_LOG_DEBUG("Topology size: %d", nodes.size());

            updated = true;
        }
        catch (GridClientException& e) {
        	last = e;
        }
    }

    if (!updated)
        GG_LOG_ERROR("Error refreshing the topology: %s", last.what());

    GG_LOG_DEBUG0("Finished refreshing the topology.");
}