Example 1
InvocationResponse ClientImpl::invoke(Procedure &proc) throw (voltdb::Exception, voltdb::NoConnectionsException, voltdb::UninitializedParamsException, voltdb::LibEventException) {
    if (m_bevs.empty()) {
        throw voltdb::NoConnectionsException();
    }
    int32_t messageSize = proc.getSerializedSize();
    ScopedByteBuffer sbb(messageSize);
    int64_t clientData = m_nextRequestId++;
    proc.serializeTo(&sbb, clientData);
    // Pick the next connection round-robin and register a callback that copies the response out
    struct bufferevent *bev = m_bevs[m_nextConnectionIndex++ % m_bevs.size()];
    InvocationResponse response;
    boost::shared_ptr<ProcedureCallback> callback(new SyncCallback(&response));
    struct evbuffer *evbuf = bufferevent_get_output(bev);
    if (evbuffer_add(evbuf, sbb.bytes(), static_cast<size_t>(sbb.remaining()))) {
        throw voltdb::LibEventException();
    }
    m_outstandingRequests++;
    (*m_callbacks[bev])[clientData] = callback;
    // Run the libevent loop; it is broken once the response for this request has been processed
    if (event_base_dispatch(m_base) == -1) {
        throw voltdb::LibEventException();
    }
    m_loopBreakRequested = false;
    return response;
}
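
The synchronous invoke() above serializes the procedure, queues it on the next connection in round-robin order, and blocks in event_base_dispatch() until the SyncCallback has captured the response. Below is a minimal usage sketch based on the public VoltDB C++ client API; the host, credentials, procedure name ("Insert"), and parameter types are illustrative assumptions, not taken from the code above.

#include <iostream>
#include <vector>
#include "Client.h"
#include "Parameter.hpp"
#include "ParameterSet.hpp"
#include "WireType.h"

int main() {
    // Assumed connection details; replace with real ones.
    voltdb::ClientConfig config("myusername", "mypassword");
    voltdb::Client client = voltdb::Client::create(config);
    client.createConnection("localhost");

    // One VARCHAR parameter for a hypothetical "Insert" procedure.
    std::vector<voltdb::Parameter> paramTypes;
    paramTypes.push_back(voltdb::Parameter(voltdb::WIRE_TYPE_STRING));
    voltdb::Procedure procedure("Insert", paramTypes);
    procedure.params()->addString("Hello");

    // Blocks inside ClientImpl::invoke() until the response arrives.
    voltdb::InvocationResponse response = client.invoke(procedure);
    if (response.failure()) {
        std::cerr << response.toString() << std::endl;
        return 1;
    }
    return 0;
}
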
Example 2
void ClientImpl::invoke(Procedure &proc, boost::shared_ptr<ProcedureCallback> callback) throw (voltdb::Exception, voltdb::NoConnectionsException, voltdb::UninitializedParamsException, voltdb::LibEventException, voltdb::ElasticModeMismatchException) {
    if (callback.get() == NULL) {
        throw voltdb::NullPointerException();
    }
    if (m_bevs.empty()) {
        throw voltdb::NoConnectionsException();
    }

    if (m_outstandingRequests >= m_maxOutstandingRequests) {
        if (m_listener.get() != NULL) {
            try {
                m_listener->backpressure(true);
            } catch (const std::exception& e) {
                std::cerr << "Exception thrown on invocation of backpressure callback: " << e.what() << std::endl;
            }
        }
        // We are overloaded, so reject the request and notify the caller
        callback->abandon(ProcedureCallback::TOO_BUSY);
        return;
    }

    // Reject the invocation if the hashinator is still in LEGACY (non-elastic) mode
    // and no hashinator update is in progress
    if (!m_distributer.isUpdating() && !m_distributer.isElastic()) {
        //todo: need to remove the connection
        throw voltdb::ElasticModeMismatchException();
    }

    int32_t messageSize = proc.getSerializedSize();
    ScopedByteBuffer sbb(messageSize);
    int64_t clientData = m_nextRequestId++;
    proc.serializeTo(&sbb, clientData);

    /*
     * Decide what connection to buffer the event on.
     * First each connection is checked for backpressure. If there is a connection
     * with no backpressure break.
     *
     *  If none can be found, notify the client application and let it decide whether to queue anyways
     *  or run the event loop until there is no more backpressure.
     *
     *  If queuing anyways just pick the next connection.
     *
     *  If waiting for no more backpressure, then set the m_invocationBlockedOnBackpressure flag
     *  so that the write callback knows to break the event loop once there is a connection with no backpressure and
     *  then enter the event loop.
     *  It is possible for the event loop to break early while there is still backpressure because an unrelated callback
     *  may have been invoked and that unrelated callback may have requested that the event loop be broken. In that
     *  case just queue the request to the next connection despite backpressure so that loop break occurs as requested.
     *  Also set the m_invocationBlockedOnBackpressure flag back to false so that the write callback won't spuriously
     *  break the event loop later.
     */
    struct bufferevent *bev = NULL;
    while (true) {
        if (m_ignoreBackpressure) {
            bev = m_bevs[++m_nextConnectionIndex % m_bevs.size()];
            break;
        }

        //Assume backpressure if the number of outstanding requests is too large, i.e. leave bev == NULL
        if (m_outstandingRequests <= m_maxOutstandingRequests) {
            for (size_t ii = 0; ii < m_bevs.size(); ii++) {
                bev = m_bevs[++m_nextConnectionIndex % m_bevs.size()];
                if (m_backpressuredBevs.find(bev) != m_backpressuredBevs.end()) {
                    bev = NULL;
                } else {
                    break;
                }
            }
        }

        if (bev) {
            break;
        } else {
            bool callEventLoop = true;
            if (m_listener.get() != NULL) {
                try {
                    m_ignoreBackpressure = true;
                    callEventLoop = !m_listener->backpressure(true);
                    m_ignoreBackpressure = false;
                } catch (const std::exception& e) {
                    std::cerr << "Exception thrown on invocation of backpressure callback: " << e.what() << std::endl;
                }
            }
            if (callEventLoop) {
                m_invocationBlockedOnBackpressure = true;
                if (event_base_dispatch(m_base) == -1) {
                    throw voltdb::LibEventException();
                }
                if (m_loopBreakRequested) {
                    m_loopBreakRequested = false;
                    m_invocationBlockedOnBackpressure = false;
                    bev = m_bevs[++m_nextConnectionIndex % m_bevs.size()];
                }
            } else {
                bev = m_bevs[++m_nextConnectionIndex % m_bevs.size()];
                break;
            }
        }
    }

    // Route the transaction to the correct connection if client affinity is enabled
    // and a hashinator update is not in progress
    if (m_useClientAffinity && !m_distributer.isUpdating()) {
        struct bufferevent *routed_bev = routeProcedure(proc, sbb);
        // Use the routed connection only if it is still valid and has not been removed due to a lost connection
        if ((routed_bev) && (m_callbacks.find(routed_bev) != m_callbacks.end()))
            bev = routed_bev;
    }

    struct evbuffer *evbuf = bufferevent_get_output(bev);
    if (evbuffer_add(evbuf, sbb.bytes(), static_cast<size_t>(sbb.remaining()))) {
        throw voltdb::LibEventException();
    }
    m_outstandingRequests++;
    (*m_callbacks[bev])[clientData] = callback;

    // Mark this connection as backpressured once more than 256KB is buffered on it
    if (evbuffer_get_length(evbuf) > 262144) {
        m_backpressuredBevs.insert(bev);
    }

    return;
}
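
The asynchronous invoke() above adds connection-level backpressure handling and client-affinity routing; the response is delivered later, from the event loop, to the supplied ProcedureCallback. Below is a minimal usage sketch, again based on the public VoltDB C++ client API; the callback class, procedure, and connection details are illustrative assumptions, and it assumes ProcedureCallback only requires callback() to be overridden.

#include <iostream>
#include <vector>
#include <boost/shared_ptr.hpp>
#include "Client.h"
#include "Parameter.hpp"
#include "ParameterSet.hpp"
#include "ProcedureCallback.hpp"
#include "WireType.h"

// Hypothetical callback: just report failures.
class PrintingCallback : public voltdb::ProcedureCallback {
public:
    bool callback(voltdb::InvocationResponse response) throw (voltdb::Exception) {
        if (response.failure()) {
            std::cerr << response.toString() << std::endl;
        }
        // Returning true asks the client to break the event loop after this callback.
        return true;
    }
};

int main() {
    voltdb::ClientConfig config("myusername", "mypassword");
    voltdb::Client client = voltdb::Client::create(config);
    client.createConnection("localhost");

    std::vector<voltdb::Parameter> paramTypes;
    paramTypes.push_back(voltdb::Parameter(voltdb::WIRE_TYPE_STRING));
    voltdb::Procedure procedure("Insert", paramTypes);
    procedure.params()->addString("Hello");

    // Queues the request on a connection chosen by the code above and returns immediately.
    boost::shared_ptr<voltdb::ProcedureCallback> cb(new PrintingCallback());
    client.invoke(procedure, cb);

    // Run the event loop until all outstanding requests have been answered.
    client.drain();
    return 0;
}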