Example #1
void MESIProtocol::writeMissHandler(SMPMemRequest *sreq) 
{
  PAddr addr = sreq->getPAddr();
  Line *l = pCache->getLine(addr);
#ifdef TLS
  if (l && sreq->isDataReq()) {
    tls::Epoch *eph = sreq->getOriginalRequest()->getEpoch();
    // Ensure the MESI protocol doesn't locate a line in this cache
    // if the epochs are out of order: drop the line unless its epoch
    // clock is strictly older than the requesting epoch's
    if (l->getEpoch()->getClock() >= eph->getClock())
      l = 0;
  }
  sendWriteMissAck(sreq);
#else
  if(l && !l->isLocked()) {
    combineResponses(sreq, (MESIState_t) l->getState());
    pCache->invalidateLine(addr, sendWriteMissAckCB::create(this, sreq));
    return;
  } else {
    sendWriteMissAck(sreq);
  }
#endif
  //sendData(sreq);
}
Example #2
void MESIProtocol::invalidateHandlerTLS(SMPMemRequest *sreq) 
{
  PAddr addr = sreq->getPAddr();
  Line *l = pCache->getLine(addr);
  
  // Unlike invalidateHandler (Example #3), the line/lock check is
  // skipped here: under TLS the line is assumed to be present in this
  // cache, so l is dereferenced unconditionally
  combineResponses(sreq, (MESIState_t) l->getState());
  pCache->invalidateLine(addr, dummy1CB::create(this, sreq->getOriginalRequest()));
}
Example #3
void MESIProtocol::invalidateHandler(SMPMemRequest *sreq) 
{
  PAddr addr = sreq->getPAddr();
  Line *l = pCache->getLine(addr);
  
  if(l && !l->isLocked()) {
    combineResponses(sreq, (MESIState_t) l->getState());
    pCache->invalidateLine(addr, sendInvalidateAckCB::create(this, sreq));
    return;
  } else {
    sendInvalidateAck(sreq);
  } 
 
}
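
In Examples #1 and #3 the acknowledgement is not sent inline on the invalidation path: pCache->invalidateLine() takes a callback object (sendWriteMissAckCB, sendInvalidateAckCB) that fires once the invalidation completes. Below is a minimal sketch of how such a callback type is typically declared with SESC's callback.h templates; the exact typedef is an assumption, not copied from the source:

// Assumed declaration following SESC's CallbackMember1 pattern:
// sendInvalidateAckCB::create(this, sreq) allocates a callback that
// invokes this->sendInvalidateAck(sreq) when it is scheduled, so the
// ack goes out only after pCache->invalidateLine() has finished.
typedef CallbackMember1<MESIProtocol,
                        SMPMemRequest *,
                        &MESIProtocol::sendInvalidateAck> sendInvalidateAckCB;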
Example #4
void MESIProtocol::readMissHandler(SMPMemRequest *sreq) 
{
  PAddr addr = sreq->getPAddr();
  Line *l = pCache->getLine(addr);
#ifdef TLS
  if (l && sreq->isDataReq()) {
    tls::Epoch *eph = sreq->getOriginalRequest()->getEpoch();
    // Ensure the MESI protocol doesn't locate a line in this cache
    // if the epochs are out of order: drop the line unless its epoch
    // clock is strictly older than the requesting epoch's
    if (l->getEpoch()->getClock() >= eph->getClock())
      l = 0;
  }
#endif
  if(l && !l->isLocked()) {
    combineResponses(sreq, (MESIState_t) l->getState());
    changeState(l, MESI_SHARED);
  } 

  sendReadMissAck(sreq);
  //sendData(sreq);
}
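
The epoch guard above is duplicated verbatim in writeMissHandler (Example #1). Below is a minimal sketch of a shared helper both handlers could call instead; the helper name is hypothetical, while the Line and tls::Epoch accessors come from the code above:

// Hypothetical helper (not in the original source): true when the
// line's epoch clock is not strictly older than the requesting
// epoch's, in which case the handlers must treat the line as absent
// (they set l = 0 so the miss is not serviced from this cache).
static bool epochTooNew(Line *l, tls::Epoch *eph)
{
  return l->getEpoch()->getClock() >= eph->getClock();
}

The guard in each handler would then reduce to: if (l && sreq->isDataReq() && epochTooNew(l, sreq->getOriginalRequest()->getEpoch())) l = 0;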
Example #5
    /**
     * The core config write functionality.
     *
     * Config writes run in two passes - the first is a quick check to ensure the config servers
     * are all reachable, the second runs the actual write.
     *
     * TODO: Upgrade and move this logic to the config servers; a state machine implementation
     * is probably the next step.
     */
    void ConfigCoordinator::executeBatch( const BatchedCommandRequest& clientRequest,
                                          BatchedCommandResponse* clientResponse,
                                          bool fsyncCheck ) {

        NamespaceString nss( clientRequest.getNS() );
        dassert( nss.db() == "config" || nss.db() == "admin" );
        dassert( clientRequest.sizeWriteOps() == 1u );

        if ( fsyncCheck ) {

            //
            // Sanity check that all configs are still reachable using fsync, preserving legacy
            // behavior
            //

            OwnedPointerVector<ConfigFsyncResponse> fsyncResponsesOwned;
            vector<ConfigFsyncResponse*>& fsyncResponses = fsyncResponsesOwned.mutableVector();

            //
            // Send side
            //

            for ( vector<ConnectionString>::iterator it = _configHosts.begin();
                it != _configHosts.end(); ++it ) {
                ConnectionString& configHost = *it;
                FsyncRequest fsyncRequest;
                _dispatcher->addCommand( configHost, "admin", fsyncRequest );
            }

            _dispatcher->sendAll();

            //
            // Recv side
            //

            bool fsyncError = false;
            while ( _dispatcher->numPending() > 0 ) {

                fsyncResponses.push_back( new ConfigFsyncResponse() );
                ConfigFsyncResponse& fsyncResponse = *fsyncResponses.back();
                Status dispatchStatus = _dispatcher->recvAny( &fsyncResponse.configHost,
                                                              &fsyncResponse.response );

                // We've got to recv everything, no matter what
                if ( !dispatchStatus.isOK() ) {
                    fsyncError = true;
                    buildFsyncErrorFrom( dispatchStatus, &fsyncResponse.response );
                }
                else if ( !fsyncResponse.response.getOk() ) {
                    fsyncError = true;
                }
            }

            if ( fsyncError ) {
                combineFsyncErrors( fsyncResponses, clientResponse );
                return;
            }
            else {
                fsyncResponsesOwned.clear();
            }
        }

        //
        // Do the actual writes
        //

        BatchedCommandRequest configRequest( clientRequest.getBatchType() );
        clientRequest.cloneTo( &configRequest );
        configRequest.setNS( nss.coll() );

        OwnedPointerVector<ConfigResponse> responsesOwned;
        vector<ConfigResponse*>& responses = responsesOwned.mutableVector();

        //
        // Send the actual config writes
        //

        // Get as many batches as we can at once
        for ( vector<ConnectionString>::iterator it = _configHosts.begin();
            it != _configHosts.end(); ++it ) {
            ConnectionString& configHost = *it;
            _dispatcher->addCommand( configHost, nss.db(), configRequest );
        }

        // Send them all out
        _dispatcher->sendAll();

        //
        // Recv side
        //

        while ( _dispatcher->numPending() > 0 ) {

            // Get the response
            responses.push_back( new ConfigResponse() );
            ConfigResponse& configResponse = *responses.back();
            Status dispatchStatus = _dispatcher->recvAny( &configResponse.configHost,
                                                          &configResponse.response );

            if ( !dispatchStatus.isOK() ) {
                buildErrorFrom( dispatchStatus, &configResponse.response );
            }
        }

        combineResponses( responses, clientResponse );
    }
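
A hedged usage sketch of executeBatch() follows; the request-building calls and the coordinator variable are assumptions based on the dasserts above, not the real call site. The constraints are: exactly one write op per batch, a namespace under "config" (or "admin"), and optionally the fsync pre-check:

// Hypothetical caller: single-op insert batch against a config
// collection, with the legacy fsync reachability check enabled.
BatchedCommandRequest request( BatchedCommandRequest::BatchType_Insert );
request.setNS( "config.version" );
request.getInsertRequest()->addToDocuments( BSON( "_id" << 1 ) );

BatchedCommandResponse response;
coordinator.executeBatch( request, &response, true /* fsyncCheck */ );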