Example #1
void DataManager::onDeletedDataObject(Event * e)
{
	if (!e || !e->hasData())
		return;
	
	DataObjectRefList dObjs = e->getDataObjectList();
	unsigned int n_removed = 0;
	bool cleanup = false; // MOS

	for (DataObjectRefList::iterator it = dObjs.begin(); it != dObjs.end(); it++) {
		/* 
		  Do not remove Node descriptions from the bloomfilter. We do not
		  want to receive old node descriptions again.
		  If the flag in the event is set, it means we should keep the data object
		  in the bloomfilter.
		*/
        DataObjectRef dObj = (*it);
		if (!dObj->isNodeDescription() && !e->getFlags()) {
		  if(localBF->has(dObj)) {
		        HAGGLE_DBG("Removing deleted data object [id=%s] from bloomfilter, #objs=%d\n", 
				   DataObject::idString(dObj).c_str(), localBF->numObjects());
			localBF->remove(dObj);
			n_removed++;
		  } else {
			HAGGLE_DBG("Deleted data object data object [id=%s] not found in bloomfilter\n", dObj->getIdStr());
		  }
		} else {
			HAGGLE_DBG("Keeping deleted data object [id=%s] in bloomfilter\n", dObj->getIdStr());
		}

		if(!dObj->hasValidSignature()) {
		  cleanup = true; // MOS - allow new incoming data object from co-certified node 
		}

// SW: JLM: START CACHE STRATEGY:
        if (cacheStrategy && !cacheStrategy->isDone() && cacheStrategy->isResponsibleForDataObject(dObj)) {
            cacheStrategy->handleDeletedDataObject(dObj); 
        }
// SW: JLM: END CACHE STRATEGY.

// CBMEN, HL - Begin
        // Remove any pending send events for this data object
        HAGGLE_DBG2("Cancelling send events for dObj %s\n", dObj->getIdStr());
        kernel->cancelEvents(EVENT_TYPE_DATAOBJECT_SEND, dObj);
// CBMEN, HL, End

	}
	
	if (n_removed > 0 || cleanup) 
	    kernel->getThisNode()->setBloomfilter(*localBF, setCreateTimeOnBloomfilterUpdate);
}
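/*
   A minimal, self-contained restatement of the keep-or-remove decision made in
   onDeletedDataObject above. Purely illustrative: the node-description check and
   the event keep-flag are passed as plain booleans instead of the Haggle
   DataObject/Event API.
*/
#include <set>
#include <string>

// Stand-in for the local Bloom filter: a set of data object id strings.
typedef std::set<std::string> LocalFilter;

// Node descriptions and objects whose delete event carries the keep flag stay in
// the filter so that old copies are not received (and re-processed) again.
static bool shouldRemoveFromFilter(bool isNodeDescription, bool keepFlagSet)
{
    return !isNodeDescription && !keepFlagSet;
}

// Usage: only rebuild the node's advertised Bloom filter if something was removed.
static unsigned int removeDeleted(LocalFilter &filter, const std::string &id,
                                  bool isNodeDescription, bool keepFlagSet)
{
    if (shouldRemoveFromFilter(isNodeDescription, keepFlagSet))
        return (unsigned int) filter.erase(id); // 1 if the id was present
    return 0;
}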
Example #2
/*
	Public event handler for verified data objects, i.e., data objects
	that have been verified by the SecurityManager.
*/
void DataManager::onVerifiedDataObject(Event *e)
{
	if (!e || !e->hasData())
		return;

	DataObjectRef dObj = e->getDataObject();
	
	if (!dObj) {
		HAGGLE_DBG("Verified data object event without data object!\n");
		return;
	}

	if (dataObjectsReceived.size() >= MAX_DATAOBJECTS_LISTED) {
		dataObjectsReceived.pop_front();
	}
	dataObjectsReceived.push_back(dObj->getIdStr());

	HAGGLE_DBG("%s Received data object [%s]\n", getName(), dObj->getIdStr());

#ifdef DEBUG
	// dObj->print(NULL); // MOS - NULL means print to debug trace
#endif
	if (dObj->getSignatureStatus() == DataObject::SIGNATURE_INVALID) {
		// This data object had a bad signature, we should remove
		// it from the bloomfilter
		HAGGLE_DBG("Data object [%s] had bad signature, removing from bloomfilter\n", dObj->getIdStr());
		localBF->remove(dObj);
		kernel->getThisNode()->setBloomfilter(*localBF, setCreateTimeOnBloomfilterUpdate);
		return;
	}

	if (dObj->getDataState() == DataObject::DATA_STATE_VERIFIED_BAD) {
		HAGGLE_ERR("Data in data object flagged as bad! -- discarding\n");
		if (localBF->has(dObj)) {
			// Remove the data object from the bloomfilter since it was bad.
			localBF->remove(dObj);
			kernel->getThisNode()->setBloomfilter(*localBF, setCreateTimeOnBloomfilterUpdate);
		}
		return;
	} else if (dObj->getDataState() == DataObject::DATA_STATE_NOT_VERIFIED && helper) {
		// Call our helper to verify the data in the data object.
		if (dObj->dataIsVerifiable()) {
			helper->addTask(new DataTask(DATA_TASK_VERIFY_DATA, dObj));
			return;
		}
	}
	
	handleVerifiedDataObject(dObj);
}
Example #3
/*
	On send events, the security manager piggy-backs our certificate on
	outgoing node descriptions and signs data objects that do not yet
	carry a signature.
 */
void SecurityManager::onSendDataObject(Event *e)
{
	if (!e || !e->hasData())
		return;

	DataObjectRef dObj = e->getDataObject();
	
	if (dObj->isThisNodeDescription()) {
		// This is our node description. Piggy-back our certificate.
		if (myCert) {
			Metadata *m;

			m = dObj->getMetadata()->getMetadata("Security");
			
			if (m) {
				HAGGLE_ERR("Node description already has a Security tag!\n");
			} else {
				m = dObj->getMetadata()->addMetadata("Security");
				
				if (m) {
					m->addMetadata(myCert->toMetadata());
				}
			}
		}
	}
	
	// In most cases the data object is already signed here (e.g., if it is generated by a local
	// application, or was received from another node). The only reason to check if we should
	// sign the data object here, is if a data object was generated internally by Haggle -- in
	// which case the data object might not have a signature (e.g., the data object is a node
	// description).
	InterfaceRef iface = dObj->getRemoteInterface();
	
	if (dObj->shouldSign() && !(iface && iface->getType() == Interface::TYPE_APPLICATION_PORT)) {
		// FIXME: data objects should really be signed in the SecurityHelper thread since
		// it is a potentially CPU intensive operation. But it is currently not possible
		// to ensure that the signing operation has finished in the helper thread before
		// the data object is actually sent on the wire by the protocol manager.
		// To handle this situation, we probably need to add a new public event for 
		// security related operations, after which the security manager generates the
		// real send event.
		
		if (helper->signDataObject(dObj, privKey)) {
			HAGGLE_DBG("Successfully signed data object %s\n", dObj->getIdStr());
		} else {
			HAGGLE_DBG("Signing of data object %s failed!\n", dObj->getIdStr());
		}
	}	
}
bool NetworkCodingConfiguration::isNetworkCodingEnabled(DataObjectRef dataObject, NodeRef targetNodeToNetworkCodeFor) {
	// if network coding is turned on globally, the data object and target node ref id do not matter
	if( NetworkCodingConfiguration::isNetworkCodingTurnedOn ) {
		return true;
	}

	if( !targetNodeToNetworkCodeFor ) {
		return false;
	}

	string targetNodeId = targetNodeToNetworkCodeFor->getName();
    string dataObjectId;
    if (dataObject)
        dataObjectId = dataObject->getIdStr();

    string key = dataObjectId + "|" + targetNodeId;
    {
        Mutex::AutoLocker l(NetworkCodingConfiguration::contextAwareMutex); // needs to be fine grained
        contextawarecodingtracker_t::iterator it = NetworkCodingConfiguration::contextawaretracker.find(key);
        if (it != NetworkCodingConfiguration::contextawaretracker.end()) {
            return true;
        }

        key = "|" + targetNodeId;
        it = NetworkCodingConfiguration::contextawaretracker.find(key);
        if (it != NetworkCodingConfiguration::contextawaretracker.end()) {
            HAGGLE_DBG("context aware coding is enabled for targetnoderefid=%s, saving status for dataobject=%s\n", targetNodeId.c_str(), dataObjectId.c_str());
            NetworkCodingConfiguration::contextawaretracker.insert(make_pair(dataObjectId + "|" + targetNodeId, true));
            return true;
        }
    }

	return false;
}
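/*
   The tracker above uses a two-level key scheme: an exact "dataObjectId|targetNodeId"
   entry enables coding for one object/target pair, while a wildcard "|targetNodeId"
   entry enables it for every object sent to that target and is cached as an exact
   entry on first hit. The sketch below reproduces just that lookup with std::map and
   std::mutex; the class and method names are illustrative, not the Haggle types.
*/
#include <map>
#include <mutex>
#include <string>

class ContextAwareTracker {
public:
    // Enable coding for every data object sent to the given target node.
    void enableForTarget(const std::string &targetNodeId) {
        std::lock_guard<std::mutex> lock(mutex_);
        tracker_["|" + targetNodeId] = true;
    }

    // Two-level lookup mirroring isNetworkCodingEnabled().
    bool isEnabled(const std::string &dataObjectId, const std::string &targetNodeId) {
        std::lock_guard<std::mutex> lock(mutex_);

        // 1) Exact entry for this data object and target.
        if (tracker_.count(dataObjectId + "|" + targetNodeId))
            return true;

        // 2) Wildcard entry for the target only; cache the decision per object.
        if (tracker_.count("|" + targetNodeId)) {
            tracker_[dataObjectId + "|" + targetNodeId] = true;
            return true;
        }
        return false;
    }

private:
    std::mutex mutex_;
    std::map<std::string, bool> tracker_;
};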
Example #5
void EvictStrategyLRFU::updateInfoDataObject(DataObjectRef &dObj, unsigned int count, Timeval time) 
{

    ScratchpadManager *pad = getKernel()->getDataStore()->getScratchpadManager();
    //create scratchpad keys
    string paramNameF0=name;
    paramNameF0.append("_LRU_F0");
    double c_now = 0.0;
    double c_old = 0.0;
    double lastTime = 0.0;
    double paramValueF0 = 0.0;
    double paramValueLastk = 0.0;
    string paramName_c_now=name;
    paramName_c_now.append("_c_now");
    string paramNameLastk=name;
    paramNameLastk.append("_LRU_last_k");
    
    if (countType == EVICT_STRAT_LRFU_COUNT_TYPE_TIME) {
	paramValueLastk = time.getTimeAsMilliSecondsDouble();
    } else {  //otherwise count
	paramValueLastk = (double) count; 
    }

    bool has_attr = pad->hasScratchpadAttributeDouble(dObj, paramNameF0);
    if (has_attr) {
        paramValueF0 = pad->getScratchpadAttributeDouble(dObj, paramNameF0);
	c_now = pad->getScratchpadAttributeDouble(dObj, paramName_c_now);
	c_old = c_now;
       	lastTime = pad->getScratchpadAttributeDouble(dObj, paramNameLastk);
        c_now *= fx_calc(paramValueLastk-lastTime);
        c_now += paramValueF0; 
       	HAGGLE_DBG("%s f(x) = %f + G(%f - %f)*%f = %f\n", dObj->getIdStr() ,paramValueF0 , (float) count, lastTime, c_old,  c_now);

    } else {
	c_now = 1.0; //fx_calc(paramValueLastk);
       	pad->setScratchpadAttributeDouble(dObj, paramNameF0, c_now);
       	HAGGLE_DBG("%s f(0) = g(%f) = %f\n", dObj->getIdStr(), paramValueLastk, c_now);
    }

   //set current values
   pad->setScratchpadAttributeDouble(dObj, paramName_c_now, c_now);
   pad->setScratchpadAttributeDouble(dObj, paramNameLastk, paramValueLastk);
   

}
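/*
   EvictStrategyLRFU keeps one combined recency/frequency value per data object:
   on every reference the stored value is decayed by fx_calc over the elapsed time
   (or reference count) and then incremented by F0. fx_calc is not shown in this
   snippet; the sketch below assumes the usual LRFU weighting
   g(x) = (1/2)^(lambda * x) purely for illustration. Names are illustrative and do
   not correspond to the scratchpad API.
*/
#include <cmath>
#include <cstdio>

// Assumed decay function: lambda = 0 behaves like LFU, large lambda approaches LRU.
static double g(double delta, double lambda) {
    return std::pow(0.5, lambda * delta);
}

struct LrfuState {
    double c_now;   // combined value (the "_c_now" attribute above)
    double last_k;  // time or count of the last reference (the "_LRU_last_k" attribute)
};

// Mirrors updateInfoDataObject: the first reference initializes the value, later
// references decay the old value and add the fixed increment F0.
static void reference(LrfuState &s, bool first, double now,
                      double F0 = 1.0, double lambda = 0.1) {
    s.c_now = first ? 1.0 : s.c_now * g(now - s.last_k, lambda) + F0;
    s.last_k = now;
}

int main() {
    LrfuState s = { 0.0, 0.0 };
    reference(s, true, 0.0);    // f(0) = 1.0
    reference(s, false, 10.0);  // 1.0 * g(10) + 1.0
    reference(s, false, 12.0);  // recent references keep the value high
    std::printf("c_now=%f last_k=%f\n", s.c_now, s.last_k);
    return 0;
}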
void NetworkCodingEncoderManagerModuleProcessor::encode(NetworkCodingEncoderTaskRef networkCodingEncoderTask) {

    const DataObjectRef originalDataObjectRef =
            networkCodingEncoderTask->getDataObject();
    const NodeRefList nodeRefList = networkCodingEncoderTask->getNodeRefList();

    HAGGLE_DBG("Perform network coding for data object %s\n", originalDataObjectRef->getIdStr());

    DataObjectRef networkCodedDataObject =
            this->networkCodingEncoderService->encodeDataObject(originalDataObjectRef);
    if(networkCodedDataObject) { // MOS
      HAGGLE_DBG("Generated block %s for data object %s\n", networkCodedDataObject->getIdStr(), originalDataObjectRef->getIdStr());

      Event* sendEvent = new Event(EVENT_TYPE_DATAOBJECT_SEND,
				   networkCodedDataObject, nodeRefList);
      this->haggleKernel->addEvent(sendEvent);
    }
}
Example #7
/*
	Check incoming data objects for two reasons:
	1) to check whether they carry an embedded certificate, in which case we
	verify it and add it to our store if it is not already there.
	2) to sign any data objects that were generated by local applications.
 */
void SecurityManager::onIncomingDataObject(Event *e)
{
	DataObjectRef dObj;
	
	if (!e || !e->hasData())
		return;
	
	dObj = e->getDataObject();
	
	if (dObj->isDuplicate())
		return;

	Metadata *m = dObj->getMetadata()->getMetadata("Security");
	
	// Check if there is a certificate embedded that we do not already have stored
	if (m && m->getMetadata("Certificate")) {
		HAGGLE_DBG("Data object has embedded certificate, trying to verify it!\n");
		helper->addTask(new SecurityTask(SECURITY_TASK_VERIFY_CERTIFICATE, dObj));
	}
			
	InterfaceRef iface = dObj->getRemoteInterface();

	// Check if this data object came from an application, in that case we sign it.
	// In the future, the signing should potentially be handled by the application
	// itself. But this requires some major rethinking of how to manage certificates 
	// and keys, etc.
	if (iface && iface->getType() == Interface::TYPE_APPLICATION_PORT && dObj->shouldSign()) {
		HAGGLE_DBG("Data object should be signed\n");

		// FIXME: data objects should really be signed in the SecurityHelper thread since
		// it is a potentially CPU intensive operation. But it is currently not possible
		// to ensure that the signing operation has finished in the helper thread before
		// the data object is added to the data store.
		if (helper->signDataObject(dObj, privKey)) {
			HAGGLE_DBG("Successfully signed data object %s, which was added by an application.\n", 
				   dObj->getIdStr());
		} else {
			HAGGLE_DBG("Signing of data object %s, which was added by an application, failed!\n", 
				   dObj->getIdStr());
		}
	}
}
/*
 * Callback on data object deletion events. Keeps the cache state in sync
 * with the database, and notifies the utility functions.
 */
void 
CacheStrategyUtility::_handleDeletedDataObject(
    DataObjectRef &dObj, bool mem_do)
{
    if (!isResponsibleForDataObject(dObj)) {
        //HAGGLE_DBG("Ignoring data object, in-eligible for caching\n");
        return;
    }

    if (stats_replacement_strat && stats_replacement_strat->isResponsibleForDataObject(dObj)) {
        //HAGGLE_DBG("Ignoring data object, delete of stats data object\n");
        return;
    }

    if (deleteDataObjectFromCache(string(dObj->getIdStr()), mem_do)) {
        getUtilityFunction()->notifyDelete(dObj);
    } else {
        HAGGLE_DBG("Data object may have already been removed");
    }
}
/*
 * Returns true iff this cache strategy is responsible for managing the
 * given data object.
 */
bool 
CacheStrategyUtility::isResponsibleForDataObject(
    DataObjectRef &dObj)
{
    string id = string(dObj->getIdStr());
/*
    // THIS IS NOT THREAD SAFE!! added getOrigSize
    // to grab unaltered file size.
    if (utilMetadata.find(id) != utilMetadata.end()) {
        return true;
    }
*/

    if (dObj->isDuplicate()) {
        return false;
    }

    // SW: TODO: NOTE: this might not be the best way to check if it's from
    // a local application, but it works for now...
    bool isLocal = dObj->getRemoteInterface() && dObj->getRemoteInterface()->isApplication();

    bool notResponsible = dObj->isControlMessage() || dObj->isNodeDescription();
    bool isResponsible = !notResponsible;

    if (!handle_zero_size) {
        isResponsible = isResponsible && (dObj->getOrigDataLen() > 0);
    }

    if (!manage_locally_sent_files) {
        isResponsible = isResponsible && dObj->isPersistent();
    }

    if (stats_replacement_strat && stats_replacement_strat->isResponsibleForDataObject(dObj)) {
        isResponsible = true;
    } else if (manage_only_remote_files) {
        isResponsible = isResponsible && !isLocal;
    }
    return isResponsible;
}
bool FragmentationConfiguration::isFragmentationEnabled(DataObjectRef dataObject, NodeRef targetNodeToFragmentCodeFor) {
	// fragmentation is not enabled at all, so just return false and let nc checks run
	if(!FragmentationConfiguration::isFragmentationTurnedOn) {
		return false;
	}

	// no target node and fragmentation is turned on, so return true
	if( !targetNodeToFragmentCodeFor ) {
		return true;
	}

	string targetNodeId = targetNodeToFragmentCodeFor->getName();
    string dataObjectId;
    if (dataObject)
        dataObjectId = dataObject->getIdStr();

    string key = dataObjectId + "|" + targetNodeId;
    {
        Mutex::AutoLocker l(FragmentationConfiguration::contextAwareMutex); // needs to be fine grained
        contextawarefragmentationtracker_t::iterator it = FragmentationConfiguration::contextawaretracker.find(key);
        if (it != FragmentationConfiguration::contextawaretracker.end()) {
            return false;
        }

        key = "|" + targetNodeId;
        it = FragmentationConfiguration::contextawaretracker.find(key);
        if (it != FragmentationConfiguration::contextawaretracker.end()) {
            HAGGLE_DBG("context aware coding is enabled for targetnoderefid=%s, saving status for dataobject=%s\n", targetNodeId.c_str(), dataObjectId.c_str());
            FragmentationConfiguration::contextawaretracker.insert(make_pair(dataObjectId + "|" + targetNodeId, true));
            return false;
        }
    }

	return true;

}
bool FragmentationEncoderService::addAttributes(DataObjectRef originalDataObject, DataObjectRef fragmentDataObject,
        string sequenceNumberListCsv) {

    //copy attributes (eventually this may use rich metadata instead)
    const Attributes* originalAttributes = originalDataObject->getAttributes();
    for (Attributes::const_iterator it = originalAttributes->begin(); it != originalAttributes->end(); it++) {
        const Attribute attr = (*it).second;
        bool addAttribute = fragmentDataObject->addAttribute(attr);
        if (!addAttribute) {
            HAGGLE_ERR("unable to add attribute\n");
            return false;
        }
    }

    //add sequence number attribute
//	char sequenceBuffer[33];
//	memset(sequenceBuffer, 0, sizeof(sequenceBuffer));
//	sprintf(sequenceBuffer, "%d", sequenceNumber);
//	HAGGLE_DBG("stringSequenceNumber=%s\n", sequenceBuffer);
//	bool addedSequenceNUmber = fragmentDataObject->addAttribute(
//			HAGGLE_ATTR_FRAGMENTATION_SEQUENCE_NUMBER, sequenceBuffer, 0);
//	if (!addedSequenceNUmber) {
//		HAGGLE_ERR("unable to add addedSequenceNUmber attribute\n");
//		return false;
//	}

    HAGGLE_DBG2("stringSequenceNumber=%s\n", sequenceNumberListCsv.c_str());
    bool addedSequenceNumber = fragmentDataObject->addAttribute(HAGGLE_ATTR_FRAGMENTATION_SEQUENCE_NUMBER,
            sequenceNumberListCsv, 0);
    if (!addedSequenceNumber) {
        HAGGLE_ERR("Unable to add sequence number attribute\n");
        return false;
    }

    //add attribute to indicate data object is fragmentation block
    bool addedIsFragmentationCodedAttribute = fragmentDataObject->addAttribute(HAGGLE_ATTR_FRAGMENTATION_NAME, "TRUE",
            0);
    if (!addedIsFragmentationCodedAttribute) {
        HAGGLE_ERR("Unable to add fragmentation attribute\n");
        return false;
    }

    //add original data len attribute
    char lenBuffer[33];
    memset(lenBuffer, 0, sizeof(lenBuffer));
    int len = fragmentationDataObjectUtility->getFileLength(originalDataObject);
    if(len == 0) {
        HAGGLE_ERR("Orignal data len is zero - file already deleted\n");
        return false;
    }
    sprintf(lenBuffer, "%d", len);
    bool addedDataLenAttribute = fragmentDataObject->addAttribute(HAGGLE_ATTR_FRAGMENTATION_PARENT_ORIG_LEN, lenBuffer,
            0);
    if (!addedDataLenAttribute) {
        HAGGLE_ERR("Unable to add original data len attribute\n");
        return false;
    }

    //add dataobject id
    const char* originalId = originalDataObject->getIdStr();
    string originalStringId = originalId;
    bool addedIdAttribute = fragmentDataObject->addAttribute(HAGGLE_ATTR_FRAGMENTATION_PARENT_DATAOBJECT_ID,
            originalStringId, 0);
    if (!addedIdAttribute) {
        HAGGLE_ERR("Unable to add original data object id attribute\n");
        return false;
    }

    //add dataobject name
    string originalName = fragmentationDataObjectUtility->getFileName(originalDataObject);
    HAGGLE_DBG2("Add original name %s as attribute\n", originalName.c_str());
    bool addedNameAttribute = fragmentDataObject->addAttribute(HAGGLE_ATTR_FRAGMENTATION_PARENT_ORIG_NAME, originalName,
            0);
    if (!addedNameAttribute) {
        HAGGLE_ERR("Unable to add original name attribute\n");
        return false;
    }

    //add create time
    string originalCreateTime = originalDataObject->getCreateTime().getAsString();
    HAGGLE_DBG2("Add original create time %s as attribute\n", originalCreateTime.c_str());
    bool addedCreatedTimeAttribute = fragmentDataObject->addAttribute(HAGGLE_ATTR_FRAGMENTATION_PARENT_CREATION_TIME,
            originalCreateTime, 0);
    if (!addedCreatedTimeAttribute) {
        HAGGLE_ERR("Unable to add original create time attribute\n");
        return false;
    }

    //set create time of fragment to same create time as parent so fragment data object ids can match up
    Timeval createTime(originalCreateTime);
    fragmentDataObject->setCreateTime(createTime);

    if(originalDataObject->getSignature()) { // MOS
      //add signee
      string parentSignee = originalDataObject->getSignee();
      HAGGLE_DBG2("Add original signee %s as attribute\n",parentSignee.c_str());
      bool addedSigneeAttribute = fragmentDataObject->
	addAttribute(HAGGLE_ATTR_FRAGMENTATION_PARENT_ORIG_SIGNEE,parentSignee,0);
      if(!addedSigneeAttribute) {
        HAGGLE_ERR("Unable to add original signee attribute\n");
        return false;
      }
      
      //add signature
      char *base64_signature = NULL;
      if (base64_encode_alloc((char *)originalDataObject->getSignature(), originalDataObject->getSignatureLength(), &base64_signature) <= 0) {
        HAGGLE_ERR("Unable to generate base64 encoded signature\n");
        return false;
      }
      string parentSignature = base64_signature;
      HAGGLE_DBG2("Add original signature %s as attribute\n",parentSignature.c_str());
      bool addedSignatureAttribute = fragmentDataObject->
	addAttribute(HAGGLE_ATTR_FRAGMENTATION_PARENT_ORIG_SIGNATURE,parentSignature,0);
      if(!addedSignatureAttribute) {
        HAGGLE_ERR("Unable to add original signature attribute\n");
        return false;
      }
      if(base64_signature) {
        free(base64_signature);
        base64_signature = NULL;
      }
    }

    return true;
}
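/*
   A fragment advertises the parent-file positions it covers via the comma-separated
   value stored under HAGGLE_ATTR_FRAGMENTATION_SEQUENCE_NUMBER (sequenceNumberListCsv
   above). The decoder side has to parse that string back into indices;
   decodeSequenceNumbers below is a hypothetical helper showing one way to do that,
   not part of the fragmentation service.
*/
#include <cstddef>
#include <sstream>
#include <string>
#include <vector>

// Parse "3,7,12" into { 3, 7, 12 }.
static std::vector<size_t> decodeSequenceNumbers(const std::string &csv)
{
    std::vector<size_t> indices;
    std::istringstream ss(csv);
    std::string token;
    while (std::getline(ss, token, ',')) {
        if (!token.empty())
            indices.push_back(static_cast<size_t>(std::stoul(token)));
    }
    return indices;
}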
Example #12
ProtocolEvent ProtocolUDP::receiveDataObject()
{
	size_t len = 0;
	string haggleTag = "</Haggle>";
	DataObjectRef dObj;
        char buf[SOCKADDR_SIZE];
        struct sockaddr *peer_addr = (struct sockaddr *)buf;
	ProtocolEvent pEvent;
        unsigned short port;
        Address *addr = NULL;
	struct sockaddr_in *sa = NULL;

#ifdef OS_WINDOWS
	pEvent = receiveData(buffer, bufferSize, peer_addr, 0, &len);
#else
	pEvent = receiveData(buffer, bufferSize, peer_addr, MSG_DONTWAIT, &len);
#endif

	if (pEvent != PROT_EVENT_SUCCESS)
		return pEvent;

        if (peer_addr->sa_family == AF_INET) {
                sa = (struct sockaddr_in *)peer_addr;
                port = ntohs(sa->sin_port);
                addr = new IPv4Address(sa->sin_addr, TransportUDP(port));
	}
#if defined(ENABLE_IPv6) 
        else if (peer_addr->sa_family == AF_INET6) {
                struct sockaddr_in6 *sa6 = (struct sockaddr_in6 *)peer_addr;
                port = ntohs(sa6->sin6_port);
                addr = new IPv6Address(sa6->sin6_addr, TransportUDP(port));
        }
#endif

        if (addr == NULL)
                return PROT_EVENT_ERROR;

	if (peerIface) {
		HAGGLE_ERR("%s UDP peer interface was not null\n", getName());          
                delete addr;
		return PROT_EVENT_ERROR;
	}

        peerIface = new ApplicationPortInterface(port, "Application", addr, IFFLAG_UP);
        peerNode = getKernel()->getNodeStore()->retrieve(peerIface);

	delete addr;

	if (!peerNode) {
		peerNode = Node::create(Node::TYPE_APPLICATION, "Unknown application");

		if (!peerNode) {      
			HAGGLE_ERR("%s Could not create application node\n", getName());
			return PROT_EVENT_ERROR;
		}
	}

	dObj = DataObject::create(buffer, len, localIface, peerIface);

	if (!dObj) {
                HAGGLE_DBG("%s Could not create data object\n", getName());
		return PROT_EVENT_ERROR;
	}

        // Haggle doesn't own files that applications have put in: // MOS - not true - maybe some code missing here

	Timeval ct = dObj->getCreateTime();
	Timeval now = Timeval::now();
	if(!ct.isValid()) dObj->setCreateTime(now); // MOS - in case it was not set by application
	dObj->setReceiveTime(now);

	dataObjectsIncoming += 1; // MOS
	dataObjectBytesIncoming += len; // MOS
	if(!dObj->isControlMessage()) dataObjectsIncomingNonControl += 1; // MOS

        // We must release the peer interface reference after
        // the data object is created as the next incoming
        // data might be from another peer
        peerIface = NULL;

	// MOS - no need to ignore objects from local application (avoid risk of blocking control messages)
	// if (getKernel()->getThisNode()->getBloomfilter()->has(dObj)) {
	//	HAGGLE_DBG("Data object [%s] from interface %s:%u has already been received, ignoring.\n", 
	//		dObj->getIdStr(), sa ? ip_to_str(sa->sin_addr) : "undefined", port);
	//	return PROT_EVENT_SUCCESS;
	// }

	// Generate first an incoming event to conform with the base Protocol class
	getKernel()->addEvent(new Event(EVENT_TYPE_DATAOBJECT_INCOMING, dObj, peerNode));
	
	dataObjectsReceived += 1; // MOS
	dataObjectBytesReceived += len; // MOS

	HAGGLE_DBG("%s Received data object [%s] from interface %s:%u\n", getName(), 
		dObj->getIdStr(), sa ? ip_to_str(sa->sin_addr) : "undefined", port);

	// Since there is no data following, we generate the received event immediately 
	// following the incoming one
	getKernel()->addEvent(new Event(EVENT_TYPE_DATAOBJECT_RECEIVED, dObj, peerNode));

	peerNode = NULL; // MOS - similar to peerIface

	return PROT_EVENT_SUCCESS;
}
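/*
   The address handling above branches on sa_family to recover the sender's port
   before building the ApplicationPortInterface. The same dispatch, restated with
   plain POSIX types and no Haggle classes:
*/
#include <netinet/in.h>
#include <sys/socket.h>

// Return the peer's port in host byte order, or 0 for unhandled address families.
static unsigned short peerPortFromSockaddr(const struct sockaddr *peer_addr)
{
    if (peer_addr->sa_family == AF_INET) {
        const struct sockaddr_in *sa = (const struct sockaddr_in *) peer_addr;
        return ntohs(sa->sin_port);
    }
#if defined(ENABLE_IPv6)
    if (peer_addr->sa_family == AF_INET6) {
        const struct sockaddr_in6 *sa6 = (const struct sockaddr_in6 *) peer_addr;
        return ntohs(sa6->sin6_port);
    }
#endif
    return 0;
}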
DataObjectRef FragmentationEncoderService::encodeDataObject(const DataObjectRef originalDataObject, size_t startIndex,
        size_t fragmentSize, NodeRef nodeRef) {

    // Keep the path string alive for the lifetime of the C pointer; calling
    // c_str() on the returned temporary would leave filePathOriginalDataObject dangling.
    const string filePathString = fragmentationDataObjectUtility->getFilePath(originalDataObject);
    const char* filePathOriginalDataObject = filePathString.c_str();

    string originalFileName = fragmentationDataObjectUtility->getFileName(originalDataObject);

    string fragmentFile = this->fragmentationFileUtility->createFragmentFileName(originalFileName.c_str());
    HAGGLE_DBG2("Creating fragment file name %s\n", fragmentFile.c_str());

    FILE* originaldataObjectFilePointer = fopen(filePathOriginalDataObject, "rb");
    if (NULL == originaldataObjectFilePointer) {
        HAGGLE_ERR("Unable to open file %s\n", filePathOriginalDataObject);
        return NULL;
    }

    FILE* fragmentFilePointer = fopen(fragmentFile.c_str(), "wb");
    if (NULL == fragmentFilePointer) {
        HAGGLE_ERR("Error opening fragment file %s\n", fragmentFile.c_str());
	fclose(originaldataObjectFilePointer);
        return NULL;
    }

    this->fragmentationEncoderStorage->addDataObject(originalDataObject->getIdStr(), originalDataObject);

    fseek(originaldataObjectFilePointer, 0, SEEK_END);
    size_t fileSize = ftell(originaldataObjectFilePointer);

    size_t dataLen = fragmentationDataObjectUtility->getFileLength(originalDataObject);
    size_t totalNumberOfFragments = this->fragmentationDataObjectUtility->calculateTotalNumberOfFragments(dataLen,
            fragmentSize);
    int* fragmentSequenceNumberList = this->getIndexesOfFragmentsToCreate(totalNumberOfFragments);

    string sequenceNumberListCsv = "";

    size_t numberOfFragmentsToWrite = this->fragmentationConfiguration->getNumberFragmentsPerDataObject();
    if (numberOfFragmentsToWrite > totalNumberOfFragments) {
        HAGGLE_DBG2( "numberOfFragmentsToWrite=%d > totalNumberOfFragments=%d\n",
                numberOfFragmentsToWrite, totalNumberOfFragments);
        numberOfFragmentsToWrite = totalNumberOfFragments;
    }
    for (int count = 0; count < numberOfFragmentsToWrite && startIndex < totalNumberOfFragments;
            count++, startIndex++) {
        size_t sequenceNumberToWrite = fragmentSequenceNumberList[startIndex];
        HAGGLE_DBG2( "sequenceNumberToWrite=%d startIndex=%d totalNumberOfFragments=%d\n",
                sequenceNumberToWrite, startIndex, totalNumberOfFragments);
        FragmentationPositionInfo fragmentationPositionInfo =
                this->fragmentationDataObjectUtility->calculateFragmentationPositionInfo(sequenceNumberToWrite,
                        fragmentSize, fileSize);
        char intbuffer[33];
        memset(intbuffer, 0, sizeof(intbuffer));
        sprintf(intbuffer, "%d", (int) sequenceNumberToWrite);
        sequenceNumberListCsv = sequenceNumberListCsv + intbuffer + ",";

        fseek(originaldataObjectFilePointer, fragmentationPositionInfo.startPosition, SEEK_SET);
        char* buffer = new char[fragmentationPositionInfo.actualFragmentSize + 1];
        memset(buffer, 0, fragmentationPositionInfo.actualFragmentSize + 1);
        size_t bytesRead = fread(buffer, 1, fragmentationPositionInfo.actualFragmentSize,
                originaldataObjectFilePointer);
        if (ferror(originaldataObjectFilePointer)) {
            HAGGLE_ERR("Error reading file %s\n", filePathOriginalDataObject);
	    delete[] buffer;
	    delete[] fragmentSequenceNumberList;
	    fclose(fragmentFilePointer);
	    fclose(originaldataObjectFilePointer);
            return NULL;
        }
        HAGGLE_DBG2( "bytesRead=%d read for sequenceNumber=%d actualFragmentSize=%d startPosition=%d\n",
                bytesRead, sequenceNumberToWrite, fragmentationPositionInfo.actualFragmentSize, fragmentationPositionInfo.startPosition);

        /*write to fragment*/
        fwrite(buffer, fragmentationPositionInfo.actualFragmentSize, 1, fragmentFilePointer);
        if (ferror(fragmentFilePointer)) {
            HAGGLE_ERR("Error writing file %s\n", fragmentFile.c_str());
	    delete[] buffer;
	    delete[] fragmentSequenceNumberList;
	    fclose(fragmentFilePointer);
	    fclose(originaldataObjectFilePointer);
            return NULL;
        }
        HAGGLE_DBG2("wrote fragment file %s - sequenceNumber=%d\n", fragmentFile.c_str(), sequenceNumberToWrite);

        delete[] buffer;
    }
    delete[] fragmentSequenceNumberList;
    fclose(fragmentFilePointer);
    fclose(originaldataObjectFilePointer);

    sequenceNumberListCsv = sequenceNumberListCsv.substr(0, sequenceNumberListCsv.size() - 1);
    HAGGLE_DBG2("sequenceNumberListCsv=%s\n", sequenceNumberListCsv.c_str());

    DataObjectRef fragmentDataObjectRef = DataObject::create(fragmentFile, fragmentationDataObjectUtility->getFileName(originalDataObject));

    if(!fragmentDataObjectRef) {
      HAGGLE_ERR("Unable to create data object for file %s (%s)\n", fragmentFile.c_str(), originalDataObject->getFileName().c_str());
      return NULL;
    }

    bool addedAttributes = this->addAttributes(originalDataObject, fragmentDataObjectRef, sequenceNumberListCsv);

    if (!addedAttributes) {
        HAGGLE_ERR("Unable to add fragment attributes\n");
        return NULL;
    }

    fragmentDataObjectRef->setStored(true);

    HAGGLE_DBG2("Created fragment file %s for parent data object %s\n",
            fragmentDataObjectRef->getFilePath().c_str(), originalDataObject->getIdStr());

    return fragmentDataObjectRef;
}
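/*
   calculateTotalNumberOfFragments and calculateFragmentationPositionInfo are not
   part of this snippet. Under the straightforward interpretation they are a ceiling
   division plus an offset/size computation in which only the last fragment may be
   shorter than fragmentSize. A sketch of that arithmetic, under that assumption
   (names are illustrative):
*/
#include <cstddef>

struct PositionInfo {
    size_t startPosition;       // byte offset of the fragment within the parent file
    size_t actualFragmentSize;  // fragmentSize, except for a shorter final fragment
};

// Assumed semantics of calculateTotalNumberOfFragments: ceiling division.
static size_t totalFragments(size_t dataLen, size_t fragmentSize)
{
    return (dataLen + fragmentSize - 1) / fragmentSize;
}

// Assumed semantics of calculateFragmentationPositionInfo.
static PositionInfo positionInfo(size_t sequenceNumber, size_t fragmentSize, size_t fileSize)
{
    PositionInfo info;
    info.startPosition = sequenceNumber * fragmentSize;
    size_t remaining = (info.startPosition < fileSize) ? fileSize - info.startPosition : 0;
    info.actualFragmentSize = (remaining < fragmentSize) ? remaining : fragmentSize;
    return info;
}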
List<DataObjectRef> FragmentationEncoderService::getAllFragmentsForDataObject(DataObjectRef originalDataObject,
        size_t fragmentSize, NodeRef nodeRef) {

    size_t dataLen = fragmentationDataObjectUtility->getFileLength(originalDataObject);
    size_t totalNumberOfFragments = this->fragmentationDataObjectUtility->calculateTotalNumberOfFragments(dataLen,
            fragmentSize);
    size_t numberOfFragmentsPerDataObject = this->fragmentationConfiguration->getNumberFragmentsPerDataObject();

    List<DataObjectRef> dataObjectRefList;
    List<DataObjectRef> dataObjectsMissing;

    string originalDataObjectId = originalDataObject->getIdStr();

    for (int startIndex = 0; startIndex < totalNumberOfFragments; startIndex += numberOfFragmentsPerDataObject) {
        HAGGLE_DBG2("Fragmention startIndex=%d\n", startIndex);
        DataObjectRef fragmentDataObjectRef = this->fragmentationEncoderStorage->getFragment(
        		originalDataObjectId.c_str(), startIndex);
        if (!fragmentDataObjectRef) {
            HAGGLE_DBG2("Generating fragment of parent data object %s with startIndex=%d\n", originalDataObjectId.c_str(), startIndex);
            fragmentDataObjectRef = this->encodeDataObject(originalDataObject, startIndex, fragmentSize, nodeRef);
	    if(fragmentDataObjectRef) // MOS
	      this->fragmentationEncoderStorage->addFragment(originalDataObjectId,startIndex,fragmentDataObjectRef);
        }
	if(fragmentDataObjectRef) { //MOS
	  bool alreadySent = nodeRef->has(fragmentDataObjectRef);
	  if (!alreadySent) {
            HAGGLE_DBG2("Fragment with startIndex=%d has not been sent/received yet\n", startIndex);
            dataObjectsMissing.push_front(fragmentDataObjectRef);
	  }
	  else {
	    HAGGLE_DBG2("Fragment with startIndex=%d has already been sent/received\n", startIndex);
	  }
	}
    }

    if (dataObjectsMissing.size() > 0) {
        int whichToSend = rand() % dataObjectsMissing.size();
        HAGGLE_DBG("Selecting random fragment data object %d out of %d\n", whichToSend, dataObjectsMissing.size());
        for (List<DataObjectRef>::const_iterator it = dataObjectsMissing.begin(); it != dataObjectsMissing.end();
                it++) {
            if (whichToSend == 0) {
                dataObjectRefList.push_front(*it);
                break;
            }
            whichToSend--;
        }
    }

    /*
     int* listIndex = this->getIndexesOfFragmentsToCreate(
     totalNumberOfFragments);
     for (int startIndex = 0; startIndex < totalNumberOfFragments; startIndex +=
     numberOfFragmentsPerDataObject) {
     HAGGLE_DBG("startIndex=%d\n", startIndex);
     DataObjectRef fragmentDataObjectRef = this->encodeDataObject(
     originalDataObject, startIndex, fragmentSize, nodeRef);
     bool alreadySent = nodeRef->getBloomfilter()->has(
     fragmentDataObjectRef);
     if (!alreadySent) {
     HAGGLE_DBG("not already sent startIndex=%d\n", startIndex);
     dataObjectRefList.push_front(fragmentDataObjectRef);
     delete[] listIndex;
     return dataObjectRefList;
     }
     HAGGLE_DBG("already sent startIndex=%d\n");
     }
     delete[] listIndex;
     */

    return dataObjectRefList;
}
Example #15
ProtocolEvent ProtocolUDPGeneric::sendDataObjectNowNoControl(
    const DataObjectRef& dObj)
{
    NodeRef currentPeer;
    { Mutex::AutoLocker l(mutex); currentPeer = peerNode; } // MOS

    NodeRef actualPeer = getManager()->getKernel()->getNodeStore()->retrieve(currentPeer, true);
    if (!actualPeer) {
        HAGGLE_ERR("%s Peer not in node store\n", getName());
        return PROT_EVENT_ERROR;
    }

    // check if already in peers bloomfilter
    if (actualPeer->hasThisOrParentDataObject(dObj)) { // MOS
        HAGGLE_DBG("%s Peer already had data object.\n", getName());
        return PROT_EVENT_SUCCESS;
    }
    // done

    // SW: we move the hook here to minimize race condition where we send
    // too many redundant node descriptions
    // TODO: we may want to synchronize on the dObj or have some serial
    // queue so this is not probabilistic
    sendDataObjectNowSuccessHook(dObj);

    HAGGLE_DBG("%s Sending data object [%s] to peer \'%s\'\n", 
        getName(), dObj->getIdStr(), peerDescription().c_str());

    DataObjectDataRetrieverRef retriever = dObj->getDataObjectDataRetriever();
    if (!retriever || !retriever->isValid()) {
        HAGGLE_ERR("%s unable to start reading data\n", getName());
        return PROT_EVENT_ERROR;
    }

    ProtocolEvent pEvent = PROT_EVENT_SUCCESS;

    ssize_t len = retriever->retrieve(buffer, bufferSize, false);

    if (0 == len) {
        HAGGLE_ERR("%s DataObject is empty\n", getName());
        return PROT_EVENT_ERROR;
    }

    if (len < 0) {
        HAGGLE_ERR("%s Could not retrieve data from data object\n", getName());
        return PROT_EVENT_ERROR;
    }

    if ((size_t) len == bufferSize) {
        HAGGLE_ERR("%s Buffer is too small for message\n", getName());
        return PROT_EVENT_ERROR;
    }

    Timeval t_start;
    t_start.setNow();

    setSessionNo(dObj->getId()); // MOS
    setSeqNo(0); // MOS

    size_t bytesSent = 0;

    // MOS - simple way to increase udp redundancy
    for (int i = 0; i <= getConfiguration()->getRedundancy(); i++) {
        pEvent = sendData(buffer, (size_t) len, 0, &bytesSent);
        if (bytesSent != (size_t) len) {
            pEvent = PROT_EVENT_ERROR;
        }

        if (pEvent != PROT_EVENT_SUCCESS) {
            HAGGLE_ERR("%s Broadcast - Error\n", getName());
            return pEvent;
        }
    }

#ifdef DEBUG
    Timeval tx_time = Timeval::now() - t_start;

    HAGGLE_DBG("%s Sent %lu bytes data in %.3lf seconds, average speed = %.2lf kB/s\n", 
        getName(), len, tx_time.getTimeAsSecondsDouble(), 
        (double)len / (1000*tx_time.getTimeAsSecondsDouble()));
#endif

    dataObjectsOutgoing += 1; // MOS
    dataObjectsSent += 1; // MOS
    if(!dObj->isControlMessage()) dataObjectsOutgoingNonControl += 1; // MOS
    dataObjectBytesOutgoing += bytesSent; // MOS
    dataObjectBytesSent += len; // MOS
    if(dObj->isNodeDescription()) { nodeDescSent += 1; nodeDescBytesSent += len; } // MOS

    return PROT_EVENT_SUCCESS;
}
/*
 * Callback when a new data object is inserted. Updates the utility strategy
 * state and notifies the utility functions.
 */
void
CacheStrategyUtility::_handleNewDataObject(
    DataObjectRef &dObj)
{
    if (!isResponsibleForDataObject(dObj)) {
        //HAGGLE_DBG("Ignoring data object, in-eligible for caching\n");
        return;
    }

    if (stats_replacement_strat && stats_replacement_strat->isResponsibleForDataObject(dObj)) {
        stats_replacement_strat->handleNewDataObject(dObj);
        return;
    }

    string id = string(dObj->getIdStr());

    if (utilMetadata.find(id) != utilMetadata.end()) {
        // the DO is already in the cache!
        HAGGLE_DBG("Received data object already in the cache: %s\n", id.c_str());
        current_dupe_do_recv++;
        return;
    }

    getUtilityFunction()->notifyInsertion(dObj);

    int cost = dObj->getOrigDataLen();
    // NOTE: we handle zero sized data objects by "faking"
    // a size of 1.
    if (cost == 0) {
        cost = 1; 
    }

    string strResults="";
    if (Trace::trace.getTraceType() == TRACE_TYPE_DEBUG2) {
        strResults.append(id);
        strResults.append("[I]=");
    }
    double utility = getUtilityFunction()->compute(id, strResults);
    HAGGLE_DBG2("%s --> %f\n", strResults.c_str(), utility);
    Timeval now = Timeval::now();

    bool purgeNow = false;
    if (utility < getGlobalOptimizer()->getMinimumThreshold()) {
        HAGGLE_DBG("Minimum threshold for incoming data object %s is insufficient: %f < %f\n", id.c_str(), utility, getGlobalOptimizer()->getMinimumThreshold());
        current_drop_on_insert++;
        purgeNow = true;
    }

    if (!purgeOnInsert && ((current_size + cost) > max_capacity_kb*1024))  {
        HAGGLE_DBG("Cache is full and purge on insert is disabled, evicting new data object!\n");
        purgeNow = true;
        total_do_hard_evicted++;
        total_do_hard_evicted_bytes += cost;
    }

    if (purgeNow) {
        // we need to properly remove from bloomfilter even when capacity is violated
        getUtilityFunction()->notifyDelete(dObj);

        // CBMEN, HL - Begin
        // Remove any pending send events for this data object
        HAGGLE_STAT("purging send events for dObj %s\n", dObj->getIdStr());
        getManager()->getKernel()->cancelEvents(EVENT_TYPE_DATAOBJECT_SEND, dObj);
        // CBMEN, HL, End

        // delayed delete (although never inserted)
        if (!keep_in_bloomfilter) {
            int delay = (bloomfilter_remove_delay_ms <= 0) ? 1000 : bloomfilter_remove_delay_ms;
            DataObjectId_t *heapId = (DataObjectId_t *)malloc(sizeof(DataObjectId_t));
            DataObject::idStrToId(id, *heapId);
            getManager()->getKernel()->addEvent(new Event(bloomfilterRemoveDelayEventType, heapId, delay/(double)1000));
        }

        return;
    }

    DataObjectUtilityMetadata *do_metadata = new DataObjectUtilityMetadata(
        dObj,
        id,
        cost,
        utility,
        now,
        dObj->getCreateTime());

    if (purgeOnInsert) {
        do_metadata->setEnableDeletion(false);
    }

    if (false == utilMetadata.insert(make_pair(id, do_metadata)).second) {
        HAGGLE_ERR("Somehow data object already in cache\n"); 
        delete do_metadata;
        return;
    }

    current_size += cost;
    current_num_do++;
    total_do_inserted++;
    total_do_inserted_bytes += cost;

    if (!purgeOnInsert) {
        getManager()->insertDataObjectIntoDataStore(dObj);
        return;
    }

    bool was_deleted = false;
    _purgeCache(id, &was_deleted);

    if (was_deleted) {
        HAGGLE_DBG("Purged incoming data object %s on insert.\n", id.c_str());
        current_drop_on_insert++;
        return;
    }

    // DO still in cache, mark to allow deletion in future
    do_metadata->setEnableDeletion(true);
    getManager()->insertDataObjectIntoDataStore(dObj);
}
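/*
   The admission path above reduces to two independent drop conditions: the computed
   utility must reach the configured minimum threshold, and, when purge-on-insert is
   disabled, the new object must fit into the remaining capacity. A compact
   restatement of just that decision with illustrative parameter names (not the
   CacheStrategyUtility API):
*/
#include <cstddef>

// Returns true if the incoming data object should be dropped instead of cached.
static bool purgeOnArrival(double utility, double minThreshold,
                           std::size_t currentSizeBytes, std::size_t costBytes,
                           std::size_t maxCapacityBytes, bool purgeOnInsert)
{
    // 1) Utility below the minimum threshold: never worth keeping.
    if (utility < minThreshold)
        return true;

    // 2) Cache full and no purge-on-insert pass will make room: evict the newcomer.
    if (!purgeOnInsert && (currentSizeBytes + costBytes) > maxCapacityBytes)
        return true;

    return false;
}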
Example #17
ProtocolEvent ProtocolUDPGeneric::receiveDataObjectNoControl()
{
    ProtocolEvent pEvent;
    size_t len;
    pEvent = receiveData(buffer, bufferSize, MSG_DONTWAIT, &len);
    if (pEvent != PROT_EVENT_SUCCESS) {
        return pEvent;
    }

    buffer[bufferSize-1] = '\0';

    if (len == 0) {
        HAGGLE_DBG("%s Received zero-length message\n", getName());
        return PROT_EVENT_ERROR;
    }

    if(lastReceivedSessionNo == lastValidReceivedSessionNo && lastReceivedSeqNo == lastValidReceivedSeqNo) {
      HAGGLE_DBG("%s Ignoring duplicate message - session no %s sequence no %d\n", getName(), DataObject::idString(lastValidReceivedSessionNo).c_str(), lastReceivedSeqNo);
      return PROT_EVENT_SUCCESS;
    }

    memcpy(lastValidReceivedSessionNo, lastReceivedSessionNo, sizeof(DataObjectId_t)); 
    lastValidReceivedSeqNo = lastReceivedSeqNo; 

    // MOS - fastpath based on session id = data object id
    
    if (getKernel()->getThisNode()->getBloomfilter()->has(lastValidReceivedSessionNo)) {
      HAGGLE_DBG("%s Data object (session no %s) already in bloom filter - no event generated\n", getName(), DataObject::idString(lastValidReceivedSessionNo).c_str()); 
	dataObjectsNotReceived += 1; // MOS
        return PROT_EVENT_SUCCESS;
    }

    
    // MOS - quickly add to Bloom filter to reduce redundant processing in other protocols
    getKernel()->getThisNode()->getBloomfilter()->add(lastValidReceivedSessionNo);

    DataObjectRef dObj = DataObject::create_for_putting(localIface,
                                                        peerIface,  
                                                        getKernel()->getStoragePath());
    if (!dObj) {
        HAGGLE_DBG("%s Could not create data object\n", getName());
        return PROT_EVENT_ERROR;
    }

    size_t bytesRemaining = DATAOBJECT_METADATA_PENDING;
    ssize_t bytesPut = dObj->putData(buffer, len, &bytesRemaining, true);

    if (bytesPut < 0) {
        HAGGLE_ERR("%s Could not put data\n", getName());
        return PROT_EVENT_ERROR;
    }

    if(bytesRemaining != len - bytesPut) {
        HAGGLE_ERR("%s Received data object not complete - discarding\n", getName());
        return PROT_EVENT_ERROR;
    }

    HAGGLE_DBG("%s Metadata header received [%s].\n", getName(), dObj->getIdStr());
    
    dObj->setReceiveTime(Timeval::now());

    // MOS - the following was happening after posting INCOMING but that distorts the statistics
    HAGGLE_DBG("%s %ld bytes data received (including header), %ld bytes put\n", getName(), len, bytesPut);
    dataObjectsIncoming += 1; // MOS
    if(!dObj->isControlMessage()) dataObjectsIncomingNonControl += 1; // MOS
    dataObjectBytesIncoming += len; // MOS

    HAGGLE_DBG("%s Received data object [%s] from node %s\n", getName(), 
	       DataObject::idString(dObj).c_str(), peerDescription().c_str()); 
    // MOS - removed interface due to locking issue

    if (getKernel()->getThisNode()->getBloomfilter()->hasParentDataObject(dObj)) {
        HAGGLE_DBG("%s Data object [%s] already in bloom filter - no event generated\n", getName(), DataObject::idString(dObj).c_str()); 
	dataObjectsNotReceived += 1; // MOS
        return PROT_EVENT_SUCCESS;
    }

    NodeRef node = Node::create(dObj);
    if (node && (node == getKernel()->getThisNode())) {
        HAGGLE_DBG("%s Received own node description, discarding early.\n", getName());
	dataObjectsNotReceived += 1; // MOS
        return PROT_EVENT_SUCCESS;
    }

    // MOS - this now happens even before xml parsing
    // getKernel()->getThisNode()->getBloomfilter()->add(dObj);

    if(bytesRemaining > 0) {
      ssize_t bytesPut2 = dObj->putData(buffer + bytesPut, len - bytesPut, &bytesRemaining, false);
      HAGGLE_DBG("%s processing payload - %ld bytes put\n", getName(), bytesPut2);

      if (bytesPut2 < 0) {
        HAGGLE_ERR("%s Could not put data\n", getName());
        return PROT_EVENT_ERROR;
      }
      
      if(bytesRemaining != 0) {
        HAGGLE_ERR("%s Received data object not complete - discarding\n", getName());
        return PROT_EVENT_ERROR;
      }
    }

    NodeRef currentPeer;
    { Mutex::AutoLocker l(mutex); currentPeer = peerNode; } // MOS

    // Generate first an incoming event to conform with the base Protocol class
    getKernel()->addEvent(new Event(EVENT_TYPE_DATAOBJECT_INCOMING, dObj, currentPeer));

    receiveDataObjectSuccessHook(dObj);

    // Since there is no data following, we generate the received event immediately 
    // following the incoming one
    getKernel()->addEvent(new Event(EVENT_TYPE_DATAOBJECT_RECEIVED, dObj, currentPeer));

    dataObjectsReceived += 1; // MOS
    dataObjectBytesReceived += len; // MOS
    if(dObj->isNodeDescription()) { nodeDescReceived += 1; nodeDescBytesReceived += len; } // MOS

    return PROT_EVENT_SUCCESS;
}
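/*
   The duplicate suppression at the top of receiveDataObjectNoControl keys on the
   (session no, sequence no) pair of the last accepted datagram and drops repeats
   before any data object is built. A self-contained sketch of that check with
   illustrative types (not the Haggle protocol state):
*/
#include <array>
#include <cstdint>

typedef std::array<std::uint8_t, 20> SessionId; // stand-in for DataObjectId_t

struct DatagramDedup {
    SessionId lastSession;
    std::uint32_t lastSeqNo;
    bool haveLast;

    DatagramDedup() : lastSession(), lastSeqNo(0), haveLast(false) {}

    // Returns true if this datagram repeats the last accepted one and should be ignored.
    bool isDuplicate(const SessionId &session, std::uint32_t seqNo) {
        if (haveLast && session == lastSession && seqNo == lastSeqNo)
            return true;
        lastSession = session;
        lastSeqNo = seqNo;
        haveLast = true;
        return false;
    }
};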
Example #18
void
DataManager::handleVerifiedDataObject(DataObjectRef& dObj)
{
    if (!dObj) {
        HAGGLE_ERR ("Handle verified object received null object.\n");
        return;
    }

    if (networkCodingConfiguration->isNetworkCodingEnabled(dObj, NULL) &&
        !networkCodingConfiguration->isForwardingEnabled()) {
        if (networkCodingDataObjectUtility->isNetworkCodedDataObject(dObj)) {
            if (dObj->isDuplicate()) {
                HAGGLE_DBG("Data object %s is a duplicate! Not generating DATAOBJECT_NEW event\n", dObj->getIdStr());
            } else {
                kernel->addEvent(new Event(EVENT_TYPE_DATAOBJECT_NEW, dObj));
                return;
            }
        }
    }

    if (fragmentationConfiguration->isFragmentationEnabled(dObj, NULL) &&
        !fragmentationConfiguration->isForwardingEnabled()) {
        if (fragmentationDataObjectUtility->isFragmentationDataObject(dObj)) {
            if (dObj->isDuplicate()) {
                HAGGLE_DBG("Data object %s is a duplicate! Not generating DATAOBJECT_NEW event\n", dObj->getIdStr());
            } else {
                kernel->addEvent(new Event(EVENT_TYPE_DATAOBJECT_NEW, dObj));
                return;
            }
        }
    }

    // MOS - add data object to Bloomfilter to cover the case 
    // where there was no incoming event (e.g. encrypting data object from local app)
    if (dObj->getABEStatus() != DataObject::ABE_NOT_NEEDED && !localBF->has(dObj)) {
      localBF->add(dObj);
      HAGGLE_DBG("Adding encrypted data object [%s] to our bloomfilter, #objs=%d\n", DataObject::idString(dObj).c_str(), localBF->numObjects());
      kernel->getThisNode()->getBloomfilter()->add(dObj); // MOS
    }

    if (cacheStrategy && !cacheStrategy->isDone() && cacheStrategy->isResponsibleForDataObject(dObj)) {
        cacheStrategy->handleNewDataObject(dObj);
    }
    else {
        //default action for dObj's that are NOT handled by cache strat code
        insertDataObjectIntoDataStore (dObj);
    }
}