/** * Returns the id of the exporter collection entry or 0 in the case of an error */ int IpfixDbWriterMongo::getExporterID(const IpfixRecord::SourceID& sourceID) { list<ExporterCacheEntry>::iterator iter; int id = -1; uint32_t expIp = 0; iter = exporterCache.begin(); while(iter != exporterCache.end()) { if (equalExporter(iter->sourceID, sourceID)) { // found exporter in exporterCache DPRINTF("Exporter (ODID=%d, id=%d) found in exporter cache", sourceID.observationDomainId, iter->id); exporterCache.push_front(*iter); exporterCache.erase(iter); // update current exporter currentExporter = &exporterCache.front(); return exporterCache.front().id; } iter++; } // convert IP address (correct host byte order since 07/2010) expIp = sourceID.exporterAddress.toUInt32(); mongo::BSONObj exporter = con.findOne(dbCollectionExporters, QUERY("sourceID" << sourceID.observationDomainId << "srcIp" << expIp)); // search exporter collection if(exporter.isEmpty()){ mongo::BSONObj exporterCounter; mongo::BSONObj cmd; cmd = BSON( "findAndModify" << "counters" << "query" << BSON("_id" << "exporterCounter") << "update" << BSON("$inc" << BSON("c" << 1))); msg(MSG_DEBUG, "FIND AND MODIFY: %s", cmd.toString().c_str()); con.runCommand(dbName, cmd, exporterCounter); mongo::BSONObjBuilder b; id = exporterCounter.getObjectField("value").getIntField("c"); b << "sourceID" << sourceID.observationDomainId << "srcIP" << expIp << "id" << id; mongo::BSONObj obj = b.obj(); con.insert(dbCollectionExporters, obj); } else { id = exporter.getIntField("id"); } // insert exporter in cache ExporterCacheEntry tmp = {sourceID, id}; exporterCache.push_front(tmp); // update current exporter currentExporter = &exporterCache.front(); // pop last element if exporter cache is to long if(exporterCache.size() > MAX_EXPORTER) exporterCache.pop_back(); return id; }
/** * Returns the id of the exporter table entry or 0 in the case of an error */ int IpfixDbWriter::getExporterID(const IpfixRecord::SourceID& sourceID) { list<ExporterCacheEntry>::iterator iter; MYSQL_RES* dbResult; MYSQL_ROW dbRow; int id = 0; uint32_t expIp = 0; ostringstream statement; iter = exporterCache.begin(); while(iter != exporterCache.end()) { if (equalExporter(iter->sourceID, sourceID)) { // found exporter in exporterCache DPRINTF("Exporter (ODID=%d, id=%d) found in exporter cache", sourceID.observationDomainId, iter->id); exporterCache.push_front(*iter); exporterCache.erase(iter); // update current exporter currentExporter = &exporterCache.front(); return exporterCache.front().id; } iter++; } // convert IP address if(sourceID.exporterAddress.len == 4) expIp = *(uint32_t*)(sourceID.exporterAddress.ip); // search exporter table statement << "SELECT id FROM exporter WHERE sourceID=" << sourceID.observationDomainId << " AND srcIp=" << expIp; DPRINTF("SQL Query: %s", statement.str().c_str()); if(mysql_query(conn, statement.str().c_str()) != 0) { msg(MSG_ERROR,"IpfixDbWriter: Select on exporter table failed. Error: %s", mysql_error(conn)); return 0;// If a failure occurs, return 0 } dbResult = mysql_store_result(conn); if(( dbRow = mysql_fetch_row(dbResult))) { // found in table id = atoi(dbRow[0]); mysql_free_result(dbResult); DPRINTF("ExporterID %d is in exporter table", id); } else { mysql_free_result(dbResult); // insert new exporter table entry statement.str(""); statement.clear(); statement << "INSERT INTO exporter (ID,sourceID,srcIP) VALUES ('NULL','" << sourceID.observationDomainId << "','" << expIp << "')"; DPRINTF("SQL Query: %s", statement.str().c_str()); if(mysql_query(conn, statement.str().c_str()) != 0) { msg(MSG_ERROR,"IpfixDbWriter: Insert in exporter table failed. 
Error: %s", conn); return 0; } id = mysql_insert_id(conn); msg(MSG_INFO,"IpfixDbWriter: new exporter (ODID=%d, id=%d) inserted in exporter table", sourceID.observationDomainId, id); } // insert exporter in cache ExporterCacheEntry tmp = {sourceID, id}; exporterCache.push_front(tmp); // update current exporter currentExporter = &exporterCache.front(); // pop last element if exporter cache is to long if(exporterCache.size() > MAX_EXPORTER) exporterCache.pop_back(); return id; }
/**
 * Loop over the selected properties (or over all template fields when allProp
 * is set) and gather the IPFIX values of the record in correct order.
 * The result is returned as a BSON object ready for insertion; the flow start
 * time is tracked locally to detect records without timing information.
 *
 * @param sourceID         source of the record (used to resolve the exporter id)
 * @param dataTemplateInfo template describing the record layout
 * @param length           total length of the record data
 * @param data             raw record data
 * @return BSON document with one element per property/field
 */
mongo::BSONObj IpfixDbWriterMongo::getInsertObj(const IpfixRecord::SourceID& sourceID,
		TemplateInfo& dataTemplateInfo, uint16_t length, IpfixRecord::Data* data)
{
	uint64_t intdata = 0;
	uint64_t intdata2 = 0;
	uint32_t k;
	bool notfound, notfound2;
	mongo::BSONObjBuilder obj;
	time_t flowstartsec = 0;

	if (!allProp) {
		/* loop over a subset of elements (selected properties) and loop over the
		 * IPFIX_TYPEID of the record to get the corresponding data to store */
		for (vector<Property>::iterator prop = documentProperties.begin(); prop != documentProperties.end(); prop++) {
			if (prop->ipfixId == EXPORTERID) {
				// if this is the same source ID as last time, we get the exporter id from currentExporter
				if ((currentExporter != NULL) && equalExporter(sourceID, currentExporter->sourceID)) {
					DPRINTF("Exporter is same as last time (ODID=%d, id=%d)", sourceID.observationDomainId, currentExporter->id);
					intdata = (uint64_t)currentExporter->id;
				} else {
					// lookup exporter buffer to get exporterID from sourcID and expIp
					intdata = (uint64_t)getExporterID(sourceID);
				}
			} else {
				notfound = true;
				// try to gather data required for the field: look inside the ipfix record
				if (dataTemplateInfo.fieldCount > 0) {
					for (k = 0; k < dataTemplateInfo.fieldCount; k++) {
						if (dataTemplateInfo.fieldInfo[k].type.enterprise == prop->enterprise
								&& dataTemplateInfo.fieldInfo[k].type.id == prop->ipfixId) {
							notfound = false;
							intdata = getData(dataTemplateInfo.fieldInfo[k].type, (data + dataTemplateInfo.fieldInfo[k].offset));
							DPRINTF("IpfixDbWriterMongo::getData: really saw ipfix id %d in packet with intdata %llX, type %d, length %d and offset %X",
									prop->ipfixId, intdata, dataTemplateInfo.fieldInfo[k].type.id,
									dataTemplateInfo.fieldInfo[k].type.length, dataTemplateInfo.fieldInfo[k].offset);
							break;
						}
					}
				}
				// look in static data fields of template for data
				if (dataTemplateInfo.dataCount > 0 && notfound) {
					for (k = 0; k < dataTemplateInfo.dataCount; k++) {
						// BUGFIX: compare the enterprise of dataInfo[k]; the original
						// read fieldInfo[k].type.enterprise, which belongs to the other
						// array and may be out of bounds when dataCount > fieldCount
						if (dataTemplateInfo.dataInfo[k].type.enterprise == prop->enterprise
								&& dataTemplateInfo.dataInfo[k].type.id == prop->ipfixId) {
							notfound = false;
							intdata = getData(dataTemplateInfo.dataInfo[k].type, (dataTemplateInfo.data + dataTemplateInfo.dataInfo[k].offset));
							break;
						}
					}
				}
				if (notfound) {
					notfound2 = true;
					// for some Ids, we have an alternative
					if (prop->enterprise == 0) {
						switch (prop->ipfixId) {
							case IPFIX_TYPEID_flowStartSeconds:
								if (dataTemplateInfo.fieldCount > 0) {
									for (k = 0; k < dataTemplateInfo.fieldCount; k++) {
										// look for alternative (flowStartMilliseconds/1000)
										if (dataTemplateInfo.fieldInfo[k].type.id == IPFIX_TYPEID_flowStartMilliseconds) {
											intdata = getData(dataTemplateInfo.fieldInfo[k].type, (data + dataTemplateInfo.fieldInfo[k].offset)) / 1000;
											notfound = false;
											break;
										}
										// if no flow start time is available, maybe this is from a netflow from Cisco
										// then - as a last alternative - use flowStartSysUpTime as flow start time
										if (dataTemplateInfo.fieldInfo[k].type.id == IPFIX_TYPEID_flowStartSysUpTime) {
											intdata2 = getData(dataTemplateInfo.fieldInfo[k].type, (data + dataTemplateInfo.fieldInfo[k].offset));
											notfound2 = false;
										}
									}
									if (notfound && !notfound2) {
										intdata = intdata2;
										notfound = false;
									}
								}
								break;
							case IPFIX_TYPEID_flowEndSeconds:
								if (dataTemplateInfo.fieldCount > 0) {
									for (k = 0; k < dataTemplateInfo.fieldCount; k++) {
										// look for alternative (flowEndMilliseconds/1000)
										if (dataTemplateInfo.fieldInfo[k].type.id == IPFIX_TYPEID_flowEndMilliseconds) {
											intdata = getData(dataTemplateInfo.fieldInfo[k].type, (data + dataTemplateInfo.fieldInfo[k].offset)) / 1000;
											notfound = false;
											break;
										}
										// if no flow end time is available, maybe this is from a netflow from Cisco
										// then use flowEndSysUpTime as flow start time
										if (dataTemplateInfo.fieldInfo[k].type.id == IPFIX_TYPEID_flowEndSysUpTime) {
											intdata2 = getData(dataTemplateInfo.fieldInfo[k].type, (data + dataTemplateInfo.fieldInfo[k].offset));
											notfound2 = false;
										}
									}
									if (notfound && !notfound2) {
										intdata = intdata2;
										notfound = false;
									}
								}
								break;
						}
					} else if (prop->enterprise == IPFIX_PEN_reverse) {
						switch (prop->ipfixId) {
							case IPFIX_TYPEID_flowStartSeconds:
								// look for alternative (revFlowStartMilliseconds/1000)
								if (dataTemplateInfo.fieldCount > 0) {
									for (k = 0; k < dataTemplateInfo.fieldCount; k++) {
										if (dataTemplateInfo.fieldInfo[k].type == InformationElement::IeInfo(IPFIX_TYPEID_flowStartMilliseconds, IPFIX_PEN_reverse)) {
											intdata = getData(dataTemplateInfo.fieldInfo[k].type, (data + dataTemplateInfo.fieldInfo[k].offset)) / 1000;
											notfound = false;
											break;
										}
									}
								}
								break;
							case IPFIX_TYPEID_flowEndSeconds:
								// look for alternative (revFlowEndMilliseconds/1000)
								if (dataTemplateInfo.fieldCount > 0) {
									for (k = 0; k < dataTemplateInfo.fieldCount; k++) {
										if (dataTemplateInfo.fieldInfo[k].type == InformationElement::IeInfo(IPFIX_TYPEID_flowEndMilliseconds, IPFIX_PEN_reverse)) {
											intdata = getData(dataTemplateInfo.fieldInfo[k].type, (data + dataTemplateInfo.fieldInfo[k].offset)) / 1000;
											notfound = false;
											break;
										}
									}
								}
								break;
						}
					}
					// if still not found, get default value
					if (notfound)
						intdata = prop->defaultValue;
				}
				// we need extra treatment for timing related fields
				if (prop->enterprise == 0) {
					switch (prop->ipfixId) {
						case IPFIX_TYPEID_flowStartSeconds:
							// save time for table access
							if (flowstartsec == 0)
								flowstartsec = intdata;
							break;
						case IPFIX_TYPEID_flowEndSeconds:
							break;
						case IPFIX_TYPEID_flowStartMilliseconds:
							// if flowStartSeconds is not stored in one of the columns, but flowStartMilliseconds is,
							// then we use flowStartMilliseconds for table access
							// This is realized by storing this value only if flowStartSeconds has not yet been seen.
							// A later appearing flowStartSeconds will override this value.
							if (flowstartsec == 0)
								flowstartsec = intdata / 1000;
							// intentional fall-through: milliseconds are stored modulo one second
						case IPFIX_TYPEID_flowEndMilliseconds:
							// in the database the millisecond entry is counted from last second
							intdata %= 1000;
							break;
					}
				} else if (prop->enterprise == IPFIX_PEN_reverse) {
					switch (prop->ipfixId) {
						case IPFIX_TYPEID_flowStartMilliseconds:
						case IPFIX_TYPEID_flowEndMilliseconds:
							// in the database the millisecond entry is counted from last second
							intdata %= 1000;
							break;
					}
				}
			}
			msg(MSG_DEBUG, "saw ipfix id %s (element ID %d) in packet with intdata %llX",
					prop->propertyName, prop->ipfixId, static_cast<int64_t>(intdata));
			if (beautyProp)
				obj << prop->propertyName << static_cast<long long int>(intdata);
			else
				obj << boost::lexical_cast<std::string>(prop->ipfixId).c_str() << static_cast<long long int>(intdata);
		}
		// BUGFIX: check (and warn) only once after all properties have been
		// processed; the original check was inside the loop and fired for every
		// property handled before the flow start time was seen. This matches the
		// placement in IpfixDbWriter::getInsertString.
		if (flowstartsec == 0) {
			msg(MSG_ERROR, "IpfixDbWriterMongo: Failed to get timing data from record. Will be saved in default table.");
		}
	} else {
		/* Dump all elements to DB: first look in ipfix records ... */
		if (dataTemplateInfo.fieldCount > 0) {
			for (k = 0; k < dataTemplateInfo.fieldCount; k++) {
				intdata = getData(dataTemplateInfo.fieldInfo[k].type, (data + dataTemplateInfo.fieldInfo[k].offset));
				DPRINTF("IpfixDbWriterMongo::getData: dumping from packet intdata %llX, type %d, length %d and offset %X",
						intdata, dataTemplateInfo.fieldInfo[k].type.id,
						dataTemplateInfo.fieldInfo[k].type.length, dataTemplateInfo.fieldInfo[k].offset);
				obj << boost::lexical_cast<std::string>(dataTemplateInfo.fieldInfo[k].type.id).c_str() << static_cast<long long int>(intdata);
			}
		}
		// ... then in the static data fields of the template
		if (dataTemplateInfo.dataCount > 0) {
			for (k = 0; k < dataTemplateInfo.dataCount; k++) {
				intdata = getData(dataTemplateInfo.dataInfo[k].type, (dataTemplateInfo.data + dataTemplateInfo.dataInfo[k].offset));
				// BUGFIX: key the element by dataInfo[k].type.id; the original used
				// fieldInfo[k].type.id here, which labels the value with an unrelated
				// field's id and may read past fieldInfo when dataCount > fieldCount
				obj << boost::lexical_cast<std::string>(dataTemplateInfo.dataInfo[k].type.id).c_str() << static_cast<long long int>(intdata);
			}
		}
	}
	return obj.obj();
}
/**
 * Loop over table columns and template to get the IPFIX values in correct order
 * to store in database.
 * The result is written into row, the firstSwitched time is returned in flowstartsec.
 *
 * @param row              output: receives the "(v1,v2,...)" value tuple
 * @param flowstartsec     output: flow start time in seconds (0 if unavailable)
 * @param sourceID         source of the record (used to resolve the exporter id)
 * @param dataTemplateInfo template describing the record layout
 * @param length           total length of the record data
 * @param data             raw record data
 * @return reference to row
 */
string& IpfixDbWriter::getInsertString(string& row, time_t& flowstartsec, const IpfixRecord::SourceID& sourceID,
		TemplateInfo& dataTemplateInfo, uint16_t length, IpfixRecord::Data* data)
{
	uint64_t intdata = 0;
	uint64_t intdata2 = 0;
	uint32_t k;
	bool notfound, notfound2;
	bool first = true;
	// BUGFIX: start from an empty stream. The original seeded the stream with
	// the caller's previous row content while keeping the put position at 0, so
	// a shorter new row left stale trailing characters in the result.
	ostringstream rowStream;

	flowstartsec = 0;
	rowStream << "(";

	/* loop over the column names and loop over the IPFIX_TYPEID of the record
	 * to get the corresponding data to store and build the insert statement */
	for (vector<Column>::iterator col = tableColumns.begin(); col != tableColumns.end(); col++) {
		if (col->ipfixId == EXPORTERID) {
			// if this is the same source ID as last time, we get the exporter id from currentExporter
			if ((currentExporter != NULL) && equalExporter(sourceID, currentExporter->sourceID)) {
				DPRINTF("Exporter is same as last time (ODID=%d, id=%d)", sourceID.observationDomainId, currentExporter->id);
				intdata = (uint64_t)currentExporter->id;
			} else {
				// lookup exporter buffer to get exporterID from sourcID and expIp
				intdata = (uint64_t)getExporterID(sourceID);
			}
		} else {
			notfound = true;
			// try to gather data required for the field: look inside the ipfix record
			if (dataTemplateInfo.fieldCount > 0) {
				for (k = 0; k < dataTemplateInfo.fieldCount; k++) {
					if (dataTemplateInfo.fieldInfo[k].type.enterprise == col->enterprise
							&& dataTemplateInfo.fieldInfo[k].type.id == col->ipfixId) {
						notfound = false;
						intdata = getData(dataTemplateInfo.fieldInfo[k].type, (data + dataTemplateInfo.fieldInfo[k].offset));
						DPRINTF("IpfixDbWriter::getData: really saw ipfix id %d in packet with intdata %llX, type %d, length %d and offset %X",
								col->ipfixId, intdata, dataTemplateInfo.fieldInfo[k].type.id,
								dataTemplateInfo.fieldInfo[k].type.length, dataTemplateInfo.fieldInfo[k].offset);
						break;
					}
				}
			}
			// look in static data fields of template for data
			if (dataTemplateInfo.dataCount > 0 && notfound) {
				for (k = 0; k < dataTemplateInfo.dataCount; k++) {
					// BUGFIX: compare the enterprise of dataInfo[k]; the original
					// read fieldInfo[k].type.enterprise, which belongs to the other
					// array and may be out of bounds when dataCount > fieldCount
					if (dataTemplateInfo.dataInfo[k].type.enterprise == col->enterprise
							&& dataTemplateInfo.dataInfo[k].type.id == col->ipfixId) {
						notfound = false;
						intdata = getData(dataTemplateInfo.dataInfo[k].type, (dataTemplateInfo.data + dataTemplateInfo.dataInfo[k].offset));
						break;
					}
				}
			}
			if (notfound) {
				notfound2 = true;
				// for some Ids, we have an alternative
				if (col->enterprise == 0) {
					switch (col->ipfixId) {
						case IPFIX_TYPEID_flowStartSeconds:
							if (dataTemplateInfo.fieldCount > 0) {
								for (k = 0; k < dataTemplateInfo.fieldCount; k++) {
									// look for alternative (flowStartMilliSeconds/1000)
									if (dataTemplateInfo.fieldInfo[k].type.id == IPFIX_TYPEID_flowStartMilliSeconds) {
										intdata = getData(dataTemplateInfo.fieldInfo[k].type, (data + dataTemplateInfo.fieldInfo[k].offset)) / 1000;
										notfound = false;
										break;
									}
									// if no flow start time is available, maybe this is from a netflow from Cisco
									// then - as a last alternative - use flowStartSysUpTime as flow start time
									if (dataTemplateInfo.fieldInfo[k].type.id == IPFIX_TYPEID_flowStartSysUpTime) {
										intdata2 = getData(dataTemplateInfo.fieldInfo[k].type, (data + dataTemplateInfo.fieldInfo[k].offset));
										notfound2 = false;
									}
								}
								if (notfound && !notfound2) {
									intdata = intdata2;
									notfound = false;
								}
							}
							break;
						//TODO: replace by enterprise number (Gerhard 12/2009)
						case IPFIX_ETYPEID_revFlowStartSeconds:
							// look for alternative (revFlowStartMilliSeconds/1000)
							if (dataTemplateInfo.fieldCount > 0) {
								for (k = 0; k < dataTemplateInfo.fieldCount; k++) {
									if (dataTemplateInfo.fieldInfo[k].type.id == IPFIX_ETYPEID_revFlowStartMilliSeconds) {
										intdata = getData(dataTemplateInfo.fieldInfo[k].type, (data + dataTemplateInfo.fieldInfo[k].offset)) / 1000;
										notfound = false;
										break;
									}
								}
							}
							break;
						case IPFIX_TYPEID_flowEndSeconds:
							if (dataTemplateInfo.fieldCount > 0) {
								for (k = 0; k < dataTemplateInfo.fieldCount; k++) {
									// look for alternative (flowEndMilliSeconds/1000)
									if (dataTemplateInfo.fieldInfo[k].type.id == IPFIX_TYPEID_flowEndMilliSeconds) {
										intdata = getData(dataTemplateInfo.fieldInfo[k].type, (data + dataTemplateInfo.fieldInfo[k].offset)) / 1000;
										notfound = false;
										break;
									}
									// if no flow end time is available, maybe this is from a netflow from Cisco
									// then use flowEndSysUpTime as flow start time
									if (dataTemplateInfo.fieldInfo[k].type.id == IPFIX_TYPEID_flowEndSysUpTime) {
										intdata2 = getData(dataTemplateInfo.fieldInfo[k].type, (data + dataTemplateInfo.fieldInfo[k].offset));
										notfound2 = false;
									}
								}
								if (notfound && !notfound2) {
									intdata = intdata2;
									notfound = false;
								}
							}
							break;
						//TODO: replace by enterprise number (Gerhard 12/2009)
						case IPFIX_ETYPEID_revFlowEndSeconds:
							// look for alternative (revFlowEndMilliSeconds/1000)
							if (dataTemplateInfo.fieldCount > 0) {
								for (k = 0; k < dataTemplateInfo.fieldCount; k++) {
									if (dataTemplateInfo.fieldInfo[k].type.id == IPFIX_ETYPEID_revFlowEndMilliSeconds) {
										intdata = getData(dataTemplateInfo.fieldInfo[k].type, (data + dataTemplateInfo.fieldInfo[k].offset)) / 1000;
										notfound = false;
										break;
									}
								}
							}
							break;
					}
				}
				// if still not found, get default value
				if (notfound)
					intdata = col->defaultValue;
			}
			// we need extra treatment for timing related fields
			if (col->enterprise == 0) {
				switch (col->ipfixId) {
					case IPFIX_TYPEID_flowStartSeconds:
						// save time for table access
						if (flowstartsec == 0)
							flowstartsec = intdata;
						break;
					case IPFIX_TYPEID_flowEndSeconds:
						break;
					case IPFIX_TYPEID_flowStartMilliSeconds:
						// if flowStartSeconds is not stored in one of the columns, but flowStartMilliSeconds is,
						// then we use flowStartMilliSeconds for table access
						// This is realized by storing this value only if flowStartSeconds has not yet been seen.
						// A later appearing flowStartSeconds will override this value.
						if (flowstartsec == 0)
							flowstartsec = intdata / 1000;
						// intentional fall-through: milliseconds are stored modulo one second
					case IPFIX_TYPEID_flowEndMilliSeconds:
					//TODO: replace by enterprise number (Gerhard 12/2009)
					case IPFIX_ETYPEID_revFlowStartMilliSeconds:
					case IPFIX_ETYPEID_revFlowEndMilliSeconds:
						// in the database the millisecond entry is counted from last second
						intdata %= 1000;
						break;
				}
			}
		}
		DPRINTF("saw ipfix id %d in packet with intdata %llX", col->ipfixId, intdata);
		if (first)
			rowStream << intdata;
		else
			rowStream << "," << intdata;
		first = false;
	}
	rowStream << ")";

	if (flowstartsec == 0) {
		msg(MSG_ERROR, "IpfixDbWriter: Failed to get timing data from record. Will be saved in default table.");
	}

	row = rowStream.str();
	DPRINTF("Insert row: %s", row.c_str());
	return row;
}