Exemplo n.º 1
0
static void send_data(char *rfile, time_t twin_start, 
			time_t twin_end, uint32_t count, unsigned int delay, int confirm, int netflow_version) {
master_record_t	master_record;
common_record_t	*flow_record;
nffile_t		*nffile;
int 			i, done, ret, again;
uint32_t		numflows, cnt;

#ifdef COMPAT15
int	v1_map_done = 0;
#endif
	
	// Get the first file handle
	nffile = GetNextFile(NULL, twin_start, twin_end);
	if ( !nffile ) {
		LogError("GetNextFile() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		return;
	}
	if ( nffile == EMPTY_LIST ) {
		LogError("Empty file list. No files to process\n");
		return;
	}

	peer.send_buffer   	= malloc(UDP_PACKET_SIZE);
	peer.flush			= 0;
	if ( !peer.send_buffer ) {
		LogError("malloc() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		CloseFile(nffile);
		DisposeFile(nffile);
		return;
	}
	peer.buff_ptr = peer.send_buffer;
	peer.endp  	  = (void *)((pointer_addr_t)peer.send_buffer + UDP_PACKET_SIZE - 1);

	if ( netflow_version == 5 ) 
		Init_v5_v7_output(&peer);
	else 
		Init_v9_output(&peer);

	numflows	= 0;
	done	 	= 0;

	// setup Filter Engine to point to master_record, as any record read from file
	// is expanded into this record
	Engine->nfrecord = (uint64_t *)&master_record;

	cnt = 0;
	while ( !done ) {
		// get next data block from file
		ret = ReadBlock(nffile);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT ) 
					LogError("Skip corrupt data file '%s'\n",GetCurrentFilename());
				else 
					LogError("Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF: {
				nffile_t *next = GetNextFile(nffile, twin_start, twin_end);
				if ( next == EMPTY_LIST ) {
					done = 1;
				}
				if ( next == NULL ) {
					done = 1;
					LogError("Unexpected end of file list\n");
				}
				// else continue with next file
				continue;
	
				} break; // not really needed
		}

#ifdef COMPAT15
		if ( nffile->block_header->id == DATA_BLOCK_TYPE_1 ) {
			common_record_v1_t *v1_record = (common_record_v1_t *)nffile->buff_ptr;
			// create an extension map for v1 blocks
			if ( v1_map_done == 0 ) {
				extension_map_t *map = malloc(sizeof(extension_map_t) + 2 * sizeof(uint16_t) );
				if ( ! map ) {
					perror("Memory allocation error");
					exit(255);
				}
				map->type 	= ExtensionMapType;
				map->size 	= sizeof(extension_map_t) + 2 * sizeof(uint16_t);
				if (( map->size & 0x3 ) != 0 ) {
					map->size += 4 - ( map->size & 0x3 );
				}
				map->map_id = INIT_ID;
				map->ex_id[0]  = EX_IO_SNMP_2;
				map->ex_id[1]  = EX_AS_2;
				map->ex_id[2]  = 0;
				
				map->extension_size  = 0;
				map->extension_size += extension_descriptor[EX_IO_SNMP_2].size;
				map->extension_size += extension_descriptor[EX_AS_2].size;
					
				Insert_Extension_Map(extension_map_list, map);
				v1_map_done = 1;
			}

			// convert the records to v2
			for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
				common_record_t *v2_record = (common_record_t *)v1_record;
				Convert_v1_to_v2((void *)v1_record);
				// now we have a v2 record -> use size of v2_record->size
				v1_record = (common_record_v1_t *)((pointer_addr_t)v1_record + v2_record->size);
			}
			nffile->block_header->id = DATA_BLOCK_TYPE_2;
		}
#endif

		if ( nffile->block_header->id != DATA_BLOCK_TYPE_2 ) {
			LogError("Can't process block type %u. Skip block.\n", nffile->block_header->id);
			continue;
		}

		// cnt is the number of blocks, which survived the filter
		// and added to the output buffer
		flow_record = nffile->buff_ptr;

		for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
			int match;

			switch ( flow_record->type ) {
				case CommonRecordType: {
					if ( extension_map_list->slot[flow_record->ext_map] == NULL ) {
						LogError("Corrupt data file. Missing extension map %u. Skip record.\n", flow_record->ext_map);
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	
						continue;
					} 

					// if no filter is given, the result is always true
					ExpandRecord_v2( flow_record, extension_map_list->slot[flow_record->ext_map], NULL, &master_record);

					match = twin_start && (master_record.first < twin_start || master_record.last > twin_end) ? 0 : 1;

					// filter netflow record with user supplied filter
					if ( match ) 
						match = (*Engine->FilterEngine)(Engine);
	
					if ( match == 0 ) { // record failed to pass all filters
						// increment pointer by number of bytes for netflow record
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	
						// go to next record
						continue;
					}
					// Records passed filter -> continue record processing

					if ( netflow_version == 5 ) 
						again = Add_v5_output_record(&master_record, &peer);
					else
						again = Add_v9_output_record(&master_record, &peer);
	
					cnt++;
					numflows++;

					if ( peer.flush ) {
						ret = FlushBuffer(confirm);
	
						if ( ret < 0 ) {
							perror("Error sending data");
							CloseFile(nffile);
							DisposeFile(nffile);
							return;
						}
			
						if ( delay ) {
							// sleep as specified
							usleep(delay);
						}
						cnt = 0;
					}
	
					if ( again ) {
						if ( netflow_version == 5 ) 
							Add_v5_output_record(&master_record, &peer);
						else
							Add_v9_output_record(&master_record, &peer);
						cnt++;
					}

					} break;
				case ExtensionMapType: {
					extension_map_t *map = (extension_map_t *)flow_record;
	
					if ( Insert_Extension_Map(extension_map_list, map) ) {
						// flush new map
						
					} // else map already known and flushed
	
					} break;
				case ExporterRecordType:
				case SamplerRecordype:
				case ExporterInfoRecordType:
				case ExporterStatRecordType:
				case SamplerInfoRecordype:
						// Silently skip exporter/sampler records
					break;
			 	default: {
					LogError("Skip unknown record type %i\n", flow_record->type);
				}
			}
			// Advance pointer by number of bytes for netflow record
			flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	

		}
	} // while

	// flush still remaining records
	if ( cnt ) {
		ret = FlushBuffer(confirm);

		if ( ret < 0 ) {
			perror("Error sending data");
		}

	} // if cnt 

	if (nffile) {
		CloseFile(nffile);
		DisposeFile(nffile);
	}

	close(peer.sockfd);

	return;

} // End of send_data
Exemplo n.º 2
0
/*
 * process_data() - core read / filter / dispatch loop.
 *
 * Reads all data blocks of the current file chain (GetNextFile), expands
 * each common record into the per-map master_record, applies the optional
 * time window and the compiled filter (global Engine), and dispatches each
 * matching flow to exactly one of:
 *   - flow statistics (AddFlow), optionally plus element stats (AddStat)
 *   - element statistics only (AddStat)
 *   - the sort list (InsertFlow)
 *   - the output file nffile_w (AppendToBuffer), or
 *   - the print_record callback.
 *
 * Parameters:
 *   wfile        - output file name; only honoured when no stat/sort mode
 *                  is active (see write_file below)
 *   element_stat - collect element statistics
 *   flow_stat    - collect flow statistics
 *   sort_flows   - collect flows for later sorting
 *   print_header/print_record - printer callbacks; print_record is
 *                  disabled when any stat/sort mode is active
 *   twin_start/twin_end - time window filter; twin_start == 0 disables it
 *   limitflows   - stop after this many matched flows (0 = unlimited)
 *   tag          - passed through to print_record
 *   compress     - compression flag for the output file
 *   do_xstat     - also maintain extra statistics (xstat) for the output
 *
 * Returns the accumulated stat_record for all matched flows. Updates the
 * file-global counters total_flows, total_bytes, skipped_blocks and the
 * global time span t_first_flow/t_last_flow.
 */
stat_record_t process_data(char *wfile, int element_stat, int flow_stat, int sort_flows,
	printer_t print_header, printer_t print_record, time_t twin_start, time_t twin_end, 
	uint64_t limitflows, int tag, int compress, int do_xstat) {
common_record_t 	*flow_record;
master_record_t		*master_record;
nffile_t			*nffile_w, *nffile_r;
xstat_t				*xstat;
stat_record_t 		stat_record;
int 				done, write_file;

#ifdef COMPAT15
int	v1_map_done = 0;
#endif
	
	// time window of all matched flows
	// preset first_seen/msec_first to max values so the first match shrinks them
	memset((void *)&stat_record, 0, sizeof(stat_record_t));
	stat_record.first_seen = 0x7fffffff;
	stat_record.msec_first = 999;

	// Do the logic first

	// do not print flows when doing any stats are sorting
	if ( sort_flows || flow_stat || element_stat ) {
		print_record = NULL;
	}

	// do not write flows to file, when doing any stats
	// -w may apply for flow_stats later
	write_file = !(sort_flows || flow_stat || element_stat) && wfile;
	nffile_r = NULL;
	nffile_w = NULL;
	xstat  	 = NULL;

	// Get the first file handle
	nffile_r = GetNextFile(NULL, twin_start, twin_end);
	if ( !nffile_r ) {
		LogError("GetNextFile() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		return stat_record;
	}
	if ( nffile_r == EMPTY_LIST ) {
		LogError("Empty file list. No files to process\n");
		return stat_record;
	}

	// preset time window of all processed flows to the stat record in first flow file
	t_first_flow = nffile_r->stat_record->first_seen;
	t_last_flow  = nffile_r->stat_record->last_seen;

	// store infos away for later use
	// although multiple files may be processed, it is assumed that all 
	// have the same settings
	is_anonymized = IP_ANONYMIZED(nffile_r);
	strncpy(Ident, nffile_r->file_header->ident, IDENTLEN);
	Ident[IDENTLEN-1] = '\0';

	// prepare output file if requested
	if ( write_file ) {
		nffile_w = OpenNewFile(wfile, NULL, compress, IP_ANONYMIZED(nffile_r), NULL );
		if ( !nffile_w ) {
			if ( nffile_r ) {
				CloseFile(nffile_r);
				DisposeFile(nffile_r);
			}
			return stat_record;
		}
		if ( do_xstat ) {
			xstat = InitXStat(nffile_w);
			if ( !xstat ) {
				if ( nffile_r ) {
					CloseFile(nffile_r);
					DisposeFile(nffile_r);
				}
				return stat_record;
			}
		}
	}

	// setup Filter Engine to point to master_record, as any record read from file
	// is expanded into this record
	// Engine->nfrecord = (uint64_t *)master_record;

	done = 0;
	while ( !done ) {
	int i, ret;

		// get next data block from file
		ret = ReadBlock(nffile_r);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT ) 
					LogError("Skip corrupt data file '%s'\n",GetCurrentFilename());
				else 
					LogError("Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF: {
				// GetNextFile() recycles the nffile_r handle; 'next' only
				// signals EMPTY_LIST / NULL / success
				nffile_t *next = GetNextFile(nffile_r, twin_start, twin_end);
				if ( next == EMPTY_LIST ) {
					done = 1;
				} else if ( next == NULL ) {
					done = 1;
					LogError("Unexpected end of file list\n");
				} else {
					// Update global time span window
					if ( next->stat_record->first_seen < t_first_flow )
						t_first_flow = next->stat_record->first_seen;
					if ( next->stat_record->last_seen > t_last_flow ) 
						t_last_flow = next->stat_record->last_seen;
					// continue with next file
				}
				continue;

				} break; // not really needed
			default:
				// successfully read block
				total_bytes += ret;
		}


#ifdef COMPAT15
		if ( nffile_r->block_header->id == DATA_BLOCK_TYPE_1 ) {
			common_record_v1_t *v1_record = (common_record_v1_t *)nffile_r->buff_ptr;
			// create an extension map for v1 blocks
			if ( v1_map_done == 0 ) {
				extension_map_t *map = malloc(sizeof(extension_map_t) + 2 * sizeof(uint16_t) );
				if ( ! map ) {
					LogError("malloc() allocation error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
					exit(255);
				}
				map->type 	= ExtensionMapType;
				map->size 	= sizeof(extension_map_t) + 2 * sizeof(uint16_t);
				// pad record size to the next 4 byte boundary
				if (( map->size & 0x3 ) != 0 ) {
					map->size += 4 - ( map->size & 0x3 );
				}

				map->map_id = INIT_ID;

				map->ex_id[0]  = EX_IO_SNMP_2;
				map->ex_id[1]  = EX_AS_2;
				map->ex_id[2]  = 0;
				
				map->extension_size  = 0;
				map->extension_size += extension_descriptor[EX_IO_SNMP_2].size;
				map->extension_size += extension_descriptor[EX_AS_2].size;

				if ( Insert_Extension_Map(extension_map_list,map) && write_file ) {
					// flush new map
					AppendToBuffer(nffile_w, (void *)map, map->size);
				} // else map already known and flushed

				v1_map_done = 1;
			}

			// convert the records to v2 in place
			for ( i=0; i < nffile_r->block_header->NumRecords; i++ ) {
				common_record_t *v2_record = (common_record_t *)v1_record;
				Convert_v1_to_v2((void *)v1_record);
				// now we have a v2 record -> use size of v2_record->size
				v1_record = (common_record_v1_t *)((pointer_addr_t)v1_record + v2_record->size);
			}
			nffile_r->block_header->id = DATA_BLOCK_TYPE_2;
		}
#endif

		if ( nffile_r->block_header->id == Large_BLOCK_Type ) {
			// skip
			printf("Xstat block skipped ...\n");
			continue;
		}

		if ( nffile_r->block_header->id != DATA_BLOCK_TYPE_2 ) {
			if ( nffile_r->block_header->id == DATA_BLOCK_TYPE_1 ) {
				LogError("Can't process nfdump 1.5.x block type 1. Add --enable-compat15 to compile compatibility code. Skip block.\n");
			} else {
				LogError("Can't process block type %u. Skip block.\n", nffile_r->block_header->id);
			}
			skipped_blocks++;
			continue;
		}

		// walk all records of this data block
		flow_record = nffile_r->buff_ptr;
		for ( i=0; i < nffile_r->block_header->NumRecords; i++ ) {

			switch ( flow_record->type ) {
				case CommonRecordV0Type:
				case CommonRecordType:  {
					int match;
					uint32_t map_id = flow_record->ext_map;
					generic_exporter_t *exp_info = exporter_list[flow_record->exporter_sysid];
					// bounds check before indexing slot[] below
					if ( map_id >= MAX_EXTENSION_MAPS ) {
						LogError("Corrupt data file. Extension map id %u too big.\n", flow_record->ext_map);
						exit(255);
					}
					if ( extension_map_list->slot[map_id] == NULL ) {
						LogError("Corrupt data file. Missing extension map %u. Skip record.\n", flow_record->ext_map);
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	
						continue;
					} 

					total_flows++;
					// expand into the per-map master record and point the
					// filter engine at it
					master_record = &(extension_map_list->slot[map_id]->master_record);
					Engine->nfrecord = (uint64_t *)master_record;
					ExpandRecord_v2( flow_record, extension_map_list->slot[map_id], 
						exp_info ? &(exp_info->info) : NULL, master_record);

					// Time based filter
					// if no time filter is given, the result is always true
					match  = twin_start && (master_record->first < twin_start || master_record->last > twin_end) ? 0 : 1;
					match &= limitflows ? stat_record.numflows < limitflows : 1;

					// filter netflow record with user supplied filter
					if ( match ) 
						match = (*Engine->FilterEngine)(Engine);
	
					if ( match == 0 ) { // record failed to pass all filters
						// increment pointer by number of bytes for netflow record
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	
						// go to next record
						continue;
					}

					// Records passed filter -> continue record processing
					// Update statistics
					UpdateStat(&stat_record, master_record);

					// update number of flows matching a given map
					extension_map_list->slot[map_id]->ref_count++;
	
					// dispatch to exactly one processing mode
					if ( flow_stat ) {
						AddFlow(flow_record, master_record, extension_map_list->slot[map_id]);
						if ( element_stat ) {
							AddStat(flow_record, master_record);
						} 
					} else if ( element_stat ) {
						AddStat(flow_record, master_record);
					} else if ( sort_flows ) {
						InsertFlow(flow_record, master_record, extension_map_list->slot[map_id]);
					} else {
						if ( write_file ) {
							AppendToBuffer(nffile_w, (void *)flow_record, flow_record->size);
							if ( xstat ) 
								UpdateXStat(xstat, master_record);
						} else if ( print_record ) {
							char *string;
							// if we need to print out this record
							print_record(master_record, &string, tag);
							if ( string ) {
								if ( limitflows ) {
									if ( (stat_record.numflows <= limitflows) )
										printf("%s\n", string);
								} else 
									printf("%s\n", string);
							}
						} else { 
							// mutually exclusive conditions should prevent executing this code
							// this is buggy!
							printf("Bug! - this code should never get executed in file %s line %d\n", __FILE__, __LINE__);
						}
					} // sort_flows - else
					} break; 
				case ExtensionMapType: {
					extension_map_t *map = (extension_map_t *)flow_record;
	
					if ( Insert_Extension_Map(extension_map_list, map) && write_file ) {
						// flush new map
						AppendToBuffer(nffile_w, (void *)map, map->size);
					} // else map already known and flushed
					} break;
				case ExporterRecordType:
				case SamplerRecordype:
						// Silently skip exporter records
					break;
				case ExporterInfoRecordType: {
					// AddExporterInfo() returns 1 for a new exporter which
					// then gets propagated into the output file
					int ret = AddExporterInfo((exporter_info_record_t *)flow_record);
					if ( ret != 0 ) {
						if ( write_file && ret == 1 ) 
							AppendToBuffer(nffile_w, (void *)flow_record, flow_record->size);
					} else {
						LogError("Failed to add Exporter Record\n");
					}
					} break;
				case ExporterStatRecordType:
					AddExporterStat((exporter_stats_record_t *)flow_record);
					break;
				case SamplerInfoRecordype: {
					// same propagation scheme as ExporterInfoRecordType
					int ret = AddSamplerInfo((sampler_info_record_t *)flow_record);
					if ( ret != 0 ) {
						if ( write_file && ret == 1 ) 
							AppendToBuffer(nffile_w, (void *)flow_record, flow_record->size);
					} else {
						LogError("Failed to add Sampler Record\n");
					}
					} break;
				default: {
					LogError("Skip unknown record type %i\n", flow_record->type);
				}
			}

		// Advance pointer by number of bytes for netflow record
		flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	


		} // for all records

		// check if we are done, due to -c option 
		if ( limitflows ) 
			done = stat_record.numflows >= limitflows;

	} // while

	// NOTE(review): nffile_r->file_header->ident is still read below after
	// this CloseFile(); this assumes CloseFile() only closes the fd and the
	// structure stays valid until DisposeFile() - confirm against nffile API
	CloseFile(nffile_r);

	// flush output file
	if ( write_file ) {
		// flush current buffer to disc
		if ( nffile_w->block_header->NumRecords ) {
			if ( WriteBlock(nffile_w) <= 0 ) {
				LogError("Failed to write output buffer to disk: '%s'" , strerror(errno));
			} 
		}

		if ( xstat ) {
			if ( WriteExtraBlock(nffile_w, xstat->block_header ) <= 0 ) {
				LogError("Failed to write xstat buffer to disk: '%s'" , strerror(errno));
			} 
		}

		/* Stat info */
		// (redundant nested check: we are already inside 'if ( write_file )')
		if ( write_file ) {
			/* Copy stat info and close file */
			memcpy((void *)nffile_w->stat_record, (void *)&stat_record, sizeof(stat_record_t));
			CloseUpdateFile(nffile_w, nffile_r->file_header->ident );
			nffile_w = DisposeFile(nffile_w);
		} // else stdout
	}	 

	PackExtensionMapList(extension_map_list);

	DisposeFile(nffile_r);
	return stat_record;

} // End of process_data
Exemplo n.º 3
0
/* Read the next flow record and fill the pre-prepared structure lnf_rec.
 *
 * Walks the current data block record by record; when the block is
 * exhausted (blk_record_remains == 0) the next block is read from the
 * underlying nffile. Non-data records (exporter/sampler bookkeeping,
 * extension maps) are consumed internally and never surface to the caller.
 *
 * Returns LNF_OK when lnf_rec->master_record holds a decoded record,
 * LNF_EOF at end of input, or an LNF_ERR_* code for corrupt or
 * unsupported data. */
int lnf_read(lnf_file_t *lnf_file, lnf_rec_t *lnf_rec) {

int ret;
uint32_t map_id;
extension_map_t *map;
int i;

begin:

	if (lnf_file->blk_record_remains == 0) {
	/* all records in block have been processed, we are going to load next block */

		// get next data block from file
		if (lnf_file->nffile) {
			ret = ReadBlock(lnf_file->nffile);
			lnf_file->processed_blocks++;
		} else {	
			ret = NF_EOF;		/* the first file in the list */
		}

		switch (ret) {
			case NF_CORRUPT:
				return LNF_ERR_CORRUPT;
			case NF_ERROR:
				return LNF_ERR_READ;
			case NF_EOF: 
				return LNF_EOF;
			default:
				// successfully read block
				lnf_file->processed_bytes += ret;
		}

		/* block types to be skipped  -> goto begin */
		/* block types that are unknown -> return */
		switch (lnf_file->nffile->block_header->id) {
			case DATA_BLOCK_TYPE_1:		/* old record type - nfdump 1.5 */
					/* 1.5.x blocks are counted and skipped.
					 * NOTE(review): formerly unreachable code hinted at
					 * returning LNF_ERR_COMPAT15 instead - confirm that
					 * silently skipping is the intended behaviour. */
					lnf_file->skipped_blocks++;
					goto begin;
			case DATA_BLOCK_TYPE_2:		/* common record type - normally processed */
					break;
			case Large_BLOCK_Type:		/* nothing to decode here */
					lnf_file->skipped_blocks++;
					goto begin;
			default: 
					lnf_file->skipped_blocks++;
					return LNF_ERR_UNKBLOCK;
		}

		lnf_file->flow_record = lnf_file->nffile->buff_ptr;
		lnf_file->blk_record_remains = lnf_file->nffile->block_header->NumRecords;
	} /* reading block */

	/* there are some records to process - continue with the next record */
	lnf_file->blk_record_remains--;

	switch (lnf_file->flow_record->type) {
		case ExporterRecordType:
		case SamplerRecordype:
		case ExporterInfoRecordType:
		case ExporterStatRecordType:
		case SamplerInfoRecordype:
				/* bookkeeping records - just skip */
				FLOW_RECORD_NEXT(lnf_file->flow_record);	
				goto begin;
		case ExtensionMapType: 
				/* register the map so later data records can be expanded */
				map = (extension_map_t *)lnf_file->flow_record;
				Insert_Extension_Map(lnf_file->extension_map_list, map);
				FLOW_RECORD_NEXT(lnf_file->flow_record);	
				goto begin;

		case CommonRecordV0Type:
		case CommonRecordType:
				/* data record type - go ahead */
				break;

		default:
				FLOW_RECORD_NEXT(lnf_file->flow_record);	
				return LNF_ERR_UNKREC;

	}

	/* we are sure that record is CommonRecordType */
	map_id = lnf_file->flow_record->ext_map;
	if ( map_id >= MAX_EXTENSION_MAPS ) {
		/* map id out of range - corrupt file */
		FLOW_RECORD_NEXT(lnf_file->flow_record);	
		return LNF_ERR_EXTMAPB;
	}
	if ( lnf_file->extension_map_list->slot[map_id] == NULL ) {
		/* map id never announced in the stream - corrupt file */
		FLOW_RECORD_NEXT(lnf_file->flow_record);	
		return LNF_ERR_EXTMAPM;
	} 


	// changed in 1.6.8 - added exporter info 
	ExpandRecord_v2(lnf_file->flow_record, lnf_file->extension_map_list->slot[map_id], NULL, lnf_rec->master_record);

	// update number of flows matching a given map
	lnf_file->extension_map_list->slot[map_id]->ref_count++;

	// Move pointer by number of bytes for netflow record
	FLOW_RECORD_NEXT(lnf_file->flow_record);	

	// mark which extensions are present in this record; the map's ex_id
	// list is 0-terminated
	bit_array_clear(lnf_rec->extensions_arr);

	i = 0;
	while (lnf_rec->master_record->map_ref->ex_id[i]) {
		__bit_array_set(lnf_rec->extensions_arr, lnf_rec->master_record->map_ref->ex_id[i], 1);
		i++;
	}

	return LNF_OK;

} /* end of lnf_read */
Exemplo n.º 4
0
/*
 * send_data() - replay stored flow records as netflow v5 or v9 PDUs
 * (old nfdump 1.5-style file API: raw fd plus explicit block header and
 * read buffer).
 *
 * Reads all files of the file chain (GetNextFile), expands each common
 * record into master_record, applies the optional time window and the
 * compiled filter (global Engine), optionally anonymizes the IP
 * addresses, and feeds every matching record to the v5 or v9 encoder.
 * Full send buffers are flushed to the global 'peer' socket; 'delay'
 * microseconds of sleep follow each flushed packet when requested.
 *
 * Parameters:
 *   rfile           - read file name (consumed by the file list logic)
 *   twin_start/_end - time window filter; twin_start == 0 disables it
 *   count           - unused in this function (kept for interface compat)
 *   delay           - usleep() delay after each sent packet, in usec
 *   confirm         - passed through to FlushBuffer()
 *   anon            - anonymize v4/v6 addresses before sending
 *   netflow_version - 5 selects the v5 encoder, anything else v9
 */
static void send_data(char *rfile, time_t twin_start, 
			time_t twin_end, uint32_t count, unsigned int delay, int confirm, int anon, int netflow_version) {
data_block_header_t in_block_header;					
master_record_t		master_record;
common_record_t		*flow_record, *in_buff;
stat_record_t 		*stat_record;
int 		i, rfd, done, ret, again;
uint32_t	numflows, cnt;
char 		*string;

#ifdef COMPAT15
int	v1_map_done = 0;
#endif
	
	rfd = GetNextFile(0, twin_start, twin_end, &stat_record);
	if ( rfd < 0 ) {
		if ( rfd == FILE_ERROR )
			fprintf(stderr, "Can't open file for reading: %s\n", strerror(errno));
		return;
	}

	// prepare read and send buffer
	in_buff = (common_record_t *) malloc(BUFFSIZE);
	peer.send_buffer   	= malloc(UDP_PACKET_SIZE);
	peer.flush			= 0;
	if ( !in_buff || !peer.send_buffer ) {
		perror("Memory allocation error");
		// free whichever allocation succeeded - free(NULL) is a no-op
		free(in_buff);
		free(peer.send_buffer);
		peer.send_buffer = NULL;
		close(rfd);
		return;
	}
	peer.writeto  = peer.send_buffer;
	peer.endp  	  = (void *)((pointer_addr_t)peer.send_buffer + UDP_PACKET_SIZE - 1);

	if ( netflow_version == 5 ) 
		Init_v5_v7_output(&peer);
	else 
		Init_v9_output(&peer);

	numflows	= 0;
	done	 	= 0;

	// setup Filter Engine to point to master_record, as any record read from file
	// is expanded into this record
	Engine->nfrecord = (uint64_t *)&master_record;

	cnt = 0;
	while ( !done ) {
		// get next data block from file
		ret = ReadBlock(rfd, &in_block_header, (void *)in_buff, &string);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT ) 
					fprintf(stderr, "Skip corrupt data file '%s': '%s'\n",GetCurrentFilename(), string);
				else 
					fprintf(stderr, "Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF:
				rfd = GetNextFile(rfd, twin_start, twin_end, NULL);
				if ( rfd < 0 ) {
					if ( rfd == NF_ERROR )
						fprintf(stderr, "Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );

					// rfd == EMPTY_LIST
					done = 1;
				} // else continue with next file
				continue;
		}

#ifdef COMPAT15
		if ( in_block_header.id == DATA_BLOCK_TYPE_1 ) {
			common_record_v1_t *v1_record = (common_record_v1_t *)in_buff;
			// create an extension map for v1 blocks
			if ( v1_map_done == 0 ) {
				extension_map_t *map = malloc(sizeof(extension_map_t) + 2 * sizeof(uint16_t) );
				if ( ! map ) {
					perror("Memory allocation error");
					exit(255);
				}
				map->type 	= ExtensionMapType;
				map->size 	= sizeof(extension_map_t) + 2 * sizeof(uint16_t);
				map->map_id = INIT_ID;
				map->ex_id[0]  = EX_IO_SNMP_2;
				map->ex_id[1]  = EX_AS_2;
				map->ex_id[2]  = 0;
				
				Insert_Extension_Map(&extension_map_list, map);
				v1_map_done = 1;
			}

			// convert the records to v2 in place
			for ( i=0; i < in_block_header.NumRecords; i++ ) {
				common_record_t *v2_record = (common_record_t *)v1_record;
				Convert_v1_to_v2((void *)v1_record);
				// now we have a v2 record -> use size of v2_record->size
				v1_record = (common_record_v1_t *)((pointer_addr_t)v1_record + v2_record->size);
			}
			in_block_header.id = DATA_BLOCK_TYPE_2;
		}
#endif

		if ( in_block_header.id != DATA_BLOCK_TYPE_2 ) {
			fprintf(stderr, "Can't process block type %u. Skip block.\n", in_block_header.id);
			continue;
		}

		// cnt is the number of records which survived the filter
		// and were added to the output buffer
		flow_record = in_buff;

		for ( i=0; i < in_block_header.NumRecords; i++ ) {
			int match;

			if ( flow_record->type == CommonRecordType ) {
				if ( extension_map_list.slot[flow_record->ext_map] == NULL ) {
					fprintf(stderr, "Corrupt data file. Missing extension map %u. Skip record.\n", flow_record->ext_map);
					flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	
					continue;
				} 

				// if no filter is given, the result is always true
				ExpandRecord_v2( flow_record, extension_map_list.slot[flow_record->ext_map], &master_record);

				match = twin_start && (master_record.first < twin_start || master_record.last > twin_end) ? 0 : 1;

				// filter netflow record with user supplied filter
				if ( match ) 
					match = (*Engine->FilterEngine)(Engine);

				if ( match == 0 ) { // record failed to pass all filters
					// increment pointer by number of bytes for netflow record
					flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	
					// go to next record
					continue;
				}
				// Records passed filter -> continue record processing

				// anonymize addresses in place before encoding
				if ( anon ) {
					if ( (flow_record->flags & FLAG_IPV6_ADDR ) == 0 ) {
						master_record.v4.srcaddr = anonymize(master_record.v4.srcaddr);
						master_record.v4.dstaddr = anonymize(master_record.v4.dstaddr);
					} else {
						uint64_t	anon_ip[2];
						anonymize_v6(master_record.v6.srcaddr, anon_ip);
						master_record.v6.srcaddr[0] = anon_ip[0];
						master_record.v6.srcaddr[1] = anon_ip[1];
	
						anonymize_v6(master_record.v6.dstaddr, anon_ip);
						master_record.v6.dstaddr[0] = anon_ip[0];
						master_record.v6.dstaddr[1] = anon_ip[1];
					}
				}

				// 'again' is set when the encoder asks to re-submit the
				// record after the buffer has been flushed
				if ( netflow_version == 5 ) 
					again = Add_v5_output_record(&master_record, &peer);
				else
					again = Add_v9_output_record(&master_record, &peer);

				cnt++;
				numflows++;

				if ( peer.flush ) {
					ret = FlushBuffer(confirm);
	
					if ( ret < 0 ) {
						perror("Error sending data");
						// release buffers - the original code leaked them here
						free(in_buff);
						free(peer.send_buffer);
						peer.send_buffer = NULL;
						close(rfd);
						return;
					}
		
					if ( delay ) {
						// sleep as specified
						usleep(delay);
					}
					cnt = 0;
				}

				if ( again ) {
					if ( netflow_version == 5 ) 
						Add_v5_output_record(&master_record, &peer);
					else
						Add_v9_output_record(&master_record, &peer);
					cnt++;
				}

			} else if ( flow_record->type == ExtensionMapType ) {
				extension_map_t *map = (extension_map_t *)flow_record;

				if ( Insert_Extension_Map(&extension_map_list, map) ) {
					// flush new map
					
				} // else map already known and flushed

			} else {
				fprintf(stderr, "Skip unknown record type %i\n", flow_record->type);
			}

			// Advance pointer by number of bytes for netflow record
			flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	

		}
	} // while

	// flush still remaining records
	if ( cnt ) {
		ret = FlushBuffer(confirm);

		if ( ret < 0 ) {
			perror("Error sending data");
		}

	} // if cnt 

	// rfd may be negative here when the file list ended; fd 0 is used as
	// the 'no previous file' sentinel by GetNextFile(), so > 0 is correct
	if ( rfd > 0 ) 
		close(rfd);

	close(peer.sockfd);

	// release buffers; NULL the global pointer so stale use is detectable
	free(in_buff);
	free(peer.send_buffer);
	peer.send_buffer = NULL;

	return;

} // End of send_data
Exemplo n.º 5
0
/*
 * readNFCap() - load all flow records of one already-open nfcap file
 * (rfd) whose [first, last] time stamps fall into [start, end] into the
 * global flow_records[file_index] array, counting them in
 * flow_records[file_index].total.
 *
 * NOTE(review): ReadBlock() is passed '&in_buff' here, while other
 * callers in this file pass the buffer pointer itself ('in_buff'); if
 * the file-scope 'in_buff' is a pointer variable rather than an array,
 * this reads block data over the pointer itself - confirm in_buff's
 * declaration.
 * NOTE(review): p_mr is advanced without any bound check against the
 * capacity of flow_records[file_index].master - confirm the caller
 * sizes it for the worst case.
 */
void readNFCap(int rfd,int file_index,uint32_t start,uint32_t end)
{
	data_block_header_t block_header;
	master_record_t master_record;
	master_record_t *p_mr = NULL;
	common_record_t *flow_record; // *in_buff;

	char *string;
	char toString[1024];
	int ret,done =0,i=0;


	memset(in_buff,0,BUFFSIZE);
	p_mr = (master_record_t *)flow_records[file_index].master;

	while (!done) {
		// get next data block from file
		ret = ReadBlock(rfd, &block_header, (void *) &in_buff, &string);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if (ret == NF_CORRUPT)
					fprintf(stderr, "Skip corrupt data file '%s': '%s'\n",
							GetCurrentFilename(), string);
				else
					fprintf(stderr, "Read error in file '%s': %s\n",
							GetCurrentFilename(), strerror(errno));
				// fall through - get next file in chain
			case NF_EOF:
				goto out ;
		}

		if (block_header.id != DATA_BLOCK_TYPE_2) {
			fprintf(stderr, "Can't process block type %u. Skip block.\n",
					block_header.id);
			continue;
		}

		// walk all records of this data block
		flow_record = in_buff;
		for (i = 0; i < block_header.NumRecords; i++) {
			memset(toString,0,1024);

			if(unlikely(!p_mr)) {
				printf("**** p_mr is NULL!");
				goto out;
			}

			if (likely(flow_record->type == CommonRecordType)) {
				uint32_t map_id = flow_record->ext_map;
				if (unlikely(extension_map_list.slot[map_id] == NULL)) {
					// NOTE(review): the message is built into toString but
					// never printed or returned - confirm intended use
					snprintf(
							toString,
							1024,
							"Corrupt data file! No such extension map id: %u. Skip record",
							flow_record->ext_map);
					toString[1023] = '\0';
				} else {
					ExpandRecord_v2(flow_record,
							extension_map_list.slot[flow_record->ext_map],
							&master_record);
					// update number of flows matching a given map
					extension_map_list.slot[map_id]->ref_count++;

					if(!IS_OUT_TIME_INTERVAL(master_record.first,start,master_record.last,end)) {
						//add the record because it falls within the interval of interest
						memcpy(p_mr,&master_record,sizeof(master_record_t));
						//increment the counter
						flow_records[file_index].total++;
						//advance the pointer
						p_mr++;
					}
				}
			} else if (flow_record->type == ExtensionMapType) {
				// register the map so later data records can be expanded
				extension_map_t *map = (extension_map_t *) flow_record;
				Insert_Extension_Map(&extension_map_list, map);
			} else {
				fprintf(stderr, "Skip unknown record type %i\n",
						flow_record->type);
			}

			// Advance pointer by number of bytes for netflow record
			flow_record = (common_record_t *) ((pointer_addr_t) flow_record + flow_record->size);

		} // for all records

	} // while

out:
	return ;
}
Exemplo n.º 6
0
// Convert a terminated pcap flow node into an nfdump common_record_t and
// append it to the output buffer of the flow source 'fs'.
// Returns 1 on success, 0 on error (allocation failure, no buffer space,
// or an address family other than AF_INET/AF_INET6).
// Side effects: updates fs->nffile statistics, fs->first_seen/last_seen and,
// on the first call, allocates and registers the global pcap extension map.
int StorePcapFlow(FlowSource_t *fs, struct FlowNode *Node) {
common_record_t		*common_record;
uint32_t			packets, bytes, pcap_output_record_size;
uint64_t	start_time, end_time;
int			j, id;
char		*string;
void		*data_ptr;

	// lazily create and register the pcap extension map on first use
	if ( !pcap_extension_map ) {
		pcap_extension_map	= (extension_map_t *)malloc(pcap_extension_info.map->size);
		if ( !pcap_extension_map ) {
			LogError("Process_pcap: malloc() error in %s line %d: %s\n", __FILE__, __LINE__, strerror (errno));
			return 0;
		}
		memcpy((void *)pcap_extension_map, (void *)pcap_extension_info.map, pcap_extension_info.map->size);
		if ( !AddExtensionMap(fs, pcap_extension_map) ) {
			LogError("Process_pcap: Fatal: AddExtensionMap() failed in %s line %d\n", __FILE__, __LINE__);
			return 0;
		}

	}

	// select output record size by address family; any other family is rejected,
	// so 'packets'/'bytes' below are always assigned before use
	if ( Node->version == AF_INET6 ) {
		pcap_output_record_size = pcap_output_record_size_v6;
		dbg_printf("Store Flow v6 node: size: %u\n", pcap_output_record_size);
	} else if ( Node->version == AF_INET ) {
		pcap_output_record_size = pcap_output_record_size_v4;
		dbg_printf("Store Flow v4 node: size: %u\n", pcap_output_record_size);
	} else {
		LogError("Process_pcap: Unexpected version in %s line %d: %u\n", __FILE__, __LINE__, Node->version);
		return 0;
	}

	// output buffer size check for all expected records
	if ( !CheckBufferSpace(fs->nffile, pcap_output_record_size) ) {
		// fishy! - should never happen. maybe disk full?
		LogError("Process_pcap: output buffer size error. Abort pcap record processing");
		return 0;
	}

	// map output record to memory buffer
	common_record	= (common_record_t *)fs->nffile->buff_ptr;

	// header data
	common_record->flags		= 0;
  	common_record->type			= CommonRecordType;
	common_record->exporter_sysid = 0;
	common_record->ext_map		= pcap_extension_map->map_id;
	common_record->size			= pcap_output_record_size;

	// pcap common fields
	common_record->srcport		= Node->src_port;
	common_record->dstport		= Node->dst_port;
	common_record->tcp_flags	= Node->flags;
	common_record->prot			= Node->proto;
	common_record->tos			= 0;
	common_record->fwd_status 	= 0;

	// write the fixed v4/v6 address+counter block directly behind the header;
	// 'data_ptr' ends up pointing to the first optional extension slot
	if ( Node->version == AF_INET6 ) {
		SetFlag(common_record->flags, FLAG_IPV6_ADDR);
		pcap_v6_block_t *pcap_v6_block = (pcap_v6_block_t *)common_record->data;
		pcap_v6_block->srcaddr[0] = Node->src_addr.v6[0];
		pcap_v6_block->srcaddr[1] = Node->src_addr.v6[1];
		pcap_v6_block->dstaddr[0] = Node->dst_addr.v6[0];
		pcap_v6_block->dstaddr[1] = Node->dst_addr.v6[1];
		pcap_v6_block->dPkts	  = packets = Node->packets;
		pcap_v6_block->dOctets	  = bytes   = Node->bytes;

		data_ptr = (void *)pcap_v6_block->data;
	} else {
		pcap_v4_block_t *pcap_v4_block = (pcap_v4_block_t *)common_record->data;
		pcap_v4_block->srcaddr = Node->src_addr.v4;
		pcap_v4_block->dstaddr = Node->dst_addr.v4;
		pcap_v4_block->dPkts   = packets = Node->packets;
		pcap_v4_block->dOctets = bytes   = Node->bytes;

		data_ptr = (void *)pcap_v4_block->data;
	}

	// process optional extensions
	j = 0;
	while ( (id = pcap_extension_map->ex_id[j]) != 0 ) {
		switch (id) {
			case EX_IO_SNMP_2:	{	// 2 byte input/output interface index
				tpl_ext_4_t *tpl = (tpl_ext_4_t *)data_ptr;
				// pcap capture has no interface indices - write zeros
 					tpl->input  = 0;
 					tpl->output = 0;
				data_ptr = (void *)tpl->data;
				} break;
			default:
				// this should never happen, as pcap has no other extensions
				LogError("Process_pcap: Unexpected extension %i for pcap record. Skip extension", id);
		}
		j++;
	}

	// flow start/end: seconds plus millisecond remainder from the pcap timestamps
	common_record->first 		= Node->t_first.tv_sec;
	common_record->msec_first	= Node->t_first.tv_usec / 1000;

	common_record->last 		= Node->t_last.tv_sec;
	common_record->msec_last	= Node->t_last.tv_usec / 1000;

	// absolute start/end time in msec since epoch
	start_time = (1000LL * (uint64_t)common_record->first) + (uint64_t)common_record->msec_first;
	end_time   = (1000LL * (uint64_t)common_record->last) + (uint64_t)common_record->msec_last;

	// update first_seen, last_seen
	if ( start_time < fs->first_seen )
		fs->first_seen = start_time;
	if ( end_time > fs->last_seen )
		fs->last_seen = end_time;


	// Update stats
	switch (common_record->prot) {
		case IPPROTO_ICMP:
		case IPPROTO_ICMPV6:
			fs->nffile->stat_record->numflows_icmp++;
			fs->nffile->stat_record->numpackets_icmp += packets;
			fs->nffile->stat_record->numbytes_icmp   += bytes;
			// fix odd CISCO behaviour for ICMP port/type in src port:
			// move the ICMP type/code bytes into dstport (byte-swapped)
			if ( common_record->srcport != 0 ) {
				uint8_t *s1, *s2;
				s1 = (uint8_t *)&(common_record->srcport);
				s2 = (uint8_t *)&(common_record->dstport);
				s2[0] = s1[1];
				s2[1] = s1[0];
				common_record->srcport = 0;
			}
			break;
		case IPPROTO_TCP:
			fs->nffile->stat_record->numflows_tcp++;
			fs->nffile->stat_record->numpackets_tcp += packets;
			fs->nffile->stat_record->numbytes_tcp   += bytes;
			break;
		case IPPROTO_UDP:
			fs->nffile->stat_record->numflows_udp++;
			fs->nffile->stat_record->numpackets_udp += packets;
			fs->nffile->stat_record->numbytes_udp   += bytes;
			break;
		default:
			fs->nffile->stat_record->numflows_other++;
			fs->nffile->stat_record->numpackets_other += packets;
			fs->nffile->stat_record->numbytes_other   += bytes;
	}

	fs->nffile->stat_record->numflows++;
	fs->nffile->stat_record->numpackets	+= packets;
	fs->nffile->stat_record->numbytes	+= bytes;

	// optional extended statistics: bytes-per-packet and port histograms
	if ( fs->xstat ) {
		uint32_t bpp = packets ? bytes/packets : 0;
		if ( bpp > MAX_BPP ) 
			bpp = MAX_BPP;
		if ( common_record->prot == IPPROTO_TCP ) {
			fs->xstat->bpp_histogram->tcp.bpp[bpp]++;
			fs->xstat->bpp_histogram->tcp.count++;

			fs->xstat->port_histogram->src_tcp.port[common_record->srcport]++;
			fs->xstat->port_histogram->dst_tcp.port[common_record->dstport]++;
			fs->xstat->port_histogram->src_tcp.count++;
			fs->xstat->port_histogram->dst_tcp.count++;
		} else if ( common_record->prot == IPPROTO_UDP ) {
			fs->xstat->bpp_histogram->udp.bpp[bpp]++;
			fs->xstat->bpp_histogram->udp.count++;

			fs->xstat->port_histogram->src_udp.port[common_record->srcport]++;
			fs->xstat->port_histogram->dst_udp.port[common_record->dstport]++;
			fs->xstat->port_histogram->src_udp.count++;
			fs->xstat->port_histogram->dst_udp.count++;
		}
	}

	// debug/verbose output: expand and print the record just written
	if ( verbose ) {
		master_record_t master_record;
		ExpandRecord_v2((common_record_t *)common_record, &pcap_extension_info, NULL, &master_record);
	 	format_file_block_record(&master_record, &string, 0);
		printf("%s\n", string);
	}

	// update file record size ( -> output buffer size )
	fs->nffile->block_header->NumRecords += 1;
	fs->nffile->block_header->size 		 += pcap_output_record_size;
	fs->nffile->buff_ptr 				 = data_ptr;

	return 1;

} /* End of StorePcapFlow */
Exemplo n.º 7
0
// Process a raw NetFlow v1 datagram: convert every v1 record in 'in_buff'
// (length 'in_buff_cnt' bytes) into nfdump common_record_t format and append
// the result to fs->nffile's output buffer.
// The input buffer may contain several concatenated v1 packets; the outer
// while loop walks them header by header.
// NOTE: the v1 header fields are byte-swapped IN PLACE, so in_buff is
// modified by this function.
void Process_v1(void *in_buff, ssize_t in_buff_cnt, FlowSource_t *fs) {
netflow_v1_header_t	*v1_header;
netflow_v1_record_t *v1_record;
exporter_v1_t 		*exporter;
extension_map_t		*extension_map;
common_record_t		*common_record;
uint64_t	start_time, end_time, boot_time;
uint32_t   	First, Last;
uint16_t	count;
uint8_t		flags;
int			i, done, flow_record_length;
ssize_t		size_left;
char		*string;

		// map v1 data structure to input buffer
		v1_header 	= (netflow_v1_header_t *)in_buff;

		// identify (or create) the exporter this datagram came from
		exporter = GetExporter(fs, v1_header);
		if ( !exporter ) {
			syslog(LOG_ERR,"Process_v1: Exporter NULL: Abort v1 record processing");
			return;
		}
		flags = 0;

		exporter->packets++;

		extension_map = exporter->extension_map;
		flow_record_length = NETFLOW_V1_RECORD_LENGTH;

		// this many data to process
		size_left	= in_buff_cnt;

		common_record = fs->nffile->buff_ptr;
		done = 0;
		while ( !done ) {
			v1_block_t			*v1_block;

			/* Process header */
	
			// count check
	  		count	= ntohs(v1_header->count);
			if ( count > NETFLOW_V1_MAX_RECORDS ) {
				syslog(LOG_ERR,"Process_v1: Unexpected record count in header: %i. Abort v1 record processing", count);
				fs->nffile->buff_ptr = (void *)common_record;
				return;
			}

			// input buffer size check for all expected records
			if ( size_left < ( NETFLOW_V1_HEADER_LENGTH + count * flow_record_length) ) {
				syslog(LOG_ERR,"Process_v1: Not enough data to process v1 record. Abort v1 record processing");
				fs->nffile->buff_ptr = (void *)common_record;
				return;
			}
	
			// output buffer size check for all expected records
			if ( !CheckBufferSpace(fs->nffile, count * v1_output_record_size) ) {
				// fishy! - should never happen. maybe disk full?
				syslog(LOG_ERR,"Process_v1: output buffer size error. Abort v1 record processing");
				return;
			}

			// map output record to memory buffer
			common_record	= (common_record_t *)fs->nffile->buff_ptr;
			v1_block		= (v1_block_t *)common_record->data;

			// byte-swap header fields in place (input buffer is modified)
	  		v1_header->SysUptime	 = ntohl(v1_header->SysUptime);
	  		v1_header->unix_secs	 = ntohl(v1_header->unix_secs);
	  		v1_header->unix_nsecs	 = ntohl(v1_header->unix_nsecs);
	
			/* calculate boot time in msec */
			boot_time  = ((uint64_t)(v1_header->unix_secs)*1000 + 
					((uint64_t)(v1_header->unix_nsecs) / 1000000) ) - (uint64_t)(v1_header->SysUptime);
	
			// process all records
			v1_record	= (netflow_v1_record_t *)((pointer_addr_t)v1_header + NETFLOW_V1_HEADER_LENGTH);

			/* loop over each records associated with this header */
			for (i = 0; i < count; i++) {
				pointer_addr_t	bsize;
				void	*data_ptr;
				uint8_t *s1, *s2;
				int j, id;
				// header data
	  			common_record->flags		= flags;
	  			common_record->type			= CommonRecordType;
	  			common_record->exporter_sysid = exporter->info.sysid;
	  			common_record->ext_map		= extension_map->map_id;
	  			common_record->size			= v1_output_record_size;

				// v1 common fields
	  			common_record->srcport		= ntohs(v1_record->srcport);
	  			common_record->dstport		= ntohs(v1_record->dstport);
	  			common_record->tcp_flags	= v1_record->tcp_flags;
	  			common_record->prot			= v1_record->prot;
	  			common_record->tos			= v1_record->tos;
	  			common_record->fwd_status 	= 0;
	  			common_record->reserved 	= 0;

				// v1 typed data as fixed struct v1_block
	  			v1_block->srcaddr	= ntohl(v1_record->srcaddr);
	  			v1_block->dstaddr	= ntohl(v1_record->dstaddr);
	  			v1_block->dPkts  	= ntohl(v1_record->dPkts);
	  			v1_block->dOctets	= ntohl(v1_record->dOctets);

				// process optional extensions; data_ptr walks forward through
				// the output record as each extension is appended
				data_ptr = (void *)v1_block->data;
				j = 0;
				while ( (id = extension_map->ex_id[j]) != 0 ) {
					switch (id) {
						case EX_IO_SNMP_2:	{	// 2 byte input/output interface index
							tpl_ext_4_t *tpl = (tpl_ext_4_t *)data_ptr;
	  						tpl->input  = ntohs(v1_record->input);
	  						tpl->output = ntohs(v1_record->output);
							data_ptr = (void *)tpl->data;
							} break;
						case EX_NEXT_HOP_v4:	 {	// IPv4 next hop
							tpl_ext_9_t *tpl = (tpl_ext_9_t *)data_ptr;
							tpl->nexthop = ntohl(v1_record->nexthop);
							data_ptr = (void *)tpl->data;
							} break;
						case EX_ROUTER_IP_v4:	 {	// IPv4 router address
							tpl_ext_23_t *tpl = (tpl_ext_23_t *)data_ptr;
							tpl->router_ip = fs->ip.v4;
							data_ptr = (void *)tpl->data;
							ClearFlag(common_record->flags, FLAG_IPV6_EXP);
							} break;
						case EX_RECEIVED: {
							// timestamp the collector received the datagram, in msec
							tpl_ext_27_t *tpl = (tpl_ext_27_t *)data_ptr;
							tpl->received  = (uint64_t)((uint64_t)fs->received.tv_sec * 1000LL) + (uint64_t)((uint64_t)fs->received.tv_usec / 1000LL);
							data_ptr = (void *)tpl->data;
							} break;

						default:
							// this should never happen, as v1 has no other extensions
							syslog(LOG_ERR,"Process_v1: Unexpected extension %i for v1 record. Skip extension", id);
					}
					j++;
				}
	
				// Time issues: First/Last are msec since router boot (32 bit,
				// wraps about every 49.7 days)
	  			First	 				= ntohl(v1_record->First);
	  			Last		 			= ntohl(v1_record->Last);

				if ( First > Last ) {
					/* First in msec, in case of msec overflow, between start and end */
					start_time = boot_time - 0x100000000LL + (uint64_t)First;
				} else {
					start_time = boot_time + (uint64_t)First;
				}

				/* end time in msecs */
				end_time = (uint64_t)Last + boot_time;

				// if overflow happened after flow ended but before got exported
				if ( Last > v1_header->SysUptime ) {
					start_time  -= 0x100000000LL;
					end_time    -= 0x100000000LL;
				}

				// split absolute msec times into sec + msec remainder
				common_record->first 		= start_time/1000;
				common_record->msec_first	= start_time - common_record->first*1000;
	
				common_record->last 		= end_time/1000;
				common_record->msec_last	= end_time - common_record->last*1000;
	
				// update first_seen, last_seen
				if ( start_time < fs->first_seen )
					fs->first_seen = start_time;
				if ( end_time > fs->last_seen )
					fs->last_seen = end_time;
	
	
				// Update stats
				switch (common_record->prot) {
					case IPPROTO_ICMP:
						fs->nffile->stat_record->numflows_icmp++;
						fs->nffile->stat_record->numpackets_icmp += v1_block->dPkts;
						fs->nffile->stat_record->numbytes_icmp   += v1_block->dOctets;
						// fix odd CISCO behaviour for ICMP port/type in src port
						if ( common_record->srcport != 0 ) {
							s1 = (uint8_t *)&(common_record->srcport);
							s2 = (uint8_t *)&(common_record->dstport);
							s2[0] = s1[1];
							s2[1] = s1[0];
							common_record->srcport = 0;
						}
						break;
					case IPPROTO_TCP:
						fs->nffile->stat_record->numflows_tcp++;
						fs->nffile->stat_record->numpackets_tcp += v1_block->dPkts;
						fs->nffile->stat_record->numbytes_tcp   += v1_block->dOctets;
						break;
					case IPPROTO_UDP:
						fs->nffile->stat_record->numflows_udp++;
						fs->nffile->stat_record->numpackets_udp += v1_block->dPkts;
						fs->nffile->stat_record->numbytes_udp   += v1_block->dOctets;
						break;
					default:
						fs->nffile->stat_record->numflows_other++;
						fs->nffile->stat_record->numpackets_other += v1_block->dPkts;
						fs->nffile->stat_record->numbytes_other   += v1_block->dOctets;
				}
				exporter->flows++;
				fs->nffile->stat_record->numflows++;
				fs->nffile->stat_record->numpackets	+= v1_block->dPkts;
				fs->nffile->stat_record->numbytes	+= v1_block->dOctets;

				// optional extended statistics: bpp and port histograms
				if ( fs->xstat ) {
					uint32_t bpp = v1_block->dPkts ? v1_block->dOctets/v1_block->dPkts : 0;
					if ( bpp > MAX_BPP ) 
						bpp = MAX_BPP;
					if ( common_record->prot == IPPROTO_TCP ) {
						fs->xstat->bpp_histogram->tcp.bpp[bpp]++;
						fs->xstat->bpp_histogram->tcp.count++;

						fs->xstat->port_histogram->src_tcp.port[common_record->srcport]++;
						fs->xstat->port_histogram->dst_tcp.port[common_record->dstport]++;
						fs->xstat->port_histogram->src_tcp.count++;
						fs->xstat->port_histogram->dst_tcp.count++;
					} else if ( common_record->prot == IPPROTO_UDP ) {
						fs->xstat->bpp_histogram->udp.bpp[bpp]++;
						fs->xstat->bpp_histogram->udp.count++;

						fs->xstat->port_histogram->src_udp.port[common_record->srcport]++;
						fs->xstat->port_histogram->dst_udp.port[common_record->dstport]++;
						fs->xstat->port_histogram->src_udp.count++;
						fs->xstat->port_histogram->dst_udp.count++;
					}
				}


				if ( verbose ) {
					master_record_t master_record;
					ExpandRecord_v2((common_record_t *)common_record, &v1_extension_info, &(exporter->info), &master_record);
				 	format_file_block_record(&master_record, &string, 0);
					printf("%s\n", string);
				}

				// advance to next input flow record
				v1_record		= (netflow_v1_record_t *)((pointer_addr_t)v1_record + flow_record_length);

				// sanity check: extensions written must exactly fill the
				// declared output record size
				if ( ((pointer_addr_t)data_ptr - (pointer_addr_t)common_record) != v1_output_record_size ) {
					printf("Panic size check: ptr diff: %llu, record size: %u\n", (unsigned long long)((pointer_addr_t)data_ptr - (pointer_addr_t)common_record), v1_output_record_size ); 
					abort();
				}
				// advance to next output record
				common_record	= (common_record_t *)data_ptr;
				v1_block		= (v1_block_t *)common_record->data;
				
				// buffer size sanity check - should never happen, but check it anyway
				bsize = (pointer_addr_t)common_record - (pointer_addr_t)fs->nffile->block_header - sizeof(data_block_header_t);
				if ( bsize > BUFFSIZE ) {
					syslog(LOG_ERR,"### Software error ###: %s line %d", __FILE__, __LINE__);
					syslog(LOG_ERR,"Process_v1: Output buffer overflow! Flush buffer and skip records.");
					syslog(LOG_ERR,"Buffer size: size: %u, bsize: %llu > %u", fs->nffile->block_header->size, (unsigned long long)bsize, BUFFSIZE);
					// reset buffer
					fs->nffile->block_header->size 		= 0;
					fs->nffile->block_header->NumRecords = 0;
					fs->nffile->buff_ptr = (void *)((pointer_addr_t)fs->nffile->block_header + sizeof(data_block_header_t) );
					return;
				}

			} // End of foreach v1 record

		// update file record size ( -> output buffer size )
		fs->nffile->block_header->NumRecords += count;
		fs->nffile->block_header->size 		 += count * v1_output_record_size;
		fs->nffile->buff_ptr 				  = (void *)common_record;

		// still to go for this many input bytes
		size_left 	-= NETFLOW_V1_HEADER_LENGTH + count * flow_record_length;

		// next header
		v1_header	= (netflow_v1_header_t *)v1_record;

		// should never be < 0
		done = size_left <= 0;

	} // End of while !done

	return;

} /* End of Process_v1 */
Exemplo n.º 8
0
int ExportFlowTable(nffile_t *nffile, int aggregate, int bidir, int date_sorted, extension_map_list_t *extension_map_list) {
hash_FlowTable *FlowTable;
FlowTableRecord_t	*r;
SortElement_t 		*SortList;
master_record_t		*aggr_record_mask;
uint32_t 			i;
uint32_t			maxindex, c;
#ifdef DEVEL
char				*string;
#endif

	ExportExtensionMaps(aggregate, bidir, nffile, extension_map_list);
	ExportExporterList(nffile);

	aggr_record_mask = GetMasterAggregateMask();

	FlowTable = GetFlowTable();
	c = 0;
	maxindex = FlowTable->NumRecords;
	if ( date_sorted ) {
		// Sort records according the date
		SortList = (SortElement_t *)calloc(maxindex, sizeof(SortElement_t));

		if ( !SortList ) {
			LogError("malloc() error in %s line %d: %s\n", __FILE__, __LINE__, strerror (errno));
			return 0;
		}

		// preset SortList table - still unsorted
		for ( i=0; i<=FlowTable->IndexMask; i++ ) {
			r = FlowTable->bucket[i];
			if ( !r ) 
				continue;

			// foreach elem in this bucket
			while ( r ) {
				SortList[c].count  = 1000LL * r->flowrecord.first + r->flowrecord.msec_first;	// sort according the date
				SortList[c].record = (void *)r;
				c++;
				r = r->next;
			}
		}

		if ( c != maxindex ) {
			LogError("Abort: Mismatch %s line %d: %s\n", __FILE__, __LINE__, strerror (errno));
			return 0;
		}

		if ( c >= 2 )
 			heapSort(SortList, c, 0);

		for ( i = 0; i < c; i++ ) {
			master_record_t	*flow_record;
			common_record_t *raw_record;
			extension_info_t *extension_info;

			r = (FlowTableRecord_t *)(SortList[i].record);
			raw_record = &(r->flowrecord);
			extension_info = r->map_info_ref;

			flow_record = &(extension_info->master_record);
			ExpandRecord_v2( raw_record, extension_info, r->exp_ref, flow_record);
			flow_record->dPkts 		= r->counter[INPACKETS];
			flow_record->dOctets 	= r->counter[INBYTES];
			flow_record->out_pkts 	= r->counter[OUTPACKETS];
			flow_record->out_bytes 	= r->counter[OUTBYTES];
			flow_record->aggr_flows 	= r->counter[FLOWS];

			// apply IP mask from aggregation, to provide a pretty output
			if ( FlowTable->has_masks ) {
				flow_record->V6.srcaddr[0] &= FlowTable->IPmask[0];
				flow_record->V6.srcaddr[1] &= FlowTable->IPmask[1];
				flow_record->V6.dstaddr[0] &= FlowTable->IPmask[2];
				flow_record->V6.dstaddr[1] &= FlowTable->IPmask[3];
			}

			if ( FlowTable->apply_netbits )
				ApplyNetMaskBits(flow_record, FlowTable->apply_netbits);

			if ( aggr_record_mask ) {
				ApplyAggrMask(flow_record, aggr_record_mask);
			}

			// switch to output extension map
			flow_record->map_ref = extension_info->map;
			flow_record->ext_map = extension_info->map->map_id;
			PackRecord(flow_record, nffile);
#ifdef DEVEL
			format_file_block_record((void *)flow_record, &string, 0);
			printf("%s\n", string);
#endif
			// Update statistics
			UpdateStat(nffile->stat_record, flow_record);
		}

	} else {
		// print them as they came
		for ( i=0; i<=FlowTable->IndexMask; i++ ) {
			r = FlowTable->bucket[i];
			while ( r ) {
				master_record_t	*flow_record;
				common_record_t *raw_record;
				extension_info_t *extension_info;

				raw_record = &(r->flowrecord);
				extension_info = r->map_info_ref;

				flow_record = &(extension_info->master_record);
				ExpandRecord_v2( raw_record, extension_info, r->exp_ref, flow_record);
				flow_record->dPkts 		= r->counter[INPACKETS];
				flow_record->dOctets 	= r->counter[INBYTES];
				flow_record->out_pkts 	= r->counter[OUTPACKETS];
				flow_record->out_bytes 	= r->counter[OUTBYTES];
				flow_record->aggr_flows	= r->counter[FLOWS];

				// apply IP mask from aggregation, to provide a pretty output
				if ( FlowTable->has_masks ) {
					flow_record->V6.srcaddr[0] &= FlowTable->IPmask[0];
					flow_record->V6.srcaddr[1] &= FlowTable->IPmask[1];
					flow_record->V6.dstaddr[0] &= FlowTable->IPmask[2];
					flow_record->V6.dstaddr[1] &= FlowTable->IPmask[3];
				}

				if ( FlowTable->apply_netbits )
					ApplyNetMaskBits(flow_record, FlowTable->apply_netbits);

				if ( aggr_record_mask ) {
					ApplyAggrMask(flow_record, aggr_record_mask);
				}

				// switch to output extension map
				flow_record->map_ref = extension_info->map;
				flow_record->ext_map = extension_info->map->map_id;
				PackRecord(flow_record, nffile);
#ifdef DEVEL
				format_file_block_record((void *)flow_record, &string, 0);
				printf("%s\n", string);
#endif
				// Update statistics
				UpdateStat(nffile->stat_record, flow_record);

				r = r->next;
			}
		}

	}

    if ( nffile->block_header->NumRecords ) {
        if ( WriteBlock(nffile) <= 0 ) {
            LogError("Failed to write output buffer to disk: '%s'" , strerror(errno));
			return 0;
        } 
    }

	return 1;

} // End of ExportFlowTable
Exemplo n.º 9
0
// Read all records from the current file chain and accumulate, per
// destination port, flow/packet/byte counters for TCP and UDP into a
// 65536-entry table. Only records passing the global filter 'Engine' are
// counted. Returns the table (caller owns it and must free() it), or NULL
// on error. The 'filter' parameter is unused here; the compiled filter is
// taken from the global Engine.
static data_row *process(char *filter) {
data_block_header_t block_header;
master_record_t		master_record;
common_record_t		*flow_record, *in_buff;
int i, rfd, done, ret;
uint32_t	buffer_size;
data_row * 	port_table;
char *string;
uint64_t total_bytes;

	rfd = GetNextFile(0, 0, 0, NULL);
	if ( rfd < 0 ) {
		if ( errno ) 
			perror("Can't open file for reading");
		return NULL;
	}

	// prepare read buffer
	buffer_size = BUFFSIZE;
	in_buff = (common_record_t *) malloc(buffer_size);
	if ( !in_buff ) {
		perror("Memory allocation error");
		close(rfd);
		return NULL;
	}

	// calloc() already zero-initialises the table - no extra memset needed
	port_table = (data_row *)calloc(65536, sizeof(data_row));
	if ( !port_table ) {
		perror("Memory allocation error");
		free(in_buff);	// fix: read buffer was leaked on this error path
		close(rfd);
		return NULL;
	}

	// setup Filter Engine to point to master_record, as any record read from file
	// is expanded into this record
	Engine->nfrecord = (uint64_t *)&master_record;

	total_bytes = 0;	// fix: was accumulated without initialization (UB)
	done	 	= 0;
	while ( !done ) {

		// get next data block from file
		ret = ReadBlock(rfd, &block_header, (void *)in_buff, &string);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT ) 
					fprintf(stderr, "Skip corrupt data file '%s': '%s'\n",GetCurrentFilename(), string);
				else 
					fprintf(stderr, "Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF:
				rfd = GetNextFile(rfd, 0, 0, NULL);
				if ( rfd < 0 ) {
					if ( rfd == NF_ERROR )
						fprintf(stderr, "Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );

					// rfd == EMPTY_LIST
					done = 1;
				} // else continue with next file
				continue;

			default:
				// successfully read block
				total_bytes += ret;
		}

		if ( block_header.id != DATA_BLOCK_TYPE_2 ) {
			fprintf(stderr, "Can't process block type %u\n", block_header.id);
			continue;
		}

		flow_record = in_buff;

		for ( i=0; i < block_header.NumRecords; i++ ) {

			if ( flow_record->type == CommonRecordType ) {
				if ( extension_map_list.slot[flow_record->ext_map] == NULL ) {
					// fix: message was formatted into a local buffer but never shown
					fprintf(stderr, "Corrupt data file! No such extension map id: %u. Skip record\n",
						flow_record->ext_map );
				} else {
					ExpandRecord_v2( flow_record, extension_map_list.slot[flow_record->ext_map], &master_record);

					// count the record only if it passes the filter
					if ( (*Engine->FilterEngine)(Engine) != 0 ) {
						// Add to stat record
						if ( master_record.prot == IPPROTO_TCP ) {
							port_table[master_record.dstport].proto[tcp].type[flows]++;
							port_table[master_record.dstport].proto[tcp].type[packets]	+= master_record.dPkts;
							port_table[master_record.dstport].proto[tcp].type[bytes]	+= master_record.dOctets;
						} else if ( master_record.prot == IPPROTO_UDP ) {
							port_table[master_record.dstport].proto[udp].type[flows]++;
							port_table[master_record.dstport].proto[udp].type[packets]	+= master_record.dPkts;
							port_table[master_record.dstport].proto[udp].type[bytes]	+= master_record.dOctets;
						}
					}
				}

			} else if ( flow_record->type == ExtensionMapType ) {
				extension_map_t *map = (extension_map_t *)flow_record;

				if ( Insert_Extension_Map(&extension_map_list, map) ) {
					// flush new map
				} // else map already known and flushed

			} else {
				fprintf(stderr, "Skip unknown record type %i\n", flow_record->type);
			}

			// Advance pointer by number of bytes for netflow record
			flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);
		}
	} // while

	free(in_buff);	// fix: read buffer was leaked on the success path

	return port_table;

} // End of process
Exemplo n.º 10
0
// Skeleton record processor: walk the current file chain, expand every
// common record into 'master_record' and print it via print_record().
// Handles legacy v1 data blocks (COMPAT15) by converting them in place to
// v2 records first. Closes and disposes the file handle when done.
static void process_data(void) {
master_record_t	master_record;
common_record_t *flow_record;
nffile_t		*nffile;
int 		i, done, ret;
#ifdef COMPAT15
int	v1_map_done = 0;
#endif

	// Get the first file handle
	nffile = GetNextFile(NULL, 0, 0);
	if ( !nffile ) {
		LogError("GetNextFile() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		return;
	}
	if ( nffile == EMPTY_LIST ) {
		LogError("Empty file list. No files to process\n");
		return;
	}

	done = 0;
	while ( !done ) {
		// get next data block from file
		ret = ReadBlock(nffile);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT ) 
					fprintf(stderr, "Skip corrupt data file '%s'\n",GetCurrentFilename());
				else 
					fprintf(stderr, "Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF: {
				nffile_t *next = GetNextFile(nffile, 0, 0);
				if ( next == EMPTY_LIST ) {
					done = 1;
				}
				if ( next == NULL ) {
					done = 1;
					LogError("Unexpected end of file list\n");
				}
				// else continue with next file
				continue;

				} break; // not really needed
		}

#ifdef COMPAT15
		// legacy nfdump 1.5 file support: v1 data blocks need an on-the-fly
		// extension map plus in-place conversion of each record to v2 layout
		if ( nffile->block_header->id == DATA_BLOCK_TYPE_1 ) {
			common_record_v1_t *v1_record = (common_record_v1_t *)nffile->buff_ptr;
			// create an extension map for v1 blocks
			if ( v1_map_done == 0 ) {
				extension_map_t *map = malloc(sizeof(extension_map_t) + 2 * sizeof(uint16_t) );
				if ( ! map ) {
					LogError("malloc() allocation error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
					exit(255);
				}
				map->type 	= ExtensionMapType;
				map->size 	= sizeof(extension_map_t) + 2 * sizeof(uint16_t);
				// round map size up to the next 4-byte boundary
				if (( map->size & 0x3 ) != 0 ) {
					map->size += 4 - ( map->size & 0x3 );
				}

				map->map_id = INIT_ID;

				// v1 records carry SNMP interface and AS extensions only
				map->ex_id[0]  = EX_IO_SNMP_2;
				map->ex_id[1]  = EX_AS_2;
				map->ex_id[2]  = 0;

				map->extension_size  = 0;

				Insert_Extension_Map(&extension_map_list, map);

				v1_map_done = 1;
			}

			// convert the records to v2
			for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
				common_record_t *v2_record = (common_record_t *)v1_record;
				Convert_v1_to_v2((void *)v1_record);
				// now we have a v2 record -> use size of v2_record->size
				v1_record = (common_record_v1_t *)((pointer_addr_t)v1_record + v2_record->size);
			}
			nffile->block_header->id = DATA_BLOCK_TYPE_2;
		}
#endif

		if ( nffile->block_header->id == Large_BLOCK_Type ) {
			// skip
			continue;
		}

		if ( nffile->block_header->id != DATA_BLOCK_TYPE_2 ) {
			fprintf(stderr, "Can't process block type %u. Skip block.\n", nffile->block_header->id);
			continue;
		}

		// iterate over all records in this block
		flow_record = nffile->buff_ptr;
		for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
			char        string[1024];

			switch ( flow_record->type ) {
				case CommonRecordType: {
					uint32_t map_id = flow_record->ext_map;
					generic_exporter_t *exp_info = exporter_list[flow_record->exporter_sysid];
					if ( extension_map_list.slot[map_id] == NULL ) {
						snprintf(string, 1024, "Corrupt data file! No such extension map id: %u. Skip record", flow_record->ext_map );
						string[1023] = '\0';
					} else {
						ExpandRecord_v2( flow_record, extension_map_list.slot[flow_record->ext_map], 
							exp_info ? &(exp_info->info) : NULL, &master_record);

						// update number of flows matching a given map
						extension_map_list.slot[map_id]->ref_count++;
			
						/* 
			 			* insert hier your calls to your processing routine 
			 			* master_record now contains the next flow record as specified in nffile.c
			 			* for example you can print each record:
			 			*
			 			*/
						print_record(&master_record, string);
					}
					// 'string' holds either the printed record or the error message
					printf("%s\n", string);
	
					} break;
				case ExtensionMapType: {
					extension_map_t *map = (extension_map_t *)flow_record;

					if ( Insert_Extension_Map(&extension_map_list, map) ) {
					 	// flush new map
					} // else map already known and flushed

					} break;
				case ExporterRecordType:
				case SamplerRecordype:
						// Silently skip exporter records
					break;
				default: {
					fprintf(stderr, "Skip unknown record type %i\n", flow_record->type);
				}
			}

			// Advance pointer by number of bytes for netflow record
			flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	

		} // for all records

	} // while

	CloseFile(nffile);
	DisposeFile(nffile);

	PackExtensionMapList(&extension_map_list);

} // End of process_data
Exemplo n.º 11
0
/*
 * process_data() - profile all flow records from the file list into channels.
 *
 * Reads every data block from the files handed over by GetNextFile() and fans
 * each common flow record out to all profile channels whose filter matches.
 * A matching record updates the channel statistics (and extended statistics,
 * if the channel has an xstat block) and, for non-shadow channels
 * (channels[j].nffile != NULL), is appended to the channel's output file.
 * Extension map, exporter info and sampler info records are forwarded to
 * every channel file so the resulting files stay self-contained.
 *
 * Parameters:
 *   channels     - array of profile channels, each with its own filter
 *                  engine, stat record and optional output file
 *   num_channels - number of entries in channels[]
 *   tslot        - time slot being profiled (not referenced in this body;
 *                  kept for interface compatibility)
 *   do_xstat     - extended statistics flag (per-channel xstat pointers are
 *                  checked directly instead)
 *
 * Uses file-scope state: extension_map_list, exporter_list, Ident,
 * is_anonymized. On fatal allocation failure exits with code 255, matching
 * the error policy used elsewhere in this file.
 */
static void process_data(profile_channel_info_t *channels, unsigned int num_channels, time_t tslot, int do_xstat) {
common_record_t	*flow_record;
nffile_t		*nffile;
FilterEngine_data_t	*engine;
int 		i, j, done, ret ;
#ifdef COMPAT15
int	v1_map_done = 0;
#endif

	// open the first file in the chain; 0/0 disables the time window check
	nffile = GetNextFile(NULL, 0, 0);
	if ( !nffile ) {
		LogError("GetNextFile() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		return;
	}
	if ( nffile == EMPTY_LIST ) {
		LogError("Empty file list. No files to process\n");
		return;
	}

	// store infos away for later use
	// although multiple files may be processed, it is assumed that all 
	// have the same settings
	is_anonymized = IP_ANONYMIZED(nffile);
	strncpy(Ident, nffile->file_header->ident, IDENTLEN);
	Ident[IDENTLEN-1] = '\0';	// strncpy does not guarantee termination

	done = 0;
	while ( !done ) {

		// get next data block from file
		ret = ReadBlock(nffile);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT ) 
					LogError("Skip corrupt data file '%s'\n",GetCurrentFilename());
				else 
					LogError("Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF: {
				// GetNextFile() recycles the nffile handle for the next file
				nffile_t *next = GetNextFile(nffile, 0, 0);
				if ( next == EMPTY_LIST ) {
					// end of file list reached - normal termination
					done = 1;
				}
				if ( next == NULL ) {
					done = 1;
					LogError("Unexpected end of file list\n");
				}
				// else continue reading from the next file
				continue;
	
				} break; // not really needed
		}

#ifdef COMPAT15
		// transparently convert old v1 data blocks into v2 records
		if ( nffile->block_header->id == DATA_BLOCK_TYPE_1 ) {
			common_record_v1_t *v1_record = (common_record_v1_t *)nffile->buff_ptr;
			// create an extension map for v1 blocks - done once per run
			if ( v1_map_done == 0 ) {
				extension_map_t *map = malloc(sizeof(extension_map_t) + 2 * sizeof(uint16_t) );
				if ( ! map ) {
					LogError("malloc() allocation error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
					exit(255);
				}
				map->type 	= ExtensionMapType;
				map->size 	= sizeof(extension_map_t) + 2 * sizeof(uint16_t);
				// pad map size to the next 4 byte boundary, as required on disk
				if (( map->size & 0x3 ) != 0 ) {
					map->size += 4 - ( map->size & 0x3 );
				}
				
				map->map_id = INIT_ID;
				map->ex_id[0]  = EX_IO_SNMP_2;
				map->ex_id[1]  = EX_AS_2;
				map->ex_id[2]  = 0;	// terminating zero entry

				map->extension_size  = 0;
				map->extension_size += extension_descriptor[EX_IO_SNMP_2].size;
				map->extension_size += extension_descriptor[EX_AS_2].size;
				
				// if the map is new, flush it to every channel output file
				if ( Insert_Extension_Map(extension_map_list, map) ) {
					int j;
					for ( j=0; j < num_channels; j++ ) {
						if ( channels[j].nffile != NULL) {
							// flush new map
							AppendToBuffer(channels[j].nffile, (void *)map, map->size);
						}
					}
				} // else map already known and flushed
			
				v1_map_done = 1;
			}

			// convert the records in place to v2
			for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
				common_record_t *v2_record = (common_record_t *)v1_record;
				Convert_v1_to_v2((void *)v1_record);
				// now we have a v2 record -> use size of v2_record->size
				v1_record = (common_record_v1_t *)((pointer_addr_t)v1_record + v2_record->size);
			}
			nffile->block_header->id = DATA_BLOCK_TYPE_2;
		}
#endif

		if ( nffile->block_header->id == Large_BLOCK_Type ) {
			// skip
			continue;
		}

		if ( nffile->block_header->id != DATA_BLOCK_TYPE_2 ) {
			LogError("Can't process block type %u. Skip block.\n", nffile->block_header->id);
			continue;
		}

		// iterate over all records in this block
		flow_record = nffile->buff_ptr;
		for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
			switch ( flow_record->type ) { 
				case CommonRecordType: {
					generic_exporter_t *exp_info = exporter_list[flow_record->exporter_sysid];
					uint32_t map_id = flow_record->ext_map;
					master_record_t	*master_record;

					// guard against corrupt files: ext_map comes straight from
					// the data file, so an unchecked value would index past the
					// end of extension_map_list->slot[]
					if ( map_id >= MAX_EXTENSION_MAPS ) {
						LogError("Corrupt data file. Extension map id %u too big. Skip record.\n", flow_record->ext_map);
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	
						continue;
					}

					if ( extension_map_list->slot[map_id] == NULL ) {
						LogError("Corrupt data file. Missing extension map %u. Skip record.\n", flow_record->ext_map);
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	
						continue;
					} 
	
					// expand the packed record into the map's master record
					master_record = &(extension_map_list->slot[map_id]->master_record);
					ExpandRecord_v2( flow_record, extension_map_list->slot[flow_record->ext_map], 
						exp_info ? &(exp_info->info) : NULL, master_record);

					for ( j=0; j < num_channels; j++ ) {
						int match;
	
						// apply profile filter
						(channels[j].engine)->nfrecord 	= (uint64_t *)master_record;
						engine = channels[j].engine;
						match = (*engine->FilterEngine)(engine);
	
						// if profile filter failed -> next profile
						if ( !match )
							continue;
	
						// filter was successful -> continue record processing
	
						// update statistics
						UpdateStat(&channels[j].stat_record, master_record);
						if ( channels[j].nffile ) 
							UpdateStat(channels[j].nffile->stat_record, master_record);
	
						if ( channels[j].xstat ) 
							UpdateXStat(channels[j].xstat, master_record);
	
						// do we need to write data to new file - shadow profiles do not have files.
						// check if we need to flush the output buffer
						if ( channels[j].nffile != NULL ) {
							// write record to output buffer
							AppendToBuffer(channels[j].nffile, (void *)flow_record, flow_record->size);
						} 
	
					} // End of for all channels
	
					} break;
				case ExtensionMapType: {
					extension_map_t *map = (extension_map_t *)flow_record;
	
					// forward a newly seen map to every channel output file
					if ( Insert_Extension_Map(extension_map_list, map) ) {
						int j;
						for ( j=0; j < num_channels; j++ ) {
							if ( channels[j].nffile != NULL ) {
								// flush new map
								AppendToBuffer(channels[j].nffile, (void *)map, map->size);
							}
						}
					} // else map already known and flushed
	
					} break; 
				case ExporterInfoRecordType: {
					// AddExporterInfo() returns 1 for a new exporter record
					int ret = AddExporterInfo((exporter_info_record_t *)flow_record);
					if ( ret != 0 ) {
						int j;
						for ( j=0; j < num_channels; j++ ) {
							if ( channels[j].nffile != NULL && ret == 1) {
								// flush new exporter
								AppendToBuffer(channels[j].nffile, (void *)flow_record, flow_record->size);
							}
						}
					} else {
						LogError("Failed to add Exporter Record\n");
					}
					} break;
				case SamplerInfoRecordype: {
					// AddSamplerInfo() returns 1 for a new sampler record
					int ret = AddSamplerInfo((sampler_info_record_t *)flow_record);
					if ( ret != 0 ) {
						int j;
						for ( j=0; j < num_channels; j++ ) {
							if ( channels[j].nffile != NULL && ret == 1 ) {
								// flush new sampler
								AppendToBuffer(channels[j].nffile, (void *)flow_record, flow_record->size);
							}
						}
					} else {
						LogError("Failed to add Sampler Record\n");
					}
					} break;
				case ExporterRecordType:
				case SamplerRecordype:
				case ExporterStatRecordType:
						// Silently skip exporter records
					break;
				default:  {
					LogError("Skip unknown record type %i\n", flow_record->type);
				}
			}
			// Advance pointer by number of bytes for netflow record
			flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);

		} // End of for all NumRecords
	} // End of while !done

	// do we need to write data to new file - shadow profiles do not have files.
	for ( j=0; j < num_channels; j++ ) {
		if ( channels[j].nffile != NULL ) {
			// flush output buffer
			if ( channels[j].nffile->block_header->NumRecords ) {
				if ( WriteBlock(channels[j].nffile) <= 0 ) {
					LogError("Failed to write output buffer to disk: '%s'" , strerror(errno));
				} 
			} 
		}
	}
	CloseFile(nffile);
	DisposeFile(nffile);

} // End of process_data