Example #1
/* close file handler and release related structures */
void lnf_close(lnf_file_t *lnf_file) {

	if (lnf_file == NULL || lnf_file->nffile == NULL) {
		return ;
	}

	if (lnf_file->flags & LNF_WRITE) {

		// write the last records in buffer
		if (lnf_file->nffile->block_header->NumRecords ) {
			if ( WriteBlock(lnf_file->nffile) <= 0 ) {
				fprintf(stderr, "Failed to write output buffer: '%s'" , strerror(errno));
			}
		}
		CloseUpdateFile(lnf_file->nffile, NULL );

	} else {
		CloseFile(lnf_file->nffile);
	}

	DisposeFile(lnf_file->nffile); 

	PackExtensionMapList(lnf_file->extension_map_list);
	FreeExtensionMaps(lnf_file->extension_map_list);

	free(lnf_file);
}
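
The close sequence above recurs throughout this listing: flush any records still buffered, close the file (updating the header when the file was opened for writing), then dispose of the handle. A minimal sketch of that sequence, assuming the nffile_t API used in these examples; the helper name close_nffile is hypothetical:

/* Minimal sketch of the shared close sequence; close_nffile is a hypothetical helper. */
static void close_nffile(nffile_t *nffile, int opened_for_write, char *ident) {

	if (nffile == NULL)
		return;

	if (opened_for_write) {
		// flush records still pending in the output buffer
		if (nffile->block_header->NumRecords && WriteBlock(nffile) <= 0)
			fprintf(stderr, "Failed to write output buffer: '%s'\n", strerror(errno));
		// rewrite the file header and stat record before closing
		CloseUpdateFile(nffile, ident);
	} else {
		CloseFile(nffile);
	}

	// release the handle and its buffers
	DisposeFile(nffile);
}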
Example #2
 //
 // ~ShaderFile
 //
 ShaderFile::~ShaderFile()
 {
     if( id.unique() && id.get() != nullptr )
     {
         DisposeFile();
     }
 }
Example #3
/* close file handler and release related structures */
void lnf_close(lnf_file_t *lnf_file) {

	lnf_map_list_t *map_list, *tmp_map_list;

	if (lnf_file == NULL || lnf_file->nffile == NULL) {
		return ;
	}

	if (lnf_file->flags & LNF_WRITE) {

		// write the last records in buffer
		if (lnf_file->nffile->block_header->NumRecords ) {
			if ( WriteBlock(lnf_file->nffile) <= 0 ) {
				fprintf(stderr, "Failed to write output buffer: '%s'" , strerror(errno));
			}
		}
		CloseUpdateFile(lnf_file->nffile, NULL );

	} else {
		CloseFile(lnf_file->nffile);
	}

	DisposeFile(lnf_file->nffile); 

	PackExtensionMapList(lnf_file->extension_map_list);
	FreeExtensionMaps(lnf_file->extension_map_list);

	map_list = lnf_file->lnf_map_list; 
	while (map_list != NULL) {
		tmp_map_list = map_list;
		map_list = map_list->next;
		bit_array_release(&tmp_map_list->bit_array);
		if (tmp_map_list->map != NULL) {
			free(tmp_map_list->map);
		}
		free(tmp_map_list);
	}

	if (lnf_file->v1convert_buffer != NULL) {
		free(lnf_file->v1convert_buffer);
	}

	free(lnf_file);
}
Example #4
void CloseChannels (time_t tslot, int compress) {
dirstat_t	*dirstat;
struct stat fstat;
unsigned int num;

	for ( num = 0; num < num_channels; num++ ) {
		if ( profile_channels[num].ofile ) {

			if ( is_anonymized ) 
				SetFlag(profile_channels[num].nffile->file_header->flags, FLAG_ANONYMIZED);
			CloseUpdateFile(profile_channels[num].nffile, Ident);
			profile_channels[num].nffile = DisposeFile(profile_channels[num].nffile);

			stat(profile_channels[num].ofile, &fstat);
			ReadStatInfo(profile_channels[num].dirstat_path, &dirstat, CREATE_AND_LOCK);
	
			if ( rename(profile_channels[num].ofile, profile_channels[num].wfile) < 0 ) {
				LogError("Failed to rename file %s to %s: %s\n", 
					profile_channels[num].ofile, profile_channels[num].wfile, strerror(errno) );
			} else if ( dirstat && tslot > dirstat->last ) {
				dirstat->filesize += 512 * fstat.st_blocks;
				dirstat->numfiles++;
				dirstat->last = tslot;
			}

			if ( dirstat ) {
				WriteStatInfo(dirstat);
			}
		}
		if ( ((profile_channels[num].type & 0x8) == 0) && tslot > 0 ) {
			UpdateRRD(tslot, &profile_channels[num]);
#ifdef HAVE_INFLUXDB
			if(strlen(influxdb_url) > 0)
				UpdateInfluxDB(tslot, &profile_channels[num]);
#endif
		}
	}

} // End of CloseChannels
Example #5
int main( int argc, char **argv ) {
struct stat stat_buff;
stat_record_t	sum_stat;
printer_t 	print_header, print_record;
nfprof_t 	profile_data;
char 		*rfile, *Rfile, *Mdirs, *wfile, *ffile, *filter, *tstring, *stat_type;
char		*byte_limit_string, *packet_limit_string, *print_format, *record_header;
char		*print_order, *query_file, *UnCompress_file, *nameserver, *aggr_fmt;
int 		c, ffd, ret, element_stat, fdump;
int 		i, user_format, quiet, flow_stat, topN, aggregate, aggregate_mask, bidir;
int 		print_stat, syntax_only, date_sorted, do_tag, compress, do_xstat;
int			plain_numbers, GuessDir, pipe_output, csv_output;
time_t 		t_start, t_end;
uint32_t	limitflows;
char 		Ident[IDENTLEN];

	rfile = Rfile = Mdirs = wfile = ffile = filter = tstring = stat_type = NULL;
	byte_limit_string = packet_limit_string = NULL;
	fdump = aggregate = 0;
	aggregate_mask	= 0;
	bidir			= 0;
	t_start = t_end = 0;
	syntax_only	    = 0;
	topN	        = -1;
	flow_stat       = 0;
	print_stat      = 0;
	element_stat  	= 0;
	do_xstat 		= 0;
	limitflows		= 0;
	date_sorted		= 0;
	total_bytes		= 0;
	total_flows		= 0;
	skipped_blocks	= 0;
	do_tag			= 0;
	quiet			= 0;
	user_format		= 0;
	compress		= 0;
	plain_numbers   = 0;
	pipe_output		= 0;
	csv_output		= 0;
	is_anonymized	= 0;
	GuessDir		= 0;
	nameserver		= NULL;

	print_format    = NULL;
	print_header 	= NULL;
	print_record  	= NULL;
	print_order  	= NULL;
	query_file		= NULL;
	UnCompress_file	= NULL;
	aggr_fmt		= NULL;
	record_header 	= NULL;

	Ident[0] = '\0';

	while ((c = getopt(argc, argv, "6aA:Bbc:D:E:s:hHn:i:j:f:qzr:v:w:K:M:NImO:R:XZt:TVv:x:l:L:o:")) != EOF) {
		switch (c) {
			case 'h':
				usage(argv[0]);
				exit(0);
				break;
			case 'a':
				aggregate = 1;
				break;
			case 'A':
				if ( !ParseAggregateMask(optarg, &aggr_fmt ) ) {
					exit(255);
				}
				aggregate_mask = 1;
				break;
			case 'B':
				GuessDir = 1;
				// fall through to case 'b'
			case 'b':
				if ( !SetBidirAggregation() ) {
					exit(255);
				}
				bidir	  = 1;
				// implies
				aggregate = 1;
				break;
			case 'D':
				nameserver = optarg;
				if ( !set_nameserver(nameserver) ) {
					exit(255);
				}
				break;
			case 'E':
				query_file = optarg;
				if ( !InitExporterList() ) {
					exit(255);
				}
				PrintExporters(query_file);
				exit(0);
				break;
			case 'X':
				fdump = 1;
				break;
			case 'Z':
				syntax_only = 1;
				break;
			case 'q':
				quiet = 1;
				break;
			case 'z':
				compress = 1;
				break;
			case 'c':	
				limitflows = atoi(optarg);
				if ( !limitflows ) {
					LogError("Option -c needs a number > 0\n");
					exit(255);
				}
				break;
			case 's':
				stat_type = optarg;
				if ( !SetStat(stat_type, &element_stat, &flow_stat) ) {
					exit(255);
				}
				break;
			case 'V': {
				char *e1, *e2;
				e1 = "";
				e2 = "";
#ifdef NSEL
				e1 = "NSEL-NEL";
#endif
				printf("%s: Version: %s%s%s\n",argv[0], e1, e2, nfdump_version);
				exit(0);
				} break;
			case 'l':
				packet_limit_string = optarg;
				break;
			case 'K':
				LogError("*** Anonymisation moved! Use nfanon to anonymise flows!\n");
				exit(255);
				break;
			case 'H':
				do_xstat = 1;
				break;
			case 'L':
				byte_limit_string = optarg;
				break;
			case 'N':
				plain_numbers = 1;
				break;
			case 'f':
				ffile = optarg;
				break;
			case 't':
				tstring = optarg;
				break;
			case 'r':
				rfile = optarg;
				if ( strcmp(rfile, "-") == 0 )
					rfile = NULL;
				break;
			case 'm':
				print_order = "tstart";
				Parse_PrintOrder(print_order);
				date_sorted = 1;
				LogError("Option -m depricated. Use '-O tstart' instead\n");
				break;
			case 'M':
				Mdirs = optarg;
				break;
			case 'I':
				print_stat++;
				break;
			case 'o':	// output mode
				print_format = optarg;
				break;
			case 'O': {	// stat order by
				int ret;
				print_order = optarg;
				ret = Parse_PrintOrder(print_order);
				if ( ret < 0 ) {
					LogError("Unknown print order '%s'\n", print_order);
					exit(255);
				}
				date_sorted = ret == 6;		// index into order_mode
				} break;
			case 'R':
				Rfile = optarg;
				break;
			case 'w':
				wfile = optarg;
				break;
			case 'n':
				topN = atoi(optarg);
				if ( topN < 0 ) {
					LogError("TopnN number %i out of range\n", topN);
					exit(255);
				}
				break;
			case 'T':
				do_tag = 1;
				break;
			case 'i':
				strncpy(Ident, optarg, IDENT_SIZE);
				Ident[IDENT_SIZE - 1] = 0;
				if ( strchr(Ident, ' ') ) {
					LogError("Ident must not contain spaces\n");
					exit(255);
				}
				break;
			case 'j':
				UnCompress_file = optarg;
				UnCompressFile(UnCompress_file);
				exit(0);
				break;
			case 'x':
				query_file = optarg;
				InitExtensionMaps(NO_EXTENSION_LIST);
				DumpExMaps(query_file);
				exit(0);
				break;
			case 'v':
				query_file = optarg;
				QueryFile(query_file);
				exit(0);
				break;
			case '6':	// print long IPv6 addr
				Setv6Mode(1);
				break;
			default:
				usage(argv[0]);
				exit(0);
		}
	}
	if (argc - optind > 1) {
		usage(argv[0]);
		exit(255);
	} else {
		/* user specified a pcap filter */
		filter = argv[optind];
		FilterFilename = NULL;
	}
	
	// Change Ident only
	if ( rfile && strlen(Ident) > 0 ) {
		ChangeIdent(rfile, Ident);
		exit(0);
	}

	if ( (element_stat || flow_stat) && (topN == -1)  ) 
		topN = 10;

	if ( topN < 0 )
		topN = 0;

	if ( (element_stat && !flow_stat) && aggregate_mask ) {
		LogError("Warning: Aggregation ignored for element statistics\n");
		aggregate_mask = 0;
	}

	if ( !flow_stat && aggregate_mask ) {
		aggregate = 1;
	}

	if ( rfile && Rfile ) {
		LogError("-r and -R are mutually exclusive. Plase specify either -r or -R\n");
		exit(255);
	}
	if ( Mdirs && !(rfile || Rfile) ) {
		LogError("-M needs either -r or -R to specify the file or file list. Add '-R .' for all files in the directories.\n");
		exit(255);
	}

	extension_map_list = InitExtensionMaps(NEEDS_EXTENSION_LIST);
	if ( !InitExporterList() ) {
		exit(255);
	}

	SetupInputFileSequence(Mdirs, rfile, Rfile);

	if ( print_stat ) {
		nffile_t *nffile;
		if ( !rfile && !Rfile && !Mdirs) {
			LogError("Expect data file(s).\n");
			exit(255);
		}

		memset((void *)&sum_stat, 0, sizeof(stat_record_t));
		sum_stat.first_seen = 0x7fffffff;
		sum_stat.msec_first = 999;
		nffile = GetNextFile(NULL, 0, 0);
		if ( !nffile ) {
			LogError("Error open file: %s\n", strerror(errno));
			exit(250);
		}
		while ( nffile && nffile != EMPTY_LIST ) {
			SumStatRecords(&sum_stat, nffile->stat_record);
			nffile = GetNextFile(nffile, 0, 0);
		}
		PrintStat(&sum_stat);
		exit(0);
	}

	// handle print mode
	if ( !print_format ) {
		// automatically select an appropriate output format for custom aggregation
		// aggr_fmt is compiled by ParseAggregateMask
		if ( aggr_fmt ) {
			int len = strlen(AggrPrependFmt) + strlen(aggr_fmt) + strlen(AggrAppendFmt) + 7;	// +7 for 'fmt:', 2 spaces and '\0'
			print_format = malloc(len);
			if ( !print_format ) {
				LogError("malloc() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
				exit(255);
			}
			snprintf(print_format, len, "fmt:%s %s %s",AggrPrependFmt, aggr_fmt, AggrAppendFmt );
			print_format[len-1] = '\0';
		} else if ( bidir ) {
			print_format = "biline";
		} else
			print_format = DefaultMode;
	}

	if ( strncasecmp(print_format, "fmt:", 4) == 0 ) {
		// special user defined output format
		char *format = &print_format[4];
		if ( strlen(format) ) {
			if ( !ParseOutputFormat(format, plain_numbers, printmap) )
				exit(255);
			print_record  = format_special;
			record_header = get_record_header();
			user_format	  = 1;
		} else {
			LogError("Missing format description for user defined output format!\n");
			exit(255);
		}
	} else {
		// predefined output format

		// Check for long_v6 mode
		i = strlen(print_format);
		if ( i > 2 ) {
			if ( print_format[i-1] == '6' ) {
				Setv6Mode(1);
				print_format[i-1] = '\0';
			} else 
				Setv6Mode(0);
		}

		i = 0;
		while ( printmap[i].printmode ) {
			if ( strncasecmp(print_format, printmap[i].printmode, MAXMODELEN) == 0 ) {
				if ( printmap[i].Format ) {
					if ( !ParseOutputFormat(printmap[i].Format, plain_numbers, printmap) )
						exit(255);
					// predefined custom format
					print_record  = printmap[i].func;
					record_header = get_record_header();
					user_format	  = 1;
				} else {
					// To support the pipe output format for element stats - check for pipe, and remember this
					if ( strncasecmp(print_format, "pipe", MAXMODELEN) == 0 ) {
						pipe_output = 1;
					}
					if ( strncasecmp(print_format, "csv", MAXMODELEN) == 0 ) {
						csv_output = 1;
						set_record_header();
						record_header = get_record_header();
					}
					// predefined static format
					print_record  = printmap[i].func;
					user_format	  = 0;
				}
				break;
			}
			i++;
		}
	}

	if ( !print_record ) {
		LogError("Unknown output mode '%s'\n", print_format);
		exit(255);
	}

	// this is the only case, where headers are printed.
	if ( strncasecmp(print_format, "raw", 16) == 0 )
		print_header = format_file_block_header;
	
	if ( aggregate && (flow_stat || element_stat) ) {
		aggregate = 0;
		LogError("Command line switch -s overwrites -a\n");
	}

	if ( !filter && ffile ) {
		if ( stat(ffile, &stat_buff) ) {
			LogError("Can't stat filter file '%s': %s\n", ffile, strerror(errno));
			exit(255);
		}
		filter = (char *)malloc(stat_buff.st_size+1);
		if ( !filter ) {
			LogError("malloc() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
			exit(255);
		}
		ffd = open(ffile, O_RDONLY);
		if ( ffd < 0 ) {
			LogError("Can't open filter file '%s': %s\n", ffile, strerror(errno));
			exit(255);
		}
		ret = read(ffd, (void *)filter, stat_buff.st_size);
		if ( ret < 0   ) {
			perror("Error reading filter file");
			close(ffd);
			exit(255);
		}
		total_bytes += ret;
		filter[stat_buff.st_size] = 0;
		close(ffd);

		FilterFilename = ffile;
	}

	// if no filter is given, set the default ip filter which passes through every flow
	if ( !filter  || strlen(filter) == 0 ) 
		filter = "any";

	Engine = CompileFilter(filter);
	if ( !Engine ) 
		exit(254);

	if ( fdump ) {
		printf("StartNode: %i Engine: %s\n", Engine->StartNode, Engine->Extended ? "Extended" : "Fast");
		DumpList(Engine);
		exit(0);
	}

	if ( syntax_only )
		exit(0);

	if ( print_order && flow_stat ) {
		printf("-s record and -O (-m) are mutually exclusive options\n");
		exit(255);
	}

	if ((aggregate || flow_stat || print_order)  && !Init_FlowTable() )
			exit(250);

	if (element_stat && !Init_StatTable(HashBits, NumPrealloc) )
			exit(250);

	SetLimits(element_stat || aggregate || flow_stat, packet_limit_string, byte_limit_string);

	if ( tstring ) {
		if ( !ScanTimeFrame(tstring, &t_start, &t_end) )
			exit(255);
	}


	if ( !(flow_stat || element_stat || wfile || quiet ) && record_header ) {
		if ( user_format ) {
			printf("%s\n", record_header);
		} else {
			// static format - no static format with header any more, but keep code anyway
			if ( Getv6Mode() ) {
				printf("%s\n", record_header);
			} else
				printf("%s\n", record_header);
		}
	}

	nfprof_start(&profile_data);
	sum_stat = process_data(wfile, element_stat, aggregate || flow_stat, print_order != NULL,
						print_header, print_record, t_start, t_end, 
						limitflows, do_tag, compress, do_xstat);
	nfprof_end(&profile_data, total_flows);

	if ( total_bytes == 0 ) {
		printf("No matched flows\n");
		exit(0);
	}

	if (aggregate || print_order) {
		if ( wfile ) {
			nffile_t *nffile = OpenNewFile(wfile, NULL, compress, is_anonymized, NULL);
			if ( !nffile ) 
				exit(255);
			if ( ExportFlowTable(nffile, aggregate, bidir, date_sorted, extension_map_list) ) {
				CloseUpdateFile(nffile, Ident );	
			} else {
				CloseFile(nffile);
				unlink(wfile);
			}
			DisposeFile(nffile);
		} else {
			PrintFlowTable(print_record, topN, do_tag, GuessDir, extension_map_list);
		}
	}

	if (flow_stat) {
		PrintFlowStat(record_header, print_record, topN, do_tag, quiet, csv_output, extension_map_list);
#ifdef DEVEL
		printf("Loopcnt: %u\n", loopcnt);
#endif
	} 

	if (element_stat) {
		PrintElementStat(&sum_stat, plain_numbers, record_header, print_record, topN, do_tag, quiet, pipe_output, csv_output);
	} 

	if ( !quiet ) {
		if ( csv_output ) {
			PrintSummary(&sum_stat, plain_numbers, csv_output);
		} else if ( !wfile ) {
			if (is_anonymized)
				printf("IP addresses anonymised\n");
			PrintSummary(&sum_stat, plain_numbers, csv_output);
			if ( t_last_flow == 0 ) {
				// in case of a pre 1.6.6 collected and empty flow file
 				printf("Time window: <unknown>\n");
			} else {
 				printf("Time window: %s\n", TimeString(t_first_flow, t_last_flow));
			}
			printf("Total flows processed: %u, Blocks skipped: %u, Bytes read: %llu\n", 
				total_flows, skipped_blocks, (unsigned long long)total_bytes);
			nfprof_print(&profile_data, stdout);
		}
	}

	Dispose_FlowTable();
	Dispose_StatTable();
	FreeExtensionMaps(extension_map_list);

#ifdef DEVEL
	if ( hash_hit || hash_miss )
		printf("Hash hit: %i, miss: %i, skip: %i, ratio: %5.3f\n", hash_hit, hash_miss, hash_skip, (float)hash_hit/((float)(hash_hit+hash_miss)));
#endif

	return 0;
}
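
When no output format is given and a custom aggregation is active, main() above assembles a "fmt:" string whose buffer length is strlen(AggrPrependFmt) + strlen(aggr_fmt) + strlen(AggrAppendFmt) + 7: 4 bytes for "fmt:", 2 separating spaces and 1 terminating '\0'. A small sketch of the same construction, with hypothetical format fragments:

/* Sketch of the "fmt:" assembly from the aggregation branch above.
 * The fragment values are hypothetical; only the +7 arithmetic matters:
 * "fmt:" (4) + two spaces (2) + '\0' (1) = 7 extra bytes. */
const char *prepend = "%ts %td";        // hypothetical AggrPrependFmt
const char *aggr    = "%sa %da";        // hypothetical user aggregation fields
const char *append  = "%pkt %byt %fl";  // hypothetical AggrAppendFmt
int   len = strlen(prepend) + strlen(aggr) + strlen(append) + 7;
char *fmt = malloc(len);
if (fmt)
	snprintf(fmt, len, "fmt:%s %s %s", prepend, aggr, append);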
Example #6
stat_record_t process_data(char *wfile, int element_stat, int flow_stat, int sort_flows,
	printer_t print_header, printer_t print_record, time_t twin_start, time_t twin_end, 
	uint64_t limitflows, int tag, int compress, int do_xstat) {
common_record_t 	*flow_record;
master_record_t		*master_record;
nffile_t			*nffile_w, *nffile_r;
xstat_t				*xstat;
stat_record_t 		stat_record;
int 				done, write_file;

#ifdef COMPAT15
int	v1_map_done = 0;
#endif
	
	// time window of all matched flows
	memset((void *)&stat_record, 0, sizeof(stat_record_t));
	stat_record.first_seen = 0x7fffffff;
	stat_record.msec_first = 999;

	// Do the logic first

	// do not print flows when doing any stats or sorting
	if ( sort_flows || flow_stat || element_stat ) {
		print_record = NULL;
	}

	// do not write flows to file, when doing any stats
	// -w may apply for flow_stats later
	write_file = !(sort_flows || flow_stat || element_stat) && wfile;
	nffile_r = NULL;
	nffile_w = NULL;
	xstat  	 = NULL;

	// Get the first file handle
	nffile_r = GetNextFile(NULL, twin_start, twin_end);
	if ( !nffile_r ) {
		LogError("GetNextFile() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		return stat_record;
	}
	if ( nffile_r == EMPTY_LIST ) {
		LogError("Empty file list. No files to process\n");
		return stat_record;
	}

	// preset the time window of all processed flows from the stat record of the first flow file
	t_first_flow = nffile_r->stat_record->first_seen;
	t_last_flow  = nffile_r->stat_record->last_seen;

	// store infos away for later use
	// although multiple files may be processed, it is assumed that all 
	// have the same settings
	is_anonymized = IP_ANONYMIZED(nffile_r);
	strncpy(Ident, nffile_r->file_header->ident, IDENTLEN);
	Ident[IDENTLEN-1] = '\0';

	// prepare output file if requested
	if ( write_file ) {
		nffile_w = OpenNewFile(wfile, NULL, compress, IP_ANONYMIZED(nffile_r), NULL );
		if ( !nffile_w ) {
			if ( nffile_r ) {
				CloseFile(nffile_r);
				DisposeFile(nffile_r);
			}
			return stat_record;
		}
		if ( do_xstat ) {
			xstat = InitXStat(nffile_w);
			if ( !xstat ) {
				if ( nffile_r ) {
					CloseFile(nffile_r);
					DisposeFile(nffile_r);
				}
				return stat_record;
			}
		}
	}

	// setup Filter Engine to point to master_record, as any record read from file
	// is expanded into this record
	// Engine->nfrecord = (uint64_t *)master_record;

	done = 0;
	while ( !done ) {
	int i, ret;

		// get next data block from file
		ret = ReadBlock(nffile_r);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT ) 
					LogError("Skip corrupt data file '%s'\n",GetCurrentFilename());
				else 
					LogError("Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF: {
				nffile_t *next = GetNextFile(nffile_r, twin_start, twin_end);
				if ( next == EMPTY_LIST ) {
					done = 1;
				} else if ( next == NULL ) {
					done = 1;
					LogError("Unexpected end of file list\n");
				} else {
					// Update global time span window
					if ( next->stat_record->first_seen < t_first_flow )
						t_first_flow = next->stat_record->first_seen;
					if ( next->stat_record->last_seen > t_last_flow ) 
						t_last_flow = next->stat_record->last_seen;
					// continue with next file
				}
				continue;

				} break; // not really needed
			default:
				// successfully read block
				total_bytes += ret;
		}


#ifdef COMPAT15
		if ( nffile_r->block_header->id == DATA_BLOCK_TYPE_1 ) {
			common_record_v1_t *v1_record = (common_record_v1_t *)nffile_r->buff_ptr;
			// create an extension map for v1 blocks
			if ( v1_map_done == 0 ) {
				extension_map_t *map = malloc(sizeof(extension_map_t) + 2 * sizeof(uint16_t) );
				if ( ! map ) {
					LogError("malloc() allocation error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
					exit(255);
				}
				map->type 	= ExtensionMapType;
				map->size 	= sizeof(extension_map_t) + 2 * sizeof(uint16_t);
				if (( map->size & 0x3 ) != 0 ) {
					map->size += 4 - ( map->size & 0x3 );
				}

				map->map_id = INIT_ID;

				map->ex_id[0]  = EX_IO_SNMP_2;
				map->ex_id[1]  = EX_AS_2;
				map->ex_id[2]  = 0;
				
				map->extension_size  = 0;
				map->extension_size += extension_descriptor[EX_IO_SNMP_2].size;
				map->extension_size += extension_descriptor[EX_AS_2].size;

				if ( Insert_Extension_Map(extension_map_list,map) && write_file ) {
					// flush new map
					AppendToBuffer(nffile_w, (void *)map, map->size);
				} // else map already known and flushed

				v1_map_done = 1;
			}

			// convert the records to v2
			for ( i=0; i < nffile_r->block_header->NumRecords; i++ ) {
				common_record_t *v2_record = (common_record_t *)v1_record;
				Convert_v1_to_v2((void *)v1_record);
				// now we have a v2 record -> use size of v2_record->size
				v1_record = (common_record_v1_t *)((pointer_addr_t)v1_record + v2_record->size);
			}
			nffile_r->block_header->id = DATA_BLOCK_TYPE_2;
		}
#endif

		if ( nffile_r->block_header->id == Large_BLOCK_Type ) {
			// skip
			printf("Xstat block skipped ...\n");
			continue;
		}

		if ( nffile_r->block_header->id != DATA_BLOCK_TYPE_2 ) {
			if ( nffile_r->block_header->id == DATA_BLOCK_TYPE_1 ) {
				LogError("Can't process nfdump 1.5.x block type 1. Add --enable-compat15 to compile compatibility code. Skip block.\n");
			} else {
				LogError("Can't process block type %u. Skip block.\n", nffile_r->block_header->id);
			}
			skipped_blocks++;
			continue;
		}

		flow_record = nffile_r->buff_ptr;
		for ( i=0; i < nffile_r->block_header->NumRecords; i++ ) {

			switch ( flow_record->type ) {
				case CommonRecordV0Type:
				case CommonRecordType:  {
					int match;
					uint32_t map_id = flow_record->ext_map;
					generic_exporter_t *exp_info = exporter_list[flow_record->exporter_sysid];
					if ( map_id >= MAX_EXTENSION_MAPS ) {
						LogError("Corrupt data file. Extension map id %u too big.\n", flow_record->ext_map);
						exit(255);
					}
					if ( extension_map_list->slot[map_id] == NULL ) {
						LogError("Corrupt data file. Missing extension map %u. Skip record.\n", flow_record->ext_map);
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	
						continue;
					} 

					total_flows++;
					master_record = &(extension_map_list->slot[map_id]->master_record);
					Engine->nfrecord = (uint64_t *)master_record;
					ExpandRecord_v2( flow_record, extension_map_list->slot[map_id], 
						exp_info ? &(exp_info->info) : NULL, master_record);

					// Time based filter
					// if no time filter is given, the result is always true
					match  = twin_start && (master_record->first < twin_start || master_record->last > twin_end) ? 0 : 1;
					match &= limitflows ? stat_record.numflows < limitflows : 1;

					// filter netflow record with user supplied filter
					if ( match ) 
						match = (*Engine->FilterEngine)(Engine);
	
					if ( match == 0 ) { // record failed to pass all filters
						// increment pointer by number of bytes for netflow record
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	
						// go to next record
						continue;
					}

					// Records passed filter -> continue record processing
					// Update statistics
					UpdateStat(&stat_record, master_record);

					// update number of flows matching a given map
					extension_map_list->slot[map_id]->ref_count++;
	
					if ( flow_stat ) {
						AddFlow(flow_record, master_record, extension_map_list->slot[map_id]);
						if ( element_stat ) {
							AddStat(flow_record, master_record);
						} 
					} else if ( element_stat ) {
						AddStat(flow_record, master_record);
					} else if ( sort_flows ) {
						InsertFlow(flow_record, master_record, extension_map_list->slot[map_id]);
					} else {
						if ( write_file ) {
							AppendToBuffer(nffile_w, (void *)flow_record, flow_record->size);
							if ( xstat ) 
								UpdateXStat(xstat, master_record);
						} else if ( print_record ) {
							char *string;
							// if we need to print out this record
							print_record(master_record, &string, tag);
							if ( string ) {
								if ( limitflows ) {
									if ( (stat_record.numflows <= limitflows) )
										printf("%s\n", string);
								} else 
									printf("%s\n", string);
							}
						} else { 
							// mutually exclusive conditions should prevent executing this code
							// this is buggy!
							printf("Bug! - this code should never get executed in file %s line %d\n", __FILE__, __LINE__);
						}
					} // sort_flows - else
					} break; 
				case ExtensionMapType: {
					extension_map_t *map = (extension_map_t *)flow_record;
	
					if ( Insert_Extension_Map(extension_map_list, map) && write_file ) {
						// flush new map
						AppendToBuffer(nffile_w, (void *)map, map->size);
					} // else map already known and flushed
					} break;
				case ExporterRecordType:
				case SamplerRecordype:
						// Silently skip exporter records
					break;
				case ExporterInfoRecordType: {
					int ret = AddExporterInfo((exporter_info_record_t *)flow_record);
					if ( ret != 0 ) {
						if ( write_file && ret == 1 ) 
							AppendToBuffer(nffile_w, (void *)flow_record, flow_record->size);
					} else {
						LogError("Failed to add Exporter Record\n");
					}
					} break;
				case ExporterStatRecordType:
					AddExporterStat((exporter_stats_record_t *)flow_record);
					break;
				case SamplerInfoRecordype: {
					int ret = AddSamplerInfo((sampler_info_record_t *)flow_record);
					if ( ret != 0 ) {
						if ( write_file && ret == 1 ) 
							AppendToBuffer(nffile_w, (void *)flow_record, flow_record->size);
					} else {
						LogError("Failed to add Sampler Record\n");
					}
					} break;
				default: {
					LogError("Skip unknown record type %i\n", flow_record->type);
				}
			}

		// Advance pointer by number of bytes for netflow record
		flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	


		} // for all records

		// check if we are done, due to -c option 
		if ( limitflows ) 
			done = stat_record.numflows >= limitflows;

	} // while

	CloseFile(nffile_r);

	// flush output file
	if ( write_file ) {
		// flush current buffer to disc
		if ( nffile_w->block_header->NumRecords ) {
			if ( WriteBlock(nffile_w) <= 0 ) {
				LogError("Failed to write output buffer to disk: '%s'" , strerror(errno));
			} 
		}

		if ( xstat ) {
			if ( WriteExtraBlock(nffile_w, xstat->block_header ) <= 0 ) {
				LogError("Failed to write xstat buffer to disk: '%s'" , strerror(errno));
			} 
		}

		/* Stat info */
		if ( write_file ) {
			/* Copy stat info and close file */
			memcpy((void *)nffile_w->stat_record, (void *)&stat_record, sizeof(stat_record_t));
			CloseUpdateFile(nffile_w, nffile_r->file_header->ident );
			nffile_w = DisposeFile(nffile_w);
		} // else stdout
	}	 

	PackExtensionMapList(extension_map_list);

	DisposeFile(nffile_r);
	return stat_record;

} // End of process_data
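
Examples #6, #7 and #10 all walk a data block the same way: NumRecords records stored back to back, each advanced by its own size field. A minimal sketch of that walk, assuming the common_record_t layout used above:

/* Sketch of the per-block record walk used by the processing loops in this listing. */
common_record_t *record = nffile_r->buff_ptr;
for (int i = 0; i < nffile_r->block_header->NumRecords; i++) {
	switch (record->type) {
		case CommonRecordType:
			// expand, filter and process the flow record here
			break;
		default:
			// extension map, exporter and sampler records are handled separately
			break;
	}
	// advance to the next record by this record's own size
	record = (common_record_t *)((pointer_addr_t)record + record->size);
}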
Example #7
static void send_data(char *rfile, time_t twin_start, 
			time_t twin_end, uint32_t count, unsigned int delay, int confirm, int netflow_version) {
master_record_t	master_record;
common_record_t	*flow_record;
nffile_t		*nffile;
int 			i, done, ret, again;
uint32_t		numflows, cnt;

#ifdef COMPAT15
int	v1_map_done = 0;
#endif
	
	// Get the first file handle
	nffile = GetNextFile(NULL, twin_start, twin_end);
	if ( !nffile ) {
		LogError("GetNextFile() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		return;
	}
	if ( nffile == EMPTY_LIST ) {
		LogError("Empty file list. No files to process\n");
		return;
	}

	peer.send_buffer   	= malloc(UDP_PACKET_SIZE);
	peer.flush			= 0;
	if ( !peer.send_buffer ) {
		LogError("malloc() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		CloseFile(nffile);
		DisposeFile(nffile);
		return;
	}
	peer.buff_ptr = peer.send_buffer;
	peer.endp  	  = (void *)((pointer_addr_t)peer.send_buffer + UDP_PACKET_SIZE - 1);

	if ( netflow_version == 5 ) 
		Init_v5_v7_output(&peer);
	else 
		Init_v9_output(&peer);

	numflows	= 0;
	done	 	= 0;

	// setup Filter Engine to point to master_record, as any record read from file
	// is expanded into this record
	Engine->nfrecord = (uint64_t *)&master_record;

	cnt = 0;
	while ( !done ) {
		// get next data block from file
		ret = ReadBlock(nffile);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT ) 
					LogError("Skip corrupt data file '%s'\n",GetCurrentFilename());
				else 
					LogError("Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF: {
				nffile_t *next = GetNextFile(nffile, twin_start, twin_end);
				if ( next == EMPTY_LIST ) {
					done = 1;
				}
				if ( next == NULL ) {
					done = 1;
					LogError("Unexpected end of file list\n");
				}
				// else continue with next file
				continue;
	
				} break; // not really needed
		}

#ifdef COMPAT15
		if ( nffile->block_header->id == DATA_BLOCK_TYPE_1 ) {
			common_record_v1_t *v1_record = (common_record_v1_t *)nffile->buff_ptr;
			// create an extension map for v1 blocks
			if ( v1_map_done == 0 ) {
				extension_map_t *map = malloc(sizeof(extension_map_t) + 2 * sizeof(uint16_t) );
				if ( ! map ) {
					perror("Memory allocation error");
					exit(255);
				}
				map->type 	= ExtensionMapType;
				map->size 	= sizeof(extension_map_t) + 2 * sizeof(uint16_t);
				if (( map->size & 0x3 ) != 0 ) {
					map->size += 4 - ( map->size & 0x3 );
				}
				map->map_id = INIT_ID;
				map->ex_id[0]  = EX_IO_SNMP_2;
				map->ex_id[1]  = EX_AS_2;
				map->ex_id[2]  = 0;
				
				map->extension_size  = 0;
				map->extension_size += extension_descriptor[EX_IO_SNMP_2].size;
				map->extension_size += extension_descriptor[EX_AS_2].size;
					
				Insert_Extension_Map(extension_map_list, map);
				v1_map_done = 1;
			}

			// convert the records to v2
			for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
				common_record_t *v2_record = (common_record_t *)v1_record;
				Convert_v1_to_v2((void *)v1_record);
				// now we have a v2 record -> use size of v2_record->size
				v1_record = (common_record_v1_t *)((pointer_addr_t)v1_record + v2_record->size);
			}
			nffile->block_header->id = DATA_BLOCK_TYPE_2;
		}
#endif

		if ( nffile->block_header->id != DATA_BLOCK_TYPE_2 ) {
			LogError("Can't process block type %u. Skip block.\n", nffile->block_header->id);
			continue;
		}

		// cnt is the number of records which passed the filter
		// and were added to the output buffer
		flow_record = nffile->buff_ptr;

		for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
			int match;

			switch ( flow_record->type ) {
				case CommonRecordType: {
					if ( extension_map_list->slot[flow_record->ext_map] == NULL ) {
						LogError("Corrupt data file. Missing extension map %u. Skip record.\n", flow_record->ext_map);
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	
						continue;
					} 

					// if no filter is given, the result is always true
					ExpandRecord_v2( flow_record, extension_map_list->slot[flow_record->ext_map], NULL, &master_record);

					match = twin_start && (master_record.first < twin_start || master_record.last > twin_end) ? 0 : 1;

					// filter netflow record with user supplied filter
					if ( match ) 
						match = (*Engine->FilterEngine)(Engine);
	
					if ( match == 0 ) { // record failed to pass all filters
						// increment pointer by number of bytes for netflow record
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	
						// go to next record
						continue;
					}
					// Records passed filter -> continue record processing

					if ( netflow_version == 5 ) 
						again = Add_v5_output_record(&master_record, &peer);
					else
						again = Add_v9_output_record(&master_record, &peer);
	
					cnt++;
					numflows++;

					if ( peer.flush ) {
						ret = FlushBuffer(confirm);
	
						if ( ret < 0 ) {
							perror("Error sending data");
							CloseFile(nffile);
							DisposeFile(nffile);
							return;
						}
			
						if ( delay ) {
							// sleep as specified
							usleep(delay);
						}
						cnt = 0;
					}
	
					if ( again ) {
						if ( netflow_version == 5 ) 
							Add_v5_output_record(&master_record, &peer);
						else
							Add_v9_output_record(&master_record, &peer);
						cnt++;
					}

					} break;
				case ExtensionMapType: {
					extension_map_t *map = (extension_map_t *)flow_record;
	
					if ( Insert_Extension_Map(extension_map_list, map) ) {
						// flush new map
						
					} // else map already known and flushed
	
					} break;
				case ExporterRecordType:
				case SamplerRecordype:
				case ExporterInfoRecordType:
				case ExporterStatRecordType:
				case SamplerInfoRecordype:
						// Silently skip exporter/sampler records
					break;
			 	default: {
					LogError("Skip unknown record type %i\n", flow_record->type);
				}
			}
			// Advance pointer by number of bytes for netflow record
			flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	

		}
	} // while

	// flush still remaining records
	if ( cnt ) {
		ret = FlushBuffer(confirm);

		if ( ret < 0 ) {
			perror("Error sending data");
		}

	} // if cnt 

	if (nffile) {
		CloseFile(nffile);
		DisposeFile(nffile);
	}

	close(peer.sockfd);

	return;

} // End of send_data
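
The CommonRecordType branch in send_data() above follows an add / flush / re-add pattern: the return value of Add_v5_output_record (or the v9 variant) appears to signal that the record did not fit into the current UDP packet, the packet is flushed, and the record is added again. A condensed view of that pattern, taken from the example above:

// Condensed view of the send pattern above (not a standalone function).
again = Add_v5_output_record(&master_record, &peer);   // sets peer.flush when the packet is full
if ( peer.flush ) {
	if ( FlushBuffer(confirm) < 0 )
		perror("Error sending data");
	if ( delay )
		usleep(delay);                                 // pace the replay if requested
	cnt = 0;
}
if ( again )
	Add_v5_output_record(&master_record, &peer);       // the record is re-added after the flush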
Example #8
static void run(packet_function_t receive_packet, int socket, send_peer_t peer, 
	time_t twin, time_t t_begin, int report_seq, int use_subdirs, int compress, int do_xstat) {
common_flow_header_t	*nf_header;
FlowSource_t			*fs;
struct sockaddr_storage nf_sender;
socklen_t 	nf_sender_size = sizeof(nf_sender);
time_t 		t_start, t_now;
uint64_t	export_packets;
uint32_t	blast_cnt, blast_failures, ignored_packets;
uint16_t	version;
ssize_t		cnt;
void 		*in_buff;
int 		err;
char 		*string;
srecord_t	*commbuff;

	if ( !Init_v1() || !Init_v5_v7_input() || !Init_v9() || !Init_IPFIX() )
		return;

	in_buff  = malloc(NETWORK_INPUT_BUFF_SIZE);
	if ( !in_buff ) {
		LogError("malloc() allocation error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		return;
	}

	// init vars
	commbuff = (srecord_t *)shmem;
	nf_header = (common_flow_header_t *)in_buff;

	// Init each netflow source output data buffer
	fs = FlowSource;
	while ( fs ) {

		// prepare file
		fs->nffile = OpenNewFile(fs->current, NULL, compress, 0, NULL);
		if ( !fs->nffile ) {
			return;
		}
		if ( do_xstat ) {
			fs->xstat = InitXStat(fs->nffile);
			if ( !fs->xstat ) 
				return;
		}
		// init vars
		fs->bad_packets		= 0;
		fs->first_seen      = 0xffffffffffffLL;
		fs->last_seen 		= 0;

		// next source
		fs = fs->next;
	}

	export_packets = blast_cnt = blast_failures = 0;
	t_start = t_begin;

	cnt = 0;
	periodic_trigger = 0;
	ignored_packets  = 0;

	// wake up at least at next time slot (twin) + some Overdue time
	alarm(t_start + twin + OVERDUE_TIME - time(NULL));
	/*
	 * Main processing loop:
	 * this loop continues until done = 1, set by the signal handler.
	 * The while loop is broken out of by the periodic file renaming code
	 * for proper cleanup 
	 */
	while ( 1 ) {
		struct timeval tv;

		/* read next bunch of data into the beginning of the input buffer */
		if ( !done) {
#ifdef PCAP
			// Debug code to read from pcap file, or from socket 
			cnt = receive_packet(socket, in_buff, NETWORK_INPUT_BUFF_SIZE , 0, 
						(struct sockaddr *)&nf_sender, &nf_sender_size);
						
			// in case of reading from file EOF => -2
			if ( cnt == -2 ) 
				done = 1;
#else
			cnt = recvfrom (socket, in_buff, NETWORK_INPUT_BUFF_SIZE , 0, 
						(struct sockaddr *)&nf_sender, &nf_sender_size);
#endif

			if ( cnt == -1 && errno != EINTR ) {
				LogError("ERROR: recvfrom: %s", strerror(errno));
				continue;
			}

			if ( peer.hostname ) {
				ssize_t len;
				len = sendto(peer.sockfd, in_buff, cnt, 0, (struct sockaddr *)&(peer.addr), peer.addrlen);
				if ( len < 0 ) {
					LogError("ERROR: sendto(): %s", strerror(errno));
				}
			}
		}

		/* Periodic file renaming, if time limit reached or if we are done.  */
		// t_now = time(NULL);
		gettimeofday(&tv, NULL);
		t_now = tv.tv_sec;

		if ( ((t_now - t_start) >= twin) || done ) {
			char subfilename[64];
			struct  tm *now;
			char	*subdir;

			alarm(0);
			now = localtime(&t_start);

			// prepare sub dir hierarchy
			if ( use_subdirs ) {
				subdir = GetSubDir(now);
				if ( !subdir ) {
					// failed to generate subdir path - put flows into base directory
					LogError("Failed to create subdir path!");
			
					// failed to generate subdir path - put flows into base directory
					subdir = NULL;
					snprintf(subfilename, 63, "nfcapd.%i%02i%02i%02i%02i%02i",
						now->tm_year + 1900, now->tm_mon + 1, now->tm_mday, now->tm_hour, now->tm_min, now->tm_sec);
				} else {
					snprintf(subfilename, 63, "%s/nfcapd.%i%02i%02i%02i%02i%02i", subdir,
						now->tm_year + 1900, now->tm_mon + 1, now->tm_mday, now->tm_hour, now->tm_min, now->tm_sec);
				}
			} else {
				subdir = NULL;
				snprintf(subfilename, 63, "nfcapd.%i%02i%02i%02i%02i%02i",
					now->tm_year + 1900, now->tm_mon + 1, now->tm_mday, now->tm_hour, now->tm_min, now->tm_sec);
			}
			subfilename[63] = '\0';

			// for each flow source update the stats, close the file and re-initialize the new file
			fs = FlowSource;
			while ( fs ) {
				char nfcapd_filename[MAXPATHLEN];
				char error[255];
				nffile_t *nffile = fs->nffile;

				if ( verbose ) {
					// Dump to stdout
					format_file_block_header(nffile->block_header, &string, 0);
					printf("%s\n", string);
				}

				if ( nffile->block_header->NumRecords ) {
					// flush current buffer to disc
					if ( WriteBlock(nffile) <= 0 )
						LogError("Ident: %s, failed to write output buffer to disk: '%s'" , fs->Ident, strerror(errno));
				} // else - no new records in current block

	
				// prepare filename
				snprintf(nfcapd_filename, MAXPATHLEN-1, "%s/%s", fs->datadir, subfilename);
				nfcapd_filename[MAXPATHLEN-1] = '\0';
	
				// update stat record
				// if no flows were collected, fs->last_seen is still 0
				// set first_seen to start of this time slot, with twin window size.
				if ( fs->last_seen == 0 ) {
					fs->first_seen = (uint64_t)1000 * (uint64_t)t_start;
					fs->last_seen  = (uint64_t)1000 * (uint64_t)(t_start + twin);
				}
				nffile->stat_record->first_seen = fs->first_seen/1000;
				nffile->stat_record->msec_first	= fs->first_seen - nffile->stat_record->first_seen*1000;
				nffile->stat_record->last_seen 	= fs->last_seen/1000;
				nffile->stat_record->msec_last	= fs->last_seen - nffile->stat_record->last_seen*1000;

				if ( fs->xstat ) {
					if ( WriteExtraBlock(nffile, fs->xstat->block_header ) <= 0 ) 
						LogError("Ident: %s, failed to write xstat buffer to disk: '%s'" , fs->Ident, strerror(errno));

					ResetPortHistogram(fs->xstat->port_histogram);
					ResetBppHistogram(fs->xstat->bpp_histogram);
				}

				// Flush Exporter Stat to file
				FlushExporterStats(fs);
				// Close file
				CloseUpdateFile(nffile, fs->Ident);

				if ( subdir && !SetupSubDir(fs->datadir, subdir, error, 255) ) {
					// in this case the flows get lost! - the rename will fail
					// but this should not happen anyway, unless i/o problems, inode problems etc.
					LogError("Ident: %s, Failed to create sub hier directories: %s", fs->Ident, error );
				}

				// if rename fails, we are in big trouble, as we need to get rid of the old .current file
				// otherwise we will lose flows and cannot continue collecting new flows
				err = rename(fs->current, nfcapd_filename);
				if ( err ) {
					LogError("Ident: %s, Can't rename dump file: %s", fs->Ident,  strerror(errno));
					LogError("Ident: %s, Serious Problem! Fix manually", fs->Ident);
					if ( launcher_pid )
						commbuff->failed = 1;

					// we do not update the books here, as the file failed to rename properly
					// otherwise the books may be wrong
				} else {
					struct stat	fstat;
					if ( launcher_pid )
						commbuff->failed = 0;

					// Update books
					stat(nfcapd_filename, &fstat);
					UpdateBooks(fs->bookkeeper, t_start, 512*fstat.st_blocks);
				}

				// log stats
				LogInfo("Ident: '%s' Flows: %llu, Packets: %llu, Bytes: %llu, Sequence Errors: %u, Bad Packets: %u", 
					fs->Ident, (unsigned long long)nffile->stat_record->numflows, (unsigned long long)nffile->stat_record->numpackets, 
					(unsigned long long)nffile->stat_record->numbytes, nffile->stat_record->sequence_failure, fs->bad_packets);

				// reset stats
				fs->bad_packets = 0;
				fs->first_seen  = 0xffffffffffffLL;
				fs->last_seen 	= 0;

				if ( !done ) {
					nffile = OpenNewFile(fs->current, nffile, compress, 0, NULL);
					if ( !nffile ) {
						LogError("killed due to fatal error: ident: %s", fs->Ident);
						break;
					}
					/* XXX needs fixing */
					if ( fs->xstat ) {
						// to be implemented
					}
				}

				// Dump all extension maps and exporters to the buffer
				FlushStdRecords(fs);

				// next flow source
				fs = fs->next;
			} // end of while (fs)

			// All flow sources updated - signal launcher if required
			if ( launcher_pid ) {
				// Signal launcher
		
				// prepare filename for %f expansion
				strncpy(commbuff->fname, subfilename, FNAME_SIZE-1);
				commbuff->fname[FNAME_SIZE-1] = 0;
				snprintf(commbuff->tstring, 16, "%i%02i%02i%02i%02i", 
					now->tm_year + 1900, now->tm_mon + 1, now->tm_mday, now->tm_hour, now->tm_min);
				commbuff->tstring[15] = 0;
				commbuff->tstamp = t_start;
				if ( subdir ) 
					strncpy(commbuff->subdir, subdir, FNAME_SIZE);
				else
					commbuff->subdir[0] = '\0';

				if ( launcher_alive ) {
					LogInfo("Signal launcher");
					kill(launcher_pid, SIGHUP);
				} else 
					LogError("ERROR: Launcher did unexpectedly!");

			}
			
			LogInfo("Total ignored packets: %u", ignored_packets);
			ignored_packets = 0;

			if ( done )
				break;

			// update alarm for next cycle
			t_start += twin;
			/* t_start = filename time stamp: begin of slot
		 	* + twin = end of next time interval
		 	* + OVERDUE_TIME = if no data is collected, this is at latest to act
		 	* - t_now = difference value to now
		 	*/
			alarm(t_start + twin + OVERDUE_TIME - t_now);

		}

		/* check for error condition or done . errno may only be EINTR */
		if ( cnt < 0 ) {
			if ( periodic_trigger ) {	
				// alarm triggered, no new flow data 
				periodic_trigger = 0;
				continue;
			}
			if ( done ) 
				// signaled to terminate - exit from loop
				break;
			else {
				/* this should never be executed as it should be caught in other places */
				LogError("error condition in '%s', line '%d', cnt: %i", __FILE__, __LINE__ ,(int)cnt);
				continue;
			}
		}

		/* enough data? */
		if ( cnt == 0 )
			continue;

		// get flow source record for current packet, identified by sender IP address
		fs = GetFlowSource(&nf_sender);
		if ( fs == NULL ) {
			fs = AddDynamicSource(&FlowSource, &nf_sender);
			if ( fs == NULL ) {
				LogError("Skip UDP packet. Ignored packets so far %u packets", ignored_packets);
				ignored_packets++;
				continue;
			}
			if ( InitBookkeeper(&fs->bookkeeper, fs->datadir, getpid(), launcher_pid) != BOOKKEEPER_OK ) {
				LogError("Failed to initialise bookkeeper for new source");
				// fatal error
				return;
			}
			fs->nffile = OpenNewFile(fs->current, NULL, compress, 0, NULL);
			if ( !fs->nffile ) {
				LogError("Failed to open new collector file");
				return;
			}
		}

		/* check for too little data - cnt must be > 0 at this point */
		if ( cnt < sizeof(common_flow_header_t) ) {
			LogError("Ident: %s, Data length error: too little data for common netflow header. cnt: %i",fs->Ident, (int)cnt);
			fs->bad_packets++;
			continue;
		}

		fs->received = tv;
		/* Process data - have a look at the common header */
		version = ntohs(nf_header->version);
		switch (version) {
			case 1: 
				Process_v1(in_buff, cnt, fs);
				break;
			case 5: // fall through
			case 7: 
				Process_v5_v7(in_buff, cnt, fs);
				break;
			case 9: 
				Process_v9(in_buff, cnt, fs);
				break;
			case 10: 
				Process_IPFIX(in_buff, cnt, fs);
				break;
			case 255:
				// blast test header
				if ( verbose ) {
					uint16_t count = ntohs(nf_header->count);
					if ( blast_cnt != count ) {
							// LogError("Missmatch blast check: Expected %u got %u\n", blast_cnt, count);
						blast_cnt = count;
						blast_failures++;
					} else {
						blast_cnt++;
					}
					if ( blast_cnt == 65535 ) {
						fprintf(stderr, "Total missed packets: %u\n", blast_failures);
						done = 1;
					}
					break;
				}
			default:
				// data error, while reading data from socket
				LogError("Ident: %s, Error reading netflow header: Unexpected netflow version %i", fs->Ident, version);
				fs->bad_packets++;
				continue;

				// not reached
				break;
		}
		// each Process_xx function has to process the entire input buffer, therefore it's empty now.
		export_packets++;

		// flush current buffer to disc
		if ( fs->nffile->block_header->size > BUFFSIZE ) {
			// fishy! - we already wrote into someone else's memory! - I'm sorry
			// reset output buffer - data may be lost, as we do not know where it happened
			fs->nffile->block_header->size 		 = 0;
			fs->nffile->block_header->NumRecords = 0;
			fs->nffile->buff_ptr = (void *)((pointer_addr_t)fs->nffile->block_header + sizeof(data_block_header_t) );
			LogError("### Software bug ### Ident: %s, output buffer overflow: expect memory inconsitency", fs->Ident);
		}
	}

	if ( verbose && blast_failures ) {
		fprintf(stderr, "Total missed packets: %u\n", blast_failures);
	}
	free(in_buff);

	fs = FlowSource;
	while ( fs ) {
		DisposeFile(fs->nffile);
		fs = fs->next;
	}

} /* End of run */
Example #9
__attribute__((noreturn)) static void *p_flow_thread(void *thread_data) {
// argument dispatching
p_flow_thread_args_t *args = (p_flow_thread_args_t *)thread_data;
time_t t_win				 = args->t_win;
int subdir_index			 = args->subdir_index;
int compress			 	 = args->compress;
FlowSource_t *fs			 = args->fs;

// locals
time_t t_start, t_clock, t_udp_flush;
int err, done;

	done 	   = 0;
	args->done = 0;
	args->exit = 0;

	err = pthread_setspecific( buffer_key, (void *)args );
	if ( err ) {
		LogError("[%lu] pthread_setspecific() error in %s line %d: %s\n", 
			(long unsigned)args->tid, __FILE__, __LINE__, strerror(errno) );
		args->done = 1;
		args->exit = 255;
   		pthread_kill(args->parent, SIGUSR1);
		pthread_exit((void *)args);
	}

	if ( !Init_pcap2nf() ) {
		args->done = 1;
		args->exit = 255;
   		pthread_kill(args->parent, SIGUSR1);
		pthread_exit((void *)args);
	}

	// prepare file
	fs->nffile = OpenNewFile(fs->current, NULL, compress, 0, NULL);
	if ( !fs->nffile ) {
		args->done = 1;
		args->exit = 255;
   		pthread_kill(args->parent, SIGUSR1);
		pthread_exit((void *)args);
	}
	fs->xstat = NULL;

	// init vars
	fs->bad_packets		= 0;
	fs->first_seen      = 0xffffffffffffLL;
	fs->last_seen 		= 0;

	t_start = 0;
	t_clock = 0;
	t_udp_flush = 0;
	while ( 1 ) {
		struct FlowNode	*Node;

		Node = Pop_Node(args->NodeList, &args->done);
		if ( Node ) {
			t_clock = Node->t_last.tv_sec;
			dbg_printf("p_flow_thread() Next Node\n");
		} else {
			done = args->done;
			dbg_printf("p_flow_thread() NULL Node\n");
		}

		if ( t_start == 0 ) {
			t_udp_flush = t_start = t_clock - (t_clock % t_win);
		}

		if (((t_clock - t_start) >= t_win) || done) { /* rotate file */
			struct tm *when;
			nffile_t *nffile;
			char FullName[MAXPATHLEN];
			char netflowFname[128];
			char error[256];
			char *subdir;

			// flush all flows to disk
			DumpNodeStat();
			uint32_t NumFlows  = Flush_FlowTree(fs);

			when = localtime(&t_start);
			nffile = fs->nffile;

			// prepare sub dir hierarchy
			if ( subdir_index ) {
				subdir = GetSubDir(when);
				if ( !subdir ) {
					// failed to generate subdir path - put flows into base directory
					LogError("Failed to create subdir path!");
				
					// failed to generate subdir path - put flows into base directory
					subdir = NULL;
					snprintf(netflowFname, 127, "nfcapd.%i%02i%02i%02i%02i",
						when->tm_year + 1900, when->tm_mon + 1, when->tm_mday, when->tm_hour, when->tm_min);
				} else {
					snprintf(netflowFname, 127, "%s/nfcapd.%i%02i%02i%02i%02i", subdir,
						when->tm_year + 1900, when->tm_mon + 1, when->tm_mday, when->tm_hour, when->tm_min);
				}

			} else {
				subdir = NULL;
				snprintf(netflowFname, 127, "nfcapd.%i%02i%02i%02i%02i",
					when->tm_year + 1900, when->tm_mon + 1, when->tm_mday, when->tm_hour, when->tm_min);
			}
			netflowFname[127] = '\0';
	
			if ( subdir && !SetupSubDir(fs->datadir, subdir, error, 255) ) {
				// in this case the flows get lost! - the rename will fail
				// but this should not happen anyway, unless i/o problems, inode problems etc.
				LogError("Ident: %s, Failed to create sub hier directories: %s", fs->Ident, error );
			}

			if ( nffile->block_header->NumRecords ) {
				// flush current buffer to disc
				if ( WriteBlock(nffile) <= 0 )
					LogError("Ident: %s, failed to write output buffer to disk: '%s'" , fs->Ident, strerror(errno));
			} // else - no new records in current block

			// prepare full filename
			snprintf(FullName, MAXPATHLEN-1, "%s/%s", fs->datadir, netflowFname);
			FullName[MAXPATHLEN-1] = '\0';

			// update stat record
			// if no flows were collected, fs->last_seen is still 0
			// set first_seen to start of this time slot, with twin window size.
			if ( fs->last_seen == 0 ) {
				fs->first_seen = (uint64_t)1000 * (uint64_t)t_start;
				fs->last_seen  = (uint64_t)1000 * (uint64_t)(t_start + t_win);
			}
			nffile->stat_record->first_seen = fs->first_seen/1000;
			nffile->stat_record->msec_first	= fs->first_seen - nffile->stat_record->first_seen*1000;
			nffile->stat_record->last_seen 	= fs->last_seen/1000;
			nffile->stat_record->msec_last	= fs->last_seen - nffile->stat_record->last_seen*1000;
	
			// Flush Exporter Stat to file
			FlushExporterStats(fs);
			// Close file
			CloseUpdateFile(nffile, fs->Ident);
	
			// if rename fails, we are in big trouble, as we need to get rid of the old .current file
			// otherwise we will lose flows and cannot continue collecting new flows
			if ( !RenameAppend(fs->current, FullName) ) {
				LogError("Ident: %s, Can't rename dump file: %s", fs->Ident,  strerror(errno));
				LogError("Ident: %s, Serious Problem! Fix manually", fs->Ident);
	/* XXX
				if ( launcher_pid )
					commbuff->failed = 1;
	*/
				// we do not update the books here, as the file failed to rename properly
				// otherwise the books may be wrong
			} else {
				struct stat	fstat;
	/* XXX
				if ( launcher_pid )
					commbuff->failed = 0;
	*/
				// Update books
				stat(FullName, &fstat);
				UpdateBooks(fs->bookkeeper, t_start, 512*fstat.st_blocks);
			}

			LogInfo("Ident: '%s' Flows: %llu, Packets: %llu, Bytes: %llu, Max Flows: %u", 
				fs->Ident, (unsigned long long)nffile->stat_record->numflows, (unsigned long long)nffile->stat_record->numpackets, 
				(unsigned long long)nffile->stat_record->numbytes, NumFlows);

			// reset stats
			fs->bad_packets = 0;
			fs->first_seen  = 0xffffffffffffLL;
			fs->last_seen 	= 0;
	
			// Dump all extension maps and exporters to the buffer
			FlushStdRecords(fs);

			if ( done ) 
				break;
	
			t_start = t_clock - (t_clock % t_win);

			nffile = OpenNewFile(fs->current, nffile, compress, 0, NULL);
			if ( !nffile ) {
				LogError("Fatal: OpenNewFile() failed for ident: %s", fs->Ident);
				args->done = 1;
				args->exit = 255;
   				pthread_kill(args->parent, SIGUSR1);
				break;
			}
		}

		if (((t_clock - t_udp_flush) >= 10) || !done) { /* flush inactive UDP list */
			UDPexpire(fs, t_clock - 10 );
			t_udp_flush = t_clock;
		}

		if ( Node->fin != SIGNAL_NODE )
			// Process the Node
			ProcessFlowNode(fs, Node);

	}

	while ( fs ) {
		DisposeFile(fs->nffile);
		fs = fs->next;
	}
	LogInfo("Terminating flow processng: exit: %i", args->exit);
	dbg_printf("End flow thread[%lu]\n", (long unsigned)args->tid);

	pthread_exit((void *)args);
	/* NOTREACHED */

} // End of p_flow_thread
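
p_flow_thread() above aligns each capture file slot to the packet clock with t_clock - (t_clock % t_win). A tiny worked sketch of that alignment, with hypothetical values:

/* Slot alignment as used above: round the packet clock down to the start
 * of the current t_win-sized slot. Values are hypothetical. */
time_t t_win   = 300;                            // 5-minute file slots
time_t t_clock = 1000;                           // current packet timestamp
time_t t_start = t_clock - (t_clock % t_win);    // => 900, the begin of the slot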
Example #10
static void process_data(void) {
master_record_t	master_record;
common_record_t *flow_record;
nffile_t		*nffile;
int 		i, done, ret;
#ifdef COMPAT15
int	v1_map_done = 0;
#endif

	// Get the first file handle
	nffile = GetNextFile(NULL, 0, 0);
	if ( !nffile ) {
		LogError("GetNextFile() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		return;
	}
	if ( nffile == EMPTY_LIST ) {
		LogError("Empty file list. No files to process\n");
		return;
	}

	done = 0;
	while ( !done ) {
		// get next data block from file
		ret = ReadBlock(nffile);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT ) 
					fprintf(stderr, "Skip corrupt data file '%s'\n",GetCurrentFilename());
				else 
					fprintf(stderr, "Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF: {
				nffile_t *next = GetNextFile(nffile, 0, 0);
				if ( next == EMPTY_LIST ) {
					done = 1;
				}
				if ( next == NULL ) {
					done = 1;
					LogError("Unexpected end of file list\n");
				}
				// else continue with next file
				continue;

				} break; // not really needed
		}

#ifdef COMPAT15
		if ( nffile->block_header->id == DATA_BLOCK_TYPE_1 ) {
			common_record_v1_t *v1_record = (common_record_v1_t *)nffile->buff_ptr;
			// create an extension map for v1 blocks
			if ( v1_map_done == 0 ) {
				extension_map_t *map = malloc(sizeof(extension_map_t) + 2 * sizeof(uint16_t) );
				if ( ! map ) {
					LogError("malloc() allocation error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
					exit(255);
				}
				map->type 	= ExtensionMapType;
				map->size 	= sizeof(extension_map_t) + 2 * sizeof(uint16_t);
				if (( map->size & 0x3 ) != 0 ) {
					map->size += 4 - ( map->size & 0x3 );
				}

				map->map_id = INIT_ID;

				map->ex_id[0]  = EX_IO_SNMP_2;
				map->ex_id[1]  = EX_AS_2;
				map->ex_id[2]  = 0;

				map->extension_size  = 0;

				Insert_Extension_Map(&extension_map_list, map);

				v1_map_done = 1;
			}

			// convert the records to v2
			for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
				common_record_t *v2_record = (common_record_t *)v1_record;
				Convert_v1_to_v2((void *)v1_record);
				// now we have a v2 record -> use size of v2_record->size
				v1_record = (common_record_v1_t *)((pointer_addr_t)v1_record + v2_record->size);
			}
			nffile->block_header->id = DATA_BLOCK_TYPE_2;
		}
#endif

		if ( nffile->block_header->id == Large_BLOCK_Type ) {
			// skip
			continue;
		}

		if ( nffile->block_header->id != DATA_BLOCK_TYPE_2 ) {
			fprintf(stderr, "Can't process block type %u. Skip block.\n", nffile->block_header->id);
			continue;
		}

		flow_record = nffile->buff_ptr;
		for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
			char        string[1024];

			switch ( flow_record->type ) {
				case CommonRecordType: {
					uint32_t map_id = flow_record->ext_map;
					generic_exporter_t *exp_info = exporter_list[flow_record->exporter_sysid];
					if ( extension_map_list.slot[map_id] == NULL ) {
						snprintf(string, 1024, "Corrupt data file! No such extension map id: %u. Skip record", flow_record->ext_map );
						string[1023] = '\0';
					} else {
						ExpandRecord_v2( flow_record, extension_map_list.slot[flow_record->ext_map], 
							exp_info ? &(exp_info->info) : NULL, &master_record);

						// update number of flows matching a given map
						extension_map_list.slot[map_id]->ref_count++;
			
						/*
						 * insert your calls to your processing routine here;
						 * master_record now contains the next flow record as specified in nffile.c.
						 * For example, you can print each record (a minimal sketch of a custom
						 * routine follows this example):
						 */
						print_record(&master_record, string);
					}
					printf("%s\n", string);
	
					} break;
				case ExtensionMapType: {
					extension_map_t *map = (extension_map_t *)flow_record;

					if ( Insert_Extension_Map(&extension_map_list, map) ) {
					 	// flush new map
					} // else map already known and flushed

					} break;
				case ExporterRecordType:
				case SamplerRecordype:
						// Silently skip exporter records
					break;
				default: {
					fprintf(stderr, "Skip unknown record type %i\n", flow_record->type);
				}
			}

			// Advance pointer by number of bytes for netflow record
			flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	

		} // for all records

	} // while

	CloseFile(nffile);
	DisposeFile(nffile);

	PackExtensionMapList(&extension_map_list);

} // End of process_data
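Note: the record walk inside the block-processing loop above repeats, with small variations, in the following examples. The sketch below is a minimal factoring of that idiom, assuming only the nffile.h/nfx.h types already used above; walk_block() and record_cb_t are hypothetical names, not part of nfdump.

/* Sketch only: walk one DATA_BLOCK_TYPE_2 block and hand every common record
 * to a caller-supplied callback. walk_block() and record_cb_t are hypothetical. */
typedef void (*record_cb_t)(common_record_t *flow_record, void *arg);

static void walk_block(nffile_t *nffile, record_cb_t cb, void *arg) {
	common_record_t *flow_record = nffile->buff_ptr;
	int i;

	for ( i = 0; i < nffile->block_header->NumRecords; i++ ) {
		if ( flow_record->type == CommonRecordType )
			cb(flow_record, arg);
		// each record carries its own length, so advance by flow_record->size
		flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);
	}
}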
Exemple #11
0
static void process_data(profile_channel_info_t *channels, unsigned int num_channels, time_t tslot, int do_xstat) {
common_record_t	*flow_record;
nffile_t		*nffile;
FilterEngine_data_t	*engine;
int 		i, j, done, ret ;
#ifdef COMPAT15
int	v1_map_done = 0;
#endif

	nffile = GetNextFile(NULL, 0, 0);
	if ( !nffile ) {
		LogError("GetNextFile() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		return;
	}
	if ( nffile == EMPTY_LIST ) {
		LogError("Empty file list. No files to process\n");
		return;
	}

	// store info away for later use
	// although multiple files may be processed, it is assumed that all 
	// have the same settings
	is_anonymized = IP_ANONYMIZED(nffile);
	strncpy(Ident, nffile->file_header->ident, IDENTLEN);
	Ident[IDENTLEN-1] = '\0';

	done = 0;
	while ( !done ) {

		// get next data block from file
		ret = ReadBlock(nffile);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT ) 
					LogError("Skip corrupt data file '%s'\n",GetCurrentFilename());
				else 
					LogError("Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF: {
				nffile_t *next = GetNextFile(nffile, 0, 0);
				if ( next == EMPTY_LIST ) {
					done = 1;
				}
				if ( next == NULL ) {
					done = 1;
					LogError("Unexpected end of file list\n");
				}
				continue;
	
				} break; // not really needed
		}

#ifdef COMPAT15
		if ( nffile->block_header->id == DATA_BLOCK_TYPE_1 ) {
			common_record_v1_t *v1_record = (common_record_v1_t *)nffile->buff_ptr;
			// create an extension map for v1 blocks
			if ( v1_map_done == 0 ) {
				extension_map_t *map = malloc(sizeof(extension_map_t) + 2 * sizeof(uint16_t) );
				if ( ! map ) {
					LogError("malloc() allocation error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
					exit(255);
				}
				map->type 	= ExtensionMapType;
				map->size 	= sizeof(extension_map_t) + 2 * sizeof(uint16_t);
				if (( map->size & 0x3 ) != 0 ) {
					map->size += 4 - ( map->size & 0x3 );
				}
				
				map->map_id = INIT_ID;
				map->ex_id[0]  = EX_IO_SNMP_2;
				map->ex_id[1]  = EX_AS_2;
				map->ex_id[2]  = 0;

				map->extension_size  = 0;
				map->extension_size += extension_descriptor[EX_IO_SNMP_2].size;
				map->extension_size += extension_descriptor[EX_AS_2].size;
				
				if ( Insert_Extension_Map(extension_map_list, map) ) {
					int j;
					for ( j=0; j < num_channels; j++ ) {
						if ( channels[j].nffile != NULL) {
							// flush new map
							AppendToBuffer(channels[j].nffile, (void *)map, map->size);
						}
					}
				} // else map already known and flushed
			
				v1_map_done = 1;
			}

			// convert the records to v2
			for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
				common_record_t *v2_record = (common_record_t *)v1_record;
				Convert_v1_to_v2((void *)v1_record);
				// now we have a v2 record -> advance by v2_record->size
				v1_record = (common_record_v1_t *)((pointer_addr_t)v1_record + v2_record->size);
			}
			nffile->block_header->id = DATA_BLOCK_TYPE_2;
		}
#endif

		if ( nffile->block_header->id == Large_BLOCK_Type ) {
			// skip
			continue;
		}

		if ( nffile->block_header->id != DATA_BLOCK_TYPE_2 ) {
			LogError("Can't process block type %u. Skip block.\n", nffile->block_header->id);
			continue;
		}

		flow_record = nffile->buff_ptr;
		for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
			switch ( flow_record->type ) { 
				case CommonRecordType: {
					generic_exporter_t *exp_info = exporter_list[flow_record->exporter_sysid];
					uint32_t map_id = flow_record->ext_map;
					master_record_t	*master_record;

					if ( extension_map_list->slot[map_id] == NULL ) {
						LogError("Corrupt data file. Missing extension map %u. Skip record.\n", flow_record->ext_map);
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	
						continue;
					} 
	
					master_record = &(extension_map_list->slot[map_id]->master_record);
					ExpandRecord_v2( flow_record, extension_map_list->slot[flow_record->ext_map], 
						exp_info ? &(exp_info->info) : NULL, master_record);

					for ( j=0; j < num_channels; j++ ) {
						int match;
	
						// apply profile filter
						(channels[j].engine)->nfrecord 	= (uint64_t *)master_record;
						engine = channels[j].engine;
						match = (*engine->FilterEngine)(engine);
	
						// if profile filter failed -> next profile
						if ( !match )
							continue;
	
						// filter was successful -> continue record processing
	
						// update statistics
						UpdateStat(&channels[j].stat_record, master_record);
						if ( channels[j].nffile ) 
							UpdateStat(channels[j].nffile->stat_record, master_record);
	
						if ( channels[j].xstat ) 
							UpdateXStat(channels[j].xstat, master_record);
	
						// do we need to write data to new file - shadow profiles do not have files.
						// check if we need to flush the output buffer
						if ( channels[j].nffile != NULL ) {
							// write record to output buffer
							AppendToBuffer(channels[j].nffile, (void *)flow_record, flow_record->size);
						} 
	
					} // End of for all channels
	
					} break;
				case ExtensionMapType: {
					extension_map_t *map = (extension_map_t *)flow_record;
	
					if ( Insert_Extension_Map(extension_map_list, map) ) {
						int j;
						for ( j=0; j < num_channels; j++ ) {
							if ( channels[j].nffile != NULL ) {
								// flush new map
								AppendToBuffer(channels[j].nffile, (void *)map, map->size);
							}
						}
					} // else map already known and flushed
	
					} break; 
				case ExporterInfoRecordType: {
					int ret = AddExporterInfo((exporter_info_record_t *)flow_record);
					if ( ret != 0 ) {
						int j;
						for ( j=0; j < num_channels; j++ ) {
							if ( channels[j].nffile != NULL && ret == 1) {
								// flush new exporter
								AppendToBuffer(channels[j].nffile, (void *)flow_record, flow_record->size);
							}
						}
					} else {
						LogError("Failed to add Exporter Record\n");
					}
					} break;
				case SamplerInfoRecordype: {
					int ret = AddSamplerInfo((sampler_info_record_t *)flow_record);
					if ( ret != 0 ) {
						int j;
						for ( j=0; j < num_channels; j++ ) {
							if ( channels[j].nffile != NULL && ret == 1 ) {
								// flush new map
								AppendToBuffer(channels[j].nffile, (void *)flow_record, flow_record->size);
							}
						}
					} else {
						LogError("Failed to add Sampler Record\n");
					}
					} break;
				case ExporterRecordType:
				case SamplerRecordype:
				case ExporterStatRecordType:
						// Silently skip exporter records
					break;
				default:  {
					LogError("Skip unknown record type %i\n", flow_record->type);
				}
			}
			// Advance pointer by number of bytes for netflow record
			flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);

		} // End of for all records
	} // End of while !done

	// do we need to write data to new file - shadow profiles do not have files.
	for ( j=0; j < num_channels; j++ ) {
		if ( channels[j].nffile != NULL ) {
			// flush output buffer
			if ( channels[j].nffile->block_header->NumRecords ) {
				if ( WriteBlock(channels[j].nffile) <= 0 ) {
					LogError("Failed to write output buffer to disk: '%s'" , strerror(errno));
				} 
			} 
		}
	}
	CloseFile(nffile);
	DisposeFile(nffile);

} // End of process_data
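Note: the per-channel filter test in the example above is the same three steps each time: attach the expanded record to the engine, run the compiled filter, check the result. The sketch below shows that test as a standalone helper, assuming the FilterEngine_data_t and profile_channel_info_t fields used above; channel_match() is a hypothetical name.

/* Sketch only: returns non-zero if master_record matches the channel's
 * compiled profile filter. channel_match() is a hypothetical helper. */
static int channel_match(profile_channel_info_t *channel, master_record_t *master_record) {
	FilterEngine_data_t *engine = channel->engine;

	engine->nfrecord = (uint64_t *)master_record;
	return (*engine->FilterEngine)(engine);
}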
Exemple #12
0
int flows2nfdump(struct ftio *ftio, char *wfile, int compress, extension_info_t *extension_info, int extended, uint32_t limitflows) {
// required flow tools variables
struct fttime 		ftt;
struct fts3rec_offsets fo;
struct ftver 		ftv;
char				*rec;
// nfdump variables
nffile_t			*nffile;
master_record_t	 record;
char				*s;
uint32_t			cnt;

	s = "flow-tools";
	nffile = OpenNewFile( wfile , NULL, compress, 0, s);
	if ( !nffile ) {
		fprintf(stderr, "%s\n", s);
		return 1;
	}

	AppendToBuffer(nffile, (void *)extension_info->map, extension_info->map->size);
	
	ftio_get_ver(ftio, &ftv);
	fts3rec_compute_offsets(&fo, &ftv);

	memset((void *)&record, 0, sizeof(record));
	record.map_ref 		  = extension_info->map;
	record.type 		  = CommonRecordType;
	record.exporter_sysid = 0;

	// only v4 addresses
	ClearFlag(record.flags, FLAG_IPV6_ADDR);

	cnt = 0;
	while ((rec = ftio_read(ftio))) {
		uint32_t when, unix_secs, unix_nsecs, sysUpTime;
		int i, id;

		unix_secs  = *((uint32_t*)(rec+fo.unix_secs));
		unix_nsecs = *((uint32_t*)(rec+fo.unix_nsecs));
		sysUpTime  = *((uint32_t*)(rec+fo.sysUpTime));

		when	   = *((uint32_t*)(rec+fo.First));
		ftt = ftltime(sysUpTime, unix_secs, unix_nsecs, when);
		record.first 		= ftt.secs;
		record.msec_first 	= ftt.msecs;
	
		when	   = *((uint32_t*)(rec+fo.Last));
		ftt = ftltime(sysUpTime, unix_secs, unix_nsecs, when);
		record.last 		= ftt.secs;
		record.msec_last 	= ftt.msecs;
	
		record.v4.srcaddr 	= *((uint32_t*)(rec+fo.srcaddr));
		record.v4.dstaddr 	= *((uint32_t*)(rec+fo.dstaddr));
		record.srcport 		= *((uint16_t*)(rec+fo.srcport));
		record.dstport 		= *((uint16_t*)(rec+fo.dstport));

		record.prot 		= *((uint8_t*)(rec+fo.prot));
		record.tcp_flags	= *((uint8_t*)(rec+fo.tcp_flags));
		record.tos 			= *((uint8_t*)(rec+fo.tos));

		record.dOctets 		= *((uint32_t*)(rec+fo.dOctets));
		record.dPkts 		= *((uint32_t*)(rec+fo.dPkts));

		i = 0;
		while ( (id = extension_info->map->ex_id[i]) != 0 ) {
			switch (id) {
				case EX_IO_SNMP_2:
					record.input 		= *((uint16_t*)(rec+fo.input));
					record.output 		= *((uint16_t*)(rec+fo.output));
					break;
				case EX_AS_2:
					record.srcas 		= *((uint16_t*)(rec+fo.src_as));
					record.dstas 		= *((uint16_t*)(rec+fo.dst_as));
					break;
				case EX_MULIPLE:
    				record.src_mask 	= *((uint8_t*)(rec+fo.src_mask));
    				record.dst_mask 	= *((uint8_t*)(rec+fo.dst_mask));
					record.dir			= 0;
					record.dst_tos  	= 0;
					break;
				case EX_ROUTER_IP_v4:
					record.ip_router.v4 = *((uint32_t*)(rec+fo.router_sc));
					break;
				case EX_NEXT_HOP_v4:
					record.ip_nexthop.v4 = *((uint32_t*)(rec+fo.peer_nexthop));
					break;
				case EX_ROUTER_ID:
					record.engine_type = *((uint8_t*)(rec+fo.engine_type));
					record.engine_id   = *((uint8_t*)(rec+fo.engine_id));
					break;
				// default: other extensions cannot be sent with v5
			}
			i++;
		}

		PackRecord(&record, nffile);

		if ( extended ) {
			char *string;
			format_file_block_record(&record, &string, 0);
			fprintf(stderr, "%s\n", string);
		} 

		cnt++;
		if ( cnt == limitflows )
			break;

	} /* while */

	// write the last records in buffer
	if ( nffile->block_header->NumRecords ) {
		if ( WriteBlock(nffile) <= 0 ) {
			fprintf(stderr, "Failed to write output buffer: '%s'" , strerror(errno));
		} 
	}

	free((void *)extension_info->map);
	free((void *)extension_info);
	// finalize the file header and stat record, then release the handle
	CloseUpdateFile(nffile, NULL);
	DisposeFile(nffile);

	return 0;

} // End of flows2nfdump
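Note: flows2nfdump() expects an already initialized flow-tools stream and a prepared extension map, and it frees the map itself before returning. The following is a hedged usage sketch, not code from ft2nfdump: it assumes the flow-tools calls ftio_init() and ftio_close() with their usual (ftio, fd, flags) interface, and a hypothetical build_extension_map() helper that fills an extension_info_t whose map lists the ex_id values consumed in the switch above.

/* Sketch only: convert a flow-tools stream read from stdin into an nfdump file.
 * build_extension_map() is a hypothetical helper, not part of ft2nfdump. */
static int convert_stdin(char *wfile, int compress) {
	struct ftio ftio;
	extension_info_t *extension_info;
	int ret;

	if ( ftio_init(&ftio, 0, FT_IO_FLAG_READ) < 0 ) {	// fd 0 = stdin
		fprintf(stderr, "ftio_init() failed\n");
		return 1;
	}

	extension_info = build_extension_map();	// hypothetical: sets extension_info->map
	if ( extension_info == NULL ) {
		ftio_close(&ftio);
		return 1;
	}

	// limitflows = 0 converts all records; extended = 0 suppresses per-record printing
	ret = flows2nfdump(&ftio, wfile, compress, extension_info, 0, 0);

	ftio_close(&ftio);
	return ret;
}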