Code example #1
File: nfreplay.c  Project: phaag/nfdump
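/*
 * Replay the flow records of the queued input files to the peer described by the
 * global 'peer' structure, encoded as NetFlow v5/v7 or v9 datagrams. Each record is
 * run through the compiled filter engine and, if twin_start is set, checked against
 * the time window; 'delay' pauses (usleep, microseconds) after each packet sent and
 * 'confirm' is handed through to FlushBuffer().
 */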
static void send_data(char *rfile, time_t twin_start, 
			time_t twin_end, uint32_t count, unsigned int delay, int confirm, int netflow_version) {
master_record_t	master_record;
common_record_t	*flow_record;
nffile_t		*nffile;
int 			i, done, ret, again;
uint32_t		numflows, cnt;

#ifdef COMPAT15
int	v1_map_done = 0;
#endif
	
	// Get the first file handle
	nffile = GetNextFile(NULL, twin_start, twin_end);
	if ( !nffile ) {
		LogError("GetNextFile() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		return;
	}
	if ( nffile == EMPTY_LIST ) {
		LogError("Empty file list. No files to process\n");
		return;
	}

	peer.send_buffer   	= malloc(UDP_PACKET_SIZE);
	peer.flush			= 0;
	if ( !peer.send_buffer ) {
		LogError("malloc() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		CloseFile(nffile);
		DisposeFile(nffile);
		return;
	}
	peer.buff_ptr = peer.send_buffer;
	peer.endp  	  = (void *)((pointer_addr_t)peer.send_buffer + UDP_PACKET_SIZE - 1);

	if ( netflow_version == 5 ) 
		Init_v5_v7_output(&peer);
	else 
		Init_v9_output(&peer);

	numflows	= 0;
	done	 	= 0;

	// set up the filter engine to point at master_record, as any record read
	// from the file is expanded into this record
	Engine->nfrecord = (uint64_t *)&master_record;

	cnt = 0;
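	// main replay loop: read the input block by block, filter each record and queue it for export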
	while ( !done ) {
		// get next data block from file
		ret = ReadBlock(nffile);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT ) 
					LogError("Skip corrupt data file '%s'\n",GetCurrentFilename());
				else 
					LogError("Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF: {
				nffile_t *next = GetNextFile(nffile, twin_start, twin_end);
				if ( next == EMPTY_LIST ) {
					done = 1;
				}
				if ( next == NULL ) {
					done = 1;
					LogError("Unexpected end of file list\n");
				}
				// else continue with next file
				continue;
	
				} break; // not really needed
		}

#ifdef COMPAT15
		if ( nffile->block_header->id == DATA_BLOCK_TYPE_1 ) {
			common_record_v1_t *v1_record = (common_record_v1_t *)nffile->buff_ptr;
			// create an extension map for v1 blocks
			if ( v1_map_done == 0 ) {
				extension_map_t *map = malloc(sizeof(extension_map_t) + 2 * sizeof(uint16_t) );
				if ( ! map ) {
					perror("Memory allocation error");
					exit(255);
				}
				map->type 	= ExtensionMapType;
				map->size 	= sizeof(extension_map_t) + 2 * sizeof(uint16_t);
				if (( map->size & 0x3 ) != 0 ) {
					map->size += 4 - ( map->size & 0x3 );
				}
				map->map_id = INIT_ID;
				map->ex_id[0]  = EX_IO_SNMP_2;
				map->ex_id[1]  = EX_AS_2;
				map->ex_id[2]  = 0;
				
				map->extension_size  = 0;
				map->extension_size += extension_descriptor[EX_IO_SNMP_2].size;
				map->extension_size += extension_descriptor[EX_AS_2].size;
					
				Insert_Extension_Map(extension_map_list, map);
				v1_map_done = 1;
			}

			// convert the records to v2
			for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
				common_record_t *v2_record = (common_record_t *)v1_record;
				Convert_v1_to_v2((void *)v1_record);
				// now we have a v2 record -> advance by v2_record->size
				v1_record = (common_record_v1_t *)((pointer_addr_t)v1_record + v2_record->size);
			}
			nffile->block_header->id = DATA_BLOCK_TYPE_2;
		}
#endif

		if ( nffile->block_header->id != DATA_BLOCK_TYPE_2 ) {
			LogError("Can't process block type %u. Skip block.\n", nffile->block_header->id);
			continue;
		}

		// cnt is the number of records which survived the filter
		// and were added to the output buffer
		flow_record = nffile->buff_ptr;

		for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
			int match;

			switch ( flow_record->type ) {
				case CommonRecordType: {
					if ( extension_map_list->slot[flow_record->ext_map] == NULL ) {
						LogError("Corrupt data file. Missing extension map %u. Skip record.\n", flow_record->ext_map);
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	
						continue;
					} 

					// if no filter is given, the result is always true
					ExpandRecord_v2( flow_record, extension_map_list->slot[flow_record->ext_map], NULL, &master_record);

					// apply the time window, if one was given (twin_start == 0 disables the check)
					match = twin_start && (master_record.first < twin_start || master_record.last > twin_end) ? 0 : 1;

					// filter netflow record with user supplied filter
					if ( match ) 
						match = (*Engine->FilterEngine)(Engine);
	
					if ( match == 0 ) { // record failed to pass all filters
						// increment pointer by number of bytes for netflow record
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	
						// go to next record
						continue;
					}
					// Records passed filter -> continue record processing

					if ( netflow_version == 5 ) 
						again = Add_v5_output_record(&master_record, &peer);
					else
						again = Add_v9_output_record(&master_record, &peer);
	
					cnt++;
					numflows++;

					if ( peer.flush ) {
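						// the encoder set peer.flush: send the current packet before adding more records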
						ret = FlushBuffer(confirm);
	
						if ( ret < 0 ) {
							perror("Error sending data");
							CloseFile(nffile);
							DisposeFile(nffile);
							return;
						}
			
						if ( delay ) {
							// sleep as specified
							usleep(delay);
						}
						cnt = 0;
					}
	
					if ( again ) {
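						// the record did not fit into the now flushed packet; add it to the empty buffer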
						if ( netflow_version == 5 ) 
							Add_v5_output_record(&master_record, &peer);
						else
							Add_v9_output_record(&master_record, &peer);
						cnt++;
					}

					} break;
				case ExtensionMapType: {
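					// an extension map embedded in the data stream - register it so that
					// subsequent records referencing it can be expanded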
					extension_map_t *map = (extension_map_t *)flow_record;
	
					if ( Insert_Extension_Map(extension_map_list, map) ) {
						// flush new map
						
					} // else map already known and flushed
	
					} break;
				case ExporterRecordType:
				case SamplerRecordype:
				case ExporterInfoRecordType:
				case ExporterStatRecordType:
				case SamplerInfoRecordype:
						// Silently skip exporter/sampler records
					break;
			 	default: {
					LogError("Skip unknown record type %i\n", flow_record->type);
				}
			}
			// Advance pointer by number of bytes for netflow record
			flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	

		}
	} // while

	// flush any records still remaining in the output buffer
	if ( cnt ) {
		ret = FlushBuffer(confirm);

		if ( ret < 0 ) {
			perror("Error sending data");
		}

	} // if cnt 

	if (nffile) {
		CloseFile(nffile);
		DisposeFile(nffile);
	}

	close(peer.sockfd);

	return;

} // End of send_data
Code example #2
File: nfreplay.c  Project: haegardev/libnfdump
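/*
 * Variant of send_data() from haegardev/libnfdump: the same replay logic, but using
 * the file-descriptor based read API. Records are filtered, optionally anonymized
 * when 'anon' is set, and sent to the peer as NetFlow v5/v7 or v9 datagrams.
 */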
static void send_data(char *rfile, time_t twin_start, 
			time_t twin_end, uint32_t count, unsigned int delay, int confirm, int anon, int netflow_version) {
data_block_header_t in_block_header;					
master_record_t		master_record;
common_record_t		*flow_record, *in_buff;
stat_record_t 		*stat_record;
int 		i, rfd, done, ret, again;
uint32_t	numflows, cnt;
char 		*string;

#ifdef COMPAT15
int	v1_map_done = 0;
#endif
	
	rfd = GetNextFile(0, twin_start, twin_end, &stat_record);
	if ( rfd < 0 ) {
		if ( rfd == FILE_ERROR )
			fprintf(stderr, "Can't open file for reading: %s\n", strerror(errno));
		return;
	}

	// prepare read and send buffer
	in_buff = (common_record_t *) malloc(BUFFSIZE);
	peer.send_buffer   	= malloc(UDP_PACKET_SIZE);
	peer.flush			= 0;
	if ( !in_buff || !peer.send_buffer ) {
		perror("Memory allocation error");
		close(rfd);
		return;
	}
	peer.writeto  = peer.send_buffer;
	peer.endp  	  = (void *)((pointer_addr_t)peer.send_buffer + UDP_PACKET_SIZE - 1);

	if ( netflow_version == 5 ) 
		Init_v5_v7_output(&peer);
	else 
		Init_v9_output(&peer);

	numflows	= 0;
	done	 	= 0;

	// set up the filter engine to point at master_record, as any record read
	// from the file is expanded into this record
	Engine->nfrecord = (uint64_t *)&master_record;

	cnt = 0;
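	// main replay loop: read the input block by block, filter each record and queue it for export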
	while ( !done ) {
		// get next data block from file
		ret = ReadBlock(rfd, &in_block_header, (void *)in_buff, &string);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT ) 
					fprintf(stderr, "Skip corrupt data file '%s': '%s'\n",GetCurrentFilename(), string);
				else 
					fprintf(stderr, "Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF:
				rfd = GetNextFile(rfd, twin_start, twin_end, NULL);
				if ( rfd < 0 ) {
					if ( rfd == NF_ERROR )
						fprintf(stderr, "Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );

					// rfd == EMPTY_LIST
					done = 1;
				} // else continue with next file
				continue;
	
				break; // not really needed
		}

#ifdef COMPAT15
		if ( in_block_header.id == DATA_BLOCK_TYPE_1 ) {
			common_record_v1_t *v1_record = (common_record_v1_t *)in_buff;
			// create an extension map for v1 blocks
			if ( v1_map_done == 0 ) {
				extension_map_t *map = malloc(sizeof(extension_map_t) + 2 * sizeof(uint16_t) );
				if ( ! map ) {
					perror("Memory allocation error");
					exit(255);
				}
				map->type 	= ExtensionMapType;
				map->size 	= sizeof(extension_map_t) + 2 * sizeof(uint16_t);
				map->map_id = INIT_ID;
				map->ex_id[0]  = EX_IO_SNMP_2;
				map->ex_id[1]  = EX_AS_2;
				map->ex_id[2]  = 0;
				
				Insert_Extension_Map(&extension_map_list, map);
				v1_map_done = 1;
			}

			// convert the records to v2
			for ( i=0; i < in_block_header.NumRecords; i++ ) {
				common_record_t *v2_record = (common_record_t *)v1_record;
				Convert_v1_to_v2((void *)v1_record);
				// now we have a v2 record -> advance by v2_record->size
				v1_record = (common_record_v1_t *)((pointer_addr_t)v1_record + v2_record->size);
			}
			in_block_header.id = DATA_BLOCK_TYPE_2;
		}
#endif

		if ( in_block_header.id != DATA_BLOCK_TYPE_2 ) {
			fprintf(stderr, "Can't process block type %u. Skip block.\n", in_block_header.id);
			continue;
		}

		// cnt is the number of records which survived the filter
		// and were added to the output buffer
		flow_record = in_buff;

		for ( i=0; i < in_block_header.NumRecords; i++ ) {
			int match;

			if ( flow_record->type == CommonRecordType ) {
				if ( extension_map_list.slot[flow_record->ext_map] == NULL ) {
					fprintf(stderr, "Corrupt data file. Missing extension map %u. Skip record.\n", flow_record->ext_map);
					flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	
					continue;
				} 

				// if no filter is given, the result is always true
				ExpandRecord_v2( flow_record, extension_map_list.slot[flow_record->ext_map], &master_record);

				// apply the time window, if one was given (twin_start == 0 disables the check)
				match = twin_start && (master_record.first < twin_start || master_record.last > twin_end) ? 0 : 1;

				// filter netflow record with user supplied filter
				if ( match ) 
					match = (*Engine->FilterEngine)(Engine);

				if ( match == 0 ) { // record failed to pass all filters
					// increment pointer by number of bytes for netflow record
					flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	
					// go to next record
					continue;
				}
				// Records passed filter -> continue record processing

				if ( anon ) {
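					// overwrite the source and destination addresses of the expanded
					// record with their anonymized counterparts before encoding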
					if ( (flow_record->flags & FLAG_IPV6_ADDR ) == 0 ) {
						master_record.v4.srcaddr = anonymize(master_record.v4.srcaddr);
						master_record.v4.dstaddr = anonymize(master_record.v4.dstaddr);
					} else {
						uint64_t	anon_ip[2];
						anonymize_v6(master_record.v6.srcaddr, anon_ip);
						master_record.v6.srcaddr[0] = anon_ip[0];
						master_record.v6.srcaddr[1] = anon_ip[1];
	
						anonymize_v6(master_record.v6.dstaddr, anon_ip);
						master_record.v6.dstaddr[0] = anon_ip[0];
						master_record.v6.dstaddr[1] = anon_ip[1];
					}
				}

				if ( netflow_version == 5 ) 
					again = Add_v5_output_record(&master_record, &peer);
				else
					again = Add_v9_output_record(&master_record, &peer);

				cnt++;
				numflows++;

				if ( peer.flush ) {
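					// the encoder set peer.flush: send the current packet before adding more records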
					ret = FlushBuffer(confirm);
	
					if ( ret < 0 ) {
						perror("Error sending data");
						close(rfd);
						return;
					}
		
					if ( delay ) {
						// sleep as specified
						usleep(delay);
					}
					cnt = 0;
				}

				if ( again ) {
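					// the record did not fit into the now flushed packet; add it to the empty buffer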
					if ( netflow_version == 5 ) 
						Add_v5_output_record(&master_record, &peer);
					else
						Add_v9_output_record(&master_record, &peer);
					cnt++;
				}

			} else if ( flow_record->type == ExtensionMapType ) {
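				// an extension map embedded in the data stream - register it so that
				// subsequent records referencing it can be expanded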
				extension_map_t *map = (extension_map_t *)flow_record;

				if ( Insert_Extension_Map(&extension_map_list, map) ) {
					// flush new map
					
				} // else map already known and flushed

			} else {
				fprintf(stderr, "Skip unknown record type %i\n", flow_record->type);
			}

			// Advance pointer by number of bytes for netflow record
			flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);	

		}
	} // while

	// flush any records still remaining in the output buffer
	if ( cnt ) {
		ret = FlushBuffer(confirm);

		if ( ret < 0 ) {
			perror("Error sending data");
		}

	} // if cnt 

	if ( rfd ) 
		close(rfd);

	close(peer.sockfd);

	return;

} // End of send_data