/* Read the next flow record from the file and fill the pre-prepared
 * structure lnf_rec.
 *
 * lnf_file - open file handle; flow_record / blk_record_remains track the
 *            read position inside the currently loaded data block
 * lnf_rec  - output record; master_record and extensions_arr are rewritten
 *
 * Returns LNF_OK on success, LNF_EOF at end of data, or an LNF_ERR_* code.
 */
int lnf_read(lnf_file_t *lnf_file, lnf_rec_t *lnf_rec) {
	int ret;
	uint32_t map_id;
	extension_map_t *map;
	int i;
	common_record_t *common_record_ptr;

begin:
	if (lnf_file->blk_record_remains == 0) {
		/* all records in the current block have been processed,
		 * load the next data block from the file */
		if (lnf_file->nffile) {
			ret = ReadBlock(lnf_file->nffile);
			lnf_file->processed_blocks++;
		} else {
			ret = NF_EOF;	/* the first file in the list */
		}

		switch (ret) {
			case NF_CORRUPT:
				return LNF_ERR_CORRUPT;
			case NF_ERROR:
				return LNF_ERR_READ;
			case NF_EOF:
				return LNF_EOF;
			default:
				/* successfully read block; ret is the number of bytes read */
				lnf_file->processed_bytes += ret;
		}

		/* block types to be skipped -> goto begin
		 * block types that are unknown -> return */
		switch (lnf_file->nffile->block_header->id) {
			case DATA_BLOCK_TYPE_1:
				/* old block type (nfdump 1.5) - skip the block.
				 * NOTE(review): an unreachable "return LNF_ERR_COMPAT15;"
				 * used to follow this goto; removed as dead code. */
				lnf_file->skipped_blocks++;
				goto begin;
			case DATA_BLOCK_TYPE_2:
				/* common record type - normally processed */
				break;
			case Large_BLOCK_Type:
				/* xstat blocks carry no flow records - skip */
				lnf_file->skipped_blocks++;
				goto begin;
			default:
				lnf_file->skipped_blocks++;
				return LNF_ERR_UNKBLOCK;
		}

		lnf_file->flow_record = lnf_file->nffile->buff_ptr;
		lnf_file->blk_record_remains = lnf_file->nffile->block_header->NumRecords;
	} /* reading block */

	/* there are still records in the block - continue with the next one */
	lnf_file->blk_record_remains--;

	switch (lnf_file->flow_record->type) {
		case ExporterRecordType:
		case SamplerRecordype:
		case ExporterInfoRecordType:
		case ExporterStatRecordType:
		case SamplerInfoRecordype:
			/* non-flow bookkeeping records - just skip */
			FLOW_RECORD_NEXT(lnf_file->flow_record);
			goto begin;

		case ExtensionMapType:
			map = (extension_map_t *)lnf_file->flow_record;
			Insert_Extension_Map(lnf_file->extension_map_list, map);
			FLOW_RECORD_NEXT(lnf_file->flow_record);
			goto begin;

		case CommonRecordV0Type:
			/* convert common record v0 into a scratch buffer that is
			 * allocated lazily and reused for the lifetime of the handle */
			if (lnf_file->v1convert_buffer == NULL) {
				/* 64 kB matches the maximum data block size
				 * (constant inherited from nfdump) */
				lnf_file->v1convert_buffer = malloc(65536);
				if (lnf_file->v1convert_buffer == NULL) {
					return LNF_ERR_NOMEM;
				}
			}
			ConvertCommonV0((void *)lnf_file->flow_record, (common_record_t *)lnf_file->v1convert_buffer);
			common_record_ptr = (common_record_t *)lnf_file->v1convert_buffer;
			break;

		case CommonRecordType:
			/* data record type - go ahead */
			common_record_ptr = lnf_file->flow_record;
			break;

		default:
			FLOW_RECORD_NEXT(lnf_file->flow_record);
			return LNF_ERR_UNKREC;
	}

	/* common_record_ptr now points at a CommonRecordType record.
	 * Read ext_map through common_record_ptr so converted v0 records use
	 * the converted header - consistent with nfdump's own reader. */
	map_id = common_record_ptr->ext_map;
	if (map_id >= MAX_EXTENSION_MAPS) {
		FLOW_RECORD_NEXT(lnf_file->flow_record);
		return LNF_ERR_EXTMAPB;
	}
	if (lnf_file->extension_map_list->slot[map_id] == NULL) {
		FLOW_RECORD_NEXT(lnf_file->flow_record);
		return LNF_ERR_EXTMAPM;
	}

	/* changed in nfdump 1.6.8 - exporter info argument added (NULL here) */
	ExpandRecord_v2(common_record_ptr, lnf_file->extension_map_list->slot[map_id],
			NULL, lnf_rec->master_record);

	/* update number of flows matching a given map */
	lnf_file->extension_map_list->slot[map_id]->ref_count++;

	/* move the read pointer past the processed netflow record */
	FLOW_RECORD_NEXT(lnf_file->flow_record);

	/* rebuild the bit array of extensions present in this record */
	bit_array_clear(lnf_rec->extensions_arr);
	i = 0;
	while (lnf_rec->master_record->map_ref->ex_id[i]) {
		__bit_array_set(lnf_rec->extensions_arr, lnf_rec->master_record->map_ref->ex_id[i], 1);
		i++;
	}

	return LNF_OK;
} /* end of lnf_read function */
/* Read every flow file in the configured file list, filter the records and
 * dispatch them to the selected output: flow/element statistics, sorted flow
 * cache, an output file (-w) or direct printing.
 *
 * wfile        - output file name, or NULL; only honoured when no stat/sort
 *                mode is active
 * *_stat/sort_flows - mutually shaping mode flags; any of them suppresses
 *                direct record printing
 * print_header - currently unused (kept for interface compatibility)
 * print_record - printer callback for matched records (may be NULLed here)
 * twin_start/twin_end - time window filter; 0 twin_start disables it
 * limitflows   - stop after this many matched flows (0 = unlimited)
 * tag, compress, do_xstat - passthrough output options
 *
 * Returns the aggregate stat_record of all matched flows.
 */
stat_record_t process_data(char *wfile, int element_stat, int flow_stat, int sort_flows,
	printer_t print_header, printer_t print_record, time_t twin_start, time_t twin_end,
	uint64_t limitflows, int tag, int compress, int do_xstat) {
common_record_t 	*flow_record, *record_ptr;
master_record_t		*master_record;
nffile_t			*nffile_w, *nffile_r;
xstat_t				*xstat;
stat_record_t 		stat_record;
int 				done, write_file;
/* BUGFIX: conversion buffer hoisted out of the block loop - it used to be
 * re-declared (and re-malloc'ed) per block, leaking 64 kB per block that
 * contained a v0 record; it is now allocated once and freed after the loop. */
char				*ConvertBuffer = NULL;
#ifdef COMPAT15
int	v1_map_done = 0;
#endif

	(void)print_header;	/* unused - kept for interface compatibility */

	// time window of all matched flows
	memset((void *)&stat_record, 0, sizeof(stat_record_t));
	stat_record.first_seen = 0x7fffffff;
	stat_record.msec_first = 999;

	// Do the logic first
	// do not print flows when doing any stats or sorting
	if ( sort_flows || flow_stat || element_stat ) {
		print_record = NULL;
	}

	// do not write flows to file, when doing any stats
	// -w may apply for flow_stats later
	write_file = !(sort_flows || flow_stat || element_stat) && wfile;
	nffile_r = NULL;
	nffile_w = NULL;
	xstat  	 = NULL;

	// Get the first file handle
	nffile_r = GetNextFile(NULL, twin_start, twin_end);
	if ( !nffile_r ) {
		LogError("GetNextFile() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		return stat_record;
	}
	if ( nffile_r == EMPTY_LIST ) {
		LogError("Empty file list. No files to process\n");
		return stat_record;
	}

	// preset time window of all processed flows to the stat record in first flow file
	t_first_flow = nffile_r->stat_record->first_seen;
	t_last_flow  = nffile_r->stat_record->last_seen;

	// store infos away for later use
	// although multiple files may be processed, it is assumed that all
	// have the same settings
	is_anonymized = IP_ANONYMIZED(nffile_r);
	strncpy(Ident, nffile_r->file_header->ident, IDENTLEN);
	Ident[IDENTLEN-1] = '\0';	// strncpy does not guarantee termination

	// prepare output file if requested
	if ( write_file ) {
		nffile_w = OpenNewFile(wfile, NULL, compress, IP_ANONYMIZED(nffile_r), NULL );
		if ( !nffile_w ) {
			if ( nffile_r ) {
				CloseFile(nffile_r);
				DisposeFile(nffile_r);
			}
			return stat_record;
		}
		if ( do_xstat ) {
			xstat = InitXStat(nffile_w);
			if ( !xstat ) {
				if ( nffile_r ) {
					CloseFile(nffile_r);
					DisposeFile(nffile_r);
				}
				return stat_record;
			}
		}
	}

	// setup Filter Engine to point to master_record, as any record read from file
	// is expanded into this record (done per map below)
	done = 0;
	while ( !done ) {
		int i, ret;

		// get next data block from file
		ret = ReadBlock(nffile_r);
		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT )
					LogError("Skip corrupt data file '%s'\n",GetCurrentFilename());
				else
					LogError("Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF: {
				nffile_t *next = GetNextFile(nffile_r, twin_start, twin_end);
				if ( next == EMPTY_LIST ) {
					done = 1;
				} else if ( next == NULL ) {
					done = 1;
					LogError("Unexpected end of file list\n");
				} else {
					// Update global time span window
					if ( next->stat_record->first_seen < t_first_flow )
						t_first_flow = next->stat_record->first_seen;
					if ( next->stat_record->last_seen > t_last_flow )
						t_last_flow = next->stat_record->last_seen;
					// continue with next file
				}
				continue;
				} break; // not really needed
			default:
				// successfully read block
				total_bytes += ret;
		}

#ifdef COMPAT15
		if ( nffile_r->block_header->id == DATA_BLOCK_TYPE_1 ) {
			common_record_v1_t *v1_record = (common_record_v1_t *)nffile_r->buff_ptr;
			// create an extension map for v1 blocks
			if ( v1_map_done == 0 ) {
				extension_map_t *map = malloc(sizeof(extension_map_t) + 2 * sizeof(uint16_t) );
				if ( ! map ) {
					LogError("malloc() allocation error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
					exit(255);
				}
				map->type   = ExtensionMapType;
				map->size   = sizeof(extension_map_t) + 2 * sizeof(uint16_t);
				if (( map->size & 0x3 ) != 0 ) {
					map->size += 4 - ( map->size & 0x3 );	// pad to 4-byte boundary
				}
				map->map_id = INIT_ID;
				map->ex_id[0]  = EX_IO_SNMP_2;
				map->ex_id[1]  = EX_AS_2;
				map->ex_id[2]  = 0;
				map->extension_size  = 0;
				map->extension_size += extension_descriptor[EX_IO_SNMP_2].size;
				map->extension_size += extension_descriptor[EX_AS_2].size;

				if ( Insert_Extension_Map(extension_map_list,map) && write_file ) {
					// flush new map
					AppendToBuffer(nffile_w, (void *)map, map->size);
				} // else map already known and flushed
				v1_map_done = 1;
			}

			// convert the records to v2 in place
			for ( i=0; i < nffile_r->block_header->NumRecords; i++ ) {
				common_record_t *v2_record = (common_record_t *)v1_record;
				Convert_v1_to_v2((void *)v1_record);
				// now we have a v2 record -> use size of v2_record->size
				v1_record = (common_record_v1_t *)((pointer_addr_t)v1_record + v2_record->size);
			}
			nffile_r->block_header->id = DATA_BLOCK_TYPE_2;
		}
#endif

		if ( nffile_r->block_header->id == Large_BLOCK_Type ) {
			// skip
			printf("Xstat block skipped ...\n");
			continue;
		}

		if ( nffile_r->block_header->id != DATA_BLOCK_TYPE_2 ) {
			if ( nffile_r->block_header->id == DATA_BLOCK_TYPE_1 ) {
				LogError("Can't process nfdump 1.5.x block type 1. Add --enable-compat15 to compile compatibility code. Skip block.\n");
			} else {
				LogError("Can't process block type %u. Skip block.\n", nffile_r->block_header->id);
			}
			skipped_blocks++;
			continue;
		}

		record_ptr = nffile_r->buff_ptr;
		for ( i=0; i < nffile_r->block_header->NumRecords; i++ ) {
			flow_record = record_ptr;
			switch ( record_ptr->type ) {
				case CommonRecordV0Type:
					// convert common record v0 into the reusable scratch buffer
					if ( !ConvertBuffer ) {
						ConvertBuffer = malloc(65536);
						if ( !ConvertBuffer ) {
							LogError("malloc() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
							exit(255);
						}
					}
					ConvertCommonV0((void *)record_ptr, (common_record_t *)ConvertBuffer);
					flow_record = (common_record_t *)ConvertBuffer;
					dbg_printf("Converted type %u to %u record\n", CommonRecordV0Type, CommonRecordType);
					/* fall through - the converted record is processed as CommonRecordType */
				case CommonRecordType: {
					int match;
					uint32_t map_id;
					generic_exporter_t *exp_info;

					// valid flow_record converted if needed
					map_id = flow_record->ext_map;
					exp_info = exporter_list[flow_record->exporter_sysid];

					if ( map_id >= MAX_EXTENSION_MAPS ) {
						LogError("Corrupt data file. Extension map id %u too big.\n", flow_record->ext_map);
						exit(255);
					}
					if ( extension_map_list->slot[map_id] == NULL ) {
						LogError("Corrupt data file. Missing extension map %u. Skip record.\n", flow_record->ext_map);
						record_ptr = (common_record_t *)((pointer_addr_t)record_ptr + record_ptr->size);
						continue;
					}

					total_flows++;
					master_record = &(extension_map_list->slot[map_id]->master_record);
					Engine->nfrecord = (uint64_t *)master_record;
					ExpandRecord_v2( flow_record, extension_map_list->slot[map_id],
						exp_info ? &(exp_info->info) : NULL, master_record);

					// Time based filter
					// if no time filter is given, the result is always true
					match  = twin_start && (master_record->first < twin_start || master_record->last > twin_end) ? 0 : 1;
					match &= limitflows ? stat_record.numflows < limitflows : 1;

					// filter netflow record with user supplied filter
					if ( match )
						match = (*Engine->FilterEngine)(Engine);

					if ( match == 0 ) { // record failed to pass all filters
						// increment pointer by number of bytes for netflow record
						record_ptr = (common_record_t *)((pointer_addr_t)record_ptr + record_ptr->size);
						// go to next record
						continue;
					}

					// Records passed filter -> continue record processing
					// Update statistics
					UpdateStat(&stat_record, master_record);

					// update number of flows matching a given map
					extension_map_list->slot[map_id]->ref_count++;

					if ( flow_stat ) {
						AddFlow(flow_record, master_record, extension_map_list->slot[map_id]);
						if ( element_stat ) {
							AddStat(flow_record, master_record);
						}
					} else if ( element_stat ) {
						AddStat(flow_record, master_record);
					} else if ( sort_flows ) {
						InsertFlow(flow_record, master_record, extension_map_list->slot[map_id]);
					} else {
						if ( write_file ) {
							AppendToBuffer(nffile_w, (void *)flow_record, flow_record->size);
							if ( xstat )
								UpdateXStat(xstat, master_record);
						} else if ( print_record ) {
							char *string;
							// if we need to print out this record
							print_record(master_record, &string, tag);
							if ( string ) {
								if ( limitflows ) {
									if ( (stat_record.numflows <= limitflows) )
										printf("%s\n", string);
								} else
									printf("%s\n", string);
							}
						} else {
							// mutually exclusive conditions should prevent executing this code
							// this is buggy!
							printf("Bug! - this code should never get executed in file %s line %d\n", __FILE__, __LINE__);
						}
					} // sort_flows - else
					} break;
				case ExtensionMapType: {
					extension_map_t *map = (extension_map_t *)record_ptr;
					if ( Insert_Extension_Map(extension_map_list, map) && write_file ) {
						// flush new map
						AppendToBuffer(nffile_w, (void *)map, map->size);
					} // else map already known and flushed
					} break;
				case ExporterRecordType:
				case SamplerRecordype:
					// Silently skip exporter records
					break;
				case ExporterInfoRecordType: {
					int ret = AddExporterInfo((exporter_info_record_t *)record_ptr);
					if ( ret != 0 ) {
						if ( write_file && ret == 1 )
							AppendToBuffer(nffile_w, (void *)record_ptr, record_ptr->size);
					} else {
						LogError("Failed to add Exporter Record\n");
					}
					} break;
				case ExporterStatRecordType:
					AddExporterStat((exporter_stats_record_t *)record_ptr);
					break;
				case SamplerInfoRecordype: {
					int ret = AddSamplerInfo((sampler_info_record_t *)record_ptr);
					if ( ret != 0 ) {
						// BUGFIX: write record_ptr, not flow_record - flow_record may
						// still point at the conversion buffer of an earlier v0 record
						// (the ExporterInfoRecordType case already does this correctly)
						if ( write_file && ret == 1 )
							AppendToBuffer(nffile_w, (void *)record_ptr, record_ptr->size);
					} else {
						LogError("Failed to add Sampler Record\n");
					}
					} break;
				default: {
					LogError("Skip unknown record type %i\n", record_ptr->type);
				}
			}

			// Advance pointer by number of bytes for netflow record
			record_ptr = (common_record_t *)((pointer_addr_t)record_ptr + record_ptr->size);
		} // for all records

		// check if we are done, due to -c option
		if ( limitflows )
			done = stat_record.numflows >= limitflows;

	} // while

	free(ConvertBuffer);	// free(NULL) is a no-op

	CloseFile(nffile_r);

	// flush output file
	if ( write_file ) {
		// flush current buffer to disc
		if ( nffile_w->block_header->NumRecords ) {
			if ( WriteBlock(nffile_w) <= 0 ) {
				LogError("Failed to write output buffer to disk: '%s'" , strerror(errno));
			}
		}
		if ( xstat ) {
			if ( WriteExtraBlock(nffile_w, xstat->block_header ) <= 0 ) {
				LogError("Failed to write xstat buffer to disk: '%s'" , strerror(errno));
			}
		}

		/* Copy stat info and close file (redundant inner write_file check removed) */
		memcpy((void *)nffile_w->stat_record, (void *)&stat_record, sizeof(stat_record_t));
		CloseUpdateFile(nffile_w, nffile_r->file_header->ident );
		nffile_w = DisposeFile(nffile_w);
	} // else stdout

	PackExtensionMapList(extension_map_list);

	DisposeFile(nffile_r);
	return stat_record;

} // End of process_data