// Persist the current symbol map to its per-game file before the host shuts down.
void WindowsHost::PrepareShutdown() {
	const auto mapFile = SymbolMapFilename(GetCurrentFilename());
	symbolMap.SaveSymbolMap(mapFile.c_str());
}
// Main processing loop: reads every data block from the chain of input files,
// expands each common record into a master_record and runs it through the
// time-window check, the flow limit and the user-supplied filter engine.
// Matching records are dispatched to flow statistics, element statistics, the
// sort list, an output file or the record printer (mutually exclusive modes
// selected via the flag arguments). Returns the accumulated stat_record over
// all matched flows.
// NOTE(review): print_header is not referenced in this body - presumably kept
// for signature compatibility; confirm before removing.
stat_record_t process_data(char *wfile, int element_stat, int flow_stat, int sort_flows, printer_t print_header, printer_t print_record, time_t twin_start, time_t twin_end, uint64_t limitflows, int tag, int compress, int do_xstat) {
common_record_t *flow_record;
master_record_t *master_record;
nffile_t *nffile_w, *nffile_r;
xstat_t *xstat;
stat_record_t stat_record;
int done, write_file;

#ifdef COMPAT15
int v1_map_done = 0;	// the synthetic extension map for v1 records is created only once
#endif

	// time window of all matched flows
	// preset first_seen/msec_first to maximum values so the first matched
	// record always pulls them down in UpdateStat()
	memset((void *)&stat_record, 0, sizeof(stat_record_t));
	stat_record.first_seen = 0x7fffffff;
	stat_record.msec_first = 999;

	// Do the logic first
	// print flows later, when all records are processed and sorted
	// flow limits apply at that time
	if ( sort_flows ) {
		print_record = NULL;
		limitflows = 0;
	}

	// do not print flows when doing any stats
	if ( flow_stat || element_stat ) {
		print_record = NULL;
		limitflows = 0;
	}

	// do not write flows to file, when doing any stats
	// -w may apply for flow_stats later
	write_file = !(sort_flows || flow_stat || element_stat) && wfile;
	nffile_r = NULL;
	nffile_w = NULL;
	xstat = NULL;

	// Get the first file handle
	nffile_r = GetNextFile(NULL, twin_start, twin_end);
	if ( !nffile_r ) {
		LogError("GetNextFile() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		return stat_record;
	}
	if ( nffile_r == EMPTY_LIST ) {
		LogError("Empty file list. No files to process\n");
		return stat_record;
	}

	// preset time window of all processed flows to the stat record in first flow file
	t_first_flow = nffile_r->stat_record->first_seen;
	t_last_flow = nffile_r->stat_record->last_seen;

	// store infos away for later use
	// although multiple files may be processed, it is assumed that all
	// have the same settings
	is_anonymized = IP_ANONYMIZED(nffile_r);
	strncpy(Ident, nffile_r->file_header->ident, IDENTLEN);
	Ident[IDENTLEN-1] = '\0';

	// prepare output file if requested
	if ( write_file ) {
		nffile_w = OpenNewFile(wfile, NULL, compress, IP_ANONYMIZED(nffile_r), NULL );
		if ( !nffile_w ) {
			// OpenNewFile already reported the error; release the read handle
			if ( nffile_r ) {
				CloseFile(nffile_r);
				DisposeFile(nffile_r);
			}
			return stat_record;
		}
		if ( do_xstat ) {
			xstat = InitXStat(nffile_w);
			if ( !xstat ) {
				if ( nffile_r ) {
					CloseFile(nffile_r);
					DisposeFile(nffile_r);
				}
				return stat_record;
			}
		}
	}

	// setup Filter Engine to point to master_record, as any record read from file
	// is expanded into this record
	// Engine->nfrecord = (uint64_t *)master_record;

	done = 0;
	while ( !done ) {
	int i, ret;

		// get next data block from file
		ret = ReadBlock(nffile_r);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT )
					LogError("Skip corrupt data file '%s'\n",GetCurrentFilename());
				else
					LogError("Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF: {
				nffile_t *next = GetNextFile(nffile_r, twin_start, twin_end);
				if ( next == EMPTY_LIST ) {
					done = 1;
				} else if ( next == NULL ) {
					done = 1;
					LogError("Unexpected end of file list\n");
				} else {
					// Update global time span window over all processed files
					if ( next->stat_record->first_seen < t_first_flow )
						t_first_flow = next->stat_record->first_seen;
					if ( next->stat_record->last_seen > t_last_flow )
						t_last_flow = next->stat_record->last_seen;
					// continue with next file
				}
				continue;
				} break; // not really needed
			default:
				// successfully read block
				total_bytes += ret;
		}

#ifdef COMPAT15
		if ( nffile_r->block_header->id == DATA_BLOCK_TYPE_1 ) {
			common_record_v1_t *v1_record = (common_record_v1_t *)nffile_r->buff_ptr;
			// create an extension map for v1 blocks
			if ( v1_map_done == 0 ) {
				extension_map_t *map = malloc(sizeof(extension_map_t) + 2 * sizeof(uint16_t) );
				if ( ! map ) {
					LogError("malloc() allocation error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
					exit(255);
				}
				map->type = ExtensionMapType;
				map->size = sizeof(extension_map_t) + 2 * sizeof(uint16_t);
				// align map size to a 4 byte boundary
				if (( map->size & 0x3 ) != 0 ) {
					map->size += 4 - ( map->size & 0x3 );
				}
				map->map_id = INIT_ID;
				map->ex_id[0] = EX_IO_SNMP_2;
				map->ex_id[1] = EX_AS_2;
				map->ex_id[2] = 0;	// terminator
				map->extension_size = 0;
				map->extension_size += extension_descriptor[EX_IO_SNMP_2].size;
				map->extension_size += extension_descriptor[EX_AS_2].size;
				if ( Insert_Extension_Map(extension_map_list,map) && write_file ) {
					// flush new map
					AppendToBuffer(nffile_w, (void *)map, map->size);
				} // else map already known and flushed
				v1_map_done = 1;
			}
			// convert the records to v2 in place
			for ( i=0; i < nffile_r->block_header->NumRecords; i++ ) {
				common_record_t *v2_record = (common_record_t *)v1_record;
				Convert_v1_to_v2((void *)v1_record);
				// now we have a v2 record -> use size of v2_record->size
				v1_record = (common_record_v1_t *)((pointer_addr_t)v1_record + v2_record->size);
			}
			nffile_r->block_header->id = DATA_BLOCK_TYPE_2;
		}
#endif

		if ( nffile_r->block_header->id == Large_BLOCK_Type ) {
			// skip
			printf("Xstat block skipped ...\n");
			continue;
		}

		if ( nffile_r->block_header->id != DATA_BLOCK_TYPE_2 ) {
			if ( nffile_r->block_header->id == DATA_BLOCK_TYPE_1 ) {
				LogError("Can't process nfdump 1.5.x block type 1. Add --enable-compat15 to compile compatibility code. Skip block.\n");
			} else {
				LogError("Can't process block type %u. Skip block.\n", nffile_r->block_header->id);
			}
			skipped_blocks++;
			continue;
		}

		// sequentially walk all records of the block
		flow_record = nffile_r->buff_ptr;
		for ( i=0; i < nffile_r->block_header->NumRecords; i++ ) {
			switch ( flow_record->type ) {
				case CommonRecordType: {
					int match;
					uint32_t map_id = flow_record->ext_map;
					generic_exporter_t *exp_info = exporter_list[flow_record->exporter_sysid];
					if ( map_id >= MAX_EXTENSION_MAPS ) {
						LogError("Corrupt data file. Extension map id %u too big.\n", flow_record->ext_map);
						exit(255);
					}
					if ( extension_map_list->slot[map_id] == NULL ) {
						LogError("Corrupt data file. Missing extension map %u. Skip record.\n", flow_record->ext_map);
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);
						continue;
					}

					total_flows++;
					master_record = &(extension_map_list->slot[map_id]->master_record);
					Engine->nfrecord = (uint64_t *)master_record;
					ExpandRecord_v2( flow_record, extension_map_list->slot[map_id], exp_info ? &(exp_info->info) : NULL, master_record);

					// Time based filter
					// if no time filter is given, the result is always true
					match = twin_start && (master_record->first < twin_start || master_record->last > twin_end) ? 0 : 1;
					// respect the -c flow limit while still counting
					match &= limitflows ? stat_record.numflows < limitflows : 1;

					// filter netflow record with user supplied filter
					if ( match )
						match = (*Engine->FilterEngine)(Engine);

					if ( match == 0 ) { // record failed to pass all filters
						// increment pointer by number of bytes for netflow record
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);
						// go to next record
						continue;
					}

					// Records passed filter -> continue record processing
					// Update statistics
					UpdateStat(&stat_record, master_record);

					// update number of flows matching a given map
					extension_map_list->slot[map_id]->ref_count++;

					// dispatch to exactly one of the mutually exclusive modes
					if ( flow_stat ) {
						AddFlow(flow_record, master_record, extension_map_list->slot[map_id]);
						if ( element_stat ) {
							AddStat(flow_record, master_record);
						}
					} else if ( element_stat ) {
						AddStat(flow_record, master_record);
					} else if ( sort_flows ) {
						InsertFlow(flow_record, master_record, extension_map_list->slot[map_id]);
					} else {
						if ( write_file ) {
							AppendToBuffer(nffile_w, (void *)flow_record, flow_record->size);
							if ( xstat )
								UpdateXStat(xstat, master_record);
						} else if ( print_record ) {
							char *string;
							// if we need to print out this record
							print_record(master_record, &string, tag);
							if ( string ) {
								if ( limitflows ) {
									if ( (stat_record.numflows <= limitflows) )
										printf("%s\n", string);
								} else
									printf("%s\n", string);
							}
						} else {
							// mutually exclusive conditions should prevent executing this code
							// this is buggy!
							printf("Bug! - this code should never get executed in file %s line %d\n", __FILE__, __LINE__);
						}
					} // sort_flows - else
					} break;
				case ExtensionMapType: {
					extension_map_t *map = (extension_map_t *)flow_record;
					if ( Insert_Extension_Map(extension_map_list, map) && write_file ) {
						// flush new map
						AppendToBuffer(nffile_w, (void *)map, map->size);
					} // else map already known and flushed
					} break;
				case ExporterRecordType:
				case SamplerRecordype:
						// Silently skip exporter records
					break;
				case ExporterInfoRecordType: {
					int ret = AddExporterInfo((exporter_info_record_t *)flow_record);
					if ( ret != 0 ) {
						// ret == 1: new exporter -> propagate it into the output file
						if ( write_file && ret == 1 )
							AppendToBuffer(nffile_w, (void *)flow_record, flow_record->size);
					} else {
						LogError("Failed to add Exporter Record\n");
					}
					} break;
				case ExporterStatRecordType:
					AddExporterStat((exporter_stats_record_t *)flow_record);
					break;
				case SamplerInfoRecordype: {
					int ret = AddSamplerInfo((sampler_info_record_t *)flow_record);
					if ( ret != 0 ) {
						if ( write_file && ret == 1 )
							AppendToBuffer(nffile_w, (void *)flow_record, flow_record->size);
					} else {
						LogError("Failed to add Sampler Record\n");
					}
					} break;
				default: {
					LogError("Skip unknown record type %i\n", flow_record->type);
				}
			}

			// Advance pointer by number of bytes for netflow record
			flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);

		} // for all records

		// check if we are done, due to -c option
		if ( limitflows )
			done = stat_record.numflows >= limitflows;

	} // while

	CloseFile(nffile_r);

	// flush output file
	if ( write_file ) {
		// flush current buffer to disc
		if ( nffile_w->block_header->NumRecords ) {
			if ( WriteBlock(nffile_w) <= 0 ) {
				LogError("Failed to write output buffer to disk: '%s'" , strerror(errno));
			}
		}

		if ( xstat ) {
			if ( WriteExtraBlock(nffile_w, xstat->block_header ) <= 0 ) {
				LogError("Failed to write xstat buffer to disk: '%s'" , strerror(errno));
			}
		}

		/* Stat info */
		// NOTE(review): the inner write_file check is redundant (already inside
		// the outer one) but harmless.
		if ( write_file ) {
			/* Copy stat info and close file */
			memcpy((void *)nffile_w->stat_record, (void *)&stat_record, sizeof(stat_record_t));
			CloseUpdateFile(nffile_w, nffile_r->file_header->ident );
			nffile_w = DisposeFile(nffile_w);
		} // else stdout
	}

	PackExtensionMapList(extension_map_list);

	DisposeFile(nffile_r);
	return stat_record;

} // End of process_data
// Try to load the symbol map belonging to the currently loaded file.
// Returns true on success.
bool WindowsHost::AttemptLoadSymbolMap() {
	const auto mapFile = SymbolMapFilename(GetCurrentFilename());
	return symbolMap.LoadSymbolMap(mapFile.c_str());
}
void CollectDeadl(int complain) { FILE *p_f; char buf[LINEMAX], buf2[LINEMAX]; struct p_i_object *pip; struct p_i_l_type *pil=NULL; int i, j, np; struct place_object *plp; char string[300]; strcpy(buf,GetCurrentFilename()); strcpy(buf2,GetCurrentFilename()); strcat(buf,".mdead"); strcat(buf2,".net"); { struct stat stb, stb2; if ((stat(buf, &stb) < 0) || (stat(buf2, &stb2) < 0) || (stb2.st_mtime > stb.st_mtime)) { if (complain) { sprintf(string,"No up-to-date Deadlocks for net %s", GetCurrentFilename()); ShowInfoDialog(string,frame_w); } return; } } if ((p_f = fopen(buf, "r")) == NULL) { if (complain) { sprintf(string,"Can't open file %s for read", buf); ShowErrorDialog(string,frame_w); } return; } ClearDeadl(); fscanf(p_f, "%d\n", &num_pinv); p_invars = NULL; if (num_pinv <= 0) return; for (i = num_pinv; i-- > 0;) { if (p_invars == NULL) p_invars = pil = (struct p_i_l_type *) emalloc(sizeof(struct p_i_l_type)); else { pil->next = (struct p_i_l_type *) emalloc(sizeof(struct p_i_l_type)); pil = pil->next; } fscanf(p_f, "%d", &j); pil->i_l = pip = NULL; for (; j-- > 0;) { if (pip == NULL) pil->i_l = pip = (struct p_i_object *) emalloc(sizeof(struct p_i_object)); else { pip->next = (struct p_i_object *) emalloc(sizeof(struct p_i_object)); pip = pip->next; } fscanf(p_f, "%d", &np); for (plp = netobj->places; --np > 0; plp = plp->next); pip->p_p = plp; } pip->next = NULL; } pil->next = p_invars; (void) fclose(p_f); }
// Imports an MP3 buffer and produces a USoundWave asset containing the decoded
// 16-bit PCM data. Returns the created (or reimported) sound, or nullptr when
// the decoder library is unavailable or the stream has an unsupported format.
UObject* UMP3SoundFactory::FactoryCreateBinary(UClass* Class, UObject* InParent, FName Name, EObjectFlags Flags, UObject* Context, const TCHAR* Type, const uint8*& Buffer, const uint8* BufferEnd, FFeedbackContext* Warn)
{
	FEditorDelegates::OnAssetPreImport.Broadcast(this, Class, InParent, Name, Type);

	// mpg123 is loaded dynamically; bail out if the DLL entry point was not resolved
	if (mpg123_init == nullptr)
	{
		Warn->Logf(ELogVerbosity::Error, TEXT("Function pointer was null. Was %s found?"), DLL_NAME);
		FEditorDelegates::OnAssetPostImport.Broadcast(this, nullptr);
		return nullptr;
	}

	// if the sound already exists, remember the user settings
	USoundWave* ExistingSound = FindObject<USoundWave>(InParent, *Name.ToString());

	// stop playing the file, if it already exists (e.g. reimport)
	TArray<UAudioComponent*> ComponentsToRestart;
	FAudioDeviceManager* AudioDeviceManager = GEngine->GetAudioDeviceManager();
	if (AudioDeviceManager && ExistingSound)
	{
		AudioDeviceManager->StopSoundsUsingResource(ExistingSound, ComponentsToRestart);
	}

	// Read the mp3 header and make sure we have valid data
	UMP3Decoder Decoder(Warn);
	Decoder.Init(Buffer, BufferEnd);

	if (Decoder.BitsPerSample != 16)
	{
		Warn->Logf(ELogVerbosity::Error, TEXT("Unreal only supports 16bit WAVE data (%s)."), *Name.ToString());
		FEditorDelegates::OnAssetPostImport.Broadcast(this, nullptr);
		return nullptr;
	}

	if (Decoder.Channels != 1 && Decoder.Channels != 2)
	{
		Warn->Logf(ELogVerbosity::Error, TEXT("Unreal only supports 1-2 channel WAVE data (Mono/Stereo). (%s)."), *Name.ToString());
		FEditorDelegates::OnAssetPostImport.Broadcast(this, nullptr);
		return nullptr;
	}

	// on reimport, reuse settings, wipe data. otherwise create new. (UE4 WAVE import has some more checks, maybe implement, too?)
	USoundWave* Sound;
	if (ExistingSound && bMP3SoundFactoryIsReimport)
	{
		Sound = ExistingSound;
		// drop the old decoded/compressed payload before refilling it
		Sound->FreeResources();
		Sound->InvalidateCompressedData();
	}
	else
	{
		Sound = NewObject<USoundWave>(InParent, Name, Flags);
	}

	// remember the source path for future reimports
	// NOTE(review): AssetImportData is assumed non-null here - confirm the
	// engine guarantees it for USoundWave.
	Sound->AssetImportData->Update(GetCurrentFilename());

	TArray<uint8> RawWavBuffer;
	// rough pre-allocation: decoded PCM is much larger than the mp3 payload
	RawWavBuffer.Reserve((BufferEnd - Buffer) * 16);

	// actual decoding
	Decoder.Decode(RawWavBuffer);

	// copy the decoded PCM into the asset's bulk data
	Sound->RawData.Lock(LOCK_READ_WRITE);
	void* LockedData = Sound->RawData.Realloc(RawWavBuffer.Num() * RawWavBuffer.GetTypeSize());
	FMemory::Memcpy(LockedData, RawWavBuffer.GetData(), RawWavBuffer.Num() * RawWavBuffer.GetTypeSize());
	Sound->RawData.Unlock();
	RawWavBuffer.Empty();

	// Calculate duration.
	Sound->Duration = (float)Decoder.SizeInBytes / Decoder.Samplerate / Decoder.Channels / (BITS_PER_SAMPLE / 8);
	Sound->SampleRate = Decoder.Samplerate;
	Sound->NumChannels = Decoder.Channels;
	Sound->RawPCMDataSize = Decoder.SizeInBytes;

	FEditorDelegates::OnAssetPostImport.Broadcast(this, Sound);

	if (ExistingSound)
	{
		// notify the editor that the reimported asset changed
		Sound->PostEditChange();
	}

	// restart any audio components that were stopped for the reimport
	for (int32 ComponentIndex = 0; ComponentIndex < ComponentsToRestart.Num(); ++ComponentIndex)
	{
		ComponentsToRestart[ComponentIndex]->Play();
	}

	return Sound;
}
// Try to load the symbol map belonging to the currently loaded file.
// Returns true on success.
bool QtHost::AttemptLoadSymbolMap() {
	const auto mapFile = SymbolMapFilename(GetCurrentFilename()).toStdString();
	return symbolMap.LoadSymbolMap(mapFile.c_str());
}
// Persist the current symbol map to its per-game file before the host shuts down.
void QtHost::PrepareShutdown() {
	const auto mapFile = SymbolMapFilename(GetCurrentFilename()).toStdString();
	symbolMap.SaveSymbolMap(mapFile.c_str());
}
// Creates a new FaceFX actor asset by delegating to CreateNew, wiring in the
// pre-deletion compilation callback and the source filename of this import.
UObject* UFaceFXActorFactory::FactoryCreateNew(UClass* InClass, UObject* InParent, FName InName, EObjectFlags Flags, UObject* Context, FFeedbackContext* Warn)
{
	const FCompilationBeforeDeletionDelegate DeletionDelegate =
		FCompilationBeforeDeletionDelegate::CreateStatic(&UFaceFXActorFactory::OnFxActorCompilationBeforeDelete);
	return CreateNew(InClass, InParent, InName, Flags, DeletionDelegate, GetCurrentFilename());
}
static data_row *process(char *filter) { data_block_header_t block_header; master_record_t master_record; common_record_t *flow_record, *in_buff; int i, rfd, done, ret; uint32_t buffer_size; data_row * port_table; char *string; uint64_t total_bytes; rfd = GetNextFile(0, 0, 0, NULL); if ( rfd < 0 ) { if ( errno ) perror("Can't open file for reading"); return NULL; } // prepare read and send buffer buffer_size = BUFFSIZE; in_buff = (common_record_t *) malloc(buffer_size); if ( !in_buff ) { perror("Memory allocation error"); close(rfd); return NULL; } port_table = (data_row *)calloc(65536, sizeof(data_row)); if ( !port_table) { perror("Memory allocation error"); close(rfd); return NULL; } memset((void *)port_table, 0, 65536 * sizeof(data_row)); // setup Filter Engine to point to master_record, as any record read from file // is expanded into this record Engine->nfrecord = (uint64_t *)&master_record; done = 0; while ( !done ) { // get next data block from file ret = ReadBlock(rfd, &block_header, (void *)in_buff, &string); switch (ret) { case NF_CORRUPT: case NF_ERROR: if ( ret == NF_CORRUPT ) fprintf(stderr, "Skip corrupt data file '%s': '%s'\n",GetCurrentFilename(), string); else fprintf(stderr, "Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) ); // fall through - get next file in chain case NF_EOF: rfd = GetNextFile(rfd, 0, 0, NULL); if ( rfd < 0 ) { if ( rfd == NF_ERROR ) fprintf(stderr, "Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) ); // rfd == EMPTY_LIST done = 1; } // else continue with next file continue; break; // not really needed default: // successfully read block total_bytes += ret; } if ( block_header.id != DATA_BLOCK_TYPE_2 ) { fprintf(stderr, "Can't process block type %u\n", block_header.id); continue; } flow_record = in_buff; for ( i=0; i < block_header.NumRecords; i++ ) { char string[1024]; int ret; if ( flow_record->type == CommonRecordType ) { if ( extension_map_list.slot[flow_record->ext_map] == NULL ) { 
snprintf(string, 1024, "Corrupt data file! No such extension map id: %u. Skip record", flow_record->ext_map ); string[1023] = '\0'; } else { ExpandRecord_v2( flow_record, extension_map_list.slot[flow_record->ext_map], &master_record); ret = (*Engine->FilterEngine)(Engine); if ( ret == 0 ) { // record failed to pass the filter // increment pointer by number of bytes for netflow record flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size); // go to next record continue; } // Add to stat record if ( master_record.prot == 6 ) { port_table[master_record.dstport].proto[tcp].type[flows]++; port_table[master_record.dstport].proto[tcp].type[packets] += master_record.dPkts; port_table[master_record.dstport].proto[tcp].type[bytes] += master_record.dOctets; } else if ( master_record.prot == 17 ) { port_table[master_record.dstport].proto[udp].type[flows]++; port_table[master_record.dstport].proto[udp].type[packets] += master_record.dPkts; port_table[master_record.dstport].proto[udp].type[bytes] += master_record.dOctets; } } } else if ( flow_record->type == ExtensionMapType ) { extension_map_t *map = (extension_map_t *)flow_record; if ( Insert_Extension_Map(&extension_map_list, map) ) { // flush new map } // else map already known and flushed } else { fprintf(stderr, "Skip unknown record type %i\n", flow_record->type); } // Advance pointer by number of bytes for netflow record flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size); } } // while return port_table; } // End of process
// Example/skeleton processing loop: reads all blocks from the input file
// chain, expands each common record into master_record and prints it via
// print_record(). Extension maps and exporter records are tracked; all other
// record types are skipped with a diagnostic.
static void process_data(void) {
master_record_t	master_record;
common_record_t *flow_record;
nffile_t		*nffile;
int 		i, done, ret;
#ifdef COMPAT15
int	v1_map_done = 0;	// the synthetic extension map for v1 records is created only once
#endif

	// Get the first file handle
	nffile = GetNextFile(NULL, 0, 0);
	if ( !nffile ) {
		LogError("GetNextFile() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		return;
	}
	if ( nffile == EMPTY_LIST ) {
		LogError("Empty file list. No files to process\n");
		return;
	}

	done = 0;
	while ( !done ) {
		// get next data block from file
		ret = ReadBlock(nffile);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT )
					fprintf(stderr, "Skip corrupt data file '%s'\n",GetCurrentFilename());
				else
					fprintf(stderr, "Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF: {
				nffile_t *next = GetNextFile(nffile, 0, 0);
				if ( next == EMPTY_LIST ) {
					done = 1;
				}
				if ( next == NULL ) {
					done = 1;
					LogError("Unexpected end of file list\n");
				}
				// else continue with next file
				continue;
				} break; // not really needed
		}

#ifdef COMPAT15
		if ( nffile->block_header->id == DATA_BLOCK_TYPE_1 ) {
			common_record_v1_t *v1_record = (common_record_v1_t *)nffile->buff_ptr;
			// create an extension map for v1 blocks
			if ( v1_map_done == 0 ) {
				extension_map_t *map = malloc(sizeof(extension_map_t) + 2 * sizeof(uint16_t) );
				if ( ! map ) {
					LogError("malloc() allocation error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
					exit(255);
				}
				map->type = ExtensionMapType;
				map->size = sizeof(extension_map_t) + 2 * sizeof(uint16_t);
				// align map size to a 4 byte boundary
				if (( map->size & 0x3 ) != 0 ) {
					map->size += 4 - ( map->size & 0x3 );
				}
				map->map_id = INIT_ID;
				map->ex_id[0] = EX_IO_SNMP_2;
				map->ex_id[1] = EX_AS_2;
				map->ex_id[2] = 0;	// terminator
				map->extension_size = 0;
				Insert_Extension_Map(&extension_map_list, map);
				v1_map_done = 1;
			}
			// convert the records to v2 in place
			for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
				common_record_t *v2_record = (common_record_t *)v1_record;
				Convert_v1_to_v2((void *)v1_record);
				// now we have a v2 record -> use size of v2_record->size
				v1_record = (common_record_v1_t *)((pointer_addr_t)v1_record + v2_record->size);
			}
			nffile->block_header->id = DATA_BLOCK_TYPE_2;
		}
#endif

		if ( nffile->block_header->id == Large_BLOCK_Type ) {
			// skip
			continue;
		}

		if ( nffile->block_header->id != DATA_BLOCK_TYPE_2 ) {
			fprintf(stderr, "Can't process block type %u. Skip block.\n", nffile->block_header->id);
			continue;
		}

		// sequentially walk all records of the block
		flow_record = nffile->buff_ptr;
		for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
			char string[1024];

			switch ( flow_record->type ) {
				case CommonRecordType: {
					uint32_t map_id = flow_record->ext_map;
					generic_exporter_t *exp_info = exporter_list[flow_record->exporter_sysid];
					if ( extension_map_list.slot[map_id] == NULL ) {
						snprintf(string, 1024, "Corrupt data file! No such extension map id: %u. Skip record", flow_record->ext_map );
						string[1023] = '\0';
					} else {
						ExpandRecord_v2( flow_record, extension_map_list.slot[flow_record->ext_map], exp_info ? &(exp_info->info) : NULL, &master_record);

						// update number of flows matching a given map
						extension_map_list.slot[map_id]->ref_count++;

						/*
						 * insert here your calls to your processing routine
						 * master_record now contains the next flow record as specified in nffile.c
						 * for example you can print each record:
						 *
						 */
						print_record(&master_record, string);
					}
					printf("%s\n", string);
					} break;
				case ExtensionMapType: {
					extension_map_t *map = (extension_map_t *)flow_record;

					if ( Insert_Extension_Map(&extension_map_list, map) ) {
						// flush new map
					} // else map already known and flushed
					} break;
				case ExporterRecordType:
				case SamplerRecordype:
						// Silently skip exporter records
					break;
				default: {
					fprintf(stderr, "Skip unknown record type %i\n", flow_record->type);
				}
			}

			// Advance pointer by number of bytes for netflow record
			flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);

		} // for all records

	} // while

	CloseFile(nffile);
	DisposeFile(nffile);

	PackExtensionMapList(&extension_map_list);

} // End of process_data
// Profile fan-out loop: reads all blocks from the input file chain and replays
// every common record against the filter engine of each of the num_channels
// profile channels. Matching records update the channel statistics and are
// appended to the channel's output file; shadow profiles (nffile == NULL)
// collect statistics only.
// NOTE(review): tslot and do_xstat are not referenced in this body - confirm
// whether they are still needed before changing the signature.
static void process_data(profile_channel_info_t *channels, unsigned int num_channels, time_t tslot, int do_xstat) {
common_record_t *flow_record;
nffile_t		*nffile;
FilterEngine_data_t *engine;
int i, j, done, ret ;
#ifdef COMPAT15
int	v1_map_done = 0;	// the synthetic extension map for v1 records is created only once
#endif

	// Get the first file handle
	nffile = GetNextFile(NULL, 0, 0);
	if ( !nffile ) {
		LogError("GetNextFile() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		return;
	}
	if ( nffile == EMPTY_LIST ) {
		LogError("Empty file list. No files to process\n");
		return;
	}

	// store infos away for later use
	// although multiple files may be processed, it is assumed that all
	// have the same settings
	is_anonymized = IP_ANONYMIZED(nffile);
	strncpy(Ident, nffile->file_header->ident, IDENTLEN);
	Ident[IDENTLEN-1] = '\0';

	done = 0;
	while ( !done ) {
		// get next data block from file
		ret = ReadBlock(nffile);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT )
					LogError("Skip corrupt data file '%s'\n",GetCurrentFilename());
				else
					LogError("Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF: {
				nffile_t *next = GetNextFile(nffile, 0, 0);
				if ( next == EMPTY_LIST ) {
					done = 1;
				}
				if ( next == NULL ) {
					done = 1;
					LogError("Unexpected end of file list\n");
				}
				// else continue with next file
				continue;
				} break; // not really needed
		}

#ifdef COMPAT15
		if ( nffile->block_header->id == DATA_BLOCK_TYPE_1 ) {
			common_record_v1_t *v1_record = (common_record_v1_t *)nffile->buff_ptr;
			// create an extension map for v1 blocks
			if ( v1_map_done == 0 ) {
				extension_map_t *map = malloc(sizeof(extension_map_t) + 2 * sizeof(uint16_t) );
				if ( ! map ) {
					LogError("malloc() allocation error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
					exit(255);
				}
				map->type = ExtensionMapType;
				map->size = sizeof(extension_map_t) + 2 * sizeof(uint16_t);
				// align map size to a 4 byte boundary
				if (( map->size & 0x3 ) != 0 ) {
					map->size += 4 - ( map->size & 0x3 );
				}
				map->map_id = INIT_ID;
				map->ex_id[0] = EX_IO_SNMP_2;
				map->ex_id[1] = EX_AS_2;
				map->ex_id[2] = 0;	// terminator
				map->extension_size = 0;
				map->extension_size += extension_descriptor[EX_IO_SNMP_2].size;
				map->extension_size += extension_descriptor[EX_AS_2].size;
				if ( Insert_Extension_Map(extension_map_list, map) ) {
					int j;
					// new map -> flush it to every channel that writes a file
					for ( j=0; j < num_channels; j++ ) {
						if ( channels[j].nffile != NULL) {
							// flush new map
							AppendToBuffer(channels[j].nffile, (void *)map, map->size);
						}
					}
				} // else map already known and flushed
				v1_map_done = 1;
			}
			// convert the records to v2 in place
			for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
				common_record_t *v2_record = (common_record_t *)v1_record;
				Convert_v1_to_v2((void *)v1_record);
				// now we have a v2 record -> use size of v2_record->size
				v1_record = (common_record_v1_t *)((pointer_addr_t)v1_record + v2_record->size);
			}
			nffile->block_header->id = DATA_BLOCK_TYPE_2;
		}
#endif

		if ( nffile->block_header->id == Large_BLOCK_Type ) {
			// skip
			continue;
		}

		if ( nffile->block_header->id != DATA_BLOCK_TYPE_2 ) {
			LogError("Can't process block type %u. Skip block.\n", nffile->block_header->id);
			continue;
		}

		// sequentially walk all records of the block
		flow_record = nffile->buff_ptr;
		for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
			switch ( flow_record->type ) {
				case CommonRecordType: {
					generic_exporter_t *exp_info = exporter_list[flow_record->exporter_sysid];
					uint32_t map_id = flow_record->ext_map;
					master_record_t	*master_record;

					if ( extension_map_list->slot[map_id] == NULL ) {
						LogError("Corrupt data file. Missing extension map %u. Skip record.\n", flow_record->ext_map);
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);
						continue;
					}

					master_record = &(extension_map_list->slot[map_id]->master_record);
					ExpandRecord_v2( flow_record, extension_map_list->slot[flow_record->ext_map], exp_info ? &(exp_info->info) : NULL, master_record);

					// replay the expanded record against every channel's filter
					for ( j=0; j < num_channels; j++ ) {
						int match;

						// apply profile filter
						(channels[j].engine)->nfrecord = (uint64_t *)master_record;
						engine = channels[j].engine;
						match = (*engine->FilterEngine)(engine);

						// if profile filter failed -> next profile
						if ( !match )
							continue;

						// filter was successful -> continue record processing

						// update statistics
						UpdateStat(&channels[j].stat_record, master_record);
						if ( channels[j].nffile )
							UpdateStat(channels[j].nffile->stat_record, master_record);

						if ( channels[j].xstat )
							UpdateXStat(channels[j].xstat, master_record);

						// do we need to write data to new file - shadow profiles do not have files.
						// check if we need to flush the output buffer
						if ( channels[j].nffile != NULL ) {
							// write record to output buffer
							AppendToBuffer(channels[j].nffile, (void *)flow_record, flow_record->size);
						}

					} // End of for all channels

					} break;
				case ExtensionMapType: {
					extension_map_t *map = (extension_map_t *)flow_record;

					if ( Insert_Extension_Map(extension_map_list, map) ) {
						int j;
						for ( j=0; j < num_channels; j++ ) {
							if ( channels[j].nffile != NULL ) {
								// flush new map
								AppendToBuffer(channels[j].nffile, (void *)map, map->size);
							}
						}
					} // else map already known and flushed

					} break;
				case ExporterInfoRecordType: {
					int ret = AddExporterInfo((exporter_info_record_t *)flow_record);
					if ( ret != 0 ) {
						int j;
						for ( j=0; j < num_channels; j++ ) {
							if ( channels[j].nffile != NULL && ret == 1) {
								// flush new exporter
								AppendToBuffer(channels[j].nffile, (void *)flow_record, flow_record->size);
							}
						}
					} else {
						LogError("Failed to add Exporter Record\n");
					}
					} break;
				case SamplerInfoRecordype: {
					int ret = AddSamplerInfo((sampler_info_record_t *)flow_record);
					if ( ret != 0 ) {
						int j;
						for ( j=0; j < num_channels; j++ ) {
							if ( channels[j].nffile != NULL && ret == 1 ) {
								// flush new map
								AppendToBuffer(channels[j].nffile, (void *)flow_record, flow_record->size);
							}
						}
					} else {
						LogError("Failed to add Sampler Record\n");
					}
					} break;
				case ExporterRecordType:
				case SamplerRecordype:
				case ExporterStatRecordType:
						// Silently skip exporter records
					break;
				default: {
					LogError("Skip unknown record type %i\n", flow_record->type);
				}
			}

			// Advance pointer by number of bytes for netflow record
			flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);

		} // End of for all umRecords

	} // End of while !done

	// do we need to write data to new file - shadow profiles do not have files.
	for ( j=0; j < num_channels; j++ ) {
		if ( channels[j].nffile != NULL ) {
			// flush output buffer
			if ( channels[j].nffile->block_header->NumRecords ) {
				if ( WriteBlock(channels[j].nffile) <= 0 ) {
					LogError("Failed to write output buffer to disk: '%s'" , strerror(errno));
				}
			}
		}
	}
	CloseFile(nffile);
	DisposeFile(nffile);

} // End of process_data
// Imports an sspj (SpriteStudio project) binary buffer: loads the project,
// then resolves and imports its referenced cellmaps (ssce), animation packs
// (ssae) and textures. Returns the created USsProject, or nullptr when the
// project failed to load.
//
// Fix: the previous version called NewProject->SetFilepath() BEFORE the
// if(NewProject) null check, crashing on a failed sspj load. The call is now
// inside the guarded block.
UObject* USspjFactory::FactoryCreateBinary(UClass* InClass, UObject* InParent, FName InName, EObjectFlags Flags, UObject* Context, const TCHAR* Type, const uint8*& Buffer, const uint8* InBufferEnd, FFeedbackContext* Warn)
{
	// When invoked through the reimport factory, reuse already-imported textures.
	bool bReimport = this->IsA(UReimportSspjFactory::StaticClass());
	TMap<FString, UTexture*>* ExistImages = NULL;
	if(bReimport)
	{
		ExistImages = &(Cast<UReimportSspjFactory>(this)->ExistImages);
	}

	FAssetToolsModule& AssetToolsModule = FModuleManager::LoadModuleChecked<FAssetToolsModule>("AssetTools");

	FString ProjectNameStr = InName.ToString();
	FName ProjectName = InName;

	// On a fresh import, give the parent package a unique name for the project.
	UPackage* InParentPackage = Cast<UPackage>(InParent);
	if(InParentPackage && !bReimport)
	{
		FString ProjectPackageName;
		FString BasePackageName = FPackageName::GetLongPackagePath(InParent->GetOutermost()->GetName()) / ProjectNameStr;
		AssetToolsModule.Get().CreateUniqueAssetName(BasePackageName, TEXT(""), ProjectPackageName, ProjectNameStr);
		InParentPackage->Rename(*ProjectPackageName);
	}

	// Fetch the import settings
	const USsImportSettings* ImportSettings = GetDefault<USsImportSettings>();

	// Begin import
	FEditorDelegates::OnAssetPreImport.Broadcast(this, InClass, InParent, ProjectName, Type);

	// sspj
	USsProject* NewProject = FSsLoader::LoadSsProject(InParent, ProjectName, Flags, Buffer, (InBufferEnd - Buffer) + 1);
	if(NewProject)
	{
		// Record the source file path (moved inside the null check - was a
		// null dereference when the load failed).
		NewProject->SetFilepath( GetCurrentFilename() );
		if(NewProject->AssetImportData == nullptr)
		{
			NewProject->AssetImportData = NewObject<UAssetImportData>(NewProject);
		}
		NewProject->AssetImportData->Update(CurrentFilename);

		FString CurPath = FPaths::GetPath(GetCurrentFilename());
		TArray<FString> ImagePaths;
		TArray<SsTexWrapMode::Type> ImageWrapModes;
		TArray<SsTexFilterMode::Type> ImageFilterModes;

		// ssce: load every referenced cellmap and collect unique image paths
		NewProject->CellmapList.Empty();
		NewProject->CellmapList.AddZeroed(NewProject->CellmapNames.Num());
		for(int i = 0; i < NewProject->CellmapNames.Num(); ++i)
		{
			FString FileName = GetFilePath(CurPath, NewProject->Settings.CellMapBaseDirectory, NewProject->CellmapNames[i].ToString());
			TArray<uint8> Data;
			if(FFileHelper::LoadFileToArray(Data, *FileName))
			{
				const uint8* BufferBegin = Data.GetData();
				const uint8* BufferEnd = BufferBegin + Data.Num() - 1;
				if(FSsLoader::LoadSsCellMap(&(NewProject->CellmapList[i]), BufferBegin, (BufferEnd - BufferBegin) + 1))
				{
					NewProject->CellmapList[i].FileName = NewProject->CellmapNames[i];
					if(0 < NewProject->CellmapList[i].ImagePath.Len())
					{
						if(INDEX_NONE == ImagePaths.Find(NewProject->CellmapList[i].ImagePath))
						{
							ImagePaths.Add(NewProject->CellmapList[i].ImagePath);
							// per-cellmap texture settings override the project defaults
							if(NewProject->CellmapList[i].OverrideTexSettings)
							{
								ImageWrapModes.Add(NewProject->CellmapList[i].WrapMode);
								ImageFilterModes.Add(NewProject->CellmapList[i].FilterMode);
							}
							else
							{
								ImageWrapModes.Add(NewProject->Settings.WrapMode);
								ImageFilterModes.Add(NewProject->Settings.FilterMode);
							}
						}
					}
				}
			}
		}

		// ssae: load every referenced animation pack
		NewProject->AnimeList.Empty();
		NewProject->AnimeList.AddZeroed(NewProject->AnimepackNames.Num());
		for(int i = 0; i < NewProject->AnimepackNames.Num(); ++i)
		{
			FString FileName = GetFilePath(CurPath, NewProject->Settings.AnimeBaseDirectory, NewProject->AnimepackNames[i].ToString());
			TArray<uint8> Data;
			if(FFileHelper::LoadFileToArray(Data, *FileName))
			{
				const uint8* BufferBegin = Data.GetData();
				const uint8* BufferEnd = BufferBegin + Data.Num() - 1;
				FSsLoader::LoadSsAnimePack(&(NewProject->AnimeList[i]), BufferBegin, (BufferEnd - BufferBegin) + 1);
			}
		}

		// texture: import each unique image (or reuse an existing one on reimport)
		for(int i = 0; i < ImagePaths.Num(); ++i)
		{
			FString FileName = GetFilePath(CurPath, NewProject->Settings.ImageBaseDirectory, ImagePaths[i]);

			UTexture* ImportedTexture = NULL;
			if(ExistImages && ExistImages->Contains(ImagePaths[i]))
			{
				ImportedTexture = ExistImages->FindChecked(ImagePaths[i]);
			}

			TArray<uint8> Data;
			if(FFileHelper::LoadFileToArray(Data, *FileName))
			{
				UTextureFactory* TextureFact = NewObject<UTextureFactory>();
				TextureFact->AddToRoot();	// keep the factory alive across the import

				FString TextureName = FPaths::GetBaseFilename(ImagePaths[i]);
				UPackage* TexturePackage = NULL;
				if(ImportedTexture)
				{
					TexturePackage = ImportedTexture->GetOutermost();
				}
				else
				{
					FString TexturePackageName;
					FString BasePackageName = FPackageName::GetLongPackagePath(InParent->GetOutermost()->GetName()) / TextureName;
					AssetToolsModule.Get().CreateUniqueAssetName(BasePackageName, TEXT(""), TexturePackageName, TextureName);
					TexturePackage = CreatePackage(NULL, *TexturePackageName);
				}

				const uint8* BufferBegin = Data.GetData();
				const uint8* BufferEnd = BufferBegin + Data.Num();
				UTexture2D* NewTexture = (UTexture2D*)TextureFact->FactoryCreateBinary(
					UTexture2D::StaticClass(),
					TexturePackage,
					FName(*TextureName),
					Flags,
					NULL,
					*FPaths::GetExtension(ImagePaths[i]),
					BufferBegin, BufferEnd,
					Warn
					);
				if(NewTexture)
				{
					// apply the configured overrides from the import settings
					if(ImportSettings->bOverwriteMipGenSettings)
					{
						NewTexture->MipGenSettings = TMGS_NoMipmaps;
					}
					if(ImportSettings->bOverwriteTextureGroup)
					{
						NewTexture->LODGroup = ImportSettings->TextureGroup;
					}
					if(ImportSettings->bOverwriteCompressionSettings)
					{
						NewTexture->CompressionSettings = TextureCompressionSettings::TC_EditorIcon;
					}
					if(ImportSettings->bOverwriteTilingMethodFromSspj)
					{
						switch(ImageWrapModes[i])
						{
							case SsTexWrapMode::Clamp:
								{
									NewTexture->AddressX = NewTexture->AddressY = TA_Clamp;
								} break;
							case SsTexWrapMode::Repeat:
								{
									NewTexture->AddressX = NewTexture->AddressY = TA_Wrap;
								} break;
							case SsTexWrapMode::Mirror:
								{
									NewTexture->AddressX = NewTexture->AddressY = TA_Mirror;
								} break;
						}
					}
					if(ImportSettings->bOverwriteNeverStream)
					{
						NewTexture->NeverStream = true;
					}
					if(ImportSettings->bOverwriteFilterFromSspj)
					{
						switch(ImageFilterModes[i])
						{
							case SsTexFilterMode::Nearest:
								{
									NewTexture->Filter = TF_Nearest;
								} break;
							case SsTexFilterMode::Linear:
								{
									NewTexture->Filter = TF_Bilinear;
								} break;
						}
					}

					NewTexture->UpdateResource();

					FAssetRegistryModule::AssetCreated(NewTexture);
					TexturePackage->SetDirtyFlag(true);

					TextureFact->RemoveFromRoot();
					ImportedTexture = NewTexture;
				}
			}

			// wire the imported texture into every cellmap that references it
			if(ImportedTexture)
			{
				for(int ii = 0; ii < NewProject->CellmapList.Num(); ++ii)
				{
					if(NewProject->CellmapList[ii].ImagePath == ImagePaths[i])
					{
						NewProject->CellmapList[ii].Texture = ImportedTexture;
					}
				}
			}
		}
	}

	// Import finished (broadcasts nullptr when the project failed to load)
	FEditorDelegates::OnAssetPostImport.Broadcast(this, NewProject);

	return NewProject;
}