// PresentFrameBegin() was called for this API. Try to make eGAPI the
// primary attached graphics API. Higher enum values win over lower ones.
// Returns true when eGAPI is (now) the primary API.
bool CTaksiProcess::StartGAPI( TAKSI_GAPI_TYPE eGAPI )
{
	if ( m_Stats.m_eGAPI == eGAPI )
		return true;	// its already the primary eGAPI.
	if ( eGAPI < m_Stats.m_eGAPI )
		return false;	// lower priority than the current eGAPI.

	ASSERT( eGAPI > TAKSI_GAPI_NONE && eGAPI < TAKSI_GAPI_QTY );

	// Unhook any lower priority types before promoting eGAPI.
	for ( int iType = m_Stats.m_eGAPI; iType < eGAPI; iType++ )
	{
		if ( s_aGAPIs[iType] != NULL )
			s_aGAPIs[iType]->FreeDll();
	}

	m_Stats.m_eGAPI = eGAPI;
	UpdateStat( TAKSI_PROCSTAT_GAPI );
	return true;
}
// Show the QSO dialog for the result of a log search.
// fp holds the list of matching log indices; the first match (if any)
// becomes the record being edited. Returns TRUE when the user confirmed
// with OK (edits, if any, are written back to the log via Log.PutData).
int __fastcall TQSODlgBox::ShowFind(CLogFind *fp)
{
	m_fp = fp;

	// BUGFIX: fp->pFindTbl[0] was previously read unconditionally below
	// (for UpdateTextData), even when the find table is empty - an
	// out-of-bounds read. Hoist a guarded first index instead.
	int nFirst = fp->GetCount() ? fp->pFindTbl[0] : 0;

	if( fp->GetCount() ){
		Log.GetData(&m_sd, nFirst);	// load the first matching record
		m_CurNo = nFirst;
	}
	else {
		memset(&m_sd, 0, sizeof(SDMMLOG));	// nothing found - empty record
		OKBtn->Enabled = FALSE;
	}
	SetCountry();

	char bf[512];
	UpdateTextData(&m_sd, nFirst);

	// one header row plus one row per match (always at least one data row)
	Grid->RowCount = fp->GetCount() ? fp->GetCount() + 1 : 2;
	Grid->TopRow = 1;
	Grid->Row = 1;

	// caption: "[search text]" followed by the resolved country name
	sprintf(bf, "[%s]", fp->GetText());
	strcat(bf, m_Country.c_str());
	Caption = bf;

	UpdateStat();
	m_EditFlag = 0;
	UpdateBtn();

	if( ShowModal() == IDOK ){
		if( m_EditFlag ){	// persist only if the user actually edited
			UpdateCurData(&m_sd);
			Log.PutData(&m_sd, m_CurNo);
		}
		return TRUE;
	}
	else {
		return FALSE;
	}
}
// Select the download whose peer list this pane should display.
// A NULL download clears the selection without refreshing the view.
void CDownloads_Bittorrent_Peers::set_ActiveDownload(vmsDownloadSmartPtr dld)
{
	m_dld = dld;
	if (dld != NULL)
	{
		m_bDldChanged = true;	// force the next refresh to rebuild the list
		UpdateStat ();
	}
}
/*
 * GatherKernelStats
 * Collects guest memory statistics reported to the virtio balloon device.
 * Queries SystemBasicInformation and SystemPerformanceInformation via
 * ZwQuerySystemInformation, then fills stats[] with swap, page-fault and
 * memory totals (page counts are shifted by PAGE_SHIFT into bytes).
 * Returns the NTSTATUS of the failing query, or the last successful one.
 */
NTSTATUS GatherKernelStats(BALLOON_STAT stats[VIRTIO_BALLOON_S_NR])
{
    SYSTEM_BASIC_INFORMATION basicInfo;
    SYSTEM_PERFORMANCE_INFORMATION perfInfo;
    ULONG outLen = 0;
    NTSTATUS ntStatus;
    ULONG idx = 0;
    UINT64 SoftFaults;

    RtlZeroMemory(&basicInfo,sizeof(basicInfo));
    RtlZeroMemory(&perfInfo,sizeof(perfInfo));

    ntStatus = ZwQuerySystemInformation(SystemBasicInformation, &basicInfo, sizeof(basicInfo), &outLen);
    if(!NT_SUCCESS(ntStatus))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_HW_ACCESS,
            "GatherKernelStats (SystemBasicInformation) failed 0x%08x (outLen=0x%x)\n", ntStatus, outLen);
        return ntStatus;
    }
    /* warn only once if the kernel returned a different struct size than expected */
    if ((!bBasicInfoWarning)&&(outLen != sizeof(basicInfo)))
    {
        bBasicInfoWarning = TRUE;
        TraceEvents(TRACE_LEVEL_WARNING, DBG_HW_ACCESS,
            "GatherKernelStats (SystemBasicInformation) expected outLen=0x%08x returned with 0x%0x", sizeof(basicInfo), outLen);
    }

    ntStatus = ZwQuerySystemInformation(SystemPerformanceInformation, &perfInfo, sizeof(perfInfo), &outLen);
    if(!NT_SUCCESS(ntStatus))
    {
        TraceEvents(TRACE_LEVEL_ERROR, DBG_HW_ACCESS,
            "GatherKernelStats (SystemPerformanceInformation) failed 0x%08x (outLen=0x%x)\n", ntStatus, outLen);
        return ntStatus;
    }
    if ((!bPerfInfoWarning)&&(outLen != sizeof(perfInfo)))
    {
        bPerfInfoWarning = TRUE;
        TraceEvents(TRACE_LEVEL_WARNING, DBG_HW_ACCESS,
            "GatherKernelStats (SystemPerformanceInformation) expected outLen=0x%08x returned with 0x%0x", sizeof(perfInfo), outLen);
    }

    /* Fold each raw 32-bit kernel counter into a 64-bit overflow-free
       accumulator kept in Counters[_<name>]. Note: MSVC-specific use of
       '##' next to '.' - not portable preprocessor usage. */
#define UpdateNoOverflow(x) UpdateOverflowFreeCounter(&Counters[_##x],perfInfo.##x)

    UpdateStat(&stats[idx++], VIRTIO_BALLOON_S_SWAP_IN,
        UpdateNoOverflow(PageReadCount) << PAGE_SHIFT);
    UpdateStat(&stats[idx++], VIRTIO_BALLOON_S_SWAP_OUT,
        (UpdateNoOverflow(DirtyPagesWriteCount) + UpdateNoOverflow(MappedPagesWriteCount)) << PAGE_SHIFT);
    /* soft (minor) faults: resolved without a disk read */
    SoftFaults =
        UpdateNoOverflow(CopyOnWriteCount) +
        UpdateNoOverflow(TransitionCount) +
        UpdateNoOverflow(CacheTransitionCount) +
        UpdateNoOverflow(DemandZeroCount);
    /* PageReadCount is reported as the major-fault count */
    UpdateStat(&stats[idx++], VIRTIO_BALLOON_S_MAJFLT,
        UpdateNoOverflow(PageReadCount));
    UpdateStat(&stats[idx++], VIRTIO_BALLOON_S_MINFLT, SoftFaults);
    UpdateStat(&stats[idx++], VIRTIO_BALLOON_S_MEMFREE,
        U32_2_S64(perfInfo.AvailablePages) << PAGE_SHIFT);
    UpdateStat(&stats[idx++], VIRTIO_BALLOON_S_MEMTOT,
        U32_2_S64(basicInfo.NumberOfPhysicalPages) << PAGE_SHIFT);
#undef UpdateNoOverflow

    return ntStatus;
}
// One-time dialog setup: attach the application icon, populate the stats
// controls, start the 1-second refresh timer and apply UI language strings.
BOOL CDlgUploading::OnInitDialog()
{
	CDialog::OnInitDialog();

	// small (FALSE) icon for the dialog, taken from the app resources
	SetIcon (LoadIcon (AfxGetResourceHandle (), MAKEINTRESOURCE (IDI_MAIN)), FALSE);

	UpdateStat (TRUE);		// fill the controls before the first timer tick
	SetTimer (1, 1000, NULL);	// periodic refresh handled in OnTimer
	ApplyLanguage ();

	return TRUE;	// let the framework set the default focus
}
// Show the QSO dialog seeded with record *sp at log index n.
// fp supplies the current search result so the grid can highlight the row
// for n; the grid stays disabled when n is not among the matches.
// On return *sp holds the (possibly edited) record. Always returns TRUE.
int __fastcall TQSODlgBox::Execute(CLogFind *fp, SDMMLOG *sp, int n)
{
	m_fp = fp;
	UpdateTextData(sp, n);
	// one header row plus one row per match (always at least one data row)
	Grid->RowCount = fp->GetCount() ? fp->GetCount() + 1 : 2;
//	Grid->TopRow = 1;
	Grid->Row = 1;
	Grid->Enabled = FALSE;
	// enable the grid only when n is one of the found records, and scroll
	// so the selected row stays visible (4 rows of context above it).
	for( int i = 0; i < fp->GetCount(); i++ ){
		if( n == fp->pFindTbl[i] ){
			Grid->Enabled = TRUE;
			Grid->Row = i + 1;
			Grid->TopRow = (i > 4) ? Grid->Row - 4 : 1;
			break;
		}
	}
	SetCountry();
	char bf[512];
	// default caption (language dependent); overwritten below when a
	// meaningful search text / time is available.
	strcpy(bf, MsgEng ? "Initial Data":"QSOデータ初期値");
	if( Grid->Enabled == TRUE ){
		if( sp->btime ){
			sprintf(bf, "[%s]", fp->GetText());
			strcat(bf, m_Country.c_str());
		}
		else if( *fp->GetText() ){
			sprintf(bf, "[%s]", fp->GetText());
			strcat(bf, m_Country.c_str());
		}
	}
	Caption = bf;
	UpdateStat();
	m_EditFlag = 0;
	UpdateBtn();
	m_CurNo = n;
	if( ShowModal() == IDOK ){
		if( m_EditFlag ){	// user edited the record: persist the change
			UpdateCurData(&m_sd);
			Log.PutData(&m_sd, m_CurNo);
		}
	}
	// hand the current record back to the caller; if the selection moved
	// away from n while the dialog was open, reload index n from the log.
	if( m_CurNo == n ){
		memcpy(sp, &m_sd, sizeof(SDMMLOG));
	}
	else {
		Log.GetData(sp, n);
	}
	return TRUE;
}
void CTaksiProcess::DetachGAPIs() { // we are unloading or some other app now has the main focus/hook. StopGAPIs(); // give graphics module a chance to clean up. for ( int i=TAKSI_GAPI_NONE+1; i<COUNTOF(s_aGAPIs); i++ ) { if ( s_aGAPIs[i] == NULL ) continue; s_aGAPIs[i]->FreeDll(); } m_Stats.m_eGAPI = TAKSI_GAPI_NONE; UpdateStat( TAKSI_PROCSTAT_GAPI ); }
//--------------------------------------------------------------------------- void __fastcall TFileViewDlg::TabSChange(TObject *Sender) { if( m_DisEvent ) return; m_DisEvent++; m_CurFile = -1; m_CurPage = pTabS->TabIndex; pCurPage = pFileV[m_CurPage]; UD->Max = SHORT(pCurPage->m_MaxPage-1); UD->Position = SHORT(pCurPage->m_CurPage); if( (pCurPage->pList == NULL) || (pCurPage->pBitmapS == NULL) ){ LoadImage(); } UpdateBitmap(); UpdateStat(); m_DisEvent--; }
void TaskStart (void *data) { auto UBYTE i; // Create N_TASKS identical tasks for (i = 0; i < N_TASKS; i++) { TaskData[i] = i; // Each task has its own number OSTaskCreate(Task, (void *)&TaskData[i], TASK_STK_SIZE, 11+i); } InitTimerInt(); // Setup Timer A internal interrupts OSStatInit(); // Initialize statistics task DispStr(0, 12, "#Tasks : xxxxx CPU Usage: xxxxx %"); DispStr(0, 13, "#Task switch/sec: xxxxx"); DispStr(0, 14, "<-PRESS 'Q' TO QUIT->"); for (;;) { UpdateStat(); OSTimeDly(OS_TICKS_PER_SEC); // Wait one second } }
// Timer tick: refresh the stats display, or tear the dialog down once the
// upload manager has stopped (unless m_bDontReleaseIfStopped keeps it open).
void CDlgUploading::OnTimer(UINT nIDEvent) 
{
	if (m_bDontReleaseIfStopped == false && m_upl->pMgr->IsRunning () == FALSE)
	{
		// Upload finished or aborted - stop polling and destroy the dialog.
		// BUGFIX: removed a dead, empty "if (m_upl->pMgr->IsDone ()) { }"
		// branch that had no effect.
		KillTimer (nIDEvent);
		Release ();
		return;
	}

	UpdateStat ();

	CDialog::OnTimer(nIDEvent);
}
// Main search loop: one candidate per iteration, accepted when it improves
// on the best state; the step size adapts after every evaluation and the
// covariance is updated on success. Stops once the budget maxk is reached.
void CMA::Run()
{
	for (k = 1; ; k++) {
		UpdateStat(k, BestState->f);	// record progress before the budget check
		if (k >= maxk)
			break;

		GenCandidate();
		pFunc->Eval(NewState);

		if (NewState.f < BestState->f) {
			// improvement: adopt the candidate, reward the step size,
			// and refresh the covariance model
			NewBestState(&NewState);
			UpdateStepSize(k, 1.0);
			UpdateCov();
		} else {
			UpdateStepSize(k, 0.0);	// failure: shrink the step size
		}
	}
}
/* Write one record into an open lnf file: bind the record to its extension
 * map (registering the map in the file if necessary), refresh the file's
 * statistics and append the packed record to the output buffer.
 * Returns LNF_OK on success, LNF_ERR_WRITE when no extension map matches. */
int lnf_write(lnf_file_t *lnf_file, lnf_rec_t *lnf_rec) {

	extension_map_t *map = lnf_lookup_map(lnf_file, lnf_rec->extensions_arr);
	if (map == NULL) {
		return LNF_ERR_WRITE;
	}

	/* attach the map and mark the record as a common data record */
	lnf_rec->master_record->map_ref = map;
	lnf_rec->master_record->ext_map = map->map_id;
	lnf_rec->master_record->type = CommonRecordType;

	UpdateStat(lnf_file->nffile->stat_record, lnf_rec->master_record);
	PackRecord(lnf_rec->master_record, lnf_file->nffile);

	return LNF_OK;
}
// Context-menu "Update" command: force an immediate refresh of the peer list.
void CDownloads_Bittorrent_Peers::OnCmdUpdate() 
{
	UpdateStat ();
}
int ExportFlowTable(nffile_t *nffile, int aggregate, int bidir, int date_sorted, extension_map_list_t *extension_map_list) { hash_FlowTable *FlowTable; FlowTableRecord_t *r; SortElement_t *SortList; master_record_t *aggr_record_mask; uint32_t i; uint32_t maxindex, c; #ifdef DEVEL char *string; #endif ExportExtensionMaps(aggregate, bidir, nffile, extension_map_list); ExportExporterList(nffile); aggr_record_mask = GetMasterAggregateMask(); FlowTable = GetFlowTable(); c = 0; maxindex = FlowTable->NumRecords; if ( date_sorted ) { // Sort records according the date SortList = (SortElement_t *)calloc(maxindex, sizeof(SortElement_t)); if ( !SortList ) { LogError("malloc() error in %s line %d: %s\n", __FILE__, __LINE__, strerror (errno)); return 0; } // preset SortList table - still unsorted for ( i=0; i<=FlowTable->IndexMask; i++ ) { r = FlowTable->bucket[i]; if ( !r ) continue; // foreach elem in this bucket while ( r ) { SortList[c].count = 1000LL * r->flowrecord.first + r->flowrecord.msec_first; // sort according the date SortList[c].record = (void *)r; c++; r = r->next; } } if ( c != maxindex ) { LogError("Abort: Mismatch %s line %d: %s\n", __FILE__, __LINE__, strerror (errno)); return 0; } if ( c >= 2 ) heapSort(SortList, c, 0); for ( i = 0; i < c; i++ ) { master_record_t *flow_record; common_record_t *raw_record; extension_info_t *extension_info; r = (FlowTableRecord_t *)(SortList[i].record); raw_record = &(r->flowrecord); extension_info = r->map_info_ref; flow_record = &(extension_info->master_record); ExpandRecord_v2( raw_record, extension_info, r->exp_ref, flow_record); flow_record->dPkts = r->counter[INPACKETS]; flow_record->dOctets = r->counter[INBYTES]; flow_record->out_pkts = r->counter[OUTPACKETS]; flow_record->out_bytes = r->counter[OUTBYTES]; flow_record->aggr_flows = r->counter[FLOWS]; // apply IP mask from aggregation, to provide a pretty output if ( FlowTable->has_masks ) { flow_record->V6.srcaddr[0] &= FlowTable->IPmask[0]; flow_record->V6.srcaddr[1] 
&= FlowTable->IPmask[1]; flow_record->V6.dstaddr[0] &= FlowTable->IPmask[2]; flow_record->V6.dstaddr[1] &= FlowTable->IPmask[3]; } if ( FlowTable->apply_netbits ) ApplyNetMaskBits(flow_record, FlowTable->apply_netbits); if ( aggr_record_mask ) { ApplyAggrMask(flow_record, aggr_record_mask); } // switch to output extension map flow_record->map_ref = extension_info->map; flow_record->ext_map = extension_info->map->map_id; PackRecord(flow_record, nffile); #ifdef DEVEL format_file_block_record((void *)flow_record, &string, 0); printf("%s\n", string); #endif // Update statistics UpdateStat(nffile->stat_record, flow_record); } } else { // print them as they came for ( i=0; i<=FlowTable->IndexMask; i++ ) { r = FlowTable->bucket[i]; while ( r ) { master_record_t *flow_record; common_record_t *raw_record; extension_info_t *extension_info; raw_record = &(r->flowrecord); extension_info = r->map_info_ref; flow_record = &(extension_info->master_record); ExpandRecord_v2( raw_record, extension_info, r->exp_ref, flow_record); flow_record->dPkts = r->counter[INPACKETS]; flow_record->dOctets = r->counter[INBYTES]; flow_record->out_pkts = r->counter[OUTPACKETS]; flow_record->out_bytes = r->counter[OUTBYTES]; flow_record->aggr_flows = r->counter[FLOWS]; // apply IP mask from aggregation, to provide a pretty output if ( FlowTable->has_masks ) { flow_record->V6.srcaddr[0] &= FlowTable->IPmask[0]; flow_record->V6.srcaddr[1] &= FlowTable->IPmask[1]; flow_record->V6.dstaddr[0] &= FlowTable->IPmask[2]; flow_record->V6.dstaddr[1] &= FlowTable->IPmask[3]; } if ( FlowTable->apply_netbits ) ApplyNetMaskBits(flow_record, FlowTable->apply_netbits); if ( aggr_record_mask ) { ApplyAggrMask(flow_record, aggr_record_mask); } // switch to output extension map flow_record->map_ref = extension_info->map; flow_record->ext_map = extension_info->map->map_id; PackRecord(flow_record, nffile); #ifdef DEVEL format_file_block_record((void *)flow_record, &string, 0); printf("%s\n", string); #endif // Update 
statistics UpdateStat(nffile->stat_record, flow_record); r = r->next; } } } if ( nffile->block_header->NumRecords ) { if ( WriteBlock(nffile) <= 0 ) { LogError("Failed to write output buffer to disk: '%s'" , strerror(errno)); return 0; } } return 1; } // End of ExportFlowTable
//--------------------------------------------------------------------------- void __fastcall TFileViewDlg::LoadImage(void) { if( !m_Max ) return; if( pCurPage->pList == NULL ) LoadFileList(); m_CurFile = -1; UpdateStat(); CWaitCursor wait; MultProc(); SetCurrentDirectory(pCurPage->m_Folder.c_str()); Graphics::TBitmap *pBitmap = new Graphics::TBitmap(); pBitmap->PixelFormat = pf24bit; pBitmap->Width = 16; pBitmap->Height = 16; MultProc(); int i; int n = pCurPage->m_CurPage * m_Max; SetBitmapSize(); for( i = 0; i < m_Max; i++, n++ ){ TRect rc; GetRect(rc, i); if( n < pCurPage->pList->Count ){ USHORT crc; LPCSTR pn = pCurPage->pList->Get(crc, n); if( !pCurPage->m_Thumb.LoadThumb(n, pCurPage->pBitmapS, rc, crc, pCurPage->m_Size[i]) ){ LoadFile(pBitmap, pn); pCurPage->m_Size[i] = (pBitmap->Height << 16) + pBitmap->Width; Graphics::TBitmap *pBitmapT = CreateBitmap(m_RectS.Right, m_RectS.Bottom, -1); ::SetStretchBltMode(pBitmapT->Canvas->Handle, HALFTONE); MultProcA(); if( sys.m_FileViewKeep ){ FillBitmap(pBitmapT, clGray); if( ((pBitmap->Width <= pBitmapT->Width) && (pBitmap->Height <= pBitmapT->Height)) ){ pBitmapT->Canvas->Draw(0, 0, pBitmap); } else { KeepAspectDraw(pBitmapT->Canvas, pBitmapT->Width, pBitmapT->Height, pBitmap); } } else { pBitmapT->Canvas->StretchDraw(m_RectS, pBitmap); } pCurPage->m_Thumb.SaveThumb(n, pBitmapT, m_RectS, crc, pCurPage->m_Size[i]); pCurPage->pBitmapS->Canvas->CopyRect(rc, pBitmapT->Canvas, m_RectS); if( pBox[i] != NULL ){ pBox[i]->Canvas->Draw(0, 0, pBitmapT); } delete pBitmapT; } } else { pCurPage->pBitmapS->Canvas->Brush->Style = bsSolid; pCurPage->pBitmapS->Canvas->Brush->Color = clWhite; pCurPage->pBitmapS->Canvas->FillRect(rc); } MultProc(); } delete pBitmap; UpdateBitmap(); }
//--------------------------------------------------------------------------- void __fastcall TFileViewDlg::LoadFileList(void) { if( pCurPage->pList == NULL ){ pCurPage->pList = new CFILEL; } pCurPage->pList->Delete(); WIN32_FIND_DATA FileData; HANDLE hSearch; MultProc(); if( pCurPage->m_Folder.IsEmpty() || (::SetCurrentDirectory(pCurPage->m_Folder.c_str()) != TRUE) ){ if( NewFolder() == TRUE ){ if( ::SetCurrentDirectory(pCurPage->m_Folder.c_str()) != TRUE ){ pCurPage->m_Folder = "\\"; ::SetCurrentDirectory(pCurPage->m_Folder.c_str()); } } else { pCurPage->m_Folder = "\\"; ::SetCurrentDirectory(pCurPage->m_Folder.c_str()); } } MultProc(); hSearch = FindFirstFile("*.*", &FileData); if(hSearch == INVALID_HANDLE_VALUE){ UpdateStat(); return; } MultProc(); while(1){ LPCSTR pExt = GetEXT(FileData.cFileName); int f = 0; switch(pCurPage->m_Type){ case 0: if( IsPic(pExt) ) f = 1; break; case 1: if( !strcmpi(pExt, "MTM") ) f = 1; break; case 2: if( !strcmpi(pExt, "MTI") ) f = 1; break; case 3: if( !strcmpi(pExt, "DLL") ) f = 1; break; case 4: if( !(FileData.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY) ){ if( *pExt && strcmpi(pExt, "DLL") && strcmpi(pExt, "EXE") ) f = 1; } break; } if( f ){ pCurPage->pList->Add(FileData.cFileName, GetCRC(&FileData)); } if(!FindNextFile(hSearch, &FileData)) break; MultProcA(); } FindClose(hSearch); pCurPage->pList->Sort(); if( pCurPage->m_UseIndex && pCurPage->pList->Count ){ pCurPage->m_Thumb.OpenFolder(m_MyIndex, m_CurPage, pCurPage->pList->Count, GetCRC(pCurPage->m_Folder.c_str())); pCurPage->m_Thumb.SetSize(m_RectS.Right, m_RectS.Bottom); } UpdateStat(); MultProc(); }
int main( int argc, char **argv ) { struct stat stat_buff; char c, *wfile, *rfile, *Rfile, *Mdirs, *ffile, *filter, *timeslot, *DBdir; char datestr[64]; int ffd, ret, DBinit, AddDB, GenStat, AvStat, output_mode; unsigned int lastupdate, topN; data_row *port_table; time_t when; struct tm * t1; wfile = rfile = Rfile = Mdirs = ffile = filter = DBdir = timeslot = NULL; DBinit = AddDB = GenStat = AvStat = 0; lastupdate = output_mode = 0; topN = 10; while ((c = getopt(argc, argv, "d:hln:pr:st:w:AIM:R:SV")) != EOF) { switch (c) { case 'h': usage(argv[0]); exit(0); break; case 'I': DBinit = 1; break; case 'M': Mdirs = strdup(optarg); break; case 'R': Rfile = strdup(optarg); break; case 'd': DBdir = strdup(optarg); ret = stat(DBdir, &stat_buff); if ( !(stat_buff.st_mode & S_IFDIR) ) { fprintf(stderr, "No such directory: %s\n", DBdir); exit(255); } break; case 'l': lastupdate = 1; break; case 'n': topN = atoi(optarg); if ( topN < 0 ) { fprintf(stderr, "TopnN number %i out of range\n", topN); exit(255); } break; case 'p': output_mode = 1; break; case 'r': rfile = strdup(optarg); break; case 'w': wfile = strdup(optarg); break; case 's': GenStat = 1; break; case 't': timeslot = optarg; if ( !ISO2UNIX(timeslot) ) { exit(255); } break; case 'A': AddDB = 1; break; case 'S': AvStat = 1; break; default: usage(argv[0]); exit(0); } } if (argc - optind > 1) { usage(argv[0]); exit(255); } else { /* user specified a pcap filter */ filter = argv[optind]; } openlog(argv[0] , LOG_CONS|LOG_PID, LOG_DAEMON); if ( !filter && ffile ) { if ( stat(ffile, &stat_buff) ) { perror("Can't stat file"); exit(255); } filter = (char *)malloc(stat_buff.st_size); if ( !filter ) { perror("Memory error"); exit(255); } ffd = open(ffile, O_RDONLY); if ( ffd < 0 ) { perror("Can't open file"); exit(255); } ret = read(ffd, (void *)filter, stat_buff.st_size); if ( ret < 0 ) { perror("Error reading file"); close(ffd); exit(255); } close(ffd); } if ( !DBdir ) { fprintf(stderr, "DB directory required\n"); exit(255); } 
InitStat(DBdir); if ( !filter ) filter = "any"; Engine = CompileFilter(filter); if ( !Engine ) exit(254); if ( DBinit ) { when = time(NULL); when -= ((when % 300) + 300); InitStatFile(when, NUM_AV_SLOTS); if ( !CreateRRDBs(DBdir, when) ) { fprintf(stderr, "Init DBs failed\n"); exit(255); } fprintf(stderr, "Port DBs initialized.\n"); exit(0); } if ( lastupdate ) { when = RRD_LastUpdate(DBdir); if ( !when ) exit(255); t1 = localtime(&when); strftime(datestr, 63, "%b %d %Y %T", t1); printf("Last Update: %i, %s\n", (int)when, datestr); exit(0); } port_table = NULL; if ( Mdirs || Rfile || rfile ) { SetupInputFileSequence(Mdirs, rfile, Rfile); port_table = process(filter); // Lister(port_table); if ( !port_table ) { exit(255); } if ( AddDB ) { if ( !timeslot ) { fprintf(stderr, "Timeslot required!\n"); exit(255); } UpdateStat(port_table, ISO2UNIX(timeslot)); RRD_StoreDataRow(DBdir, timeslot, port_table); } } if ( AvStat ) { port_table = GetStat(); if ( !port_table ) { fprintf(stderr, "Unable to get port table!\n"); exit(255); } // DoStat Generate_TopN(port_table, topN, NUM_AV_SLOTS, 0, output_mode, wfile); } if ( GenStat ) { when = ISO2UNIX(timeslot); if ( !port_table ) { if ( !timeslot ) { fprintf(stderr, "Timeslot required!\n"); exit(255); } port_table = RRD_GetDataRow(DBdir, when); } if ( !port_table ) { fprintf(stderr, "Unable to get port table!\n"); exit(255); } // DoStat Generate_TopN(port_table, topN, 1, when, output_mode, wfile); } CloseStat(); return 0; }
/*
 * process_data
 * Core record loop of nfdump: iterates over every flow file in the input
 * file sequence, expands and filters each record with the global filter
 * Engine, and routes matching flows to flow/element statistics, the sort
 * list, an output file (write_file) or the record printer.
 * Returns the accumulated stat_record of all matched flows.
 *
 * wfile                 output file name (-w); only written when no
 *                       stats/sorting mode is active
 * element_stat/flow_stat/sort_flows   select the processing mode
 * print_record          printer callback; suppressed while stats/sorting
 * twin_start/twin_end   time window filter (twin_start 0 = no window)
 * limitflows            stop after this many matched flows (-c; 0 = all)
 * do_xstat              also maintain extended statistics in the output
 */
stat_record_t process_data(char *wfile, int element_stat, int flow_stat, int sort_flows, printer_t print_header, printer_t print_record, time_t twin_start, time_t twin_end, uint64_t limitflows, int tag, int compress, int do_xstat) {
common_record_t *flow_record;
master_record_t *master_record;
nffile_t *nffile_w, *nffile_r;
xstat_t *xstat;
stat_record_t stat_record;
int done, write_file;

#ifdef COMPAT15
int v1_map_done = 0;
#endif

	// time window of all matched flows
	memset((void *)&stat_record, 0, sizeof(stat_record_t));
	stat_record.first_seen = 0x7fffffff;
	stat_record.msec_first = 999;

	// Do the logic first

	// do not print flows when doing any stats are sorting
	if ( sort_flows || flow_stat || element_stat ) {
		print_record = NULL;
	}

	// do not write flows to file, when doing any stats
	// -w may apply for flow_stats later
	write_file = !(sort_flows || flow_stat || element_stat) && wfile;
	nffile_r = NULL;
	nffile_w = NULL;
	xstat = NULL;

	// Get the first file handle
	nffile_r = GetNextFile(NULL, twin_start, twin_end);
	if ( !nffile_r ) {
		LogError("GetNextFile() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		return stat_record;
	}
	if ( nffile_r == EMPTY_LIST ) {
		LogError("Empty file list. No files to process\n");
		return stat_record;
	}

	// preset time window of all processed flows to the stat record in first flow file
	t_first_flow = nffile_r->stat_record->first_seen;
	t_last_flow = nffile_r->stat_record->last_seen;

	// store infos away for later use
	// although multiple files may be processed, it is assumed that all
	// have the same settings
	is_anonymized = IP_ANONYMIZED(nffile_r);
	strncpy(Ident, nffile_r->file_header->ident, IDENTLEN);
	Ident[IDENTLEN-1] = '\0';

	// prepare output file if requested
	if ( write_file ) {
		nffile_w = OpenNewFile(wfile, NULL, compress, IP_ANONYMIZED(nffile_r), NULL );
		if ( !nffile_w ) {
			if ( nffile_r ) {
				CloseFile(nffile_r);
				DisposeFile(nffile_r);
			}
			return stat_record;
		}
		if ( do_xstat ) {
			xstat = InitXStat(nffile_w);
			if ( !xstat ) {
				if ( nffile_r ) {
					CloseFile(nffile_r);
					DisposeFile(nffile_r);
				}
				return stat_record;
			}
		}
	}

	// setup Filter Engine to point to master_record, as any record read from file
	// is expanded into this record
	// Engine->nfrecord = (uint64_t *)master_record;

	done = 0;
	while ( !done ) {
	int i, ret;

		// get next data block from file
		ret = ReadBlock(nffile_r);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT )
					LogError("Skip corrupt data file '%s'\n",GetCurrentFilename());
				else
					LogError("Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF: {
				nffile_t *next = GetNextFile(nffile_r, twin_start, twin_end);
				if ( next == EMPTY_LIST ) {
					done = 1;
				} else if ( next == NULL ) {
					done = 1;
					LogError("Unexpected end of file list\n");
				} else {
					// Update global time span window
					if ( next->stat_record->first_seen < t_first_flow )
						t_first_flow = next->stat_record->first_seen;
					if ( next->stat_record->last_seen > t_last_flow )
						t_last_flow = next->stat_record->last_seen;
					// continue with next file
				}
				continue;
				} break; // not really needed
			default:
				// successfully read block
				total_bytes += ret;
		}

#ifdef COMPAT15
		// Legacy nfdump 1.5.x block: synthesize the implicit extension map
		// once, then convert every record in the block to the v2 layout.
		if ( nffile_r->block_header->id == DATA_BLOCK_TYPE_1 ) {
			common_record_v1_t *v1_record = (common_record_v1_t *)nffile_r->buff_ptr;
			// create an extension map for v1 blocks
			if ( v1_map_done == 0 ) {
				extension_map_t *map = malloc(sizeof(extension_map_t) + 2 * sizeof(uint16_t) );
				if ( ! map ) {
					LogError("malloc() allocation error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
					exit(255);
				}
				map->type = ExtensionMapType;
				map->size = sizeof(extension_map_t) + 2 * sizeof(uint16_t);
				if (( map->size & 0x3 ) != 0 ) {
					map->size += 4 - ( map->size & 0x3 );
				}
				map->map_id = INIT_ID;
				map->ex_id[0] = EX_IO_SNMP_2;
				map->ex_id[1] = EX_AS_2;
				map->ex_id[2] = 0;
				map->extension_size = 0;
				map->extension_size += extension_descriptor[EX_IO_SNMP_2].size;
				map->extension_size += extension_descriptor[EX_AS_2].size;
				if ( Insert_Extension_Map(extension_map_list,map) && write_file ) {
					// flush new map
					AppendToBuffer(nffile_w, (void *)map, map->size);
				} // else map already known and flushed
				v1_map_done = 1;
			}
			// convert the records to v2
			for ( i=0; i < nffile_r->block_header->NumRecords; i++ ) {
				common_record_t *v2_record = (common_record_t *)v1_record;
				Convert_v1_to_v2((void *)v1_record);
				// now we have a v2 record -> use size of v2_record->size
				v1_record = (common_record_v1_t *)((pointer_addr_t)v1_record + v2_record->size);
			}
			nffile_r->block_header->id = DATA_BLOCK_TYPE_2;
		}
#endif

		if ( nffile_r->block_header->id == Large_BLOCK_Type ) {
			// skip
			printf("Xstat block skipped ...\n");
			continue;
		}

		if ( nffile_r->block_header->id != DATA_BLOCK_TYPE_2 ) {
			if ( nffile_r->block_header->id == DATA_BLOCK_TYPE_1 ) {
				LogError("Can't process nfdump 1.5.x block type 1. Add --enable-compat15 to compile compatibility code. Skip block.\n");
			} else {
				LogError("Can't process block type %u. Skip block.\n", nffile_r->block_header->id);
			}
			skipped_blocks++;
			continue;
		}

		// walk every record in this block
		flow_record = nffile_r->buff_ptr;
		for ( i=0; i < nffile_r->block_header->NumRecords; i++ ) {
			switch ( flow_record->type ) {
				case CommonRecordV0Type:
				case CommonRecordType: {
					int match;
					uint32_t map_id = flow_record->ext_map;
					generic_exporter_t *exp_info = exporter_list[flow_record->exporter_sysid];
					if ( map_id >= MAX_EXTENSION_MAPS ) {
						LogError("Corrupt data file. Extension map id %u too big.\n", flow_record->ext_map);
						exit(255);
					}
					if ( extension_map_list->slot[map_id] == NULL ) {
						LogError("Corrupt data file. Missing extension map %u. Skip record.\n", flow_record->ext_map);
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);
						continue;
					}

					total_flows++;
					master_record = &(extension_map_list->slot[map_id]->master_record);
					Engine->nfrecord = (uint64_t *)master_record;
					ExpandRecord_v2( flow_record, extension_map_list->slot[map_id], exp_info ? &(exp_info->info) : NULL, master_record);

					// Time based filter
					// if no time filter is given, the result is always true
					match = twin_start && (master_record->first < twin_start || master_record->last > twin_end) ? 0 : 1;
					match &= limitflows ? stat_record.numflows < limitflows : 1;

					// filter netflow record with user supplied filter
					if ( match )
						match = (*Engine->FilterEngine)(Engine);

					if ( match == 0 ) { // record failed to pass all filters
						// increment pointer by number of bytes for netflow record
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);
						// go to next record
						continue;
					}

					// Records passed filter -> continue record processing
					// Update statistics
					UpdateStat(&stat_record, master_record);

					// update number of flows matching a given map
					extension_map_list->slot[map_id]->ref_count++;

					// dispatch the matched record by processing mode
					if ( flow_stat ) {
						AddFlow(flow_record, master_record, extension_map_list->slot[map_id]);
						if ( element_stat ) {
							AddStat(flow_record, master_record);
						}
					} else if ( element_stat ) {
						AddStat(flow_record, master_record);
					} else if ( sort_flows ) {
						InsertFlow(flow_record, master_record, extension_map_list->slot[map_id]);
					} else {
						if ( write_file ) {
							AppendToBuffer(nffile_w, (void *)flow_record, flow_record->size);
							if ( xstat )
								UpdateXStat(xstat, master_record);
						} else if ( print_record ) {
							char *string;
							// if we need to print out this record
							print_record(master_record, &string, tag);
							if ( string ) {
								if ( limitflows ) {
									if ( (stat_record.numflows <= limitflows) )
										printf("%s\n", string);
								} else
									printf("%s\n", string);
							}
						} else {
							// mutually exclusive conditions should prevent executing this code
							// this is buggy!
							printf("Bug! - this code should never get executed in file %s line %d\n", __FILE__, __LINE__);
						}
					} // sort_flows - else
					} break;
				case ExtensionMapType: {
					extension_map_t *map = (extension_map_t *)flow_record;
					if ( Insert_Extension_Map(extension_map_list, map) && write_file ) {
						// flush new map
						AppendToBuffer(nffile_w, (void *)map, map->size);
					} // else map already known and flushed
					} break;
				case ExporterRecordType:
				case SamplerRecordype:
					// Silently skip exporter records
					break;
				case ExporterInfoRecordType: {
					int ret = AddExporterInfo((exporter_info_record_t *)flow_record);
					if ( ret != 0 ) {
						if ( write_file && ret == 1 )
							AppendToBuffer(nffile_w, (void *)flow_record, flow_record->size);
					} else {
						LogError("Failed to add Exporter Record\n");
					}
					} break;
				case ExporterStatRecordType:
					AddExporterStat((exporter_stats_record_t *)flow_record);
					break;
				case SamplerInfoRecordype: {
					int ret = AddSamplerInfo((sampler_info_record_t *)flow_record);
					if ( ret != 0 ) {
						if ( write_file && ret == 1 )
							AppendToBuffer(nffile_w, (void *)flow_record, flow_record->size);
					} else {
						LogError("Failed to add Sampler Record\n");
					}
					} break;
				default: {
					LogError("Skip unknown record type %i\n", flow_record->type);
				}
			}

			// Advance pointer by number of bytes for netflow record
			flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);

		} // for all records

		// check if we are done, due to -c option
		if ( limitflows )
			done = stat_record.numflows >= limitflows;

	} // while

	CloseFile(nffile_r);

	// flush output file
	if ( write_file ) {
		// flush current buffer to disc
		if ( nffile_w->block_header->NumRecords ) {
			if ( WriteBlock(nffile_w) <= 0 ) {
				LogError("Failed to write output buffer to disk: '%s'" , strerror(errno));
			}
		}

		if ( xstat ) {
			if ( WriteExtraBlock(nffile_w, xstat->block_header ) <= 0 ) {
				LogError("Failed to write xstat buffer to disk: '%s'" , strerror(errno));
			}
		}

		/* Stat info */
		if ( write_file ) {
			/* Copy stat info and close file */
			memcpy((void *)nffile_w->stat_record, (void *)&stat_record, sizeof(stat_record_t));
			CloseUpdateFile(nffile_w, nffile_r->file_header->ident );
			nffile_w = DisposeFile(nffile_w);
		} // else stdout
	}

	PackExtensionMapList(extension_map_list);

	DisposeFile(nffile_r);
	return stat_record;

} // End of process_data
/*
 * process_data (profiler variant)
 * Reads all flow files from the input sequence and fans every record out
 * to the configured profile channels: a record matching a channel's
 * filter updates that channel's statistics and - unless the profile is a
 * shadow profile without a file - is appended to the channel's output
 * buffer. Map/exporter/sampler records are replicated to every channel
 * that writes a file.
 */
static void process_data(profile_channel_info_t *channels, unsigned int num_channels, time_t tslot, int do_xstat) {
common_record_t *flow_record;
nffile_t *nffile;
FilterEngine_data_t *engine;
int i, j, done, ret ;

#ifdef COMPAT15
int v1_map_done = 0;
#endif

	nffile = GetNextFile(NULL, 0, 0);
	if ( !nffile ) {
		LogError("GetNextFile() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		return;
	}
	if ( nffile == EMPTY_LIST ) {
		LogError("Empty file list. No files to process\n");
		return;
	}

	// store infos away for later use
	// although multiple files may be processed, it is assumed that all
	// have the same settings
	is_anonymized = IP_ANONYMIZED(nffile);
	strncpy(Ident, nffile->file_header->ident, IDENTLEN);
	Ident[IDENTLEN-1] = '\0';

	done = 0;
	while ( !done ) {

		// get next data block from file
		ret = ReadBlock(nffile);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT )
					LogError("Skip corrupt data file '%s'\n",GetCurrentFilename());
				else
					LogError("Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF: {
				nffile_t *next = GetNextFile(nffile, 0, 0);
				if ( next == EMPTY_LIST ) {
					done = 1;
				}
				if ( next == NULL ) {
					done = 1;
					LogError("Unexpected end of file list\n");
				}
				continue;
				} break; // not really needed
		}

#ifdef COMPAT15
		// Legacy nfdump 1.5.x block: synthesize the implicit extension map
		// once, then convert every record in the block to the v2 layout.
		if ( nffile->block_header->id == DATA_BLOCK_TYPE_1 ) {
			common_record_v1_t *v1_record = (common_record_v1_t *)nffile->buff_ptr;
			// create an extension map for v1 blocks
			if ( v1_map_done == 0 ) {
				extension_map_t *map = malloc(sizeof(extension_map_t) + 2 * sizeof(uint16_t) );
				if ( ! map ) {
					LogError("malloc() allocation error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
					exit(255);
				}
				map->type = ExtensionMapType;
				map->size = sizeof(extension_map_t) + 2 * sizeof(uint16_t);
				if (( map->size & 0x3 ) != 0 ) {
					map->size += 4 - ( map->size & 0x3 );
				}
				map->map_id = INIT_ID;
				map->ex_id[0] = EX_IO_SNMP_2;
				map->ex_id[1] = EX_AS_2;
				map->ex_id[2] = 0;
				map->extension_size = 0;
				map->extension_size += extension_descriptor[EX_IO_SNMP_2].size;
				map->extension_size += extension_descriptor[EX_AS_2].size;
				if ( Insert_Extension_Map(extension_map_list, map) ) {
					int j;
					for ( j=0; j < num_channels; j++ ) {
						if ( channels[j].nffile != NULL) {
							// flush new map
							AppendToBuffer(channels[j].nffile, (void *)map, map->size);
						}
					}
				} // else map already known and flushed
				v1_map_done = 1;
			}
			// convert the records to v2
			for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
				common_record_t *v2_record = (common_record_t *)v1_record;
				Convert_v1_to_v2((void *)v1_record);
				// now we have a v2 record -> use size of v2_record->size
				v1_record = (common_record_v1_t *)((pointer_addr_t)v1_record + v2_record->size);
			}
			nffile->block_header->id = DATA_BLOCK_TYPE_2;
		}
#endif

		if ( nffile->block_header->id == Large_BLOCK_Type ) {
			// skip
			continue;
		}

		if ( nffile->block_header->id != DATA_BLOCK_TYPE_2 ) {
			LogError("Can't process block type %u. Skip block.\n", nffile->block_header->id);
			continue;
		}

		// walk every record in this block
		flow_record = nffile->buff_ptr;
		for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
			switch ( flow_record->type ) {
				case CommonRecordType: {
					generic_exporter_t *exp_info = exporter_list[flow_record->exporter_sysid];
					uint32_t map_id = flow_record->ext_map;
					master_record_t *master_record;

					if ( extension_map_list->slot[map_id] == NULL ) {
						LogError("Corrupt data file. Missing extension map %u. Skip record.\n", flow_record->ext_map);
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);
						continue;
					}

					master_record = &(extension_map_list->slot[map_id]->master_record);
					ExpandRecord_v2( flow_record, extension_map_list->slot[flow_record->ext_map], exp_info ? &(exp_info->info) : NULL, master_record);

					// fan the expanded record out to every matching channel
					for ( j=0; j < num_channels; j++ ) {
						int match;

						// apply profile filter
						(channels[j].engine)->nfrecord = (uint64_t *)master_record;
						engine = channels[j].engine;
						match = (*engine->FilterEngine)(engine);

						// if profile filter failed -> next profile
						if ( !match )
							continue;

						// filter was successful -> continue record processing

						// update statistics
						UpdateStat(&channels[j].stat_record, master_record);
						if ( channels[j].nffile )
							UpdateStat(channels[j].nffile->stat_record, master_record);

						if ( channels[j].xstat )
							UpdateXStat(channels[j].xstat, master_record);

						// do we need to write data to new file - shadow profiles do not have files.
						// check if we need to flush the output buffer
						if ( channels[j].nffile != NULL ) {
							// write record to output buffer
							AppendToBuffer(channels[j].nffile, (void *)flow_record, flow_record->size);
						}

					} // End of for all channels

					} break;
				case ExtensionMapType: {
					extension_map_t *map = (extension_map_t *)flow_record;

					if ( Insert_Extension_Map(extension_map_list, map) ) {
						int j;
						for ( j=0; j < num_channels; j++ ) {
							if ( channels[j].nffile != NULL ) {
								// flush new map
								AppendToBuffer(channels[j].nffile, (void *)map, map->size);
							}
						}
					} // else map already known and flushed

					} break;
				case ExporterInfoRecordType: {
					int ret = AddExporterInfo((exporter_info_record_t *)flow_record);
					if ( ret != 0 ) {
						int j;
						for ( j=0; j < num_channels; j++ ) {
							if ( channels[j].nffile != NULL && ret == 1) {
								// flush new exporter
								AppendToBuffer(channels[j].nffile, (void *)flow_record, flow_record->size);
							}
						}
					} else {
						LogError("Failed to add Exporter Record\n");
					}
					} break;
				case SamplerInfoRecordype: {
					int ret = AddSamplerInfo((sampler_info_record_t *)flow_record);
					if ( ret != 0 ) {
						int j;
						for ( j=0; j < num_channels; j++ ) {
							if ( channels[j].nffile != NULL && ret == 1 ) {
								// flush new map
								AppendToBuffer(channels[j].nffile, (void *)flow_record, flow_record->size);
							}
						}
					} else {
						LogError("Failed to add Sampler Record\n");
					}
					} break;
				case ExporterRecordType:
				case SamplerRecordype:
				case ExporterStatRecordType:
					// Silently skip exporter records
					break;
				default: {
					LogError("Skip unknown record type %i\n", flow_record->type);
				}
			}

			// Advance pointer by number of bytes for netflow record
			flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);

		} // End of for all umRecords

	} // End of while !done

	// do we need to write data to new file - shadow profiles do not have files.
	for ( j=0; j < num_channels; j++ ) {
		if ( channels[j].nffile != NULL ) {
			// flush output buffer
			if ( channels[j].nffile->block_header->NumRecords ) {
				if ( WriteBlock(channels[j].nffile) <= 0 ) {
					LogError("Failed to write output buffer to disk: '%s'" , strerror(errno));
				}
			}
		}
	}

	CloseFile(nffile);
	DisposeFile(nffile);

} // End of process_data