void FileTransferWidget::ReadyRead() { switch (m_transferMode) { case TM_RECIEVE_CLIENT: { if (m_currentStatus == FT_WAIT_FOR_HELLO) { QString cmd(m_socket->readAll()); qDebug()<<"File transfer cmd recieved: "<<cmd; if (cmd.contains("MRA_FT_HELLO") && cmd.contains(m_req.From)) { GetNextFile(); } } else { m_currentFileSize += m_socket->bytesAvailable(); m_speedBytes += m_socket->bytesAvailable(); m_ui->doneLabel->setText(MRIMCommonUtils::GetFileSize(m_currentFileSize)); m_ui->progressBar->setValue(m_currentFileSize); m_currentFile.write(m_socket->readAll()); if (m_currentFileSize >= m_filesHashIter->value()) { //done with current file m_currentFile.close(); m_currentStatus = FT_TRANSFER_FILE_COMPLETED; GetNextFile(); } } } break; case TM_SEND_SERVER: { if (m_currentStatus == FT_WAIT_FOR_HELLO) { QString cmd(m_socket->readAll()); qDebug()<<"File transfer cmd recieved: "<<cmd; if (cmd.contains("MRA_FT_HELLO") && cmd.contains(m_req.To)) { SendCmd("MRA_FT_HELLO "+m_client->GetAccountInfo().account_name); m_currentStatus = FT_WAIT_FOR_TRANSFER; } } else if (m_currentStatus == FT_WAIT_FOR_TRANSFER && m_sentFilesCount < m_req.FilesInfo.count()) { QString cmdStr(m_socket->readAll()); qDebug()<<"File transfer cmd recieved: "<<cmdStr; QStringList cmd = cmdStr.split(' '); if (cmd.contains("MRA_FT_GET_FILE")) { m_currentStatus = FT_TRANSFER; SendFile(cmd[1]); } } } break; } }
// Returns a newly allocated list of files added within the last 12 hours
// that are available, shared, SHA1-hashed and match the search schema (if
// any).  Hit counters are bumped for each returned file.  Returns NULL when
// nothing qualifies; nMaximum (when non-zero) caps the result count.
CFileList* CLibraryMaps::WhatsNew(const CQuerySearch* pSearch, int nMaximum) const
{
	ASSUME_LOCK( Library.m_pSection );

	const DWORD tNow = static_cast< DWORD >( time( NULL ) );
	CFileList* pResults = NULL;

	for ( POSITION pos = GetFileIterator() ; pos ; )
	{
		CLibraryFile* pFile = GetNextFile( pos );

		const bool bEligible = pFile->IsAvailable()
			&& pFile->IsShared()
			&& pFile->m_oSHA1
			&& ( ! pSearch->m_pSchema || pSearch->m_pSchema->Equals( pFile->m_pSchema ) );
		if ( ! bEligible )
			continue;

		const DWORD nTime = pFile->GetCreationTime();
		if ( ! nTime || nTime + 12 * 60 * 60 <= tNow )	// outside the 12-hour window
			continue;

		pFile->m_nHitsToday++;
		pFile->m_nHitsTotal++;

		if ( pResults == NULL )
			pResults = new CFileList;
		pResults->AddTail( pFile );

		if ( nMaximum && pResults->GetCount() >= nMaximum )
			break;
	}

	return pResults;
}
// eng: Finds the first ptc-file in emitters folder // rus: Поиск первого ptc-файла в папке с эмиттерами const char* MP_Platform_WIN_POSIX::GetFirstFile() { std::string ptc_path=GetPathToPTC(); #ifdef _WINDOWS ptc_path+="*.ptc"; const wchar_t* mask=utf8_to_wchar(ptc_path.c_str()); hFindFile=FindFirstFileW(mask,&fd); if (hFindFile!=INVALID_HANDLE_VALUE) { file=wchar_to_utf8(fd.cFileName); return file.c_str(); } FindClose(hFindFile); #else dir=opendir(ptc_path.c_str()); if (dir) return GetNextFile(); #endif return NULL; }
// Looks up a collection entry by URN string.  Every hash type we understand
// is parsed out of the URN up front; the first file whose corresponding hash
// matches is returned, or NULL when nothing matches.
CCollectionFile::File* CCollectionFile::FindByURN(LPCTSTR pszURN)
{
	Hashes::Sha1Hash oSHA1;
	Hashes::TigerHash oTiger;
	Hashes::Md5Hash oMD5;
	Hashes::Ed2kHash oED2K;
	Hashes::BtHash oBTH;

	oSHA1.fromUrn( pszURN );
	oMD5.fromUrn( pszURN );
	oTiger.fromUrn( pszURN );
	oED2K.fromUrn( pszURN );
	oBTH.fromUrn( pszURN ) || oBTH.fromUrn< Hashes::base16Encoding >( pszURN );

	for ( POSITION pos = GetFileIterator(); pos; )
	{
		File* pFile = GetNextFile( pos );
		if ( validAndEqual( oSHA1, pFile->m_oSHA1 )
			|| validAndEqual( oMD5, pFile->m_oMD5 )
			|| validAndEqual( oTiger, pFile->m_oTiger )
			|| validAndEqual( oED2K, pFile->m_oED2K )
			|| validAndEqual( oBTH, pFile->m_oBTH ) )
			return pFile;
	}

	return NULL;
}
// Primes the enumeration with the first file name.  Returns a non-zero
// opaque position when a file was found, or (FX_POSITION)0 when the
// enumeration is empty.
FX_POSITION CFX_FontSourceEnum_File::GetStartPosition() {
  m_wsNext = GetNextFile().UTF8Decode();
  return m_wsNext.GetLength() != 0 ? (FX_POSITION)-1 : (FX_POSITION)0;
}
void CLibraryMaps::Clear() { for ( POSITION pos = GetFileIterator() ; pos ; ) delete GetNextFile( pos ); ASSERT( m_pIndexMap.IsEmpty() ); ASSERT( m_pPathMap.IsEmpty() ); #ifdef _DEBUG for ( POSITION p = m_pPathMap.GetStartPosition() ; p ; ) { CString k; CLibraryFile* v; m_pPathMap.GetNextAssoc( p, k, v ); TRACE ( _T("m_pPathMap lost : %ls = 0x%08x\n"), (LPCTSTR)k, v ); } #endif ZeroMemory( m_pSHA1Map, HASH_SIZE * sizeof *m_pSHA1Map ); ZeroMemory( m_pTigerMap, HASH_SIZE * sizeof *m_pTigerMap ); ZeroMemory( m_pED2KMap, HASH_SIZE * sizeof *m_pED2KMap ); ZeroMemory( m_pBTHMap, HASH_SIZE * sizeof *m_pBTHMap ); ZeroMemory( m_pMD5Map, HASH_SIZE * sizeof *m_pMD5Map ); m_nFiles = 0; m_nVolume = 0; }
// Pushes this folder's metadata down into its member files.  For every file
// whose schema is contained by the folder schema, a clone of the file's
// metadata is merged with the folder's XML and, on success, installed as the
// file's new metadata.  Returns FALSE only when the folder has no schema or
// no XML to copy from.
BOOL CAlbumFolder::MetaToFiles(BOOL bAggressive)
{
	if ( m_pSchema == NULL || m_pXML == NULL )
		return FALSE;

	for ( POSITION pos = GetFileIterator() ; pos ; )
	{
		CLibraryFile* pFile = GetNextFile( pos );

		CSchema* pFileSchema = pFile->m_pSchema;
		if ( pFileSchema == NULL )
			continue;

		CSchemaChild* pChild = m_pSchema->GetContained( pFileSchema->m_sURI );
		if ( pChild == NULL )
			continue;

		CXMLElement* pCloned = pFile->m_pMetadata->Clone();
		if ( ! pChild->MemberCopy( m_pXML, pCloned, TRUE, bAggressive ) )
		{
			delete pCloned;
			continue;
		}

		// Merge succeeded: wrap the copy in a fresh schema root, hand it to
		// the file, then discard our temporary tree.
		CXMLElement* pRoot = pFileSchema->Instantiate( TRUE );
		pRoot->AddElement( pCloned );
		pFile->SetMetadata( pRoot );
		delete pRoot;
	}

	return TRUE;
}
/*
 * Link any object and library files.  Returns LINK_NOACTION if there was no
 * file to compile, LINK_ERROR if the linker returned a bad status code or
 * if the compiler could not be spawned, or else LINK_SUCCESS if everything
 * went smoothly.
 */
static int link( const OPT_STORAGE *cmdOpts, CmdLine *linkCmdLine )
/*****************************************************************/
{
    char **             args;
    char *              curFile;
    int                 fileType;
    int                 objCount = 0;
    int                 status;
    char *              defFile;
    char *              lastDefFile = NULL;

    cmdOpts = cmdOpts;          /* silence "unused parameter" */

    /*** Process all object and library file names ***/
    for( ;; ) {
        curFile = GetNextFile( &fileType, TYPE_OBJ_FILE, TYPE_LIB_FILE,
                               TYPE_RES_FILE, TYPE_INVALID_FILE );
        if( curFile == NULL )  break;
        AppendCmdLine( linkCmdLine, CL_L_FILENAMES_SECTION, curFile );
        objCount++;
    }

    /*** Process .def files, keeping only the last one seen ***/
    for( ;; ) {
        defFile = GetNextFile( NULL, TYPE_DEF_FILE, TYPE_INVALID_FILE );
        if( defFile == NULL )  break;
        if( lastDefFile != NULL ) {
            Warning( "Overriding %s with %s", lastDefFile, defFile );
        }
        lastDefFile = defFile;
    }
    if( lastDefFile != NULL ) {
        AppendFmtCmdLine( linkCmdLine, CL_L_OPTS_SECTION, "/DEF:%s",
                          lastDefFile );
    } else if( objCount == 0 ) {
        /* neither input files nor a .def file -- nothing to do */
        return( LINK_NOACTION );
    }

    /*** Spawn the linker ***/
    AppendCmdLine( linkCmdLine, CL_L_PROGNAME_SECTION, LINKER );
    args = MergeCmdLine( linkCmdLine, INVALID_MERGE_CMDLINE );
    status = spawnvp( P_WAIT, LINKER, (const char **)args );
    if( status != 0 ) {
        if( status == -1 || status == 255 ) {
            FatalError( "Unable to execute '%s'", LINKER );
        }
        return( LINK_ERROR );
    }
    return( LINK_SUCCESS );
}
//call this over and over until it returns NULL byte* IsoSwarmer::GetNextFile(UINT& data_length, CString& file_name) { if(mv_process.size()==0) return NULL; file_name=mv_process[0].c_str(); CString tmp="c:\\syncher\\rcv\\Swarmer.New\\"+file_name; //open the file HANDLE m_file = CreateFile((LPCSTR)tmp, // open file at local_path GENERIC_READ, // open for reading FILE_SHARE_READ, // share for reading NULL, // no security OPEN_EXISTING, // existing file only FILE_ATTRIBUTE_NORMAL, // normal file NULL); // no attr. template if(m_file==INVALID_HANDLE_VALUE || m_file==NULL){ TRACE("IsoSwarmer encountered a bad input file. Failed Open. filename=%s\n",(LPCSTR)mv_process[0].c_str()); mv_process.erase(mv_process.begin()+0); //bad file for some reason, this should never happen, but we can just ignore this evil file. return GetNextFile(data_length,file_name); } DWORD hsize=0; data_length=GetFileSize(m_file,&hsize); //we will only work with up to 2^32 size files for now. Thats a 4 gig file. if(data_length<300000 || data_length>600000000){ //evil file check TRACE("IsoSwarmer encountered a bad input file. Bad Size. filename=%s size=%d\n",(LPCSTR)mv_process[0].c_str(),data_length); mv_process.erase(mv_process.begin()+0); CloseHandle(m_file); return GetNextFile(data_length,file_name); } byte *buffer=new byte[data_length]; BOOL stat=ReadFile(m_file,buffer,data_length,(DWORD*)&data_length,NULL); CloseHandle(m_file); if(!stat){ //this would be rather wierd if this was false. EVIL FILE. TRACE("IsoSwarmer encountered a bad input file. Failed Read. filename=%s size=%d\n",(LPCSTR)mv_process[0].c_str(),data_length); mv_process.erase(mv_process.begin()+0); //forget about this evil file delete []buffer; return GetNextFile(data_length,file_name); } //WARNING: MEMORY LEAK POSSIBLITY return buffer; //THE USER OF THIS FUNCTION IS RESPONSIBLE FOR DELETING THIS ALLOCATED MEMORY! }
/**
 * Interface to CTestList-derived classes for getting all information about the next test to be run.
 *
 * @return
 * Returns a pointer to a CTestInfo object containing all available information about the next test.
 * Ownership of the object passes to the caller; NULL is returned when no
 * more tests remain.
 */
CTestInfo* CWineTest::GetNextTestInfo()
{
    // Drain tests from the current file; when it is exhausted, advance to
    // the next file via GetNextFile().
    while(!m_CurrentFile.empty() || GetNextFile())
    {
        try
        {
            while(GetNextTest())
            {
                /* If the user specified a test through the command line, check this here */
                if(!Configuration.GetTest().empty() && Configuration.GetTest() != m_CurrentTest)
                    continue;

                {
                    auto_ptr<CTestInfo> TestInfo(new CTestInfo());
                    size_t UnderscorePosition;

                    /* Build the command line */
                    TestInfo->CommandLine = m_TestPath;
                    TestInfo->CommandLine += m_CurrentFile;
                    TestInfo->CommandLine += ' ';
                    TestInfo->CommandLine += AsciiToUnicode(m_CurrentTest);

                    /* Store the Module name */
                    // File names are expected to look like "<module>_<rest>";
                    // everything before the last underscore is the module.
                    UnderscorePosition = m_CurrentFile.find_last_of('_');

                    if(UnderscorePosition == m_CurrentFile.npos)
                    {
                        stringstream ss;

                        ss << "Invalid test file name: " << UnicodeToAscii(m_CurrentFile) << endl;
                        SSEXCEPTION;
                    }

                    TestInfo->Module = UnicodeToAscii(m_CurrentFile.substr(0, UnderscorePosition));

                    /* Store the test */
                    TestInfo->Test = m_CurrentTest;

                    // Hand ownership to the caller.
                    return TestInfo.release();
                }
            }
        }
        catch(CTestException& e)
        {
            // Report the failure and skip the remainder of this file.
            stringstream ss;

            ss << "An exception occurred trying to list tests for: " << UnicodeToAscii(m_CurrentFile) << endl;
            StringOut(ss.str());
            StringOut(e.GetMessage());
            StringOut("\n");
            m_CurrentFile.clear();
            // NOTE(review): m_ListBuffer is freed here but not reset to NULL
            // — confirm the owner reassigns it before any further use, else
            // this risks a double delete[].
            delete[] m_ListBuffer;
        }
    }

    // No more files and no more tests.
    return NULL;
}
//CString CParser::ParseProject(CString sProject) CString CParser::ParseProject(CString sProject) { m_sProgress = "ParseProject " + sProject + "\r\n"; //CString sFile = "x"; // = "temp"; //CString sRetVal = "true"; //CParserDlg *dlg = (CParserDlg*)GetParent(this); //HWND *hwnd = GetParent(this); //CObject *obj = GetParent(this); //FILE *mfpProject; //int nsLen; //MessageBox(m_sPjtFile); // / * m_fpProject = fopen(sProject, "r"); if(m_fpProject == NULL) { //MessageBox("Project file opening error"); //CMsgDlg mdlg("Error in opening project file"); //mdlg.DoModal(); //return "Error in opening project file"; return "Error in opening project file"; //ExitPjt(); } // * / while(1) { m_sFile = GetNextFile(); //nsLen = sFile.GetLength(); if(m_sFile == "") // empty mean file not found break; // process file //sRetVal = ParseFile(sFile); WinExec("Notepad " + m_sFile, SW_SHOW); ParseFile(); //CMsgDlg mdlg(sRetVal); //mdlg.DoModal(); } fclose(m_fpProject); //CString sErrors = "no error found"; //if(m_sErrors.IsEmpty() == FALSE) //sErrors = m_sErrors; //CMsgDlg mdlg(sErrors); //mdlg.DoModal(); //m_sProgress = m_sProgress + "END ParseProject " + sProject; //return "PROGRESS\r\n" + m_sProgress + "\r\nERRORS\r\n" + sErrors; return m_sProgress; //return sRetVal; // } // ParseProject()
/*
 * Spawn the resource compiler.  Exactly one .rc file may be compiled per
 * invocation; a second pending .rc file is a fatal error.  Returns
 * RC_NOACTION when there is nothing to compile, RC_ERROR on a bad exit
 * status, and RC_SUCCESS otherwise.
 */
static int res_compile( const OPT_STORAGE *cmdOpts, CmdLine *cmdLine )
/********************************************************************/
{
    char **             argv;
    char *              rcFile;
    char *              extraFile;
    int                 status;
    int                 idx;

    /*** There must be exactly one .rc file ***/
    rcFile = GetNextFile( NULL, TYPE_RC_FILE, TYPE_INVALID_FILE );
    if( rcFile == NULL )  return( RC_NOACTION );
    extraFile = GetNextFile( NULL, TYPE_RC_FILE, TYPE_INVALID_FILE );
    if( extraFile != NULL ) {
        FatalError( "Can only compile one file at a time" );
    }

    /*** Build the resource compiler's command line ***/
    AppendCmdLine( cmdLine, RC_PROGNAME_SECTION, RESCOMPILER );
    AppendCmdLine( cmdLine, RC_FILENAMES_SECTION, rcFile );
    argv = MergeCmdLine( cmdLine, RC_PROGNAME_SECTION, RC_OPTS_SECTION,
                         RC_FILENAMES_SECTION, INVALID_MERGE_CMDLINE );

    /*** Optionally echo the command, then spawn the compiler ***/
    if( cmdOpts->showwopts ) {
        for( idx = 0; argv[idx] != NULL; idx++ ) {
            fprintf( stderr, "%s ", argv[idx] );
        }
        fprintf( stderr, "\n" );
    }
    if( !cmdOpts->noinvoke ) {
        status = spawnvp( P_WAIT, RESCOMPILER, (const char **)argv );
        if( status != 0 ) {
            if( status == -1 || status == 255 ) {
                FatalError( "Error executing '%s'", RESCOMPILER );
            }
            return( RC_ERROR );
        }
    }
    return( RC_SUCCESS );
}
// Counts collection entries that are neither complete on disk nor currently
// being downloaded.
int CCollectionFile::GetMissingCount() const
{
	int nMissing = 0;
	for ( POSITION pos = GetFileIterator(); pos; )
	{
		const File* pFile = GetNextFile( pos );
		if ( ! ( pFile->IsComplete() || pFile->IsDownloading() ) )
			++nMissing;
	}
	return nMissing;
}
// Depth-first enumeration of font files.  m_FolderQueue acts as an explicit
// stack of open directory handles (deepest last); m_FolderPaths holds the
// root folders still to be scanned (consumed from the back).  Returns the
// next regular file's full path, or an empty string once every root has
// been exhausted.
CFX_ByteString CFX_FontSourceEnum_File::GetNextFile() {
  // Resume from the deepest open folder, or open the next root folder.
  FX_FileHandle* pCurHandle = m_FolderQueue.GetSize() != 0 ? m_FolderQueue.GetDataPtr(m_FolderQueue.GetSize() - 1)->pFileHandle : nullptr;
  if (!pCurHandle) {
    if (m_FolderPaths.GetSize() < 1)
      return "";
    pCurHandle = FX_OpenFolder(m_FolderPaths[m_FolderPaths.GetSize() - 1].c_str());
    FX_HandleParentPath hpp;
    hpp.pFileHandle = pCurHandle;
    hpp.bsParentPath = m_FolderPaths[m_FolderPaths.GetSize() - 1];
    m_FolderQueue.Add(hpp);
  }
  CFX_ByteString bsName;
  bool bFolder;
  CFX_ByteString bsFolderSpearator = CFX_ByteString::FromUnicode(CFX_WideString(FX_GetFolderSeparator()));
  while (true) {
    if (!FX_GetNextFile(pCurHandle, &bsName, &bFolder)) {
      // Current folder exhausted: close it and pop back to its parent.
      FX_CloseFolder(pCurHandle);
      m_FolderQueue.RemoveAt(m_FolderQueue.GetSize() - 1);
      if (m_FolderQueue.GetSize() == 0) {
        // Finished an entire root; recurse to start on the next root (if any).
        m_FolderPaths.RemoveAt(m_FolderPaths.GetSize() - 1);
        return m_FolderPaths.GetSize() != 0 ? GetNextFile() : "";
      }
      pCurHandle = m_FolderQueue.GetDataPtr(m_FolderQueue.GetSize() - 1)->pFileHandle;
      continue;
    }
    if (bsName == "." || bsName == "..")
      continue;  // skip self/parent pseudo-entries
    if (bFolder) {
      // Descend into the subfolder; entries that cannot be opened are skipped.
      FX_HandleParentPath hpp;
      hpp.bsParentPath = m_FolderQueue.GetDataPtr(m_FolderQueue.GetSize() - 1)->bsParentPath + bsFolderSpearator + bsName;
      hpp.pFileHandle = FX_OpenFolder(hpp.bsParentPath.c_str());
      if (!hpp.pFileHandle)
        continue;
      m_FolderQueue.Add(hpp);
      pCurHandle = hpp.pFileHandle;
      continue;
    }
    // Regular file: build its full path and stop.
    bsName = m_FolderQueue.GetDataPtr(m_FolderQueue.GetSize() - 1)->bsParentPath + bsFolderSpearator + bsName;
    break;
  }
  return bsName;
}
// Releases every contained file object and all descriptive state, returning
// the collection to an empty, reusable condition.
void CCollectionFile::Close()
{
	for ( POSITION pos = GetFileIterator(); pos; )
		delete GetNextFile( pos );
	m_pFiles.RemoveAll();

	delete m_pMetadata;
	m_pMetadata = NULL;

	m_sTitle.Empty();
	m_sThisURI.Empty();
	m_sParentURI.Empty();
}
// eng: Finds the first ptc-file in emitters folder // rus: Поиск первого ptc-файла в папке с эмиттерами const char* MP_Platform_COCOS::GetFirstFile() { std::string ptc_path=GetPathToPTC(); asset_dir=AAssetManager_openDir(cocos2d::FileUtilsAndroid::getAssetManager(), ptc_path_asset.c_str()); if (asset_dir) { const char* file=GetNextFile(); return file; } return NULL; }
//-------------------------------------------------------------------// // GetNextFile() // //-------------------------------------------------------------------// // This function is called by the end user to find the next file. // // We get the most-recently successful finder object and try it // again. If it fails, we need to walk back up the previously // created finder objects in the array. // Each time GetNextFile() returns false, we are responsible for // deleting a finder object. //-------------------------------------------------------------------// bool FileScan::GetNextFile() { // Try the last successful finder object. int nFinderIndex = Finders.GetUpperBound(); bool bReturn = GetNextFile( nFinderIndex ); // We will be done only when we have come back up past the first Finders array item. // We recurse back upwards as far as possible before giving up. while ( !bReturn && nFinderIndex > 0 ) { // Unwind a finder from the array. delete Finders[ nFinderIndex ]; Finders.RemoveAt( nFinderIndex ); Results.RemoveAt( nFinderIndex ); nFinderIndex--; bReturn = GetNextFile( nFinderIndex ); } return bReturn; }
void GetFiles(char dir[], int* numItems, int parentItem) { char fileName[MAX_PATHNAME_LEN], searchPath[MAX_PATHNAME_LEN]; strcpy (searchPath, dir); strcat (searchPath, "\\*"); if (!GetFirstFile (searchPath, 1, 1, 0, 0, 0, 0, fileName)) // has at least one file { InsertListItem (Main_pnl_handle, ERG_panel_loaded_mask_list, -1, fileName, (*numItems)++); while (!GetNextFile (fileName)) { InsertListItem (Main_pnl_handle, ERG_panel_loaded_mask_list, -1, fileName, (*numItems)++); } } }
// Renders the collection as a standalone HTML page into strBuffer: a styled
// header followed by one table row per file (index, clickable name, size).
// Each row's onclick tries to open the file locally and falls back to
// downloading it via the window.external host object.
void CCollectionFile::Render(CString& strBuffer) const
{
	// Rough size estimate: ~128 bytes per row plus the fixed template.
	strBuffer.Preallocate( GetFileCount() * 128 + 256 );

	strBuffer.Format( L"<html>\n<head>\n"
		L"<meta http-equiv=\"Content-Type\" content=\"text/html; charset=UTF-8\"/>\n"
		L"<title>%s</title>\n"
		L"<style type=\"text/css\">\n"
		L"body { margin: 0px; padding: 0px; background-color: #ffffff; color: #000000; font-family: %s; font-size: %upx; }\n"
		L"h1 { text-align: left; color: #ffffff; height: 64px; margin: 0px; padding: 20px; font-size: 10pt; font-weight: bold; background-image: url(res://Envy.exe/312); }\n"
		L"table { font-size: 8pt; width: 100%%; }\n"
		L"td { background-color: #e0e8f0; padding: 4px; }\n"
		L".num { width: 40px; text-align: center; }\n"
		L".url { text-align: left; cursor: hand; }\n"
		L".size { width: 100px; text-align: center; }\n"
		L"</style>\n</head>\n<body>\n<h1>%s</h1>\n<table>\n",
		(LPCTSTR)GetTitle(),
		(LPCTSTR)Settings.Fonts.DefaultFont, Settings.Fonts.DefaultSize,
		(LPCTSTR)GetTitle() );

	DWORD i = 1;
	for ( POSITION pos = GetFileIterator(); pos; ++i )
	{
		CCollectionFile::File* pFile = GetNextFile( pos );

		// Pick the best available hash for the link, in preference order.
		CString strURN;
		if ( pFile->m_oSHA1 )
			strURN = pFile->m_oSHA1.toUrn();
		else if ( pFile->m_oTiger )
			strURN = pFile->m_oTiger.toUrn();
		else if ( pFile->m_oED2K )
			strURN = pFile->m_oED2K.toUrn();
		else if ( pFile->m_oMD5 )
			strURN = pFile->m_oMD5.toUrn();
		else if ( pFile->m_oBTH )
			strURN = pFile->m_oBTH.toUrn();

		CString strTemp;
		strTemp.Format( L"<tr><td class=\"num\">%u</td>"
			L"<td class=\"url\" onclick=\"if ( ! window.external.open('%s') ) window.external.download('%s');\" onmouseover=\"window.external.hover('%s');\" onmouseout=\"window.external.hover('');\">%s</td>"
			L"<td class=\"size\">%s</td></tr>\n",
			i, (LPCTSTR)strURN, (LPCTSTR)strURN, (LPCTSTR)strURN,
			(LPCTSTR)pFile->m_sName,
			(LPCTSTR)Settings.SmartVolume( pFile->m_nSize ) );
		strBuffer += strTemp;
	}

	strBuffer += L"</table>\n</body>\n</html>";
}
void CAlbumFolder::Serialize(CArchive& ar, int nVersion) { POSITION pos; if ( ar.IsStoring() ) { ar << m_sSchemaURI; ar.WriteCount( m_pXML != NULL ? 1 : 0 ); if ( m_pXML ) m_pXML->Serialize( ar ); ar << m_bCollSHA1; if ( m_bCollSHA1 ) ar.Write( &m_pCollSHA1, sizeof(SHA1) ); ar << m_sName; ar << m_bExpanded; ar << m_bAutoDelete; ar << m_sBestView; ar.WriteCount( GetFolderCount() ); for ( pos = GetFolderIterator() ; pos ; ) { CAlbumFolder* pFolder = GetNextFolder( pos ); pFolder->Serialize( ar, nVersion ); } ar.WriteCount( GetFileCount() ); for ( pos = GetFileIterator() ; pos ; ) { CLibraryFile* pFile = GetNextFile( pos ); ar << pFile->m_nIndex; } } else { CLibraryFile* pCollection = NULL; if ( m_pParent != NULL ) { ar >> m_sSchemaURI; m_pSchema = SchemaCache.Get( m_sSchemaURI ); } else {
// Returns the number of shared files in this folder plus, recursively, in
// all of its subfolders.
int CAlbumFolder::GetSharedCount() const
{
	int nShared = 0;

	// Shared files directly in this folder...
	for ( POSITION pos = GetFileIterator() ; pos ; )
	{
		if ( GetNextFile( pos )->IsShared() )
			++nShared;
	}

	// ...plus everything shared below us.
	for ( POSITION pos = GetFolderIterator() ; pos ; )
	{
		nShared += GetNextFolder( pos )->GetSharedCount();
	}

	return nShared;
}
// Resolves the nSelect-th visible row of the uploads view to either a queue
// or a file.  Display order: each non-empty queue occupies one row, followed
// by its files when the queue is expanded.  Exactly one of *ppQueue/*ppFile
// is filled on success; both are reset to NULL up front.  Returns FALSE when
// the row does not exist or the queue lock cannot be taken in time.
BOOL CUploadsCtrl::GetAt(int nSelect, CUploadQueue** ppQueue, CUploadFile** ppFile)
{
	ASSUME_LOCK( Transfers.m_pSection );

	/*int nScroll =*/ GetScrollPos( SB_VERT );

	if ( ppQueue != NULL ) *ppQueue = NULL;
	if ( ppFile != NULL ) *ppFile = NULL;

	CSingleLock pLock( &UploadQueues.m_pSection, FALSE );
	if ( ! pLock.Lock( 250 ) )
		return FALSE;

	int nRow = 0;
	for ( POSITION posQueue = GetQueueIterator() ; posQueue ; )
	{
		CUploadQueue* pQueue = GetNextQueue( posQueue );

		POSITION posFile = GetFileIterator( pQueue );
		if ( posFile == NULL )
			continue;	// empty queues are not displayed

		if ( nRow++ == nSelect )
		{
			if ( ppQueue != NULL ) *ppQueue = pQueue;
			return TRUE;
		}

		if ( ! pQueue->m_bExpanded )
			continue;	// collapsed: its files take no rows

		while ( posFile )
		{
			CUploadFile* pFile = GetNextFile( pQueue, posFile );
			if ( pFile == NULL )
				continue;
			if ( nRow++ == nSelect )
			{
				if ( ppFile != NULL ) *ppFile = pFile;
				return TRUE;
			}
		}
	}

	return FALSE;
}
// Finds the first collection entry sharing any hash with the given library
// file.  When bApply is set and a match is found, the shared file's metadata
// is applied to it.  Returns the match, or NULL when no hash agrees.
CCollectionFile::File* CCollectionFile::FindFile(CLibraryFile* pShared, BOOL bApply)
{
	File* pMatch = NULL;

	for ( POSITION pos = GetFileIterator(); ! pMatch && pos; )
	{
		File* pFile = GetNextFile( pos );
		if ( validAndEqual( pShared->m_oSHA1, pFile->m_oSHA1 )
			|| validAndEqual( pShared->m_oMD5, pFile->m_oMD5 )
			|| validAndEqual( pShared->m_oTiger, pFile->m_oTiger )
			|| validAndEqual( pShared->m_oED2K, pFile->m_oED2K )
			|| validAndEqual( pShared->m_oBTH, pFile->m_oBTH ) )
			pMatch = pFile;
	}

	if ( bApply && pMatch != NULL )
		pMatch->ApplyMetadata( pShared );

	return pMatch;
}
// Adds every file in this folder (and, when bRecursive, in all subfolders)
// to pList.  Returns the number of files added from THIS folder only —
// subfolder counts are not included, matching the original behaviour.
int CAlbumFolder::GetFileList(CLibraryList* pList, BOOL bRecursive) const
{
	int nAdded = 0;

	for ( POSITION pos = GetFileIterator() ; pos ; ++nAdded )
	{
		pList->CheckAndAdd( GetNextFile( pos )->m_nIndex );
	}

	if ( bRecursive )
	{
		for ( POSITION pos = GetFolderIterator() ; pos ; )
			GetNextFolder( pos )->GetFileList( pList, bRecursive );
	}

	return nAdded;
}
// Returns a newly allocated list of browseable files: available, shared and
// SHA1-hashed.  nMaximum (when non-zero) caps the result count.  Returns
// NULL when nothing qualifies; the caller owns the returned list.
CFileList* CLibraryMaps::Browse(int nMaximum) const
{
	ASSUME_LOCK( Library.m_pSection );

	CFileList* pResults = NULL;

	for ( POSITION pos = GetFileIterator() ; pos ; )
	{
		CLibraryFile* pFile = GetNextFile( pos );
		if ( ! pFile->IsAvailable() || ! pFile->IsShared() || ! pFile->m_oSHA1 )
			continue;

		if ( pResults == NULL )
			pResults = new CFileList;
		pResults->AddTail( pFile );

		if ( nMaximum && pResults->GetCount() >= nMaximum )
			break;
	}

	return pResults;
}
/*
 * nfdump entry point.
 *
 * Parses the command line, sets up the input file sequence (-r/-R/-M),
 * compiles the flow filter, then either prints per-file statistics (-I) or
 * processes all flow files via process_data() and prints, aggregates or
 * exports the matching flows.
 *
 * NOTE: several multi-word LogError strings below were split across lines
 * by text extraction and have been rejoined; no tokens were otherwise
 * changed.
 */
int main( int argc, char **argv )
{
struct stat stat_buff;
stat_record_t sum_stat;
printer_t print_header, print_record;
nfprof_t profile_data;
char *rfile, *Rfile, *Mdirs, *wfile, *ffile, *filter, *tstring, *stat_type;
char *byte_limit_string, *packet_limit_string, *print_format, *record_header;
char *print_order, *query_file, *UnCompress_file, *nameserver, *aggr_fmt;
int c, ffd, ret, element_stat, fdump;
int i, user_format, quiet, flow_stat, topN, aggregate, aggregate_mask, bidir;
int print_stat, syntax_only, date_sorted, do_tag, compress, do_xstat;
int plain_numbers, GuessDir, pipe_output, csv_output;
time_t t_start, t_end;
uint32_t limitflows;
char Ident[IDENTLEN];

	/* Default every option and global counter. */
	rfile = Rfile = Mdirs = wfile = ffile = filter = tstring = stat_type = NULL;
	byte_limit_string = packet_limit_string = NULL;
	fdump = aggregate = 0;
	aggregate_mask = 0;
	bidir = 0;
	t_start = t_end = 0;
	syntax_only = 0;
	topN = -1;
	flow_stat = 0;
	print_stat = 0;
	element_stat = 0;
	do_xstat = 0;
	limitflows = 0;
	date_sorted = 0;
	total_bytes = 0;
	total_flows = 0;
	skipped_blocks = 0;
	do_tag = 0;
	quiet = 0;
	user_format = 0;
	compress = 0;
	plain_numbers = 0;
	pipe_output = 0;
	csv_output = 0;
	is_anonymized = 0;
	GuessDir = 0;
	nameserver = NULL;

	print_format = NULL;
	print_header = NULL;
	print_record = NULL;
	print_order = NULL;
	query_file = NULL;
	UnCompress_file = NULL;
	aggr_fmt = NULL;
	record_header = NULL;
	Ident[0] = '\0';

	/* Option parsing.  Several cases exit() directly (query/convert modes). */
	while ((c = getopt(argc, argv, "6aA:Bbc:D:E:s:hHn:i:j:f:qzr:v:w:K:M:NImO:R:XZt:TVv:x:l:L:o:")) != EOF) {
		switch (c) {
			case 'h':
				usage(argv[0]);
				exit(0);
				break;
			case 'a':
				aggregate = 1;
				break;
			case 'A':
				if ( !ParseAggregateMask(optarg, &aggr_fmt ) ) {
					exit(255);
				}
				aggregate_mask = 1;
				break;
			case 'B':
				GuessDir = 1;
				/* deliberate fall through into 'b' */
			case 'b':
				if ( !SetBidirAggregation() ) {
					exit(255);
				}
				bidir	  = 1;
				// implies
				aggregate = 1;
				break;
			case 'D':
				nameserver = optarg;
				if ( !set_nameserver(nameserver) ) {
					exit(255);
				}
				break;
			case 'E':
				query_file = optarg;
				if ( !InitExporterList() ) {
					exit(255);
				}
				PrintExporters(query_file);
				exit(0);
				break;
			case 'X':
				fdump = 1;
				break;
			case 'Z':
				syntax_only = 1;
				break;
			case 'q':
				quiet = 1;
				break;
			case 'z':
				compress = 1;
				break;
			case 'c':
				limitflows = atoi(optarg);
				if ( !limitflows ) {
					LogError("Option -c needs a number > 0\n");
					exit(255);
				}
				break;
			case 's':
				stat_type = optarg;
				if ( !SetStat(stat_type, &element_stat, &flow_stat) ) {
					exit(255);
				}
				break;
			case 'V': {
				char *e1, *e2;
				e1 = "";
				e2 = "";
#ifdef NSEL
				e1 = "NSEL-NEL";
#endif
				printf("%s: Version: %s%s%s\n",argv[0], e1, e2, nfdump_version);
				exit(0);
				} break;
			case 'l':
				packet_limit_string = optarg;
				break;
			case 'K':
				LogError("*** Anonymisation moved! Use nfanon to anonymise flows!\n");
				exit(255);
				break;
			case 'H':
				do_xstat = 1;
				break;
			case 'L':
				byte_limit_string = optarg;
				break;
			case 'N':
				plain_numbers = 1;
				break;
			case 'f':
				ffile = optarg;
				break;
			case 't':
				tstring = optarg;
				break;
			case 'r':
				rfile = optarg;
				if ( strcmp(rfile, "-") == 0 )
					rfile = NULL;
				break;
			case 'm':
				print_order = "tstart";
				Parse_PrintOrder(print_order);
				date_sorted = 1;
				LogError("Option -m depricated. Use '-O tstart' instead\n");
				break;
			case 'M':
				Mdirs = optarg;
				break;
			case 'I':
				print_stat++;
				break;
			case 'o':	// output mode
				print_format = optarg;
				break;
			case 'O': {	// stat order by
				int ret;
				print_order = optarg;
				ret = Parse_PrintOrder(print_order);
				if ( ret < 0 ) {
					LogError("Unknown print order '%s'\n", print_order);
					exit(255);
				}
				date_sorted = ret == 6;		// index into order_mode
				} break;
			case 'R':
				Rfile = optarg;
				break;
			case 'w':
				wfile = optarg;
				break;
			case 'n':
				topN = atoi(optarg);
				if ( topN < 0 ) {
					LogError("TopnN number %i out of range\n", topN);
					exit(255);
				}
				break;
			case 'T':
				do_tag = 1;
				break;
			case 'i':
				strncpy(Ident, optarg, IDENT_SIZE);
				Ident[IDENT_SIZE - 1] = 0;
				if ( strchr(Ident, ' ') ) {
					LogError("Ident must not contain spaces\n");
					exit(255);
				}
				break;
			case 'j':
				UnCompress_file = optarg;
				UnCompressFile(UnCompress_file);
				exit(0);
				break;
			case 'x':
				query_file = optarg;
				InitExtensionMaps(NO_EXTENSION_LIST);
				DumpExMaps(query_file);
				exit(0);
				break;
			case 'v':
				query_file = optarg;
				QueryFile(query_file);
				exit(0);
				break;
			case '6':	// print long IPv6 addr
				Setv6Mode(1);
				break;
			default:
				usage(argv[0]);
				exit(0);
		}
	}

	/* At most one trailing argument: the pcap filter expression. */
	if (argc - optind > 1) {
		usage(argv[0]);
		exit(255);
	} else {
		/* user specified a pcap filter */
		filter = argv[optind];
		FilterFilename = NULL;
	}

	// Change Ident only
	if ( rfile && strlen(Ident) > 0 ) {
		ChangeIdent(rfile, Ident);
		exit(0);
	}

	/* Reconcile option interactions. */
	if ( (element_stat || flow_stat) && (topN == -1) )
		topN = 10;

	if ( topN < 0 )
		topN = 0;

	if ( (element_stat && !flow_stat) && aggregate_mask ) {
		LogError("Warning: Aggregation ignored for element statistics\n");
		aggregate_mask = 0;
	}

	if ( !flow_stat && aggregate_mask ) {
		aggregate = 1;
	}

	if ( rfile && Rfile ) {
		LogError("-r and -R are mutually exclusive. Plase specify either -r or -R\n");
		exit(255);
	}
	if ( Mdirs && !(rfile || Rfile) ) {
		LogError("-M needs either -r or -R to specify the file or file list. Add '-R .' for all files in the directories.\n");
		exit(255);
	}

	extension_map_list = InitExtensionMaps(NEEDS_EXTENSION_LIST);
	if ( !InitExporterList() ) {
		exit(255);
	}

	SetupInputFileSequence(Mdirs, rfile, Rfile);

	/* -I: sum and print the stat records of all input files, then exit. */
	if ( print_stat ) {
		nffile_t *nffile;
		if ( !rfile && !Rfile && !Mdirs) {
			LogError("Expect data file(s).\n");
			exit(255);
		}

		memset((void *)&sum_stat, 0, sizeof(stat_record_t));
		sum_stat.first_seen = 0x7fffffff;
		sum_stat.msec_first = 999;
		nffile = GetNextFile(NULL, 0, 0);
		if ( !nffile ) {
			LogError("Error open file: %s\n", strerror(errno));
			exit(250);
		}
		while ( nffile && nffile != EMPTY_LIST ) {
			SumStatRecords(&sum_stat, nffile->stat_record);
			nffile = GetNextFile(nffile, 0, 0);
		}
		PrintStat(&sum_stat);
		exit(0);
	}

	// handle print mode
	if ( !print_format ) {
		// automatically select an appropriate output format for custom aggregation
		// aggr_fmt is compiled by ParseAggregateMask
		if ( aggr_fmt ) {
			int len = strlen(AggrPrependFmt) + strlen(aggr_fmt) + strlen(AggrAppendFmt) + 7;	// +7 for 'fmt:', 2 spaces and '\0'
			print_format = malloc(len);
			if ( !print_format ) {
				LogError("malloc() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
				exit(255);
			}
			snprintf(print_format, len, "fmt:%s %s %s",AggrPrependFmt, aggr_fmt, AggrAppendFmt );
			print_format[len-1] = '\0';
		} else if ( bidir ) {
			print_format = "biline";
		} else
			print_format = DefaultMode;
	}

	if ( strncasecmp(print_format, "fmt:", 4) == 0 ) {
		// special user defined output format
		char *format = &print_format[4];
		if ( strlen(format) ) {
			if ( !ParseOutputFormat(format, plain_numbers, printmap) )
				exit(255);
			print_record = format_special;
			record_header = get_record_header();
			user_format	= 1;
		} else {
			LogError("Missing format description for user defined output format!\n");
			exit(255);
		}
	} else {
		// predefined output format

		// Check for long_v6 mode
		i = strlen(print_format);
		if ( i > 2 ) {
			if ( print_format[i-1] == '6' ) {
				Setv6Mode(1);
				print_format[i-1] = '\0';
			} else
				Setv6Mode(0);
		}

		i = 0;
		while ( printmap[i].printmode ) {
			if ( strncasecmp(print_format, printmap[i].printmode, MAXMODELEN) == 0 ) {
				if ( printmap[i].Format ) {
					if ( !ParseOutputFormat(printmap[i].Format, plain_numbers, printmap) )
						exit(255);
					// predefined custom format
					print_record = printmap[i].func;
					record_header = get_record_header();
					user_format	= 1;
				} else {
					// To support the pipe output format for element stats - check for pipe, and remember this
					if ( strncasecmp(print_format, "pipe", MAXMODELEN) == 0 ) {
						pipe_output = 1;
					}
					if ( strncasecmp(print_format, "csv", MAXMODELEN) == 0 ) {
						csv_output = 1;
						set_record_header();
						record_header = get_record_header();
					}
					// predefined static format
					print_record = printmap[i].func;
					user_format	= 0;
				}
				break;
			}
			i++;
		}
	}

	if ( !print_record ) {
		LogError("Unknown output mode '%s'\n", print_format);
		exit(255);
	}

	// this is the only case, where headers are printed.
	if ( strncasecmp(print_format, "raw", 16) == 0 )
		print_header = format_file_block_header;

	if ( aggregate && (flow_stat || element_stat) ) {
		aggregate = 0;
		LogError("Command line switch -s overwrites -a\n");
	}

	/* Load the filter expression from -f file when not given inline. */
	if ( !filter && ffile ) {
		if ( stat(ffile, &stat_buff) ) {
			LogError("Can't stat filter file '%s': %s\n", ffile, strerror(errno));
			exit(255);
		}
		filter = (char *)malloc(stat_buff.st_size+1);
		if ( !filter ) {
			LogError("malloc() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
			exit(255);
		}
		ffd = open(ffile, O_RDONLY);
		if ( ffd < 0 ) {
			LogError("Can't open filter file '%s': %s\n", ffile, strerror(errno));
			exit(255);
		}
		ret = read(ffd, (void *)filter, stat_buff.st_size);
		if ( ret < 0 ) {
			perror("Error reading filter file");
			close(ffd);
			exit(255);
		}
		total_bytes += ret;
		filter[stat_buff.st_size] = 0;
		close(ffd);

		FilterFilename = ffile;
	}

	// if no filter is given, set the default ip filter which passes through every flow
	if ( !filter || strlen(filter) == 0 )
		filter = "any";

	Engine = CompileFilter(filter);
	if ( !Engine )
		exit(254);

	if ( fdump ) {
		printf("StartNode: %i Engine: %s\n", Engine->StartNode, Engine->Extended ? "Extended" : "Fast");
		DumpList(Engine);
		exit(0);
	}

	if ( syntax_only )
		exit(0);

	if ( print_order && flow_stat ) {
		printf("-s record and -O (-m) are mutually exclusive options\n");
		exit(255);
	}

	if ((aggregate || flow_stat || print_order) && !Init_FlowTable() )
		exit(250);

	if (element_stat && !Init_StatTable(HashBits, NumPrealloc) )
		exit(250);

	SetLimits(element_stat || aggregate || flow_stat, packet_limit_string, byte_limit_string);

	if ( tstring ) {
		if ( !ScanTimeFrame(tstring, &t_start, &t_end) )
			exit(255);
	}

	/* Print the column header once, unless output is suppressed/redirected. */
	if ( !(flow_stat || element_stat || wfile || quiet ) && record_header ) {
		if ( user_format ) {
			printf("%s\n", record_header);
		} else {
			// static format - no static format with header any more, but keep code anyway
			if ( Getv6Mode() ) {
				printf("%s\n", record_header);
			} else
				printf("%s\n", record_header);
		}
	}

	/* Main pass over all input files. */
	nfprof_start(&profile_data);
	sum_stat = process_data(wfile, element_stat, aggregate || flow_stat, print_order != NULL,
						print_header, print_record, t_start, t_end,
						limitflows, do_tag, compress, do_xstat);
	nfprof_end(&profile_data, total_flows);

	if ( total_bytes == 0 ) {
		printf("No matched flows\n");
		exit(0);
	}

	/* Emit aggregated/sorted flows: to file (-w) or to stdout. */
	if (aggregate || print_order) {
		if ( wfile ) {
			nffile_t *nffile = OpenNewFile(wfile, NULL, compress, is_anonymized, NULL);
			if ( !nffile )
				exit(255);
			if ( ExportFlowTable(nffile, aggregate, bidir, date_sorted, extension_map_list) ) {
				CloseUpdateFile(nffile, Ident );
			} else {
				CloseFile(nffile);
				unlink(wfile);
			}
			DisposeFile(nffile);
		} else {
			PrintFlowTable(print_record, topN, do_tag, GuessDir, extension_map_list);
		}
	}

	if (flow_stat) {
		PrintFlowStat(record_header, print_record, topN, do_tag, quiet, csv_output, extension_map_list);
#ifdef DEVEL
		printf("Loopcnt: %u\n", loopcnt);
#endif
	}

	if (element_stat) {
		PrintElementStat(&sum_stat, plain_numbers, record_header, print_record, topN, do_tag, quiet, pipe_output, csv_output);
	}

	/* Closing summary: totals, time window and profiling info. */
	if ( !quiet ) {
		if ( csv_output ) {
			PrintSummary(&sum_stat, plain_numbers, csv_output);
		} else if ( !wfile ) {
			if (is_anonymized)
				printf("IP addresses anonymised\n");
			PrintSummary(&sum_stat, plain_numbers, csv_output);
			if ( t_last_flow == 0 ) {
				// in case of a pre 1.6.6 collected and empty flow file
				printf("Time window: <unknown>\n");
			} else {
				printf("Time window: %s\n", TimeString(t_first_flow, t_last_flow));
			}
			printf("Total flows processed: %u, Blocks skipped: %u, Bytes read: %llu\n",
				total_flows, skipped_blocks, (unsigned long long)total_bytes);
			nfprof_print(&profile_data, stdout);
		}
	}

	Dispose_FlowTable();
	Dispose_StatTable();
	FreeExtensionMaps(extension_map_list);

#ifdef DEVEL
	if ( hash_hit || hash_miss )
		printf("Hash hit: %i, miss: %i, skip: %i, ratio: %5.3f\n", hash_hit, hash_miss, hash_skip, (float)hash_hit/((float)(hash_hit+hash_miss)));
#endif

	return 0;
}
/*
 * process_data - core record loop of nfdump: reads all flow files in the
 * current file chain, expands each record, applies the time window and the
 * compiled filter Engine, and then either aggregates/sorts/stats, writes to
 * an output file, or prints each matching record.
 *
 * wfile        - output file name, or NULL for stdout printing
 * element_stat - non-zero: feed matching records into the element stat table
 * flow_stat    - non-zero: feed matching records into the flow table
 * sort_flows   - non-zero: collect records for later sorted printing
 * print_header / print_record - printer callbacks (print_record is disabled
 *                below when any stat/sort mode is active)
 * twin_start/twin_end - time window filter; twin_start == 0 disables it
 * limitflows   - stop after this many matched flows (-c option); 0 = no limit
 * tag          - passed through to the record printer
 * compress     - compression flag for the output file
 * do_xstat     - non-zero: maintain extra statistics (xstat) for the output
 *
 * Returns the accumulated stat_record summary of all matched flows.
 * Side effects: updates globals total_flows, total_bytes, skipped_blocks,
 * t_first_flow, t_last_flow, is_anonymized, Ident, Engine->nfrecord and the
 * shared extension_map_list.
 */
stat_record_t process_data(char *wfile, int element_stat, int flow_stat, int sort_flows,
	printer_t print_header, printer_t print_record, time_t twin_start, time_t twin_end,
	uint64_t limitflows, int tag, int compress, int do_xstat) {
common_record_t 	*flow_record;
master_record_t		*master_record;
nffile_t			*nffile_w, *nffile_r;
xstat_t				*xstat;
stat_record_t 		stat_record;
int 				done, write_file;

#ifdef COMPAT15
int	v1_map_done = 0;
#endif

	// time window of all matched flows
	memset((void *)&stat_record, 0, sizeof(stat_record_t));
	// seed min-trackers with max values so the first record always updates them
	stat_record.first_seen = 0x7fffffff;
	stat_record.msec_first = 999;

	// Do the logic first

	// do not print flows when doing any stats or sorting
	if ( sort_flows || flow_stat || element_stat ) {
		print_record = NULL;
	}

	// do not write flows to file, when doing any stats
	// -w may apply for flow_stats later
	write_file = !(sort_flows || flow_stat || element_stat) && wfile;
	nffile_r = NULL;
	nffile_w = NULL;
	xstat  	 = NULL;

	// Get the first file handle
	nffile_r = GetNextFile(NULL, twin_start, twin_end);
	if ( !nffile_r ) {
		LogError("GetNextFile() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		return stat_record;
	}
	if ( nffile_r == EMPTY_LIST ) {
		LogError("Empty file list. No files to process\n");
		return stat_record;
	}

	// preset time window of all processed flows to the stat record in first flow file
	t_first_flow = nffile_r->stat_record->first_seen;
	t_last_flow  = nffile_r->stat_record->last_seen;

	// store infos away for later use
	// although multiple files may be processed, it is assumed that all
	// have the same settings
	is_anonymized = IP_ANONYMIZED(nffile_r);
	strncpy(Ident, nffile_r->file_header->ident, IDENTLEN);
	Ident[IDENTLEN-1] = '\0';	// strncpy does not guarantee termination

	// prepare output file if requested
	if ( write_file ) {
		nffile_w = OpenNewFile(wfile, NULL, compress, IP_ANONYMIZED(nffile_r), NULL );
		if ( !nffile_w ) {
			// OpenNewFile reported the error; release the input file and bail out
			if ( nffile_r ) {
				CloseFile(nffile_r);
				DisposeFile(nffile_r);
			}
			return stat_record;
		}
		if ( do_xstat ) {
			xstat = InitXStat(nffile_w);
			if ( !xstat ) {
				if ( nffile_r ) {
					CloseFile(nffile_r);
					DisposeFile(nffile_r);
				}
				return stat_record;
			}
		}
	}

	// setup Filter Engine to point to master_record, as any record read from file
	// is expanded into this record
	// Engine->nfrecord = (uint64_t *)master_record;

	done = 0;
	while ( !done ) {
	int i, ret;

		// get next data block from file
		ret = ReadBlock(nffile_r);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT )
					LogError("Skip corrupt data file '%s'\n",GetCurrentFilename());
				else
					LogError("Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF: {
				nffile_t *next = GetNextFile(nffile_r, twin_start, twin_end);
				if ( next == EMPTY_LIST ) {
					// end of file chain reached - normal termination
					done = 1;
				} else if ( next == NULL ) {
					done = 1;
					LogError("Unexpected end of file list\n");
				} else {
					// Update global time span window
					if ( next->stat_record->first_seen < t_first_flow )
						t_first_flow = next->stat_record->first_seen;
					if ( next->stat_record->last_seen > t_last_flow )
						t_last_flow = next->stat_record->last_seen;
					// continue with next file
				}
				continue;
				} break; // not really needed
			default:
				// successfully read block
				total_bytes += ret;
		}

#ifdef COMPAT15
		// nfdump 1.5.x compatibility: rewrite a v1 data block in place into v2 records
		if ( nffile_r->block_header->id == DATA_BLOCK_TYPE_1 ) {
			common_record_v1_t *v1_record = (common_record_v1_t *)nffile_r->buff_ptr;
			// create an extension map for v1 blocks
			if ( v1_map_done == 0 ) {
				extension_map_t *map = malloc(sizeof(extension_map_t) + 2 * sizeof(uint16_t) );
				if ( ! map ) {
					LogError("malloc() allocation error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
					exit(255);
				}
				map->type 	= ExtensionMapType;
				map->size 	= sizeof(extension_map_t) + 2 * sizeof(uint16_t);
				// pad map size to a 4-byte boundary
				if (( map->size & 0x3 ) != 0 ) {
					map->size += 4 - ( map->size & 0x3 );
				}
				map->map_id = INIT_ID;

				map->ex_id[0] = EX_IO_SNMP_2;
				map->ex_id[1] = EX_AS_2;
				map->ex_id[2] = 0;

				map->extension_size  = 0;
				map->extension_size += extension_descriptor[EX_IO_SNMP_2].size;
				map->extension_size += extension_descriptor[EX_AS_2].size;

				if ( Insert_Extension_Map(extension_map_list,map) && write_file ) {
					// flush new map
					AppendToBuffer(nffile_w, (void *)map, map->size);
				} // else map already known and flushed

				v1_map_done = 1;
			}

			// convert the records to v2
			for ( i=0; i < nffile_r->block_header->NumRecords; i++ ) {
				common_record_t *v2_record = (common_record_t *)v1_record;
				Convert_v1_to_v2((void *)v1_record);
				// now we have a v2 record -> use size of v2_record->size
				v1_record = (common_record_v1_t *)((pointer_addr_t)v1_record + v2_record->size);
			}
			nffile_r->block_header->id = DATA_BLOCK_TYPE_2;
		}
#endif

		if ( nffile_r->block_header->id == Large_BLOCK_Type ) {
			// skip
			printf("Xstat block skipped ...\n");
			continue;
		}

		if ( nffile_r->block_header->id != DATA_BLOCK_TYPE_2 ) {
			if ( nffile_r->block_header->id == DATA_BLOCK_TYPE_1 ) {
				LogError("Can't process nfdump 1.5.x block type 1. Add --enable-compat15 to compile compatibility code. Skip block.\n");
			} else {
				LogError("Can't process block type %u. Skip block.\n", nffile_r->block_header->id);
			}
			skipped_blocks++;
			continue;
		}

		// walk all records of this block; flow_record is advanced by each
		// record's own size field at the bottom of the loop
		flow_record = nffile_r->buff_ptr;
		for ( i=0; i < nffile_r->block_header->NumRecords; i++ ) {

			switch ( flow_record->type ) {
				case CommonRecordV0Type:
				case CommonRecordType:  {
					int match;
					uint32_t map_id = flow_record->ext_map;
					generic_exporter_t *exp_info = exporter_list[flow_record->exporter_sysid];
					if ( map_id >= MAX_EXTENSION_MAPS ) {
						LogError("Corrupt data file. Extension map id %u too big.\n", flow_record->ext_map);
						exit(255);
					}
					if ( extension_map_list->slot[map_id] == NULL ) {
						LogError("Corrupt data file. Missing extension map %u. Skip record.\n", flow_record->ext_map);
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);
						continue;
					}

					total_flows++;
					master_record = &(extension_map_list->slot[map_id]->master_record);
					Engine->nfrecord = (uint64_t *)master_record;
					ExpandRecord_v2( flow_record, extension_map_list->slot[map_id],
						exp_info ? &(exp_info->info) : NULL, master_record);

					// Time based filter
					// if no time filter is given, the result is always true
					match  = twin_start && (master_record->first < twin_start || master_record->last > twin_end) ? 0 : 1;
					// honour -c: stop matching once the flow limit is reached
					match &= limitflows ? stat_record.numflows < limitflows : 1;

					// filter netflow record with user supplied filter
					if ( match )
						match = (*Engine->FilterEngine)(Engine);

					if ( match == 0 ) { // record failed to pass all filters
						// increment pointer by number of bytes for netflow record
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);
						// go to next record
						continue;
					}

					// Records passed filter -> continue record processing
					// Update statistics
					UpdateStat(&stat_record, master_record);

					// update number of flows matching a given map
					extension_map_list->slot[map_id]->ref_count++;

					if ( flow_stat ) {
						AddFlow(flow_record, master_record, extension_map_list->slot[map_id]);
						if ( element_stat ) {
							AddStat(flow_record, master_record);
						}
					} else if ( element_stat ) {
						AddStat(flow_record, master_record);
					} else if ( sort_flows ) {
						InsertFlow(flow_record, master_record, extension_map_list->slot[map_id]);
					} else {
						if ( write_file ) {
							AppendToBuffer(nffile_w, (void *)flow_record, flow_record->size);
							if ( xstat )
								UpdateXStat(xstat, master_record);
						} else if ( print_record ) {
							char *string;
							// if we need to print out this record
							print_record(master_record, &string, tag);
							if ( string ) {
								if ( limitflows ) {
									if ( (stat_record.numflows <= limitflows) )
										printf("%s\n", string);
								} else
									printf("%s\n", string);
							}
						} else {
							// mutually exclusive conditions should prevent executing this code
							// this is buggy!
							printf("Bug! - this code should never get executed in file %s line %d\n", __FILE__, __LINE__);
						}
					} // sort_flows - else

					} break;
				case ExtensionMapType: {
					extension_map_t *map = (extension_map_t *)flow_record;

					if ( Insert_Extension_Map(extension_map_list, map) && write_file ) {
						// flush new map
						AppendToBuffer(nffile_w, (void *)map, map->size);
					} // else map already known and flushed

					} break;
				case ExporterRecordType:
				case SamplerRecordype:
						// Silently skip exporter records
					break;
				case ExporterInfoRecordType: {
					int ret = AddExporterInfo((exporter_info_record_t *)flow_record);
					if ( ret != 0 ) {
						// ret == 1 means the exporter record is new - propagate it
						if ( write_file && ret == 1 )
							AppendToBuffer(nffile_w, (void *)flow_record, flow_record->size);
					} else {
						LogError("Failed to add Exporter Record\n");
					}
					} break;
				case ExporterStatRecordType:
					AddExporterStat((exporter_stats_record_t *)flow_record);
					break;
				case SamplerInfoRecordype: {
					int ret = AddSamplerInfo((sampler_info_record_t *)flow_record);
					if ( ret != 0 ) {
						if ( write_file && ret == 1 )
							AppendToBuffer(nffile_w, (void *)flow_record, flow_record->size);
					} else {
						LogError("Failed to add Sampler Record\n");
					}
					} break;
				default: {
					LogError("Skip unknown record type %i\n", flow_record->type);
				}
			}

			// Advance pointer by number of bytes for netflow record
			flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);

		} // for all records

		// check if we are done, due to -c option
		if ( limitflows )
			done = stat_record.numflows >= limitflows;

	} // while

	CloseFile(nffile_r);

	// flush output file
	if ( write_file ) {
		// flush current buffer to disc
		if ( nffile_w->block_header->NumRecords ) {
			if ( WriteBlock(nffile_w) <= 0 ) {
				LogError("Failed to write output buffer to disk: '%s'" , strerror(errno));
			}
		}

		if ( xstat ) {
			if ( WriteExtraBlock(nffile_w, xstat->block_header ) <= 0 ) {
				LogError("Failed to write xstat buffer to disk: '%s'" , strerror(errno));
			}
		}

		/* Stat info */
		// NOTE(review): this inner write_file test is redundant - we are
		// already inside "if ( write_file )" above
		if ( write_file ) {
			/* Copy stat info and close file */
			memcpy((void *)nffile_w->stat_record, (void *)&stat_record, sizeof(stat_record_t));
			CloseUpdateFile(nffile_w, nffile_r->file_header->ident );
			nffile_w = DisposeFile(nffile_w);
		} // else stdout
	}

	PackExtensionMapList(extension_map_list);

	DisposeFile(nffile_r);
	return stat_record;

} // End of process_data
// Returns a file-access object for the path currently queued in m_wsNext and
// pre-fetches the path that follows. On return, |pos| is non-null while the
// enumeration has more entries and nullptr once it is exhausted; the returned
// object always corresponds to the entry consumed by this call.
IFX_FileAccess* CFX_FontSourceEnum_File::GetNext(FX_POSITION& pos) {
  IFX_FileAccess* pCurrent = FX_CreateDefaultFileAccess(m_wsNext.AsStringC());
  // Advance: an empty next path marks the end of the directory walk.
  m_wsNext = GetNextFile().UTF8Decode();
  if (m_wsNext.GetLength() != 0)
    pos = pCurrent;
  else
    pos = nullptr;
  return pCurrent;
}
/*
 * send_data - replay the flows of the current file chain over the network as
 * NetFlow v5 or v9 datagrams via the global 'peer' socket descriptor.
 * Records are expanded into master_record, filtered by the time window and
 * the compiled filter Engine, converted to the requested NetFlow version and
 * buffered; full buffers (peer.flush) are sent with FlushBuffer().
 *
 * rfile           - read-file argument (unused in this body; the file chain
 *                   was set up by the caller - TODO confirm)
 * twin_start/end  - time window filter; twin_start == 0 disables it
 * count           - unused in this body (presumably a flow limit; verify caller)
 * delay           - microseconds to usleep() after each sent datagram (0 = none)
 * confirm         - passed through to FlushBuffer()
 * netflow_version - 5 selects v5/v7 output, anything else selects v9
 */
static void send_data(char *rfile, time_t twin_start, time_t twin_end,
	uint32_t count, unsigned int delay, int confirm, int netflow_version) {
master_record_t	master_record;
common_record_t	*flow_record;
nffile_t		*nffile;
int 			i, done, ret, again;
uint32_t		numflows, cnt;

#ifdef COMPAT15
int	v1_map_done = 0;
#endif

	// Get the first file handle
	nffile = GetNextFile(NULL, twin_start, twin_end);
	if ( !nffile ) {
		LogError("GetNextFile() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		return;
	}
	if ( nffile == EMPTY_LIST ) {
		LogError("Empty file list. No files to process\n");
		return;
	}

	peer.send_buffer   	= malloc(UDP_PACKET_SIZE);
	peer.flush			= 0;
	if ( !peer.send_buffer ) {
		LogError("malloc() error in %s line %d: %s\n", __FILE__, __LINE__, strerror(errno) );
		CloseFile(nffile);
		DisposeFile(nffile);
		return;
	}
	peer.buff_ptr = peer.send_buffer;
	// endp marks the last usable byte of the send buffer
	peer.endp 	  = (void *)((pointer_addr_t)peer.send_buffer + UDP_PACKET_SIZE - 1);

	if ( netflow_version == 5 )
		Init_v5_v7_output(&peer);
	else
		Init_v9_output(&peer);

	numflows	= 0;
	done	 	= 0;

	// setup Filter Engine to point to master_record, as any record read from file
	// is expanded into this record
	Engine->nfrecord = (uint64_t *)&master_record;

	cnt = 0;
	while ( !done ) {
		// get next data block from file
		ret = ReadBlock(nffile);

		switch (ret) {
			case NF_CORRUPT:
			case NF_ERROR:
				if ( ret == NF_CORRUPT )
					LogError("Skip corrupt data file '%s'\n",GetCurrentFilename());
				else
					LogError("Read error in file '%s': %s\n",GetCurrentFilename(), strerror(errno) );
				// fall through - get next file in chain
			case NF_EOF: {
				nffile_t *next = GetNextFile(nffile, twin_start, twin_end);
				// NOTE(review): two independent ifs (not else-if); harmless as
				// EMPTY_LIST is a non-NULL sentinel, but process_data() chains
				// them with else - consider making this consistent
				if ( next == EMPTY_LIST ) {
					done = 1;
				}
				if ( next == NULL ) {
					done = 1;
					LogError("Unexpected end of file list\n");
				}
				// else continue with next file
				continue;
				} break; // not really needed
		}

#ifdef COMPAT15
		// nfdump 1.5.x compatibility: rewrite a v1 data block in place into v2 records
		if ( nffile->block_header->id == DATA_BLOCK_TYPE_1 ) {
			common_record_v1_t *v1_record = (common_record_v1_t *)nffile->buff_ptr;
			// create an extension map for v1 blocks
			if ( v1_map_done == 0 ) {
				extension_map_t *map = malloc(sizeof(extension_map_t) + 2 * sizeof(uint16_t) );
				if ( ! map ) {
					perror("Memory allocation error");
					exit(255);
				}
				map->type 	= ExtensionMapType;
				map->size 	= sizeof(extension_map_t) + 2 * sizeof(uint16_t);
				// pad map size to a 4-byte boundary
				if (( map->size & 0x3 ) != 0 ) {
					map->size += 4 - ( map->size & 0x3 );
				}
				map->map_id = INIT_ID;

				map->ex_id[0] = EX_IO_SNMP_2;
				map->ex_id[1] = EX_AS_2;
				map->ex_id[2] = 0;

				map->extension_size  = 0;
				map->extension_size += extension_descriptor[EX_IO_SNMP_2].size;
				map->extension_size += extension_descriptor[EX_AS_2].size;

				Insert_Extension_Map(extension_map_list, map);
				v1_map_done = 1;
			}

			// convert the records to v2
			for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
				common_record_t *v2_record = (common_record_t *)v1_record;
				Convert_v1_to_v2((void *)v1_record);
				// now we have a v2 record -> use size of v2_record->size
				v1_record = (common_record_v1_t *)((pointer_addr_t)v1_record + v2_record->size);
			}
			nffile->block_header->id = DATA_BLOCK_TYPE_2;
		}
#endif

		if ( nffile->block_header->id != DATA_BLOCK_TYPE_2 ) {
			LogError("Can't process block type %u. Skip block.\n", nffile->block_header->id);
			continue;
		}

		// cnt is the number of blocks, which survived the filter
		// and added to the output buffer
		flow_record = nffile->buff_ptr;

		for ( i=0; i < nffile->block_header->NumRecords; i++ ) {
			int match;

			switch ( flow_record->type ) {
				case CommonRecordType: {
					if ( extension_map_list->slot[flow_record->ext_map] == NULL ) {
						LogError("Corrupt data file. Missing extension map %u. Skip record.\n", flow_record->ext_map);
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);
						continue;
					}

					// if no filter is given, the result is always true
					ExpandRecord_v2( flow_record, extension_map_list->slot[flow_record->ext_map], NULL, &master_record);

					// time window filter; twin_start == 0 disables it
					match = twin_start && (master_record.first < twin_start || master_record.last > twin_end) ? 0 : 1;

					// filter netflow record with user supplied filter
					if ( match )
						match = (*Engine->FilterEngine)(Engine);

					if ( match == 0 ) { // record failed to pass all filters
						// increment pointer by number of bytes for netflow record
						flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);
						// go to next record
						continue;
					}

					// Records passed filter -> continue record processing
					// 'again' means the record did not fit and must be re-added
					// after the buffer was flushed
					if ( netflow_version == 5 )
						again = Add_v5_output_record(&master_record, &peer);
					else
						again = Add_v9_output_record(&master_record, &peer);

					cnt++;
					numflows++;

					if ( peer.flush ) {
						ret = FlushBuffer(confirm);
						if ( ret < 0 ) {
							perror("Error sending data");
							CloseFile(nffile);
							DisposeFile(nffile);
							return;
						}
						if ( delay ) {
							// sleep as specified
							usleep(delay);
						}
						cnt = 0;
					}

					if ( again ) {
						if ( netflow_version == 5 )
							Add_v5_output_record(&master_record, &peer);
						else
							Add_v9_output_record(&master_record, &peer);
						cnt++;
					}

					} break;
				case ExtensionMapType: {
					extension_map_t *map = (extension_map_t *)flow_record;

					if ( Insert_Extension_Map(extension_map_list, map) ) {
						// flush new map
					} // else map already known and flushed

					} break;
				case ExporterRecordType:
				case SamplerRecordype:
				case ExporterInfoRecordType:
				case ExporterStatRecordType:
				case SamplerInfoRecordype:
						// Silently skip exporter/sampler records
					break;
				default: {
					LogError("Skip unknown record type %i\n", flow_record->type);
				}
			}

			// Advance pointer by number of bytes for netflow record
			flow_record = (common_record_t *)((pointer_addr_t)flow_record + flow_record->size);

		}
	} // while

	// flush still remaining records
	if ( cnt ) {
		ret = FlushBuffer(confirm);
		if ( ret < 0 ) {
			perror("Error sending data");
		}
	} // if cnt

	if (nffile) {
		CloseFile(nffile);
		DisposeFile(nffile);
	}

	close(peer.sockfd);

	return;

} // End of send_data
// Paints the uploads list: one row per queue, followed by one row per file
// of each expanded queue. Rows above the vertical scroll position are
// skipped (nScroll countdown); any client area left below the last row is
// filled with the window background colour.
void CUploadsCtrl::OnPaint() {
	CRect rcClient, rcItem;
	CPaintDC dc( this );

	// Take both locks non-blocking with a 250 ms timeout; bail out of this
	// paint cycle rather than stall the UI if either is contended.
	// Transfers lock is acquired first - presumably the project-wide lock
	// order; confirm before reordering.
	CSingleLock pTransfersLock( &Transfers.m_pSection, FALSE );
	if ( ! pTransfersLock.Lock( 250 ) ) return;
	CSingleLock pUploadQueuesLock( &UploadQueues.m_pSection, FALSE );
	if ( ! pUploadQueuesLock.Lock( 250 ) ) return;

	if ( Settings.General.LanguageRTL ) dc.SetTextAlign( TA_RTLREADING );

	GetClientRect( &rcClient );
	rcClient.top += HEADER_HEIGHT;				// rows start below the header

	rcItem.CopyRect( &rcClient );
	rcItem.left -= GetScrollPos( SB_HORZ );		// horizontal scroll offset
	rcItem.bottom = rcItem.top + ITEM_HEIGHT;	// first row rectangle

	int nScroll = GetScrollPos( SB_VERT );		// rows to skip before drawing
	int nIndex = 0;								// running row index for focus tracking

	CFont* pfOld = (CFont*)dc.SelectObject( &CoolInterface.m_fntNormal );
	BOOL bFocus = ( GetFocus() == this );

	for ( POSITION posQueue = GetQueueIterator() ; posQueue && rcItem.top < rcClient.bottom ; ) {
		CUploadQueue* pQueue = GetNextQueue( posQueue );

		POSITION posFile = GetFileIterator( pQueue );
		if ( posFile == NULL ) continue;		// queues without files are not shown

		if ( nScroll > 0 )
		{
			nScroll --;							// still above the viewport: skip row
		}
		else
		{
			if ( rcItem.bottom > rcClient.top )
				PaintQueue( dc, rcItem, pQueue, bFocus && ( m_nFocus == nIndex ) );
			rcItem.OffsetRect( 0, ITEM_HEIGHT );
		}

		nIndex ++;

		if ( ! pQueue->m_bExpanded ) continue;	// collapsed: file rows hidden

		while ( posFile && rcItem.top < rcClient.bottom )
		{
			int nPosition;
			CUploadFile* pFile = GetNextFile( pQueue, posFile, &nPosition );
			if ( pFile == NULL ) continue;

			if ( nScroll > 0 )
			{
				nScroll --;
			}
			else
			{
				if ( rcItem.bottom > rcClient.top )
					PaintFile( dc, rcItem, pQueue, pFile, nPosition, bFocus && ( m_nFocus == nIndex ) );
				rcItem.OffsetRect( 0, ITEM_HEIGHT );
			}

			nIndex ++;
		}
	}

	pUploadQueuesLock.Unlock();
	pTransfersLock.Unlock();

	dc.SelectObject( pfOld );					// restore the DC's original font

	// blank out whatever client area remains below the last painted row
	rcClient.top = rcItem.top;
	if ( rcClient.top < rcClient.bottom )
		dc.FillSolidRect( &rcClient, CoolInterface.m_crWindow );
}