// Copy the file "srcFilename" to "dstFilename" in 50 KiB chunks.
// Returns false if either file cannot be opened or if a write comes up
// short (e.g. disk full). The previous version ignored the result of
// Write() and reported success even when the copy was incomplete.
bool FileSystem::CopyFile(const char* srcFilename, const char* dstFilename)
{
	DiskFile infile;
	if (!infile.Open(srcFilename, DiskFile::omRead))
	{
		return false;
	}

	DiskFile outfile;
	if (!outfile.Open(dstFilename, DiskFile::omWrite))
	{
		return false;
	}

	CharBuffer buffer(1024 * 50);

	bool ok = true;
	int cnt = buffer.Size();
	while (cnt == buffer.Size())
	{
		cnt = (int)infile.Read(buffer, buffer.Size());
		// a short write means the target device failed or is full
		if ((int)outfile.Write(buffer, cnt) != cnt)
		{
			ok = false;
			break;
		}
	}

	infile.Close();
	outfile.Close();

	return ok;
}
// Detect the rar format version of the volume and dispatch to the
// reader for that version. Returns false if the file cannot be opened
// or the version is unsupported.
bool RarVolume::Read()
{
	debug("Checking file %s", *m_filename);

	DiskFile file;
	if (!file.Open(m_filename, DiskFile::omRead))
	{
		return false;
	}

	m_version = DetectRarVersion(file);
	file.Seek(0);

	// only rar3 and rar5 containers are understood; anything else fails
	bool ok = false;
	if (m_version == 3)
	{
		ok = ReadRar3Volume(file);
	}
	else if (m_version == 5)
	{
		ok = ReadRar5Volume(file);
	}

	file.Close();
	DecryptFree();
	LogDebugInfo();

	return ok;
}
bool FileSystem::LoadFileIntoBuffer(const char* filename, CharBuffer& buffer, bool addTrailingNull) { DiskFile file; if (!file.Open(filename, DiskFile::omRead)) { return false; } // obtain file size. file.Seek(0, DiskFile::soEnd); int size = (int)file.Position(); file.Seek(0); // allocate memory to contain the whole file. buffer.Reserve(size + (addTrailingNull ? 1 : 0)); // copy the file into the buffer. file.Read(buffer, size); file.Close(); if (addTrailingNull) { buffer[size] = 0; } return true; }
bool Par1Repairer::RemoveParFiles(void) { if (noiselevel > CommandLine::nlSilent && parlist.size() > 0) { cout << endl << "Purge par files." << endl; } for (list<string>::const_iterator s=parlist.begin(); s!=parlist.end(); ++s) { DiskFile *diskfile = new DiskFile; if (diskfile->Open(*s)) { if (noiselevel > CommandLine::nlSilent) { string name; string path; DiskFile::SplitFilename((*s), path, name); cout << "Remove \"" << name << "\"." << endl; } if (diskfile->IsOpen()) diskfile->Close(); diskfile->Delete(); } delete diskfile; } return true; }
// Copy the contents of "srcFile" into a newly created file named
// "destFilename". Returns false if the destination cannot be created
// or the source cannot be read fully.
bool Copy(File& srcFile, const char* destFilename)
{
	// Operate in 16k buffers.
	const DWORD BUFFER_SIZE = 16 * 1024;
	DWORD fileSize = (DWORD)srcFile.GetLength();

	// See if the destination file exists.
	DiskFile destFile;
	if (!destFile.Open(destFilename, File::MODE_CREATE | File::MODE_WRITEONLY))
		return false;

	// Allocate the buffer space (on the stack).
	BYTE* buffer = (BYTE*)_alloca(BUFFER_SIZE);

	// Keep copying until there is no more file left to copy.
	while (fileSize > 0)
	{
		// Copy the minimum of BUFFER_SIZE or the fileSize.
		DWORD readSize = min(BUFFER_SIZE, fileSize);
		// Fail on a short read instead of silently writing stale buffer
		// bytes (this matches the sibling uint32_t overload of Copy).
		if (srcFile.Read(buffer, readSize) != readSize)
			return false;
		destFile.Write(buffer, readSize);
		fileSize -= readSize;
	}

	// Close the destination virtual file.
	destFile.Close();

	return true;
}
// Delete all of the partly reconstructed files bool Par1Repairer::DeleteIncompleteTargetFiles(void) { list<Par1RepairerSourceFile*>::iterator sf = verifylist.begin(); // Iterate through each file in the verification list while (sf != verifylist.end()) { Par1RepairerSourceFile *sourcefile = *sf; if (sourcefile->GetTargetExists()) { DiskFile *targetfile = sourcefile->GetTargetFile(); // Close and delete the file if (targetfile->IsOpen()) targetfile->Close(); targetfile->Delete(); // Forget the file diskfilemap.Remove(targetfile); delete targetfile; // There is no target file sourcefile->SetTargetExists(false); sourcefile->SetTargetFile(0); } ++sf; } return true; }
// Stream "srcFile" into a newly created file at "destFilename",
// creating the destination path first. Works in 16 KiB chunks and
// fails on any short read or failed open.
bool Copy(File& srcFile, const char* destFilename)
{
	const uint32_t BUFFER_SIZE = 16 * 1024;
	uint32_t remaining = (uint32_t)srcFile.GetLength();

	if (!Misc::PathCreate(destFilename))
		return false;

	// Create the destination file.
	DiskFile destFile;
	if (!destFile.Open(destFilename, File::MODE_CREATE | File::MODE_WRITEONLY))
		return false;

	// Stack-allocated transfer buffer.
	BYTE* buffer = (BYTE*)alloca(BUFFER_SIZE);

	// Copy chunk by chunk until nothing is left.
	while (remaining > 0)
	{
		uint32_t chunk = remaining;
		if (chunk > BUFFER_SIZE)
			chunk = BUFFER_SIZE;

		// A short read is an error.
		if (srcFile.Read(buffer, chunk) != chunk)
			return false;
		destFile.Write(buffer, chunk);

		remaining -= chunk;
	}

	destFile.Close();

	return true;
}
// Scan any extra files specified on the command line bool Par1Repairer::VerifyExtraFiles(const list<CommandLine::ExtraFile> &extrafiles) { for (ExtraFileIterator i=extrafiles.begin(); i!=extrafiles.end() && completefilecount<sourcefiles.size(); ++i) { string filename = i->FileName(); bool skip = false; // Find the file extension string::size_type where = filename.find_last_of('.'); if (where != string::npos) { string tail = filename.substr(where+1); // Check the the file extension is the correct form if ((tail[0] == 'P' || tail[0] == 'p') && ( ((tail[1] == 'A' || tail[1] == 'a') && (tail[2] == 'R' || tail[2] == 'r')) || (isdigit(tail[1]) && isdigit(tail[2])) )) { skip = true; } } if (!skip) { filename = DiskFile::GetCanonicalPathname(filename); // Has this file already been dealt with if (diskfilemap.Find(filename) == 0) { DiskFile *diskfile = new DiskFile; // Does the file exist if (!diskfile->Open(filename)) { delete diskfile; continue; } // Remember that we have processed this file bool success = diskfilemap.Insert(diskfile); assert(success); // Do the actual verification VerifyDataFile(diskfile, 0); // Ignore errors // We have finished with the file for now diskfile->Close(); // Find out how much data we have found UpdateVerificationResults(); } } } return true; }
// Verify that all of the reconstructed target files are now correct bool Par1Repairer::VerifyTargetFiles(void) { bool finalresult = true; // Verify the target files in alphabetical order // sort(verifylist.begin(), verifylist.end(), SortSourceFilesByFileName); // Iterate through each file in the verification list for (list<Par1RepairerSourceFile*>::iterator sf = verifylist.begin(); sf != verifylist.end(); ++sf) { Par1RepairerSourceFile *sourcefile = *sf; DiskFile *targetfile = sourcefile->GetTargetFile(); // Close the file if (targetfile->IsOpen()) targetfile->Close(); // Say we don't have a complete version of the file sourcefile->SetCompleteFile(0); // Re-open the target file if (!targetfile->Open()) { finalresult = false; continue; } // Verify the file again if (!VerifyDataFile(targetfile, sourcefile)) finalresult = false; // Close the file again targetfile->Close(); // Find out how much data we have found UpdateVerificationResults(); } return finalresult; }
bool FileSystem::SaveBufferIntoFile(const char* filename, const char* buffer, int bufLen) { DiskFile file; if (!file.Open(filename, DiskFile::omWrite)) { return false; } int writtenBytes = (int)file.Write(buffer, bufLen); file.Close(); return writtenBytes == bufLen; }
void ParRenamer::CheckRegularFile(const char* destDir, const char* filename) { debug("Computing hash for %s", filename); DiskFile file; if (!file.Open(filename, DiskFile::omRead)) { PrintMessage(Message::mkError, "Could not open file %s", filename); return; } // load first 16K of the file into buffer static const int blockSize = 16*1024; CharBuffer buffer(blockSize); int readBytes = (int)file.Read(buffer, buffer.Size()); if (readBytes != buffer.Size() && file.Error()) { PrintMessage(Message::mkError, "Could not read file %s", filename); return; } file.Close(); Par2::MD5Hash hash16k; Par2::MD5Context context; context.Update(buffer, readBytes); context.Final(hash16k); debug("file: %s; hash16k: %s", FileSystem::BaseFileName(filename), hash16k.print().c_str()); for (FileHash& fileHash : m_fileHashList) { if (!strcmp(fileHash.GetHash(), hash16k.print().c_str())) { debug("Found correct filename: %s", fileHash.GetFilename()); fileHash.SetFileExists(true); BString<1024> dstFilename("%s%c%s", destDir, PATH_SEPARATOR, fileHash.GetFilename()); if (!FileSystem::FileExists(dstFilename) && !IsSplittedFragment(filename, fileHash.GetFilename())) { RenameFile(filename, dstFilename); } break; } } }
// Append a printf-style formatted message to the configured log file,
// rotating the log first when the calendar day changed and rotation is
// enabled. No-op when no log filename is configured.
void Log::Filelog(const char* msg, ...)
{
	if (m_logFilename.Empty())
	{
		return;
	}

	// format the message into a bounded local buffer (truncated to 1023 chars)
	char tmp2[1024];
	va_list ap;
	va_start(ap, msg);
	vsnprintf(tmp2, 1024, msg, ap);
	tmp2[1024-1] = '\0';
	va_end(ap);

	// timestamp adjusted by the user-configured time correction
	time_t rawtime = Util::CurrentTime() + g_Options->GetTimeCorrection();

	char time[50];
	Util::FormatTime(rawtime, time, 50);

	// rotate when the 86400-second day bucket changed since the last write
	if ((int)rawtime/86400 != (int)m_lastWritten/86400 && g_Options->GetWriteLog() == Options::wlRotate)
	{
		RotateLog();
	}

	m_lastWritten = rawtime;

	DiskFile file;
	if (file.Open(m_logFilename, DiskFile::omAppend))
	{
#ifdef WIN32
		uint64 processId = GetCurrentProcessId();
		uint64 threadId = GetCurrentThreadId();
#else
		uint64 processId = (uint64)getpid();
		uint64 threadId = (uint64)pthread_self();
#endif
#ifdef DEBUG
		// debug builds additionally log process and thread ids
		file.Print("%s\t%llu\t%llu\t%s%s", time, processId, threadId, tmp2, LINE_ENDING);
#else
		file.Print("%s\t%s%s", time, tmp2, LINE_ENDING);
#endif
		file.Close();
	}
	else
	{
		perror(m_logFilename);
	}
}
void ParRenamer::CheckParFile(const char* destDir, const char* filename) { debug("Checking par2-header for %s", filename); DiskFile file; if (!file.Open(filename, DiskFile::omRead)) { PrintMessage(Message::mkError, "Could not open file %s", filename); return; } // load par2-header Par2::PACKET_HEADER header; int readBytes = (int)file.Read(&header, sizeof(header)); if (readBytes != sizeof(header) && file.Error()) { PrintMessage(Message::mkError, "Could not read file %s", filename); return; } file.Close(); // Check the packet header if (Par2::packet_magic != header.magic || // not par2-file sizeof(Par2::PACKET_HEADER) > header.length || // packet length is too small 0 != (header.length & 3) || // packet length is not a multiple of 4 FileSystem::FileSize(filename) < (int)header.length) // packet would extend beyond the end of the file { // not par2-file or damaged header, ignoring the file return; } BString<100> setId = header.setid.print().c_str(); for (char* p = setId; *p; p++) *p = tolower(*p); // convert string to lowercase debug("Storing: %s; setid: %s", FileSystem::BaseFileName(filename), *setId); m_parInfoList.emplace_back(filename, setId); }
// Create the user's config file from the shipped template in the common
// app-data directory, rewrite its MainDir setting to point at that
// directory and create the default "complete" destination directory.
// Windows-only (SHGetFolderPath, CSIDL_COMMON_APPDATA).
void WinConsole::SetupConfigFile()
{
	// create new config-file from config template
	char commonAppDataPath[MAX_PATH];
	SHGetFolderPath(nullptr, CSIDL_COMMON_APPDATA, nullptr, 0, commonAppDataPath);

	BString<1024> filename("%s\\NZBGet\\nzbget.conf", commonAppDataPath);

	BString<1024> appDataPath("%s\\NZBGet", commonAppDataPath);
	FileSystem::CreateDirectory(appDataPath);

	BString<1024> confTemplateFilename("%s\\nzbget.conf.template", g_Options->GetAppDir());
	CopyFile(confTemplateFilename, filename, FALSE);

	// set MainDir in the config-file
	// NOTE(review): "size" is unused — candidate for removal
	int size = 0;
	CharBuffer config;
	if (FileSystem::LoadFileIntoBuffer(filename, config, true))
	{
		const char* SIGNATURE = "MainDir=${AppDir}\\downloads";
		char* p = strstr(config, SIGNATURE);
		if (p)
		{
			DiskFile outfile;
			if (outfile.Open(filename, DiskFile::omWrite))
			{
				// rewrite the file: everything before the signature, then the
				// replacement "MainDir=<appDataPath>", then the rest of the
				// buffer (the two "- 1" terms skip the trailing '\0' appended
				// by LoadFileIntoBuffer and the signature's own last byte
				// position — presumably; verify against CharBuffer::Size())
				outfile.Write(config, p - config);
				outfile.Write("MainDir=", 8);
				outfile.Write(appDataPath, strlen(appDataPath));
				outfile.Write(p + strlen(SIGNATURE), config.Size() - 1 - (p + strlen(SIGNATURE) - config) - 1);
				outfile.Close();
			}
		}
	}

	// create default destination directory (which is not created on start automatically)
	BString<1024> completeDir("%s\\NZBGet\\complete", commonAppDataPath);
	FileSystem::CreateDirectory(completeDir);
}
// Assemble the final output file from the downloaded article parts.
// Depending on the options this either joins the per-article temp files
// into one file, flushes/verifies a direct-written output file, or (raw
// article mode) moves the raw article files into a directory. Also
// combines the per-segment crc values into the whole-file crc and
// records the final output filename on the FileInfo.
void ArticleWriter::CompleteFileParts()
{
	debug("Completing file parts");
	debug("ArticleFilename: %s", m_fileInfo->GetFilename());

	bool directWrite = (g_Options->GetDirectWrite() || m_fileInfo->GetForceDirectWrite()) && m_fileInfo->GetOutputInitialized();

	BString<1024> nzbName;
	BString<1024> nzbDestDir;
	BString<1024> filename;
	{
		// snapshot the fields under the queue lock; they may change while we work
		GuardedDownloadQueue guard = DownloadQueue::Guard();
		nzbName = m_fileInfo->GetNzbInfo()->GetName();
		nzbDestDir = m_fileInfo->GetNzbInfo()->GetDestDir();
		filename = m_fileInfo->GetFilename();
	}

	BString<1024> infoFilename("%s%c%s", *nzbName, PATH_SEPARATOR, *filename);

	bool cached = m_fileInfo->GetCachedArticles() > 0;

	if (g_Options->GetRawArticle())
	{
		detail("Moving articles for %s", *infoFilename);
	}
	else if (directWrite && cached)
	{
		detail("Writing articles for %s", *infoFilename);
	}
	else if (directWrite)
	{
		detail("Checking articles for %s", *infoFilename);
	}
	else
	{
		detail("Joining articles for %s", *infoFilename);
	}

	// Ensure the DstDir is created
	CString errmsg;
	if (!FileSystem::ForceDirectories(nzbDestDir, errmsg))
	{
		m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
			"Could not create directory %s: %s", *nzbDestDir, *errmsg);
		return;
	}

	CString ofn;
	if (m_fileInfo->GetForceDirectWrite())
	{
		ofn.Format("%s%c%s", *nzbDestDir, PATH_SEPARATOR, *filename);
	}
	else
	{
		ofn = FileSystem::MakeUniqueFilename(nzbDestDir, *filename);
	}

	DiskFile outfile;
	BString<1024> tmpdestfile("%s.tmp", *ofn);

	if (!g_Options->GetRawArticle() && !directWrite)
	{
		// joining mode: write into a fresh ".tmp" file, renamed at the end
		FileSystem::DeleteFile(tmpdestfile);
		if (!outfile.Open(tmpdestfile, DiskFile::omWrite))
		{
			m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
				"Could not create file %s: %s", *tmpdestfile, *FileSystem::GetLastErrorMessage());
			return;
		}
	}
	else if (directWrite && cached)
	{
		// direct-write mode with cached segments: update the existing output file
		if (!outfile.Open(m_outputFilename, DiskFile::omReadWrite))
		{
			m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
				"Could not open file %s: %s", *m_outputFilename, *FileSystem::GetLastErrorMessage());
			return;
		}
		tmpdestfile = *m_outputFilename;
	}
	else if (g_Options->GetRawArticle())
	{
		// raw mode: articles are moved as individual files into a directory
		FileSystem::DeleteFile(tmpdestfile);
		if (!FileSystem::CreateDirectory(ofn))
		{
			m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
				"Could not create directory %s: %s", *ofn, *FileSystem::GetLastErrorMessage());
			return;
		}
	}

	if (outfile.Active())
	{
		SetWriteBuffer(outfile, 0);
	}

	uint32 crc = 0;

	{
		// hold the cache flush guard only while cached segments exist
		std::unique_ptr<ArticleCache::FlushGuard> flushGuard;
		if (cached)
		{
			flushGuard = std::make_unique<ArticleCache::FlushGuard>(g_ArticleCache->GuardFlush());
		}

		CharBuffer buffer;
		bool firstArticle = true;

		if (!g_Options->GetRawArticle() && !directWrite)
		{
			buffer.Reserve(1024 * 64);
		}

		for (ArticleInfo* pa : m_fileInfo->GetArticles())
		{
			if (pa->GetStatus() != ArticleInfo::aiFinished)
			{
				continue;
			}

			// pad any gap before this segment's offset with zero bytes
			if (!g_Options->GetRawArticle() && !directWrite && pa->GetSegmentOffset() > -1 &&
				pa->GetSegmentOffset() > outfile.Position() && outfile.Position() > -1)
			{
				memset(buffer, 0, buffer.Size());
				if (!g_Options->GetSkipWrite())
				{
					while (pa->GetSegmentOffset() > outfile.Position() && outfile.Position() > -1 &&
						outfile.Write(buffer, std::min((int)(pa->GetSegmentOffset() - outfile.Position()), buffer.Size())));
				}
			}

			if (pa->GetSegmentContent())
			{
				// segment still cached in memory: write it at its offset
				if (!g_Options->GetSkipWrite())
				{
					outfile.Seek(pa->GetSegmentOffset());
					outfile.Write(pa->GetSegmentContent(), pa->GetSegmentSize());
				}
				pa->DiscardSegment();
			}
			else if (!g_Options->GetRawArticle() && !directWrite && !g_Options->GetSkipWrite())
			{
				// segment lives in a per-article result file: append its content
				DiskFile infile;
				if (pa->GetResultFilename() && infile.Open(pa->GetResultFilename(), DiskFile::omRead))
				{
					int cnt = buffer.Size();
					while (cnt == buffer.Size())
					{
						cnt = (int)infile.Read(buffer, buffer.Size());
						outfile.Write(buffer, cnt);
					}
					infile.Close();
				}
				else
				{
					// result file disappeared: re-account the article as failed
					m_fileInfo->SetFailedArticles(m_fileInfo->GetFailedArticles() + 1);
					m_fileInfo->SetSuccessArticles(m_fileInfo->GetSuccessArticles() - 1);
					m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
						"Could not find file %s for %s [%i/%i]", pa->GetResultFilename(), *infoFilename,
						pa->GetPartNumber(), (int)m_fileInfo->GetArticles()->size());
				}
			}
			else if (g_Options->GetRawArticle())
			{
				// raw mode: move the article file into the destination directory
				BString<1024> dstFileName("%s%c%03i", *ofn, PATH_SEPARATOR, pa->GetPartNumber());
				if (!FileSystem::MoveFile(pa->GetResultFilename(), dstFileName))
				{
					m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
						"Could not move file %s to %s: %s", pa->GetResultFilename(),
						*dstFileName, *FileSystem::GetLastErrorMessage());
				}
			}

			if (m_format == Decoder::efYenc)
			{
				// combine per-segment crc32 values into the whole-file crc
				crc = firstArticle ? pa->GetCrc() : Crc32::Combine(crc, pa->GetCrc(), pa->GetSegmentSize());
				firstArticle = false;
			}
		}

		buffer.Clear();
	}

	if (outfile.Active())
	{
		outfile.Close();
		if (!directWrite && !FileSystem::MoveFile(tmpdestfile, ofn))
		{
			m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
				"Could not move file %s to %s: %s", *tmpdestfile, *ofn,
				*FileSystem::GetLastErrorMessage());
		}
	}

	if (directWrite)
	{
		if (!FileSystem::SameFilename(m_outputFilename, ofn) &&
			!FileSystem::MoveFile(m_outputFilename, ofn))
		{
			m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
				"Could not move file %s to %s: %s", *m_outputFilename, *ofn,
				*FileSystem::GetLastErrorMessage());
		}

		// if destination directory was changed delete the old directory (if empty)
		int len = strlen(nzbDestDir);
		if (!(!strncmp(nzbDestDir, m_outputFilename, len) &&
			(m_outputFilename[len] == PATH_SEPARATOR || m_outputFilename[len] == ALT_PATH_SEPARATOR)))
		{
			debug("Checking old dir for: %s", *m_outputFilename);
			BString<1024> oldDestDir;
			oldDestDir.Set(m_outputFilename, (int)(FileSystem::BaseFileName(m_outputFilename) - m_outputFilename));
			if (FileSystem::DirEmpty(oldDestDir))
			{
				debug("Deleting old dir: %s", *oldDestDir);
				FileSystem::RemoveDirectory(oldDestDir);
			}
		}
	}

	if (!directWrite)
	{
		// per-article temp result files are no longer needed
		for (ArticleInfo* pa : m_fileInfo->GetArticles())
		{
			if (pa->GetResultFilename())
			{
				FileSystem::DeleteFile(pa->GetResultFilename());
			}
		}
	}

	if (m_fileInfo->GetTotalArticles() == m_fileInfo->GetSuccessArticles())
	{
		m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkInfo,
			"Successfully downloaded %s", *infoFilename);
	}
	else if (m_fileInfo->GetMissedArticles() + m_fileInfo->GetFailedArticles() > 0)
	{
		m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkWarning,
			"%i of %i article downloads failed for \"%s\"",
			m_fileInfo->GetMissedArticles() + m_fileInfo->GetFailedArticles(),
			m_fileInfo->GetTotalArticles(), *infoFilename);
	}
	else
	{
		m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkInfo,
			"Partially downloaded %s", *infoFilename);
	}

	{
		GuardedDownloadQueue guard = DownloadQueue::Guard();
		m_fileInfo->SetCrc(crc);
		m_fileInfo->SetOutputFilename(ofn);

		if (strcmp(m_fileInfo->GetFilename(), filename))
		{
			// file was renamed during completion, need to move the file
			ofn = FileSystem::MakeUniqueFilename(nzbDestDir, m_fileInfo->GetFilename());
			if (!FileSystem::MoveFile(m_fileInfo->GetOutputFilename(), ofn))
			{
				m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
					"Could not rename file %s to %s: %s", m_fileInfo->GetOutputFilename(),
					*ofn, *FileSystem::GetLastErrorMessage());
			}
			m_fileInfo->SetOutputFilename(ofn);
		}

		if (strcmp(m_fileInfo->GetNzbInfo()->GetDestDir(), nzbDestDir))
		{
			// destination directory was changed during completion, need to move the file
			MoveCompletedFiles(m_fileInfo->GetNzbInfo(), nzbDestDir);
		}
	}
}
// Read source data, process it through the RS matrix and write it to disk.
// One call handles one "chunk" window [blockoffset, blockoffset+blocklength)
// of every input block; the RS computation itself is delegated to
// CreateParityBlocks, which may run multi-threaded.
bool Par2Creator::ProcessData(u64 blockoffset, size_t blocklength)
{
	// Clear the output buffer
	memset(outputbuffer, 0, chunksize * recoveryblockcount);

	// If we have defered computation of the file hash and block crc and hashes
	// sourcefile and sourceindex will be used to update them during
	// the main recovery block computation
	vector<Par2CreatorSourceFile*>::iterator sourcefile = sourcefiles.begin();
	u32 sourceindex = 0;

	vector<DataBlock>::iterator sourceblock;
	u32 inputblock;

	// track the currently open input file so consecutive blocks from the
	// same file don't reopen it
	DiskFile *lastopenfile = NULL;

	// For each input block
	for ((sourceblock=sourceblocks.begin()),(inputblock=0);
	     sourceblock != sourceblocks.end();
	     ++sourceblock, ++inputblock)
	{
		// Are we reading from a new file?
		if (lastopenfile != (*sourceblock).GetDiskFile())
		{
			// Close the last file
			if (lastopenfile != NULL)
			{
				lastopenfile->Close();
			}

			// Open the new file
			lastopenfile = (*sourceblock).GetDiskFile();
			if (!lastopenfile->Open())
			{
				return false;
			}
		}

		// Read data from the current input block
		if (!sourceblock->ReadData(blockoffset, blocklength, inputbuffer))
			return false;

		if (deferhashcomputation)
		{
			// deferred hashing is only valid when processing whole blocks
			assert(blockoffset == 0 && blocklength == blocksize);
			assert(sourcefile != sourcefiles.end());

			(*sourcefile)->UpdateHashes(sourceindex, inputbuffer, blocklength);
		}

		// Function that does the subtask in multiple threads if appropriate.
		if (!this->CreateParityBlocks (blocklength, inputblock))
			return false;

		// Work out which source file the next block belongs to
		if (++sourceindex >= (*sourcefile)->BlockCount())
		{
			sourceindex = 0;
			++sourcefile;
		}
	}

	// Close the last file
	if (lastopenfile != NULL)
	{
		lastopenfile->Close();
	}

	if (noiselevel > CommandLine::nlQuiet)
		cout << "Writing recovery packets\r";

	// For each output block
	for (u32 outputblock=0; outputblock<recoveryblockcount;outputblock++)
	{
		// Select the appropriate part of the output buffer
		char *outbuf = &((char*)outputbuffer)[chunksize * outputblock];

		// Write the data to the recovery packet
		if (!recoverypackets[outputblock].WriteData(blockoffset, blocklength, outbuf))
			return false;
	}

	if (noiselevel > CommandLine::nlQuiet)
		cout << "Wrote " << recoveryblockcount * blocklength << " bytes to disk" << endl;

	return true;
}
// Attempt to verify all of the source files bool Par1Repairer::VerifySourceFiles(void) { bool finalresult = true; u32 filenumber = 0; vector<Par1RepairerSourceFile*>::iterator sourceiterator = sourcefiles.begin(); while (sourceiterator != sourcefiles.end()) { Par1RepairerSourceFile *sourcefile = *sourceiterator; string filename = sourcefile->FileName(); // Check to see if we have already used this file if (diskfilemap.Find(filename) != 0) { string path; string name; DiskFile::SplitRelativeFilename(filename, path, name); // The file has already been used! // cerr << "Source file " << filenumber+1 << " is a duplicate." << endl; // Original cerr << "Source file " << name << " is a duplicate." << endl; // Imported finalresult = false; } else { DiskFile *diskfile = new DiskFile; // Does the target file exist if (diskfile->Open(filename)) { // Yes. Record that fact. sourcefile->SetTargetExists(true); // Remember that the DiskFile is the target file sourcefile->SetTargetFile(diskfile); // Remember that we have processed this file bool success = diskfilemap.Insert(diskfile); assert(success); // Do the actual verification if (!VerifyDataFile(diskfile, sourcefile)) finalresult = false; // We have finished with the file for now diskfile->Close(); // Find out how much data we have found UpdateVerificationResults(); } else { // The file does not exist. delete diskfile; if (noiselevel > CommandLine::nlSilent) { string path; string name; DiskFile::SplitFilename(filename, path, name); cout << "Target: \"" << name << "\" - missing." << endl; } } } ++sourceiterator; ++filenumber; } return finalresult; }
// Load and validate one PAR1 recovery file.
// Verifies the header magic/version, the MD5 control hash over the whole
// file, and the file-list/data layout; on first sight of the file list it
// parses the embedded file entries into sourcefiles/extrafiles. If the
// file is a recovery volume its data block is registered in
// recoveryblocks. Returns true in almost all cases — unreadable or
// invalid files are simply skipped (the do/while(false) acts as a
// structured "bail out" for all validation failures).
bool Par1Repairer::LoadRecoveryFile(string filename)
{
	// Skip the file if it has already been processed
	if (diskfilemap.Find(filename) != 0)
	{
		return true;
	}

	DiskFile *diskfile = new DiskFile;

	// Open the file
	if (!diskfile->Open(filename))
	{
		// If we could not open the file, ignore the error and
		// proceed to the next file
		delete diskfile;
		return true;
	}

	if (noiselevel > CommandLine::nlSilent)
	{
		string path;
		string name;
		DiskFile::SplitFilename(filename, path, name);
		cout << "Loading \"" << name << "\"." << endl;
	}

	parlist.push_back(filename);

	bool havevolume = false;
	u32 volumenumber = 0;

	// How big is the file
	u64 filesize = diskfile->FileSize();
	if (filesize >= sizeof(PAR1FILEHEADER))
	{
		// Allocate a buffer to read data into (at most 1 MiB at a time)
		size_t buffersize = (size_t)min((u64)1048576, filesize);
		u8 *buffer = new u8[buffersize];

		do
		{
			PAR1FILEHEADER fileheader;
			if (!diskfile->Read(0, &fileheader, sizeof(fileheader)))
				break;

			// Is this really a PAR file?
			if (fileheader.magic != par1_magic)
				break;

			// Is the version number correct?
			if (fileheader.fileversion != 0x00010000)
				break;

			// smartpar 1.1 wrote bad 16k hashes; remember to ignore them
			ignore16kfilehash = (fileheader.programversion == smartpar11);

			// Prepare to carry out MD5 Hash check of the Control Hash
			MD5Context context;
			u64 offset = offsetof(PAR1FILEHEADER, sethash);

			// Process until the end of the file is reached
			while (offset < filesize)
			{
				// How much data should we read?
				size_t want = (size_t)min((u64)buffersize, filesize-offset);
				if (!diskfile->Read(offset, buffer, want))
					break;

				context.Update(buffer, want);

				offset += want;
			}

			// Did we read the whole file
			if (offset < filesize)
				break;

			// Compute the hash value
			MD5Hash hash;
			context.Final(hash);

			// Is it correct?
			if (hash != fileheader.controlhash)
				break;

			// Check that the volume number is ok
			if (fileheader.volumenumber >= 256)
				break;

			// Are there any files?
			if (fileheader.numberoffiles == 0 ||
				fileheader.filelistoffset < sizeof(PAR1FILEHEADER) ||
				fileheader.filelistsize == 0)
				break;

			// Verify that the file list and data offsets are ok:
			// both regions must lie within the file and must not overlap
			if ((fileheader.filelistoffset + fileheader.filelistsize > filesize)
				||
				(fileheader.datasize &&
					(fileheader.dataoffset < sizeof(fileheader) ||
						fileheader.dataoffset + fileheader.datasize > filesize))
				||
				(fileheader.datasize &&
					((fileheader.filelistoffset <= fileheader.dataoffset &&
						fileheader.dataoffset < fileheader.filelistoffset+fileheader.filelistsize)
					||
					(fileheader.dataoffset <= fileheader.filelistoffset &&
						fileheader.filelistoffset < fileheader.dataoffset + fileheader.datasize))))
				break;

			// Check the size of the file list
			if (fileheader.filelistsize > 200000)
				break;

			// If we already have a copy of the file list, make sure this one has the same size
			if (filelist != 0 && filelistsize != fileheader.filelistsize)
				break;

			// Allocate a buffer to hold a copy of the file list
			unsigned char *temp = new unsigned char[(size_t)fileheader.filelistsize];

			// Read the file list into the buffer
			if (!diskfile->Read(fileheader.filelistoffset, temp, (size_t)fileheader.filelistsize))
			{
				delete [] temp;
				break;
			}

			// If we already have a copy of the file list, make sure this copy is identical
			if (filelist != 0)
			{
				bool match = (0 == memcmp(filelist, temp, filelistsize));
				delete [] temp;

				if (!match)
					break;
			}
			else
			{
				// Prepare to scan the file list
				unsigned char *current = temp;
				size_t remaining = (size_t)fileheader.filelistsize;
				unsigned int fileindex = 0;

				// Allocate a buffer to copy each file entry into so that
				// all fields will be correctly aligned in memory.
				PAR1FILEENTRY *fileentry = (PAR1FILEENTRY*)new u64[(remaining + sizeof(u64)-1)/sizeof(u64)];

				// Process until we run out of files or data
				while (remaining > 0 && fileindex < fileheader.numberoffiles)
				{
					// Copy fixed portion of file entry
					memcpy((void*)fileentry, (void*)current, sizeof(PAR1FILEENTRY));

					// Is there enough data remaining
					if (remaining < sizeof(fileentry->entrysize) ||
						remaining < fileentry->entrysize)
						break;

					// Check the length of the filename
					if (fileentry->entrysize <= sizeof(PAR1FILEENTRY))
						break;

					// Check the file size (blocksize tracks the largest file)
					if (blocksize < fileentry->filesize)
						blocksize = fileentry->filesize;

					// Copy whole of file entry
					memcpy((void*)fileentry, (void*)current, (size_t)(u64)fileentry->entrysize);

					// Create source file and add it to the appropriate list
					Par1RepairerSourceFile *sourcefile = new Par1RepairerSourceFile(fileentry, searchpath);
					if (fileentry->status & INPARITYVOLUME)
					{
						sourcefiles.push_back(sourcefile);
					}
					else
					{
						extrafiles.push_back(sourcefile);
					}

					remaining -= (size_t)fileentry->entrysize;
					current += (size_t)fileentry->entrysize;

					fileindex++;
				}

				delete [] (u64*)fileentry;

				// Did we find the correct number of files
				if (fileindex < fileheader.numberoffiles)
				{
					// no: roll back everything parsed from this list
					vector<Par1RepairerSourceFile*>::iterator i = sourcefiles.begin();
					while (i != sourcefiles.end())
					{
						Par1RepairerSourceFile *sourcefile = *i;
						delete sourcefile;
						++i;
					}
					sourcefiles.clear();

					i = extrafiles.begin();
					while (i != extrafiles.end())
					{
						Par1RepairerSourceFile *sourcefile = *i;
						delete sourcefile;
						++i;
					}
					extrafiles.clear();

					delete [] temp;
					break;
				}

				// keep this copy as the canonical file list
				filelist = temp;
				filelistsize = (u32)fileheader.filelistsize;
			}

			// Is this a recovery volume?
			if (fileheader.volumenumber > 0)
			{
				// Make sure there is data and that it is the correct size
				if (fileheader.dataoffset == 0 ||
					fileheader.datasize != blocksize)
					break;

				// What volume number is this?
				volumenumber = (u32)(fileheader.volumenumber - 1);

				// Do we already have this volume?
				if (recoveryblocks.find(volumenumber) == recoveryblocks.end())
				{
					// Create a data block
					DataBlock *datablock = new DataBlock;
					datablock->SetLength(blocksize);
					datablock->SetLocation(diskfile, fileheader.dataoffset);

					// Store it in the map
					recoveryblocks.insert(pair<u32, DataBlock*>(volumenumber, datablock));

					havevolume = true;
				}
			}
		} while (false);

		delete [] buffer;
	}

	// We have finished with the file for now
	diskfile->Close();

	if (noiselevel > CommandLine::nlQuiet)
	{
		if (havevolume)
		{
			cout << "Loaded recovery volume " << volumenumber << endl;
		}
		else
		{
			cout << "No new recovery volumes found" << endl;
		}
	}

	// Remember that the file was processed (the map takes ownership)
	bool success = diskfilemap.Insert(diskfile);
	assert(success);

	return true;
}
// Write all cached article segments of the current file to disk and
// release their memory. Uses two locks: the cache flush guard for the
// whole operation and the shorter content guard to snapshot the article
// list and to update the counters afterwards. The FlushLocked flag makes
// concurrent flushes of the same file a no-op.
void ArticleWriter::FlushCache()
{
	detail("Flushing cache for %s", *m_infoName);

	bool directWrite = g_Options->GetDirectWrite() && m_fileInfo->GetOutputInitialized();
	DiskFile outfile;
	bool needBufFile = false;
	int flushedArticles = 0;
	int64 flushedSize = 0;

	{
		ArticleCache::FlushGuard flushGuard = g_ArticleCache->GuardFlush();

		std::vector<ArticleInfo*> cachedArticles;

		{
			Guard contentGuard = g_ArticleCache->GuardContent();

			// another thread is already flushing this file
			if (m_fileInfo->GetFlushLocked())
			{
				return;
			}
			m_fileInfo->SetFlushLocked(true);

			// snapshot the articles that currently hold cached content
			cachedArticles.reserve(m_fileInfo->GetArticles()->size());
			for (ArticleInfo* pa : m_fileInfo->GetArticles())
			{
				if (pa->GetSegmentContent())
				{
					cachedArticles.push_back(pa);
				}
			}
		}

		for (ArticleInfo* pa : cachedArticles)
		{
			if (m_fileInfo->GetDeleted() && !m_fileInfo->GetNzbInfo()->GetParking())
			{
				// the file was deleted during flushing: stop flushing immediately
				break;
			}

			// direct-write mode: one shared output file, opened lazily
			if (directWrite && !outfile.Active())
			{
				if (!outfile.Open(m_fileInfo->GetOutputFilename(), DiskFile::omReadWrite))
				{
					m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
						"Could not open file %s: %s", m_fileInfo->GetOutputFilename(),
						*FileSystem::GetLastErrorMessage());
					// prevent multiple error messages
					pa->DiscardSegment();
					flushedArticles++;
					break;
				}
				needBufFile = true;
			}

			BString<1024> destFile;

			// non-direct mode: each segment goes into its own ".tmp" file
			if (!directWrite)
			{
				destFile.Format("%s.tmp", pa->GetResultFilename());
				if (!outfile.Open(destFile, DiskFile::omWrite))
				{
					m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
						"Could not create file %s: %s", *destFile,
						*FileSystem::GetLastErrorMessage());
					// prevent multiple error messages
					pa->DiscardSegment();
					flushedArticles++;
					break;
				}
				needBufFile = true;
			}

			if (outfile.Active() && needBufFile)
			{
				SetWriteBuffer(outfile, 0);
				needBufFile = false;
			}

			if (directWrite)
			{
				outfile.Seek(pa->GetSegmentOffset());
			}

			if (!g_Options->GetSkipWrite())
			{
				outfile.Write(pa->GetSegmentContent(), pa->GetSegmentSize());
			}

			flushedSize += pa->GetSegmentSize();
			flushedArticles++;

			pa->DiscardSegment();

			if (!directWrite)
			{
				// finalize the per-segment file: close and move into place
				outfile.Close();
				if (!FileSystem::MoveFile(destFile, pa->GetResultFilename()))
				{
					m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
						"Could not rename file %s to %s: %s", *destFile,
						pa->GetResultFilename(), *FileSystem::GetLastErrorMessage());
				}
			}
		}

		outfile.Close();

		{
			Guard contentGuard = g_ArticleCache->GuardContent();
			m_fileInfo->SetCachedArticles(m_fileInfo->GetCachedArticles() - flushedArticles);
			m_fileInfo->SetFlushLocked(false);
		}
	}

	detail("Saved %i articles (%.2f MB) from cache into disk for %s",
		flushedArticles, (float)(flushedSize / 1024.0 / 1024.0), *m_infoName);
}
// Read source data, process it through the RS matrix and write it to disk.
// OpenMP variant: the per-output-block RS computation is parallelized with
// "#pragma omp parallel for"; the shared progress counter is updated with
// an atomic and console output is serialized with a critical section.
bool Par2Creator::ProcessData(u64 blockoffset, size_t blocklength)
{
	// Clear the output buffer
	memset(outputbuffer, 0, chunksize * recoveryblockcount);

	// If we have defered computation of the file hash and block crc and hashes
	// sourcefile and sourceindex will be used to update them during
	// the main recovery block computation
	vector<Par2CreatorSourceFile*>::iterator sourcefile = sourcefiles.begin();
	u32 sourceindex = 0;

	vector<DataBlock>::iterator sourceblock;
	u32 inputblock;

	// track the currently open input file so consecutive blocks from the
	// same file don't reopen it
	DiskFile *lastopenfile = NULL;

	// For each input block
	for ((sourceblock=sourceblocks.begin()),(inputblock=0);
	     sourceblock != sourceblocks.end();
	     ++sourceblock, ++inputblock)
	{
		// Are we reading from a new file?
		if (lastopenfile != (*sourceblock).GetDiskFile())
		{
			// Close the last file
			if (lastopenfile != NULL)
			{
				lastopenfile->Close();
			}

			// Open the new file
			lastopenfile = (*sourceblock).GetDiskFile();
			if (!lastopenfile->Open())
			{
				return false;
			}
		}

		// Read data from the current input block
		if (!sourceblock->ReadData(blockoffset, blocklength, inputbuffer))
			return false;

		if (deferhashcomputation)
		{
			// deferred hashing is only valid when processing whole blocks
			assert(blockoffset == 0 && blocklength == blocksize);
			assert(sourcefile != sourcefiles.end());

			(*sourcefile)->UpdateHashes(sourceindex, inputbuffer, blocklength);
		}

		// For each output block
		#pragma omp parallel for
		for (u32 outputblock=0; outputblock<recoveryblockcount; outputblock++)
		{
			// Select the appropriate part of the output buffer
			void *outbuf = &((u8*)outputbuffer)[chunksize * outputblock];

			// Process the data through the RS matrix
			rs.Process(blocklength, inputblock, inputbuffer, outputblock, outbuf);

			if (noiselevel > CommandLine::nlQuiet)
			{
				// Update a progress indicator (progress is shared between
				// threads, hence the atomic increment and critical output)
				u32 oldfraction = (u32)(1000 * progress / totaldata);
				#pragma omp atomic
				progress += blocklength;
				u32 newfraction = (u32)(1000 * progress / totaldata);
				if (oldfraction != newfraction)
				{
					#pragma omp critical
					cout << "Processing: " << newfraction/10 << '.' << newfraction%10 << "%\r" << flush;
				}
			}
		}

		// Work out which source file the next block belongs to
		if (++sourceindex >= (*sourcefile)->BlockCount())
		{
			sourceindex = 0;
			++sourcefile;
		}
	}

	// Close the last file
	if (lastopenfile != NULL)
	{
		lastopenfile->Close();
	}

	if (noiselevel > CommandLine::nlQuiet)
		cout << "Writing recovery packets\r";

	// For each output block
	for (u32 outputblock=0; outputblock<recoveryblockcount;outputblock++)
	{
		// Select the appropriate part of the output buffer
		char *outbuf = &((char*)outputbuffer)[chunksize * outputblock];

		// Write the data to the recovery packet
		if (!recoverypackets[outputblock].WriteData(blockoffset, blocklength, outbuf))
			return false;
	}

	if (noiselevel > CommandLine::nlQuiet)
		cout << "Wrote " << recoveryblockcount * blocklength << " bytes to disk" << endl;

	return true;
}