Example #1
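// Serve one article segment to the NNTP client: use the in-memory cache or an on-disk
// cache file when available, otherwise yEnc-encode the segment from the source file on
// the fly, optionally filling both caches for later requests.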
void NntpProcessor::SendSegment()
{
	detail("[%i] Sending segment %s (%i=%lli:%i)", m_id, *m_filename, m_part, (long long)m_offset, m_size);

	if (m_speed > 0)
	{
		m_start = Util::GetCurrentTicks();
	}

	BString<1024> fullFilename("%s/%s", m_dataDir, *m_filename);
	BString<1024> cacheFileDir("%s/%s", m_cacheDir, *m_filename);
	BString<1024> cacheFileName("%i=%lli-%i", m_part, (long long)m_offset, m_size);
	BString<1024> cacheFullFilename("%s/%s", *cacheFileDir, *cacheFileName);
	BString<1024> cacheKey("%s/%s", *m_filename, *cacheFileName);

	const char* cachedData = nullptr;
	int cachedSize;
	if (m_cache)
	{
		m_cache->Find(cacheKey, cachedData, cachedSize);
	}

	DiskFile cacheFile;
	bool readCache = !cachedData && m_cacheDir && cacheFile.Open(cacheFullFilename, DiskFile::omRead);
	bool writeCache = !cachedData && m_cacheDir && !readCache;
	StringBuilder cacheMem;
	if (m_cache && !cachedData)
	{
		cacheMem.Reserve((int)(m_size * 1.1));
	}

	CString errmsg;
	if (writeCache && !FileSystem::ForceDirectories(cacheFileDir, errmsg))
	{
		error("Could not create directory %s: %s", *cacheFileDir, *errmsg);
	}

	if (writeCache && !cacheFile.Open(cacheFullFilename, DiskFile::omWrite))
	{
		error("Could not create file %s: %s", *cacheFullFilename, *FileSystem::GetLastErrorMessage());
	}

	if (!cachedData && !readCache && !FileSystem::FileExists(fullFilename))
	{
		m_connection->WriteLine(CString::FormatStr("430 Article not found\r\n"));
		return;
	}

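	// The encoder callback receives each encoded chunk: it is appended to the in-memory
	// cache buffer, written to the on-disk cache file (if enabled) and sent to the client.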
	YEncoder encoder(fullFilename, m_part, m_offset, m_size, 
		[proc = this, writeCache, &cacheFile, &cacheMem](const char* buf, int size)
		{
			if (proc->m_cache)
			{
				cacheMem.Append(buf);
			}
			if (writeCache)
			{
				cacheFile.Write(buf, size);
			}
			proc->SendData(buf, size);
		});

	if (!cachedData && !readCache && !encoder.OpenFile(errmsg))
	{
		m_connection->WriteLine(CString::FormatStr("403 %s\r\n", *errmsg));
		return;
	}

	m_connection->WriteLine(CString::FormatStr("%i, 0 %s\r\n", m_sendHeaders ? 222 : 220, m_messageid));
	if (m_sendHeaders)
	{
		m_connection->WriteLine(CString::FormatStr("Message-ID: %s\r\n", m_messageid));
		m_connection->WriteLine(CString::FormatStr("Subject: \"%s\"\r\n", FileSystem::BaseFileName(m_filename)));
		m_connection->WriteLine("\r\n");
	}

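	// Send the article body from the in-memory cache, from the on-disk cache file,
	// or by encoding the source file now.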
	if (cachedData)
	{
		SendData(cachedData, cachedSize);
	}
	else if (readCache)
	{
		cacheFile.Seek(0, DiskFile::soEnd);
		int size = (int)cacheFile.Position();
		CharBuffer buf(size);
		cacheFile.Seek(0);
		if (cacheFile.Read((char*)buf, size) != size)
		{
			error("Could not read file %s: %s", *cacheFullFilename, *FileSystem::GetLastErrorMessage());
		}
		if (m_cache)
		{
			cacheMem.Append(buf, size);
		}
		SendData(buf, size);
	}
	else
	{
		encoder.WriteSegment();
	}

	if (!cachedData && cacheMem.Length() > 0)
	{
		m_cache->Append(cacheKey, cacheMem, cacheMem.Length());
	}

	m_connection->WriteLine(".\r\n");
}
Example #2
// Read source data, process it through the RS matrix and write it to disk.
bool Par2Creator::ProcessData(u64 blockoffset, size_t blocklength)
{
  // Clear the output buffer
  memset(outputbuffer, 0, chunksize * recoveryblockcount);

  // If we have deferred computation of the file hash and block CRC/hashes,
  // sourcefile and sourceindex will be used to update them during
  // the main recovery block computation.
  vector<Par2CreatorSourceFile*>::iterator sourcefile = sourcefiles.begin();
  u32 sourceindex = 0;

  vector<DataBlock>::iterator sourceblock;
  u32 inputblock;

  DiskFile *lastopenfile = NULL;

  // For each input block
  for ((sourceblock=sourceblocks.begin()),(inputblock=0);
       sourceblock != sourceblocks.end();
       ++sourceblock, ++inputblock)
  {
    // Are we reading from a new file?
    if (lastopenfile != (*sourceblock).GetDiskFile())
    {
      // Close the last file
      if (lastopenfile != NULL)
      {
        lastopenfile->Close();
      }

      // Open the new file
      lastopenfile = (*sourceblock).GetDiskFile();
      if (!lastopenfile->Open())
      {
        return false;
      }
    }

    // Read data from the current input block
    if (!sourceblock->ReadData(blockoffset, blocklength, inputbuffer))
      return false;

    if (deferhashcomputation)
    {
      assert(blockoffset == 0 && blocklength == blocksize);
      assert(sourcefile != sourcefiles.end());

      (*sourcefile)->UpdateHashes(sourceindex, inputbuffer, blocklength);
    }

    // Function that does the subtask in multiple threads if appropriate.
    if (!this->CreateParityBlocks(blocklength, inputblock))
      return false;
	
    // Work out which source file the next block belongs to
    if (++sourceindex >= (*sourcefile)->BlockCount())
    {
      sourceindex = 0;
      ++sourcefile;
    }
  }

  // Close the last file
  if (lastopenfile != NULL)
  {
    lastopenfile->Close();
  }

  if (noiselevel > CommandLine::nlQuiet)
    cout << "Writing recovery packets\r";

  // For each output block
  for (u32 outputblock=0; outputblock<recoveryblockcount;outputblock++)
  {
    // Select the appropriate part of the output buffer
    char *outbuf = &((char*)outputbuffer)[chunksize * outputblock];

    // Write the data to the recovery packet
    if (!recoverypackets[outputblock].WriteData(blockoffset, blocklength, outbuf))
      return false;
  }

  if (noiselevel > CommandLine::nlQuiet)
    cout << "Wrote " << recoveryblockcount * blocklength << " bytes to disk" << endl;

  return true;
}
Example #3
// Attempt to verify all of the source files
bool Par1Repairer::VerifySourceFiles(void) {
	bool finalresult = true;

	u32 filenumber = 0;
	vector<Par1RepairerSourceFile*>::iterator sourceiterator = sourcefiles.begin();
	while (sourceiterator != sourcefiles.end()) {
		Par1RepairerSourceFile *sourcefile = *sourceiterator;

		string filename = sourcefile->FileName();

		// Check to see if we have already used this file
		if (diskfilemap.Find(filename) != 0) {
			string path;
			string name;
			DiskFile::SplitRelativeFilename(filename, path, name);

			// The file has already been used!
//			cerr << "Source file " << filenumber+1 << " is a duplicate." << endl; // Original
			cerr << "Source file " << name         << " is a duplicate." << endl; // Imported
			finalresult = false;
		} else {

			DiskFile *diskfile = new DiskFile;

			// Does the target file exist
			if (diskfile->Open(filename)) {
				// Yes. Record that fact.
				sourcefile->SetTargetExists(true);

				// Remember that the DiskFile is the target file
				sourcefile->SetTargetFile(diskfile);

				// Remember that we have processed this file
				bool success = diskfilemap.Insert(diskfile);
				assert(success);

				// Do the actual verification
				if (!VerifyDataFile(diskfile, sourcefile))
					finalresult = false;

				// We have finished with the file for now
				diskfile->Close();

				// Find out how much data we have found
				UpdateVerificationResults();
			} else {
				// The file does not exist.
				delete diskfile;

				if (noiselevel > CommandLine::nlSilent) {
					string path;
					string name;
					DiskFile::SplitFilename(filename, path, name);

					cout << "Target: \"" << name << "\" - missing." << endl;
				}
			}
		}
		++sourceiterator;
		++filenumber;
	}

	return finalresult;
}
Example #4
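// Parse one PAR1 recovery file: verify its header and control hash, load (or cross-check)
// the embedded file list, and register the recovery volume data block it may contain.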
bool Par1Repairer::LoadRecoveryFile(string filename) {
	// Skip the file if it has already been processed
	if (diskfilemap.Find(filename) != 0) {
		return true;
	}

	DiskFile *diskfile = new DiskFile;

	// Open the file
	if (!diskfile->Open(filename)) {
		// If we could not open the file, ignore the error and
		// proceed to the next file
		delete diskfile;
		return true;
	}

	if (noiselevel > CommandLine::nlSilent) {
		string path;
		string name;
		DiskFile::SplitFilename(filename, path, name);
		cout << "Loading \"" << name << "\"." << endl;
	}

	parlist.push_back(filename);

	bool havevolume = false;
	u32 volumenumber = 0;

	// How big is the file
	u64 filesize = diskfile->FileSize();
	if (filesize >= sizeof(PAR1FILEHEADER)) {
		// Allocate a buffer to read data into
		size_t buffersize = (size_t)min((u64)1048576, filesize);
		u8 *buffer = new u8[buffersize];

		do {
			PAR1FILEHEADER fileheader;
			if (!diskfile->Read(0, &fileheader, sizeof(fileheader)))
				break;

			// Is this really a PAR file?
			if (fileheader.magic != par1_magic)
				break;

			// Is the version number correct?
			if (fileheader.fileversion != 0x00010000)
				break;

			ignore16kfilehash = (fileheader.programversion == smartpar11);

			// Prepare to carry out MD5 Hash check of the Control Hash
			MD5Context context;
			u64 offset = offsetof(PAR1FILEHEADER, sethash);

			// Process until the end of the file is reached
			while (offset < filesize) {
				// How much data should we read?
				size_t want = (size_t)min((u64)buffersize, filesize-offset);
				if (!diskfile->Read(offset, buffer, want))
					break;

				context.Update(buffer, want);

				offset += want;
			}

			// Did we read the whole file
			if (offset < filesize)
				break;

			// Compute the hash value
			MD5Hash hash;
			context.Final(hash);

			// Is it correct?
			if (hash != fileheader.controlhash)
				break;

			// Check that the volume number is ok
			if (fileheader.volumenumber >= 256)
				break;

			// Are there any files?
			if (fileheader.numberoffiles == 0 ||
					fileheader.filelistoffset < sizeof(PAR1FILEHEADER) ||
					fileheader.filelistsize == 0)
				break;

			// Verify that the file list and data offsets are ok
			if ((fileheader.filelistoffset + fileheader.filelistsize > filesize)
					||
					(fileheader.datasize && (fileheader.dataoffset < sizeof(fileheader) || fileheader.dataoffset + fileheader.datasize > filesize))
					||
					(fileheader.datasize && ((fileheader.filelistoffset <= fileheader.dataoffset && fileheader.dataoffset < fileheader.filelistoffset+fileheader.filelistsize) || (fileheader.dataoffset <= fileheader.filelistoffset && fileheader.filelistoffset < fileheader.dataoffset + fileheader.datasize))))
				break;

			// Check the size of the file list
			if (fileheader.filelistsize > 200000)
				break;

			// If we already have a copy of the file list, make sure this one has the same size
			if (filelist != 0 && filelistsize != fileheader.filelistsize)
				break;

			// Allocate a buffer to hold a copy of the file list
			unsigned char *temp = new unsigned char[(size_t)fileheader.filelistsize];

			// Read the file list into the buffer
			if (!diskfile->Read(fileheader.filelistoffset, temp, (size_t)fileheader.filelistsize)) {
				delete [] temp;
				break;
			}

			// If we already have a copy of the file list, make sure this copy is identical
			if (filelist != 0) {
				bool match = (0 == memcmp(filelist, temp, filelistsize));
				delete [] temp;

				if (!match)
					break;
			} else {
				// Prepare to scan the file list
				unsigned char *current = temp;
				size_t remaining = (size_t)fileheader.filelistsize;
				unsigned int fileindex = 0;

				// Allocate a buffer to copy each file entry into so that
				// all fields will be correctly aligned in memory.
				PAR1FILEENTRY *fileentry = (PAR1FILEENTRY*)new u64[(remaining + sizeof(u64)-1)/sizeof(u64)];

				// Process until we run out of files or data
				while (remaining > 0 && fileindex < fileheader.numberoffiles) {
					// Copy fixed portion of file entry
					memcpy((void*)fileentry, (void*)current, sizeof(PAR1FILEENTRY));

					// Is there enough data remaining
					if (remaining < sizeof(fileentry->entrysize) ||
							remaining < fileentry->entrysize)
						break;

					// Check the length of the filename
					if (fileentry->entrysize <= sizeof(PAR1FILEENTRY))
						break;

					// Check the file size
					if (blocksize < fileentry->filesize)
						blocksize = fileentry->filesize;

					// Copy whole of file entry
					memcpy((void*)fileentry, (void*)current, (size_t)(u64)fileentry->entrysize);

					// Create source file and add it to the appropriate list
					Par1RepairerSourceFile *sourcefile = new Par1RepairerSourceFile(fileentry, searchpath);
					if (fileentry->status & INPARITYVOLUME) {
						sourcefiles.push_back(sourcefile);
					} else {
						extrafiles.push_back(sourcefile);
					}

					remaining -= (size_t)fileentry->entrysize;
					current += (size_t)fileentry->entrysize;

					fileindex++;
				}

				delete [] (u64*)fileentry;

				// Did we find the correct number of files
				if (fileindex < fileheader.numberoffiles) {
					vector<Par1RepairerSourceFile*>::iterator i = sourcefiles.begin();
					while (i != sourcefiles.end()) {
						Par1RepairerSourceFile *sourcefile = *i;
						delete sourcefile;
						++i;
					}
					sourcefiles.clear();

					i = extrafiles.begin();
					while (i != extrafiles.end()) {
						Par1RepairerSourceFile *sourcefile = *i;
						delete sourcefile;
						++i;
					}
					extrafiles.clear();

					delete [] temp;
					break;
				}

				filelist = temp;
				filelistsize = (u32)fileheader.filelistsize;
			}

			// Is this a recovery volume?
			if (fileheader.volumenumber > 0) {
				// Make sure there is data and that it is the correct size
				if (fileheader.dataoffset == 0 || fileheader.datasize != blocksize)
					break;

				// What volume number is this?
				volumenumber = (u32)(fileheader.volumenumber - 1);

				// Do we already have this volume?
				if (recoveryblocks.find(volumenumber) == recoveryblocks.end()) {
					// Create a data block
					DataBlock *datablock = new DataBlock;
					datablock->SetLength(blocksize);
					datablock->SetLocation(diskfile, fileheader.dataoffset);

					// Store it in the map
					recoveryblocks.insert(pair<u32, DataBlock*>(volumenumber, datablock));

					havevolume = true;
				}
			}
		} while (false);

		delete [] buffer;
	}

	// We have finished with the file for now
	diskfile->Close();

	if (noiselevel > CommandLine::nlQuiet) {
		if (havevolume) {
			cout << "Loaded recovery volume " << volumenumber << endl;
		} else {
			cout << "No new recovery volumes found" << endl;
		}
	}

	// Remember that the file was processed
	bool success = diskfilemap.Insert(diskfile);
	assert(success);

	return true;
}
Example #5
// Read source data, process it through the RS matrix and write it to disk.
bool Par2Creator::ProcessData(u64 blockoffset, size_t blocklength)
{
  // Clear the output buffer
  memset(outputbuffer, 0, chunksize * recoveryblockcount);

  // If we have deferred computation of the file hash and block CRC/hashes,
  // sourcefile and sourceindex will be used to update them during
  // the main recovery block computation.
  vector<Par2CreatorSourceFile*>::iterator sourcefile = sourcefiles.begin();
  u32 sourceindex = 0;

  vector<DataBlock>::iterator sourceblock;
  u32 inputblock;

  DiskFile *lastopenfile = NULL;

  // For each input block
  for ((sourceblock=sourceblocks.begin()),(inputblock=0);
       sourceblock != sourceblocks.end();
       ++sourceblock, ++inputblock)
  {
    // Are we reading from a new file?
    if (lastopenfile != (*sourceblock).GetDiskFile())
    {
      // Close the last file
      if (lastopenfile != NULL)
      {
        lastopenfile->Close();
      }

      // Open the new file
      lastopenfile = (*sourceblock).GetDiskFile();
      if (!lastopenfile->Open())
      {
        return false;
      }
    }

    // Read data from the current input block
    if (!sourceblock->ReadData(blockoffset, blocklength, inputbuffer))
      return false;

    if (deferhashcomputation)
    {
      assert(blockoffset == 0 && blocklength == blocksize);
      assert(sourcefile != sourcefiles.end());

      (*sourcefile)->UpdateHashes(sourceindex, inputbuffer, blocklength);
    }

    // For each output block
    #pragma omp parallel for
    for (u32 outputblock=0; outputblock<recoveryblockcount; outputblock++)
    {
      // Select the appropriate part of the output buffer
      void *outbuf = &((u8*)outputbuffer)[chunksize * outputblock];

      // Process the data through the RS matrix
      rs.Process(blocklength, inputblock, inputbuffer, outputblock, outbuf);

      if (noiselevel > CommandLine::nlQuiet)
      {
        // Update a progress indicator
        u32 oldfraction = (u32)(1000 * progress / totaldata);
        #pragma omp atomic
        progress += blocklength;
        u32 newfraction = (u32)(1000 * progress / totaldata);

        if (oldfraction != newfraction)
        {
          #pragma omp critical
          cout << "Processing: " << newfraction/10 << '.' << newfraction%10 << "%\r" << flush;
        }
      }
    }

    // Work out which source file the next block belongs to
    if (++sourceindex >= (*sourcefile)->BlockCount())
    {
      sourceindex = 0;
      ++sourcefile;
    }
  }

  // Close the last file
  if (lastopenfile != NULL)
  {
    lastopenfile->Close();
  }

  if (noiselevel > CommandLine::nlQuiet)
    cout << "Writing recovery packets\r";

  // For each output block
  for (u32 outputblock=0; outputblock<recoveryblockcount;outputblock++)
  {
    // Select the appropriate part of the output buffer
    char *outbuf = &((char*)outputbuffer)[chunksize * outputblock];

    // Write the data to the recovery packet
    if (!recoverypackets[outputblock].WriteData(blockoffset, blocklength, outbuf))
      return false;
  }

  if (noiselevel > CommandLine::nlQuiet)
    cout << "Wrote " << recoveryblockcount * blocklength << " bytes to disk" << endl;

  return true;
}
Example #6
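// Write all cached article segments of the current file to disk, either directly into the
// pre-allocated output file (direct write mode) or into one temporary result file per segment.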
void ArticleWriter::FlushCache()
{
	detail("Flushing cache for %s", *m_infoName);

	bool directWrite = g_Options->GetDirectWrite() && m_fileInfo->GetOutputInitialized();
	DiskFile outfile;
	bool needBufFile = false;
	int flushedArticles = 0;
	int64 flushedSize = 0;

	{
		ArticleCache::FlushGuard flushGuard = g_ArticleCache->GuardFlush();

		std::vector<ArticleInfo*> cachedArticles;

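		// Under the cache content lock, mark the file as being flushed and collect
		// all articles that currently have cached segment content.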
		{
			Guard contentGuard = g_ArticleCache->GuardContent();

			if (m_fileInfo->GetFlushLocked())
			{
				return;
			}

			m_fileInfo->SetFlushLocked(true);

			cachedArticles.reserve(m_fileInfo->GetArticles()->size());
			for (ArticleInfo* pa : m_fileInfo->GetArticles())
			{
				if (pa->GetSegmentContent())
				{
					cachedArticles.push_back(pa);
				}
			}
		}

		for (ArticleInfo* pa : cachedArticles)
		{
			if (m_fileInfo->GetDeleted() && !m_fileInfo->GetNzbInfo()->GetParking())
			{
				// the file was deleted during flushing: stop flushing immediately
				break;
			}

			if (directWrite && !outfile.Active())
			{
				if (!outfile.Open(m_fileInfo->GetOutputFilename(), DiskFile::omReadWrite))
				{
					m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
						"Could not open file %s: %s", m_fileInfo->GetOutputFilename(),
						*FileSystem::GetLastErrorMessage());
					// prevent multiple error messages
					pa->DiscardSegment();
					flushedArticles++;
					break;
				}
				needBufFile = true;
			}

			BString<1024> destFile;

			if (!directWrite)
			{
				destFile.Format("%s.tmp", pa->GetResultFilename());
				if (!outfile.Open(destFile, DiskFile::omWrite))
				{
					m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
						"Could not create file %s: %s", *destFile,
						*FileSystem::GetLastErrorMessage());
					// prevent multiple error messages
					pa->DiscardSegment();
					flushedArticles++;
					break;
				}
				needBufFile = true;
			}

			if (outfile.Active() && needBufFile)
			{
				SetWriteBuffer(outfile, 0);
				needBufFile = false;
			}

			if (directWrite)
			{
				outfile.Seek(pa->GetSegmentOffset());
			}

			if (!g_Options->GetSkipWrite())
			{
				outfile.Write(pa->GetSegmentContent(), pa->GetSegmentSize());
			}

			flushedSize += pa->GetSegmentSize();
			flushedArticles++;

			pa->DiscardSegment();

			if (!directWrite)
			{
				outfile.Close();

				if (!FileSystem::MoveFile(destFile, pa->GetResultFilename()))
				{
					m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
						"Could not rename file %s to %s: %s", *destFile, pa->GetResultFilename(),
						*FileSystem::GetLastErrorMessage());
				}
			}
		}

		outfile.Close();

		{
			Guard contentGuard = g_ArticleCache->GuardContent();
			m_fileInfo->SetCachedArticles(m_fileInfo->GetCachedArticles() - flushedArticles);
			m_fileInfo->SetFlushLocked(false);
		}
	}

	detail("Saved %i articles (%.2f MB) from cache into disk for %s", flushedArticles,
		(float)(flushedSize / 1024.0 / 1024.0), *m_infoName);
}
Example #7
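// Assemble the final output file from the downloaded article segments (cached data,
// per-article temporary files, or raw articles), combine the per-segment CRCs and move
// the result into the destination directory, handling renames that happened meanwhile.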
void ArticleWriter::CompleteFileParts()
{
	debug("Completing file parts");
	debug("ArticleFilename: %s", m_fileInfo->GetFilename());

	bool directWrite = (g_Options->GetDirectWrite() || m_fileInfo->GetForceDirectWrite()) && m_fileInfo->GetOutputInitialized();

	BString<1024> nzbName;
	BString<1024> nzbDestDir;
	BString<1024> filename;

	{
		GuardedDownloadQueue guard = DownloadQueue::Guard();
		nzbName = m_fileInfo->GetNzbInfo()->GetName();
		nzbDestDir = m_fileInfo->GetNzbInfo()->GetDestDir();
		filename = m_fileInfo->GetFilename();
	}

	BString<1024> infoFilename("%s%c%s", *nzbName, PATH_SEPARATOR, *filename);

	bool cached = m_fileInfo->GetCachedArticles() > 0;

	if (g_Options->GetRawArticle())
	{
		detail("Moving articles for %s", *infoFilename);
	}
	else if (directWrite && cached)
	{
		detail("Writing articles for %s", *infoFilename);
	}
	else if (directWrite)
	{
		detail("Checking articles for %s", *infoFilename);
	}
	else
	{
		detail("Joining articles for %s", *infoFilename);
	}

	// Ensure the DstDir is created
	CString errmsg;
	if (!FileSystem::ForceDirectories(nzbDestDir, errmsg))
	{
		m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
			"Could not create directory %s: %s", *nzbDestDir, *errmsg);
		return;
	}

	CString ofn;
	if (m_fileInfo->GetForceDirectWrite())
	{
		ofn.Format("%s%c%s", *nzbDestDir, PATH_SEPARATOR, *filename);
	}
	else
	{
		ofn = FileSystem::MakeUniqueFilename(nzbDestDir, *filename);
	}

	DiskFile outfile;
	BString<1024> tmpdestfile("%s.tmp", *ofn);

	if (!g_Options->GetRawArticle() && !directWrite)
	{
		FileSystem::DeleteFile(tmpdestfile);
		if (!outfile.Open(tmpdestfile, DiskFile::omWrite))
		{
			m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
				"Could not create file %s: %s", *tmpdestfile, *FileSystem::GetLastErrorMessage());
			return;
		}
	}
	else if (directWrite && cached)
	{
		if (!outfile.Open(m_outputFilename, DiskFile::omReadWrite))
		{
			m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
				"Could not open file %s: %s", *m_outputFilename, *FileSystem::GetLastErrorMessage());
			return;
		}
		tmpdestfile = *m_outputFilename;
	}
	else if (g_Options->GetRawArticle())
	{
		FileSystem::DeleteFile(tmpdestfile);
		if (!FileSystem::CreateDirectory(ofn))
		{
			m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
				"Could not create directory %s: %s", *ofn, *FileSystem::GetLastErrorMessage());
			return;
		}
	}

	if (outfile.Active())
	{
		SetWriteBuffer(outfile, 0);
	}

	uint32 crc = 0;

	{
		std::unique_ptr<ArticleCache::FlushGuard> flushGuard;
		if (cached)
		{
			flushGuard = std::make_unique<ArticleCache::FlushGuard>(g_ArticleCache->GuardFlush());
		}

		CharBuffer buffer;
		bool firstArticle = true;

		if (!g_Options->GetRawArticle() && !directWrite)
		{
			buffer.Reserve(1024 * 64);
		}

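		// Append every successfully downloaded article to the output: fill gaps with zeroes,
		// write cached segments at their recorded offsets, otherwise copy the per-article
		// result files (or move them verbatim in raw-article mode); combine CRCs for yEnc.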
		for (ArticleInfo* pa : m_fileInfo->GetArticles())
		{
			if (pa->GetStatus() != ArticleInfo::aiFinished)
			{
				continue;
			}

			if (!g_Options->GetRawArticle() && !directWrite && pa->GetSegmentOffset() > -1 &&
				pa->GetSegmentOffset() > outfile.Position() && outfile.Position() > -1)
			{
				memset(buffer, 0, buffer.Size());
				if (!g_Options->GetSkipWrite())
				{
					while (pa->GetSegmentOffset() > outfile.Position() && outfile.Position() > -1 &&
						outfile.Write(buffer, std::min((int)(pa->GetSegmentOffset() - outfile.Position()), buffer.Size())));
				}
			}

			if (pa->GetSegmentContent())
			{
				if (!g_Options->GetSkipWrite())
				{
					outfile.Seek(pa->GetSegmentOffset());
					outfile.Write(pa->GetSegmentContent(), pa->GetSegmentSize());
				}
				pa->DiscardSegment();
			}
			else if (!g_Options->GetRawArticle() && !directWrite && !g_Options->GetSkipWrite())
			{
				DiskFile infile;
				if (pa->GetResultFilename() && infile.Open(pa->GetResultFilename(), DiskFile::omRead))
				{
					int cnt = buffer.Size();
					while (cnt == buffer.Size())
					{
						cnt = (int)infile.Read(buffer, buffer.Size());
						outfile.Write(buffer, cnt);
					}
					infile.Close();
				}
				else
				{
					m_fileInfo->SetFailedArticles(m_fileInfo->GetFailedArticles() + 1);
					m_fileInfo->SetSuccessArticles(m_fileInfo->GetSuccessArticles() - 1);
					m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
						"Could not find file %s for %s [%i/%i]",
						pa->GetResultFilename(), *infoFilename, pa->GetPartNumber(),
						(int)m_fileInfo->GetArticles()->size());
				}
			}
			else if (g_Options->GetRawArticle())
			{
				BString<1024> dstFileName("%s%c%03i", *ofn, PATH_SEPARATOR, pa->GetPartNumber());
				if (!FileSystem::MoveFile(pa->GetResultFilename(), dstFileName))
				{
					m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
						"Could not move file %s to %s: %s", pa->GetResultFilename(),
						*dstFileName, *FileSystem::GetLastErrorMessage());
				}
			}

			if (m_format == Decoder::efYenc)
			{
				crc = firstArticle ? pa->GetCrc() : Crc32::Combine(crc, pa->GetCrc(), pa->GetSegmentSize());
				firstArticle = false;
			}
		}

		buffer.Clear();
	}

	if (outfile.Active())
	{
		outfile.Close();
		if (!directWrite && !FileSystem::MoveFile(tmpdestfile, ofn))
		{
			m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
				"Could not move file %s to %s: %s", *tmpdestfile, *ofn,
				*FileSystem::GetLastErrorMessage());
		}
	}

	if (directWrite)
	{
		if (!FileSystem::SameFilename(m_outputFilename, ofn) &&
			!FileSystem::MoveFile(m_outputFilename, ofn))
		{
			m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
				"Could not move file %s to %s: %s", *m_outputFilename, *ofn,
				*FileSystem::GetLastErrorMessage());
		}

		// if destination directory was changed delete the old directory (if empty)
		int len = strlen(nzbDestDir);
		if (!(!strncmp(nzbDestDir, m_outputFilename, len) &&
			(m_outputFilename[len] == PATH_SEPARATOR || m_outputFilename[len] == ALT_PATH_SEPARATOR)))
		{
			debug("Checking old dir for: %s", *m_outputFilename);
			BString<1024> oldDestDir;
			oldDestDir.Set(m_outputFilename, (int)(FileSystem::BaseFileName(m_outputFilename) - m_outputFilename));
			if (FileSystem::DirEmpty(oldDestDir))
			{
				debug("Deleting old dir: %s", *oldDestDir);
				FileSystem::RemoveDirectory(oldDestDir);
			}
		}
	}

	if (!directWrite)
	{
		for (ArticleInfo* pa : m_fileInfo->GetArticles())
		{
			if (pa->GetResultFilename())
			{
				FileSystem::DeleteFile(pa->GetResultFilename());
			}
		}
	}

	if (m_fileInfo->GetTotalArticles() == m_fileInfo->GetSuccessArticles())
	{
		m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkInfo, "Successfully downloaded %s", *infoFilename);
	}
	else if (m_fileInfo->GetMissedArticles() + m_fileInfo->GetFailedArticles() > 0)
	{
		m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkWarning,
			"%i of %i article downloads failed for \"%s\"",
			m_fileInfo->GetMissedArticles() + m_fileInfo->GetFailedArticles(),
			m_fileInfo->GetTotalArticles(), *infoFilename);
	}
	else
	{
		m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkInfo, "Partially downloaded %s", *infoFilename);
	}

	{
		GuardedDownloadQueue guard = DownloadQueue::Guard();

		m_fileInfo->SetCrc(crc);
		m_fileInfo->SetOutputFilename(ofn);

		if (strcmp(m_fileInfo->GetFilename(), filename))
		{
			// file was renamed during completion, need to move the file
			ofn = FileSystem::MakeUniqueFilename(nzbDestDir, m_fileInfo->GetFilename());
			if (!FileSystem::MoveFile(m_fileInfo->GetOutputFilename(), ofn))
			{
				m_fileInfo->GetNzbInfo()->PrintMessage(Message::mkError,
					"Could not rename file %s to %s: %s", m_fileInfo->GetOutputFilename(),
					*ofn, *FileSystem::GetLastErrorMessage());
			}
			m_fileInfo->SetOutputFilename(ofn);
		}

		if (strcmp(m_fileInfo->GetNzbInfo()->GetDestDir(), nzbDestDir))
		{
			// destination directory was changed during completion, need to move the file
			MoveCompletedFiles(m_fileInfo->GetNzbInfo(), nzbDestDir);
		}
	}
}