bool KArchive::writeFile_impl(const QString &name, const QString &user, const QString &group, uint size, mode_t perm, time_t atime, time_t mtime, time_t ctime, const char *data) { if(!prepareWriting(name, user, group, size, perm, atime, mtime, ctime)) { kdWarning() << "KArchive::writeFile prepareWriting failed" << endl; return false; } // Write data // Note: if data is 0L, don't call writeBlock, it would terminate the KFilterDev if(data && size && !writeData(data, size)) { kdWarning() << "KArchive::writeFile writeData failed" << endl; return false; } if(!doneWriting(size)) { kdWarning() << "KArchive::writeFile doneWriting failed" << endl; return false; } return true; }
/*
 * writeDatabase
 *     Dump the contents of the backup database to 'fid': first a database
 *     header, then every initial dump reachable through the dump-ID hash
 *     table (each followed by its appended dumps, and each dump by its
 *     tapes and their volume fragments), then the textual configuration
 *     sections (dump schedule, volume set, tape hosts) and an SD_END
 *     structure header as a trailer.
 *
 * entry:
 *     ut  - open ubik transaction; used for every cdbread() of the
 *           on-disk structures.
 *     fid - destination handle passed through to the write* helpers
 *           (writeDbHeader, writeDump, writeTape, writeVolume, writeText,
 *           writeStructHeader).
 *
 * exit:
 *     0 on success, otherwise the first fatal error code.  Write failures
 *     are fatal (ERROR() presumably records the code and jumps to
 *     error_exit below); read failures / damaged entries are logged and
 *     the affected chain is skipped, but do not abort the dump.
 */
afs_int32
writeDatabase(struct ubik_trans *ut, int fid)
{
    dbadr dbAddr, dbAppAddr;
    struct dump diskDump, apDiskDump;
    dbadr tapeAddr;
    struct tape diskTape;
    dbadr volFragAddr;
    struct volFragment diskVolFragment;
    struct volInfo diskVolInfo;
    int length, hash;
    int old = 0;
    int entrySize;
    afs_int32 code = 0, tcode;
    afs_int32 appDumpAddrs[MAXAPPENDS], numaddrs, appcount, j;
    struct memoryHashTable *mht;

    LogDebug(4, "writeDatabase:\n");

    /* write out a header identifying this database etc */
    tcode = writeDbHeader(fid);
    if (tcode) {
        LogError(tcode, "writeDatabase: Can't write Header\n");
        ERROR(tcode);
    }

    /* write out the tree of dump structures */
    mht = ht_GetType(HT_dumpIden_FUNCTION, &entrySize);
    if (!mht) {
        /* NOTE(review): tcode is 0 here (writeDbHeader succeeded), so this
         * LogError reports code 0; BUDB_BADARGUMENT would be more accurate. */
        LogError(tcode, "writeDatabase: Can't get dump type\n");
        ERROR(BUDB_BADARGUMENT);
    }

    /* The hash table keeps two generations of buckets ("old" and current);
     * walk both so no dump is missed. */
    for (old = 0; old <= 1; old++) { /*oldnew */
        /* only two states, old or not old */
        length = (old ? mht->oldLength : mht->length);
        if (!length)
            continue;

        for (hash = 0; hash < length; hash++) { /*hashBuckets */
            /* dump all the dumps in this hash bucket; the chain is linked
             * through diskDump.idHashChain (stored in network byte order) */
            for (dbAddr = ht_LookupBucket(ut, mht, hash, old); dbAddr;
                 dbAddr = ntohl(diskDump.idHashChain)) { /*initialDumps */
                /* now check if this dump had any errors/inconsistencies.
                 * If so, don't dump it */
                if (badEntry(dbAddr)) {
                    LogError(0, "writeDatabase: Damaged dump entry at addr 0x%x\n", dbAddr);
                    Log(" Skipping remainder of dumps on hash chain %d\n", hash);
                    break;
                }

                tcode = cdbread(ut, dump_BLOCK, dbAddr, &diskDump, sizeof(diskDump));
                if (tcode) {
                    LogError(tcode, "writeDatabase: Can't read dump entry (addr 0x%x)\n", dbAddr);
                    Log(" Skipping remainder of dumps on hash chain %d\n", hash);
                    break;
                }

                /* Skip appended dumps, only start with initial dumps */
                if (diskDump.initialDumpID != 0)
                    continue;

                /* Skip appended dumps, only start with initial dumps. Then
                 * follow the appended dump chain so they are in order for
                 * restore. */
                appcount = numaddrs = 0;
                for (dbAppAddr = dbAddr; dbAppAddr;
                     dbAppAddr = ntohl(apDiskDump.appendedDumpChain)) { /*appendedDumps */
                    /* Check to see if we have a circular loop of appended
                     * dumps: remember every address visited so far and bail
                     * out if one repeats. */
                    for (j = 0; j < numaddrs; j++) {
                        if (appDumpAddrs[j] == dbAppAddr)
                            break; /* circular loop */
                    }
                    if (j < numaddrs) { /* circular loop */
                        Log("writeDatabase: Circular loop found in appended dumps\n");
                        Log("Skipping rest of appended dumps of dumpID %u\n", ntohl(diskDump.id));
                        break;
                    }
                    /* The address memory is capped at MAXAPPENDS entries;
                     * beyond that the last slot is reused (so very long
                     * chains lose some loop-detection coverage). */
                    if (numaddrs >= MAXAPPENDS)
                        numaddrs = MAXAPPENDS - 1; /* don't overflow */
                    appDumpAddrs[numaddrs] = dbAppAddr;
                    numaddrs++;

                    /* If we dump a 1000 appended dumps, assume a loop */
                    if (appcount >= 5 * MAXAPPENDS) {
                        Log("writeDatabase: Potential circular loop of appended dumps\n");
                        Log("Skipping rest of appended dumps of dumpID %u. Dumped %d\n", ntohl(diskDump.id), appcount);
                        break;
                    }
                    appcount++;

                    /* Read the dump entry */
                    if (dbAddr == dbAppAddr) {
                        /* First time through, don't need to read the dump
                         * entry again */
                        memcpy(&apDiskDump, &diskDump, sizeof(diskDump));
                    } else {
                        if (badEntry(dbAppAddr)) {
                            /* NOTE(review): logs dbAddr (the initial dump's
                             * address) — dbAppAddr looks like the intended
                             * argument here. */
                            LogError(0, "writeDatabase: Damaged appended dump entry at addr 0x%x\n", dbAddr);
                            Log(" Skipping this and remainder of appended dumps of initial DumpID %u\n", ntohl(diskDump.id));
                            break;
                        }

                        tcode = cdbread(ut, dump_BLOCK, dbAppAddr, &apDiskDump, sizeof(apDiskDump));
                        if (tcode) {
                            LogError(tcode, "writeDatabase: Can't read appended dump entry (addr 0x%x)\n", dbAppAddr);
                            Log(" Skipping this and remainder of appended dumps of initial DumpID %u\n", ntohl(diskDump.id));
                            break;
                        }

                        /* Verify that this appended dump points to the
                         * initial dump */
                        if (ntohl(apDiskDump.initialDumpID) != ntohl(diskDump.id)) {
                            LogError(0, "writeDatabase: Appended dumpID %u does not reference initial dumpID %u\n", ntohl(apDiskDump.id), ntohl(diskDump.id));
                            Log(" Skipping this appended dump\n");
                            continue;
                        }
                    }

                    /* Save the dump entry */
                    tcode = writeDump(fid, &apDiskDump);
                    if (tcode) {
                        LogError(tcode, "writeDatabase: Can't write dump entry\n");
                        ERROR(tcode);
                    }

                    /* For each tape on this dump; chain linked through
                     * diskTape.nextTape (network byte order) */
                    for (tapeAddr = ntohl(apDiskDump.firstTape); tapeAddr;
                         tapeAddr = ntohl(diskTape.nextTape)) { /*tapes */
                        /* read the tape entry */
                        tcode = cdbread(ut, tape_BLOCK, tapeAddr, &diskTape, sizeof(diskTape));
                        if (tcode) {
                            LogError(tcode, "writeDatabase: Can't read tape entry (addr 0x%x) of dumpID %u\n", tapeAddr, ntohl(apDiskDump.id));
                            Log(" Skipping this and remaining tapes in the dump (and all their volumes)\n");
                            break;
                        }

                        /* Save the tape entry */
                        tcode = writeTape(fid, &diskTape, ntohl(apDiskDump.id));
                        if (tcode) {
                            LogError(tcode, "writeDatabase: Can't write tape entry\n");
                            ERROR(tcode);
                        }

                        /* For each volume on this tape. */
                        for (volFragAddr = ntohl(diskTape.firstVol); volFragAddr;
                             volFragAddr = ntohl(diskVolFragment.sameTapeChain)) { /*volumes */
                            /* Read the volume Fragment entry */
                            tcode = cdbread(ut, volFragment_BLOCK, volFragAddr, &diskVolFragment, sizeof(diskVolFragment));
                            if (tcode) {
                                LogError(tcode, "writeDatabase: Can't read volfrag entry (addr 0x%x) of dumpID %u\n", volFragAddr, ntohl(apDiskDump.id));
                                Log(" Skipping this and remaining volumes on tape '%s'\n", diskTape.name);
                                break;
                            }

                            /* Read the volume Info entry referenced by the
                             * fragment */
                            tcode = cdbread(ut, volInfo_BLOCK, ntohl(diskVolFragment.vol), &diskVolInfo, sizeof(diskVolInfo));
                            if (tcode) {
                                LogError(tcode, "writeDatabase: Can't read volinfo entry (addr 0x%x) of dumpID %u\n", ntohl(diskVolFragment.vol), ntohl(apDiskDump.id));
                                Log(" Skipping volume on tape '%s'\n", diskTape.name);
                                continue;
                            }

                            /* Save the volume entry */
                            tcode = writeVolume(ut, fid, &diskVolFragment, &diskVolInfo, ntohl(apDiskDump.id), diskTape.name);
                            if (tcode) {
                                LogError(tcode, "writeDatabase: Can't write volume entry\n");
                                ERROR(tcode);
                            }
                        } /*volumes */
                    } /*tapes */
                } /*appendedDumps */
            } /*initialDumps */
        } /*hashBuckets */
    } /*oldnew */

    /* write out the textual configuration information */
    tcode = writeText(ut, fid, TB_DUMPSCHEDULE);
    if (tcode) {
        LogError(tcode, "writeDatabase: Can't write dump schedule\n");
        ERROR(tcode);
    }

    tcode = writeText(ut, fid, TB_VOLUMESET);
    if (tcode) {
        LogError(tcode, "writeDatabase: Can't write volume set\n");
        ERROR(tcode);
    }

    tcode = writeText(ut, fid, TB_TAPEHOSTS);
    if (tcode) {
        LogError(tcode, "writeDatabase: Can't write tape hosts\n");
        ERROR(tcode);
    }

    /* terminate the stream with an SD_END structure header */
    tcode = writeStructHeader(fid, SD_END);
    if (tcode) {
        LogError(tcode, "writeDatabase: Can't write end savedb\n");
        ERROR(tcode);
    }

  error_exit:
    /* always flush/close the output, passing the final status along */
    doneWriting(code);
    return (code);
}
bool KArchive::addLocalFile(const QString &fileName, const QString &destName) { QFileInfo fileInfo(fileName); if(!fileInfo.isFile() && !fileInfo.isSymLink()) { kdWarning() << "KArchive::addLocalFile " << fileName << " doesn't exist or is not a regular file." << endl; return false; } KDE_struct_stat fi; if(KDE_lstat(QFile::encodeName(fileName), &fi) == -1) { kdWarning() << "KArchive::addLocalFile stating " << fileName << " failed: " << strerror(errno) << endl; return false; } if(fileInfo.isSymLink()) { return writeSymLink(destName, fileInfo.readLink(), fileInfo.owner(), fileInfo.group(), fi.st_mode, fi.st_atime, fi.st_mtime, fi.st_ctime); } /*end if*/ uint size = fileInfo.size(); // the file must be opened before prepareWriting is called, otherwise // if the opening fails, no content will follow the already written // header and the tar file is effectively f*cked up QFile file(fileName); if(!file.open(IO_ReadOnly)) { kdWarning() << "KArchive::addLocalFile couldn't open file " << fileName << endl; return false; } if(!prepareWriting(destName, fileInfo.owner(), fileInfo.group(), size, fi.st_mode, fi.st_atime, fi.st_mtime, fi.st_ctime)) { kdWarning() << "KArchive::addLocalFile prepareWriting " << destName << " failed" << endl; return false; } // Read and write data in chunks to minimize memory usage QByteArray array(8 * 1024); int n; uint total = 0; while((n = file.readBlock(array.data(), array.size())) > 0) { if(!writeData(array.data(), n)) { kdWarning() << "KArchive::addLocalFile writeData failed" << endl; return false; } total += n; } Q_ASSERT(total == size); if(!doneWriting(size)) { kdWarning() << "KArchive::addLocalFile doneWriting failed" << endl; return false; } return true; }