Example No. 1
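/* writeDatabase
 *	Write the contents of the backup database to fid: the database
 *	header, each initial dump followed by its appended dumps (and, for
 *	every dump, its tapes and volume fragments), the three text blocks
 *	(dump schedule, volume set, tape hosts) and a closing SD_END
 *	structure header.
 */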
afs_int32
writeDatabase(struct ubik_trans *ut, int fid)
{
    dbadr dbAddr, dbAppAddr;
    struct dump diskDump, apDiskDump;
    dbadr tapeAddr;
    struct tape diskTape;
    dbadr volFragAddr;
    struct volFragment diskVolFragment;
    struct volInfo diskVolInfo;
    int length, hash;
    int old = 0;
    int entrySize;
    afs_int32 code = 0, tcode;
    afs_int32 appDumpAddrs[MAXAPPENDS], numaddrs, appcount, j;

    struct memoryHashTable *mht;

    LogDebug(4, "writeDatabase:\n");

    /* write out a header identifying this database etc */
    tcode = writeDbHeader(fid);
    if (tcode) {
	LogError(tcode, "writeDatabase: Can't write Header\n");
	ERROR(tcode);
    }

    /* write out the tree of dump structures */

    mht = ht_GetType(HT_dumpIden_FUNCTION, &entrySize);
    if (!mht) {
	LogError(BUDB_BADARGUMENT, "writeDatabase: Can't get dump type\n");
	ERROR(BUDB_BADARGUMENT);
    }

    for (old = 0; old <= 1; old++) {
	/*oldnew */
	/* only two states, old or not old */
	length = (old ? mht->oldLength : mht->length);
	if (!length)
	    continue;

	for (hash = 0; hash < length; hash++) {
	    /*hashBuckets */
	    /* dump all the dumps in this hash bucket
	     */
	    for (dbAddr = ht_LookupBucket(ut, mht, hash, old); dbAddr; dbAddr = ntohl(diskDump.idHashChain)) {	/*initialDumps */
		/* now check if this dump had any errors/inconsistencies.
		 * If so, don't dump it
		 */
		if (badEntry(dbAddr)) {
		    LogError(0,
			     "writeDatabase: Damaged dump entry at addr 0x%x\n",
			     dbAddr);
		    Log("     Skipping remainder of dumps on hash chain %d\n",
			hash);
		    break;
		}

		tcode =
		    cdbread(ut, dump_BLOCK, dbAddr, &diskDump,
			    sizeof(diskDump));
		if (tcode) {
		    LogError(tcode,
			     "writeDatabase: Can't read dump entry (addr 0x%x)\n",
			     dbAddr);
		    Log("     Skipping remainder of dumps on hash chain %d\n",
			hash);
		    break;
		}

		/* Skip appended dumps; only start with initial dumps */
		if (diskDump.initialDumpID != 0)
		    continue;

		/* Follow the appended dump chain from the initial dump so the
		 * dumps are written in the order needed for restore.
		 */
		appcount = numaddrs = 0;
		for (dbAppAddr = dbAddr; dbAppAddr;
		     dbAppAddr = ntohl(apDiskDump.appendedDumpChain)) {
		    /*appendedDumps */
		    /* Check to see if we have a circular loop of appended dumps */
		    for (j = 0; j < numaddrs; j++) {
			if (appDumpAddrs[j] == dbAppAddr)
			    break;	/* circular loop */
		    }
		    if (j < numaddrs) {	/* circular loop */
			Log("writeDatabase: Circular loop found in appended dumps\n");
			Log("Skipping rest of appended dumps of dumpID %u\n",
			    ntohl(diskDump.id));
			break;
		    }
		    if (numaddrs >= MAXAPPENDS)
			numaddrs = MAXAPPENDS - 1;	/* don't overflow */
		    appDumpAddrs[numaddrs] = dbAppAddr;
		    numaddrs++;

		    /* If we've dumped 1000 appended dumps, assume a loop */
		    if (appcount >= 5 * MAXAPPENDS) {
			Log("writeDatabase: Potential circular loop of appended dumps\n");
			Log("Skipping rest of appended dumps of dumpID %u. Dumped %d\n", ntohl(diskDump.id), appcount);
			break;
		    }
		    appcount++;

		    /* Read the dump entry */
		    if (dbAddr == dbAppAddr) {
			/* First time through, don't need to read the dump entry again */
			memcpy(&apDiskDump, &diskDump, sizeof(diskDump));
		    } else {
			if (badEntry(dbAppAddr)) {
			    LogError(0,
				     "writeDatabase: Damaged appended dump entry at addr 0x%x\n",
				     dbAppAddr);
			    Log("     Skipping this and remainder of appended dumps of initial DumpID %u\n", ntohl(diskDump.id));
			    break;
			}

			tcode =
			    cdbread(ut, dump_BLOCK, dbAppAddr, &apDiskDump,
				    sizeof(apDiskDump));
			if (tcode) {
			    LogError(tcode,
				     "writeDatabase: Can't read appended dump entry (addr 0x%x)\n",
				     dbAppAddr);
			    Log("     Skipping this and remainder of appended dumps of initial DumpID %u\n", ntohl(diskDump.id));
			    break;
			}

			/* Verify that this appended dump points to the initial dump */
			if (ntohl(apDiskDump.initialDumpID) !=
			    ntohl(diskDump.id)) {
			    LogError(0,
				     "writeDatabase: Appended dumpID %u does not reference initial dumpID %u\n",
				     ntohl(apDiskDump.id),
				     ntohl(diskDump.id));
			    Log("     Skipping this appended dump\n");
			    continue;
			}
		    }

		    /* Save the dump entry */
		    tcode = writeDump(fid, &apDiskDump);
		    if (tcode) {
			LogError(tcode,
				 "writeDatabase: Can't write dump entry\n");
			ERROR(tcode);
		    }

		    /* For each tape on this dump
		     */
		    for (tapeAddr = ntohl(apDiskDump.firstTape); tapeAddr; tapeAddr = ntohl(diskTape.nextTape)) {	/*tapes */
			/* read the tape entry */
			tcode =
			    cdbread(ut, tape_BLOCK, tapeAddr, &diskTape,
				    sizeof(diskTape));
			if (tcode) {
			    LogError(tcode,
				     "writeDatabase: Can't read tape entry (addr 0x%x) of dumpID %u\n",
				     tapeAddr, ntohl(apDiskDump.id));
			    Log("     Skipping this and remaining tapes in the dump (and all their volumes)\n");
			    break;
			}

			/* Save the tape entry */
			tcode =
			    writeTape(fid, &diskTape, ntohl(apDiskDump.id));
			if (tcode) {
			    LogError(tcode,
				     "writeDatabase: Can't write tape entry\n");
			    ERROR(tcode);
			}

			/* For each volume on this tape.
			 */
			for (volFragAddr = ntohl(diskTape.firstVol); volFragAddr; volFragAddr = ntohl(diskVolFragment.sameTapeChain)) {	/*volumes */
			    /* Read the volume Fragment entry */
			    tcode =
				cdbread(ut, volFragment_BLOCK, volFragAddr,
					&diskVolFragment,
					sizeof(diskVolFragment));
			    if (tcode) {
				LogError(tcode,
					 "writeDatabase: Can't read volfrag entry (addr 0x%x) of dumpID %u\n",
					 volFragAddr, ntohl(apDiskDump.id));
				Log("     Skipping this and remaining volumes on tape '%s'\n", diskTape.name);
				break;
			    }

			    /* Read the volume Info entry */
			    tcode =
				cdbread(ut, volInfo_BLOCK,
					ntohl(diskVolFragment.vol),
					&diskVolInfo, sizeof(diskVolInfo));
			    if (tcode) {
				LogError(tcode,
					 "writeDatabase: Can't read volinfo entry (addr 0x%x) of dumpID %u\n",
					 ntohl(diskVolFragment.vol),
					 ntohl(apDiskDump.id));
				Log("     Skipping volume on tape '%s'\n",
				    diskTape.name);
				continue;
			    }

			    /* Save the volume entry */
			    tcode =
				writeVolume(ut, fid, &diskVolFragment,
					    &diskVolInfo,
					    ntohl(apDiskDump.id),
					    diskTape.name);
			    if (tcode) {
				LogError(tcode,
					 "writeDatabase: Can't write volume entry\n");
				ERROR(tcode);
			    }
			}	/*volumes */
		    }		/*tapes */
		}		/*appendedDumps */
	    }			/*initialDumps */
	}			/*hashBuckets */
    }				/*oldnew */

    /* write out the textual configuration information */
    tcode = writeText(ut, fid, TB_DUMPSCHEDULE);
    if (tcode) {
	LogError(tcode, "writeDatabase: Can't write dump schedule\n");
	ERROR(tcode);
    }
    tcode = writeText(ut, fid, TB_VOLUMESET);
    if (tcode) {
	LogError(tcode, "writeDatabase: Can't write volume set\n");
	ERROR(tcode);
    }
    tcode = writeText(ut, fid, TB_TAPEHOSTS);
    if (tcode) {
	LogError(tcode, "writeDatabase: Can't write tape hosts\n");
	ERROR(tcode);
    }

    tcode = writeStructHeader(fid, SD_END);
    if (tcode) {
	LogError(tcode, "writeDatabase: Can't write end savedb\n");
	ERROR(tcode);
    }

  error_exit:
    doneWriting(code);
    return (code);
}
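The appended-dump walk above defends against a corrupted chain in two ways: it records every address it has already visited in appDumpAddrs (up to MAXAPPENDS of them) and it also caps the total number of iterations at 5 * MAXAPPENDS. Below is a minimal, self-contained C sketch of that same loop-guard pattern; it is not OpenAFS code, and the chain_next table, walk_chain() and the MAXAPPENDS value of 200 are illustrative stand-ins for following chain pointers read off disk.

#include <stdio.h>

#define MAXAPPENDS 200	/* assumed value; the real constant lives in budb */

/* Hypothetical in-memory stand-in for the on-disk entries: each slot holds
 * the index of the next entry in the chain (0 = end of chain).
 * Entry 1 -> 2 -> 3 -> 1 forms a deliberate loop. */
static unsigned int chain_next[8] = { 0, 2, 3, 1, 0, 0, 0, 0 };

static int
walk_chain(unsigned int start)
{
    unsigned int seen[MAXAPPENDS];
    unsigned int a;
    int nseen = 0, count = 0, j;

    for (a = start; a; a = chain_next[a]) {
	/* explicit check against the addresses already visited */
	for (j = 0; j < nseen; j++) {
	    if (seen[j] == a) {
		printf("circular chain detected at entry %u\n", a);
		return -1;
	    }
	}
	if (nseen < MAXAPPENDS)	/* don't overflow the visited list */
	    seen[nseen++] = a;

	/* hard cap as a second line of defence, mirroring 5 * MAXAPPENDS */
	if (++count >= 5 * MAXAPPENDS) {
	    printf("potential circular chain, giving up after %d entries\n",
		   count);
	    return -1;
	}
	/* ... a real caller would read and write out the entry here ... */
    }
    return 0;
}

int
main(void)
{
    return walk_chain(1) == -1 ? 0 : 1;	/* expect the 1->2->3->1 loop to be caught */
}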
Example No. 2
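	// CryptoManager::cryptThread
	// Walks every database page and marks the pages whose on-disk crypt
	// state differs from the requested one, so that writing them back
	// re-encrypts (or decrypts) them. Progress is periodically saved into
	// the DB header, and an exclusive threadLock ensures only one crypt
	// thread runs at a time.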
	void CryptoManager::cryptThread()
	{
		ISC_STATUS_ARRAY status_vector;

		try
		{
			// Try to take the crypt mutex.
			// If we can't take it there is nothing to do - cryptThread already runs in our process.
			MutexEnsureUnlock guard(cryptThreadMtx, FB_FUNCTION);
			if (!guard.tryEnter())
			{
				return;
			}

			// establish context
			UserId user;
			user.usr_user_name = "(Crypt thread)";

			Jrd::Attachment* const attachment = Jrd::Attachment::create(&dbb);
			RefPtr<SysAttachment> jAtt(new SysAttachment(attachment));
			attachment->att_interface = jAtt;
			attachment->att_filename = dbb.dbb_filename;
			attachment->att_user = &user;

			BackgroundContextHolder tdbb(&dbb, attachment, status_vector, FB_FUNCTION);
			tdbb->tdbb_quantum = SWEEP_QUANTUM;

			ULONG lastPage = getLastPage(tdbb);
			ULONG runpage = 1;
			Stack<ULONG> pages;

			// Take the exclusive threadLock.
			// If we can't take it there is nothing to do - cryptThread already runs somewhere else.
			if (!LCK_lock(tdbb, threadLock, LCK_EX, LCK_NO_WAIT))
			{
				return;
			}

			bool lckRelease = false;

			try
			{
				do
				{
					// Check whether there is some work to do
					while ((runpage = currentPage.exchangeAdd(+1)) < lastPage)
					{
						// forced terminate
						if (down)
						{
							break;
						}

						// nbackup state check
						if (dbb.dbb_backup_manager && dbb.dbb_backup_manager->getState() != nbak_state_normal)
						{
							if (currentPage.exchangeAdd(-1) >= lastPage)
							{
								// currentPage was set to the last page by the thread closing the database
								break;
							}
							THD_sleep(100);
							continue;
						}

						// scheduling
						if (--tdbb->tdbb_quantum < 0)
						{
							JRD_reschedule(tdbb, SWEEP_QUANTUM, true);
						}

						// writing the page to disk will change its crypt status in the usual way
						WIN window(DB_PAGE_SPACE, runpage);
						Ods::pag* page = CCH_FETCH(tdbb, &window, LCK_write, pag_undefined);
						if (page && page->pag_type <= pag_max &&
							(bool(page->pag_flags & Ods::crypted_page) != crypt) &&
							Ods::pag_crypt_page[page->pag_type])
						{
							CCH_MARK(tdbb, &window);
							pages.push(runpage);
						}
						CCH_RELEASE_TAIL(tdbb, &window);

						// periodically save currentPage into the DB header
						++runpage;
						if ((runpage & 0x3FF) == 0)
						{
							writeDbHeader(tdbb, runpage, pages);
						}
					}

					// At this point all pages with numbers < lastPage are guaranteed
					// to have changed crypt state. Check for pages added in the meantime.
					lastPage = getLastPage(tdbb);

					// forced terminate
					if (down)
					{
						break;
					}
				} while (runpage < lastPage);

				// Finalize crypt
				if (!down)
				{
					writeDbHeader(tdbb, 0, pages);
				}

				// Release exclusive lock on StartCryptThread
				lckRelease = true;
				LCK_release(tdbb, threadLock);
			}
			catch (const Exception&)
			{
				try
				{
					if (!lckRelease)
					{
						// try to save current state of crypt thread
						if (!down)
						{
							writeDbHeader(tdbb, runpage, pages);
						}

						// Release exclusive lock on StartCryptThread
						LCK_release(tdbb, threadLock);
					}
				}
				catch (const Exception&)
				{ }

				throw;
			}
		}
		catch (const Exception& ex)
		{
			// Error during context creation - we can't even release the lock
			iscLogException("Crypt thread:", ex);
		}
	}
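cryptThread hands out work through an atomic counter: each call to currentPage.exchangeAdd(+1) claims the next unprocessed page number, so repeated passes (or several threads) never touch the same page twice, and the loop ends once the counter passes lastPage. The C sketch below illustrates only that claiming pattern; it is not Firebird code, the names current_page, worker, NPAGES and NWORKERS are assumptions, and it leaves out the page fetch/mark/release, nbackup checks, header rewrites and locking that the real thread performs.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NPAGES   1000	/* assumed database size, in pages */
#define NWORKERS 4	/* assumed number of concurrent workers */

static atomic_uint current_page = 1;	/* page 0 treated as the header here (assumption) */
static const unsigned int last_page = NPAGES;
static _Atomic int processed[NPAGES];	/* how many times each page was claimed */

static void *
worker(void *arg)
{
    unsigned int runpage;

    (void)arg;
    /* every fetch-and-add hands out a distinct page number, so no page is claimed twice */
    while ((runpage = atomic_fetch_add(&current_page, 1)) < last_page)
	processed[runpage]++;	/* stands in for fetch / mark / release of the page */
    return NULL;
}

int
main(void)
{
    pthread_t t[NWORKERS];
    int i, errors = 0;

    for (i = 0; i < NWORKERS; i++)
	pthread_create(&t[i], NULL, worker, NULL);
    for (i = 0; i < NWORKERS; i++)
	pthread_join(t[i], NULL);

    for (i = 1; i < NPAGES; i++)
	if (processed[i] != 1)
	    errors++;
    printf("every page claimed exactly once: %s\n", errors ? "no" : "yes");
    return errors != 0;
}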