Example no. 1
0
afs_int32
writeDatabase(struct ubik_trans *ut, int fid)
{
    dbadr dbAddr, dbAppAddr;
    struct dump diskDump, apDiskDump;
    dbadr tapeAddr;
    struct tape diskTape;
    dbadr volFragAddr;
    struct volFragment diskVolFragment;
    struct volInfo diskVolInfo;
    int length, hash;
    int old = 0;
    int entrySize;
    afs_int32 code = 0, tcode;
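    /* appDumpAddrs[] remembers the addresses of appended dumps already
     * visited so that circular appendedDumpChain links can be detected. */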
    afs_int32 appDumpAddrs[MAXAPPENDS], numaddrs, appcount, j;

    struct memoryHashTable *mht;

    LogDebug(4, "writeDatabase:\n");

    /* write out a header identifying this database etc */
    tcode = writeDbHeader(fid);
    if (tcode) {
	LogError(tcode, "writeDatabase: Can't write Header\n");
	ERROR(tcode);
    }
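
    /* The saved image is written as: this header, then every dump (each
     * initial dump followed by its appended dumps, with the tapes and
     * volume fragments of each dump nested inside it), then the text
     * blocks (dump schedule, volume set, tape hosts) and an SD_END
     * marker. */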

    /* write out the tree of dump structures */

    mht = ht_GetType(HT_dumpIden_FUNCTION, &entrySize);
    if (!mht) {
	LogError(tcode, "writeDatabase: Can't get dump type\n");
	ERROR(BUDB_BADARGUMENT);
    }

    for (old = 0; old <= 1; old++) {
	/*oldnew */
	/* only two states, old or not old */
	length = (old ? mht->oldLength : mht->length);
	if (!length)
	    continue;

	for (hash = 0; hash < length; hash++) {
	    /*hashBuckets */
	    /* dump all the dumps in this hash bucket
	     */
	    for (dbAddr = ht_LookupBucket(ut, mht, hash, old); dbAddr; dbAddr = ntohl(diskDump.idHashChain)) {	/*initialDumps */
		/* now check if this dump had any errors/inconsistencies.
		 * If so, don't dump it
		 */
		if (badEntry(dbAddr)) {
		    LogError(0,
			     "writeDatabase: Damaged dump entry at addr 0x%x\n",
			     dbAddr);
		    Log("     Skipping remainder of dumps on hash chain %d\n",
			hash);
		    break;
		}

		tcode =
		    cdbread(ut, dump_BLOCK, dbAddr, &diskDump,
			    sizeof(diskDump));
		if (tcode) {
		    LogError(tcode,
			     "writeDatabase: Can't read dump entry (addr 0x%x)\n",
			     dbAddr);
		    Log("     Skipping remainder of dumps on hash chain %d\n",
			hash);
		    break;
		}

		/* Skip appended dumps, only start with initial dumps.  Then
		 * follow each initial dump's appended dump chain so they are
		 * written in order for restore.
		 */
		if (diskDump.initialDumpID != 0)
		    continue;

		appcount = numaddrs = 0;
		for (dbAppAddr = dbAddr; dbAppAddr;
		     dbAppAddr = ntohl(apDiskDump.appendedDumpChain)) {
		    /*appendedDumps */
		    /* Check to see if we have a circular loop of appended dumps */
		    for (j = 0; j < numaddrs; j++) {
			if (appDumpAddrs[j] == dbAppAddr)
			    break;	/* circular loop */
		    }
		    if (j < numaddrs) {	/* circular loop */
			Log("writeDatabase: Circular loop found in appended dumps\n");
			Log("Skipping rest of appended dumps of dumpID %u\n",
			    ntohl(diskDump.id));
			break;
		    }
		    if (numaddrs >= MAXAPPENDS)
			numaddrs = MAXAPPENDS - 1;	/* don't overflow */
		    appDumpAddrs[numaddrs] = dbAppAddr;
		    numaddrs++;

		    /* If we have dumped 5 * MAXAPPENDS appended dumps, assume a loop */
		    if (appcount >= 5 * MAXAPPENDS) {
			Log("writeDatabase: Potential circular loop of appended dumps\n");
			Log("Skipping rest of appended dumps of dumpID %u. Dumped %d\n", ntohl(diskDump.id), appcount);
			break;
		    }
		    appcount++;

		    /* Read the dump entry */
		    if (dbAddr == dbAppAddr) {
			/* First time through, don't need to read the dump entry again */
			memcpy(&apDiskDump, &diskDump, sizeof(diskDump));
		    } else {
			if (badEntry(dbAppAddr)) {
			    LogError(0,
				     "writeDatabase: Damaged appended dump entry at addr 0x%x\n",
				     dbAppAddr);
			    Log("     Skipping this and remainder of appended dumps of initial DumpID %u\n", ntohl(diskDump.id));
			    break;
			}

			tcode =
			    cdbread(ut, dump_BLOCK, dbAppAddr, &apDiskDump,
				    sizeof(apDiskDump));
			if (tcode) {
			    LogError(tcode,
				     "writeDatabase: Can't read appended dump entry (addr 0x%x)\n",
				     dbAppAddr);
			    Log("     Skipping this and remainder of appended dumps of initial DumpID %u\n", ntohl(diskDump.id));
			    break;
			}

			/* Verify that this appended dump points to the initial dump */
			if (ntohl(apDiskDump.initialDumpID) !=
			    ntohl(diskDump.id)) {
			    LogError(0,
				     "writeDatabase: Appended dumpID %u does not reference initial dumpID %u\n",
				     ntohl(apDiskDump.id),
				     ntohl(diskDump.id));
			    Log("     Skipping this appended dump\n");
			    continue;
			}
		    }

		    /* Save the dump entry */
		    tcode = writeDump(fid, &apDiskDump);
		    if (tcode) {
			LogError(tcode,
				 "writeDatabase: Can't write dump entry\n");
			ERROR(tcode);
		    }

		    /* For each tape on this dump
		     */
		    for (tapeAddr = ntohl(apDiskDump.firstTape); tapeAddr; tapeAddr = ntohl(diskTape.nextTape)) {	/*tapes */
			/* read the tape entry */
			tcode =
			    cdbread(ut, tape_BLOCK, tapeAddr, &diskTape,
				    sizeof(diskTape));
			if (tcode) {
			    LogError(tcode,
				     "writeDatabase: Can't read tape entry (addr 0x%x) of dumpID %u\n",
				     tapeAddr, ntohl(apDiskDump.id));
			    Log("     Skipping this and remaining tapes in the dump (and all their volumes)\n");
			    break;
			}

			/* Save the tape entry */
			tcode =
			    writeTape(fid, &diskTape, ntohl(apDiskDump.id));
			if (tcode) {
			    LogError(tcode,
				     "writeDatabase: Can't write tape entry\n");
			    ERROR(tcode);
			}

			/* For each volume on this tape.
			 */
			for (volFragAddr = ntohl(diskTape.firstVol); volFragAddr; volFragAddr = ntohl(diskVolFragment.sameTapeChain)) {	/*volumes */
			    /* Read the volume Fragment entry */
			    tcode =
				cdbread(ut, volFragment_BLOCK, volFragAddr,
					&diskVolFragment,
					sizeof(diskVolFragment));
			    if (tcode) {
				LogError(tcode,
					 "writeDatabase: Can't read volfrag entry (addr 0x%x) of dumpID %u\n",
					 volFragAddr, ntohl(apDiskDump.id));
				Log("     Skipping this and remaining volumes on tape '%s'\n", diskTape.name);
				break;
			    }

			    /* Read the volume Info entry */
			    tcode =
				cdbread(ut, volInfo_BLOCK,
					ntohl(diskVolFragment.vol),
					&diskVolInfo, sizeof(diskVolInfo));
			    if (tcode) {
				LogError(tcode,
					 "writeDatabase: Can't read volinfo entry (addr 0x%x) of dumpID %u\n",
					 ntohl(diskVolFragment.vol),
					 ntohl(apDiskDump.id));
				Log("     Skipping volume on tape '%s'\n",
				    diskTape.name);
				continue;
			    }

			    /* Save the volume entry */
			    tcode =
				writeVolume(ut, fid, &diskVolFragment,
					    &diskVolInfo,
					    ntohl(apDiskDump.id),
					    diskTape.name);
			    if (tcode) {
				LogError(tcode,
					 "writeDatabase: Can't write volume entry\n");
				ERROR(tcode);
			    }
			}	/*volumes */
		    }		/*tapes */
		}		/*appendedDumps */
	    }			/*initialDumps */
	}			/*hashBuckets */
    }				/*oldnew */

    /* write out the textual configuration information */
    tcode = writeText(ut, fid, TB_DUMPSCHEDULE);
    if (tcode) {
	LogError(tcode, "writeDatabase: Can't write dump schedule\n");
	ERROR(tcode);
    }
    tcode = writeText(ut, fid, TB_VOLUMESET);
    if (tcode) {
	LogError(tcode, "writeDatabase: Can't write volume set\n");
	ERROR(tcode);
    }
    tcode = writeText(ut, fid, TB_TAPEHOSTS);
    if (tcode) {
	LogError(tcode, "writeDatabase: Can't write tape hosts\n");
	ERROR(tcode);
    }

    tcode = writeStructHeader(fid, SD_END);
    if (tcode) {
	LogError(tcode, "writeDatabase: Can't write end savedb\n");
	ERROR(tcode);
    }

  error_exit:
    doneWriting(code);
    return (code);
}
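
The appended-dump loop above defends against a corrupted database in two ways: it records every chain address it has already visited (appDumpAddrs) and it gives up once the chain grows implausibly long (5 * MAXAPPENDS hops). A minimal, self-contained sketch of the same pattern follows; walk_chain, fetch and chain_node are illustrative names, not part of the AFS code.

#define MAX_SEEN 200             /* cf. MAXAPPENDS: cap on remembered addresses */
#define MAX_HOPS (5 * MAX_SEEN)  /* cf. 5 * MAXAPPENDS: hard cap on chain length */

struct chain_node {              /* hypothetical chain element */
    unsigned int next;           /* address of the next element; 0 ends the chain */
};

/* Walk a chain starting at 'addr'.  Returns the number of elements visited,
 * or -1 if a circular chain is detected.  'fetch' loads the element stored
 * at a given address (the stand-in for cdbread) and returns nonzero on error. */
static int
walk_chain(unsigned int addr, int (*fetch)(unsigned int, struct chain_node *))
{
    unsigned int seen[MAX_SEEN];
    int nseen = 0, hops = 0, i;
    struct chain_node node;

    while (addr) {
	for (i = 0; i < nseen; i++)	/* seen this address before? */
	    if (seen[i] == addr)
		return -1;		/* circular chain */
	if (nseen < MAX_SEEN)		/* remember it, but don't overflow */
	    seen[nseen++] = addr;
	if (++hops > MAX_HOPS)		/* implausibly long: assume a loop */
	    return -1;

	if (fetch(addr, &node))		/* read failed; stop here */
	    break;
	addr = node.next;
    }
    return hops;
}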
Example no. 2
0
static int
WorkerBee(struct cmd_syndesc *as, void *arock)
{
    afs_int32 code;
    char *dbFile;
    char *outFile;
    afs_int32 index;
    struct stat info;
    struct kaheader header;
    int nentries, i, j, count;
    int *entrys;
    struct kaentry entry;

    dbFile = as->parms[0].items->data;	/* -database */
    listuheader = (as->parms[1].items ? 1 : 0);	/* -uheader  */
    listkheader = (as->parms[2].items ? 1 : 0);	/* -kheader  */
    listentries = (as->parms[3].items ? 1 : 0);	/* -entries  */
    verbose = (as->parms[4].items ? 1 : 0);	/* -verbose  */
    outFile = (as->parms[5].items ? as->parms[5].items->data : NULL);	/* -rebuild  */

    if (outFile) {
	out = fopen(outFile, "w");
	if (!out) {
	    afs_com_err(whoami, errno, "opening output file %s", outFile);
	    exit(7);
	}
    } else
	out = 0;

    fd = open(dbFile, O_RDONLY, 0);
    if (fd < 0) {
	afs_com_err(whoami, errno, "opening database file %s", dbFile);
	exit(6);
    }
    code = fstat(fd, &info);
    if (code) {
	afs_com_err(whoami, errno, "stat'ing file %s", dbFile);
	exit(6);
    }
    if ((info.st_size - UBIK_HEADERSIZE) % UBIK_BUFFERSIZE)
	fprintf(stderr,
		"DATABASE SIZE INCONSISTENT: was %d, should be (n*%d + %d), for integral n\n",
		(int) info.st_size, UBIK_BUFFERSIZE, UBIK_HEADERSIZE);

    readUbikHeader();

    readDB(0, &header, sizeof(header));
    code = CheckHeader(&header);
    if (listkheader)
	PrintHeader(&header);

    nentries =
	(info.st_size -
	 (UBIK_HEADERSIZE + header.headerSize)) / sizeof(struct kaentry);
    entrys = calloc(nentries, sizeof(int));
    if (!entrys) {
	afs_com_err(whoami, errno, "allocating flags for %d entries", nentries);
	exit(6);
    }
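
    /*
     * The consistency check runs in passes: first classify every entry by
     * its flags (user, free, oldkeys, or past EOF), then walk the name hash
     * chains, the free list and the oldkeys list, marking each entry
     * reached; finally cross-check the marks against the classification.
     */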

    for (i = 0, index = sizeof(header); i < nentries;
	 i++, index += sizeof(struct kaentry)) {
	readDB(index, &entry, sizeof(entry));

	if (index >= header.eofPtr) {
	    entrys[i] |= 0x8;
	} else if (listentries) {
	    PrintEntry(index, &entry);
	}

	if (entry.flags & KAFNORMAL) {
	    entrys[i] |= 0x1;	/* user entry */

	    if (strlen(entry.userID.name) == 0) {
		if (verbose)
		    printf("Entry %d has zero length name\n", i);
		continue;
	    }
	    if (!DES_check_key_parity(ktc_to_cblock(&entry.key))
		|| DES_is_weak_key(ktc_to_cblock(&entry.key))) {
		fprintf(stderr, "Entry %d, %s, has bad key\n", i,
			EntryName(&entry));
		continue;
	    }

	    if (out) {
		RebuildEntry(&entry);
	    }

	} else if (entry.flags & KAFFREE) {
	    entrys[i] |= 0x2;	/* free entry */

	} else if (entry.flags & KAFOLDKEYS) {
	    entrys[i] |= 0x4;	/* old keys block */
	    /* Should check the structure of the oldkeys block? */

	} else {
	    if (index < header.eofPtr) {
		fprintf(stderr, "Entry %d is unrecognizable\n", i);
	    }
	}
    }

    /* Follow the hash chains */
    for (j = 0; j < HASHSIZE; j++) {
	for (index = header.nameHash[j]; index; index = entry.next) {
	    readDB(index, &entry, sizeof(entry));

	    /* check to see if the name is hashed correctly */
	    i = NameHash(&entry);
	    if (i != j) {
		fprintf(stderr,
			"Entry %" AFS_SIZET_FMT ", %s, found in hash chain %d (should be %d)\n",
			((index -
			  sizeof(struct kaheader)) / sizeof(struct kaentry)),
			EntryName(&entry), j, i);
	    }

	    /* Is it on another hash chain or circular hash chain */
	    i = (index - header.headerSize) / sizeof(entry);
	    if (entrys[i] & 0x10) {
		fprintf(stderr,
			"Entry %d, %s, hash index %d, was found on another hash chain\n",
			i, EntryName(&entry), j);
		if (entry.next)
		    fprintf(stderr, "Skipping rest of hash chain %d\n", j);
		else
		    fprintf(stderr, "No next entry in hash chain %d\n", j);
		code++;
		break;
	    }
	    entrys[i] |= 0x10;	/* On hash chain */
	}
    }

    /* Follow the free pointers */
    count = 0;
    for (index = header.freePtr; index; index = entry.next) {
	readDB(index, &entry, sizeof(entry));

	/* Is it on another chain or circular free chain */
	i = (index - header.headerSize) / sizeof(entry);
	if (entrys[i] & 0x20) {
	    fprintf(stderr, "Entry %d, %s, already found on free chain\n", i,
		    EntryName(&entry));
	    fprintf(stderr, "Skipping rest of free chain\n");
	    code++;
	    break;
	}
	entrys[i] |= 0x20;	/* On free chain */

	count++;
    }
    if (verbose)
	printf("Found %d free entries\n", count);

    /* Follow the oldkey blocks */
    count = 0;
    for (index = header.kvnoPtr; index; index = entry.next) {
	readDB(index, &entry, sizeof(entry));

	/* Is it on another chain or a circular oldkeys chain */
	i = (index - header.headerSize) / sizeof(entry);
	if (entrys[i] & 0x40) {
	    fprintf(stderr, "Entry %d, %s, already found on olkeys chain\n",
		    i, EntryName(&entry));
	    fprintf(stderr, "Skipping rest of oldkeys chain\n");
	    code++;
	    break;
	}
	entrys[i] |= 0x40;	/* On oldkeys chain */

	count++;
    }
    if (verbose)
	printf("Found %d oldkey blocks\n", count);

    /* Now recheck all the blocks and see if they are allocated correctly
     * 0x1 --> User Entry           0x10 --> On hash chain
     * 0x2 --> Free Entry           0x20 --> On Free chain
     * 0x4 --> OldKeys Entry        0x40 --> On Oldkeys chain
     * 0x8 --> Past EOF
     */
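    /* A consistent user entry therefore ends up as 0x11, a free entry as
     * 0x22, an oldkeys block as 0x44, and an entry past EOF as just 0x8;
     * any other combination is reported by badEntry() below. */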
    for (i = 0; i < nentries; i++) {
	j = entrys[i];
	if (j & 0x1) {		/* user entry */
	    if (!(j & 0x10))
		badEntry(j, i);	/* on hash chain? */
	    else if (j & 0xee)
		badEntry(j, i);	/* anything else? */
	} else if (j & 0x2) {	/* free entry */
	    if (!(j & 0x20))
		badEntry(j, i);	/* on free chain? */
	    else if (j & 0xdd)
		badEntry(j, i);	/* anything else? */
	} else if (j & 0x4) {	/* oldkeys entry */
	    if (!(j & 0x40))
		badEntry(j, i);	/* on oldkeys chain? */
	    else if (j & 0xbb)
		badEntry(j, i);	/* anything else? */
	} else if (j & 0x8) {	/* past eof */
	    if (j & 0xf7)
		badEntry(j, i);	/* anything else? */
	} else
	    badEntry(j, i);	/* anything else? */
    }
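
    /* 'code' holds the header-check result plus any chain errors counted
     * above; exit 0 on a clean database, 1 otherwise. */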

    exit(code != 0);
}