Example 1
static void
applyrelocs(off_t offset, size_t size, void *buf)
{
	struct blockreloc *reloc;
	off_t roffset;
	uint32_t coff;

	if (numrelocs == 0)
		return;

	offset -= sectobytes(outputminsec);

	for (reloc = reloctable; reloc < &reloctable[numrelocs]; reloc++) {
		roffset = sectobytes(reloc->sector) + reloc->sectoff;
		if (offset < roffset+reloc->size && offset+size > roffset) {
			/* XXX lazy: relocation must be totally contained */
			assert(offset <= roffset);
			assert(roffset+reloc->size <= offset+size);

			coff = (u_int32_t)(roffset - offset);
			if (debug > 1)
				fprintf(stderr,
					"Applying reloc type %d [%qu-%qu] "
					"to [%qu-%qu]\n", reloc->type,
					roffset, roffset+reloc->size,
					offset, offset+size);
			switch (reloc->type) {
			case RELOC_NONE:
				break;
#ifndef linux
			case RELOC_FBSDDISKLABEL:
			case RELOC_OBSDDISKLABEL:
				assert(reloc->size >= sizeof(struct disklabel));
				reloc_bsdlabel((struct disklabel *)(buf+coff),
					       reloc->type);
				break;
#endif
			case RELOC_LILOSADDR:
			case RELOC_LILOMAPSECT:
				reloc_lilo(buf+coff, reloc->type, reloc->size);
				break;
			case RELOC_LILOCKSUM:
				reloc_lilocksum(buf, coff, reloc->size);
				break;
			default:
				fprintf(stderr,
					"Ignoring unknown relocation type %d\n",
					reloc->type);
				break;
			}
		}
	}
}
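
For orientation, here is a minimal, self-contained sketch of the overlap and full-containment test applyrelocs() applies to each relocation record. The struct, field names and sector size below are assumptions inferred from the accesses above (the real definitions live in the image header files), and the sample values are made up.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

/*
 * Illustrative stand-in for the real relocation record: just the
 * fields applyrelocs() dereferences above.
 */
struct blockreloc_sketch {
	uint32_t type;		/* RELOC_* code */
	uint32_t sector;	/* sector the relocation applies to */
	uint32_t sectoff;	/* byte offset within that sector */
	uint32_t size;		/* number of bytes to patch */
};

#define SECSIZE		512			/* assumed sector size */
#define sectobytes(s)	((off_t)(s) * SECSIZE)

int
main(void)
{
	struct blockreloc_sketch r = { 0, 100, 64, 8 };
	off_t offset = sectobytes(100);		/* start of buffered data */
	off_t size = 4096;			/* bytes in the buffer */
	off_t roffset = sectobytes(r.sector) + r.sectoff;

	/* Overlap test, then the "totally contained" requirement. */
	if (offset < roffset + r.size && offset + size > roffset) {
		assert(offset <= roffset);
		assert(roffset + r.size <= offset + size);
		printf("reloc patches buffer bytes %lld-%lld\n",
		       (long long)(roffset - offset),
		       (long long)(roffset - offset + r.size - 1));
	}
	return 0;
}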
Example 2
/*
 * Parse the DOS partition table to set the bounds of the slice we
 * are writing to. 
 */
int
readmbr(int slice)
{
	struct doslabel doslabel;
	int		cc;

	if (slice < 1 || slice > 4) {
		fprintf(stderr, "Slice must be 1, 2, 3, or 4\n");
 		return 1;
	}

	if ((cc = devread(outfd, doslabel.pad2, DOSPARTSIZE)) < 0) {
		perror("Could not read DOS label");
		return 1;
	}
	if (cc != DOSPARTSIZE) {
		fprintf(stderr, "Could not get the entire DOS label\n");
 		return 1;
	}
	if (doslabel.magic != BOOT_MAGIC) {
		fprintf(stderr, "Wrong magic number in DOS partition table\n");
 		return 1;
	}

	outputminsec  = doslabel.parts[slice-1].dp_start;
	outputmaxsec  = doslabel.parts[slice-1].dp_start +
		        doslabel.parts[slice-1].dp_size;
	outputmaxsize = (long long)sectobytes(outputmaxsec - outputminsec);

	if (debug) {
		fprintf(stderr, "Slice Mode: S:%d min:%ld max:%ld size:%qd\n",
			slice, outputminsec, outputmaxsec, outputmaxsize);
	}
	return 0;
}
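
sectobytes() and the output*sec globals are used throughout these excerpts but never defined in them. A minimal sketch, assuming 512-byte sectors and made-up partition values, of how slice bounds like the ones readmbr() records translate into byte offsets:

#include <stdint.h>
#include <stdio.h>
#include <sys/types.h>

#define SECSIZE		512			/* assumed sector size */
#define sectobytes(s)	((off_t)(s) * SECSIZE)

int
main(void)
{
	/* Hypothetical DOS partition entry: 2GB slice starting at sector 63. */
	uint32_t dp_start = 63, dp_size = 4194304;

	uint32_t outputminsec = dp_start;
	uint32_t outputmaxsec = dp_start + dp_size;
	long long outputmaxsize =
		(long long)sectobytes(outputmaxsec - outputminsec);

	printf("slice: sectors [%u-%u), %lld bytes\n",
	       outputminsec, outputmaxsec, outputmaxsize);
	return 0;
}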
Example 3
/*
 * When compiled for frisbee, act as a library.
 */
int
ImageUnzipInit(char *filename, int _slice, int _debug, int _fill,
	       int _nothreads, int _dostype, unsigned long _writebufmem)
{
	if (outfd >= 0)
		close(outfd);

	if ((outfd = open(filename, O_RDWR|O_CREAT|O_TRUNC, 0666)) < 0) {
		perror("opening output file");
		exit(1);
	}
	slice     = _slice;
	debug     = _debug;
	dofill    = _fill;
	nothreads = _nothreads;
	dostype   = _dostype;
#ifndef NOTHREADS
	maxwritebufmem = _writebufmem;
#endif

	/*
	 * If the output device isn't seekable we must modify our behavior:
	 * we cannot really handle slice mode, we must always zero fill
	 * (cannot skip free space) and we cannot use pwrite.
	 */
	if (lseek(outfd, (off_t)0, SEEK_SET) < 0) {
		if (slice) {
			fprintf(stderr, "Output file is not seekable, "
				"cannot specify a slice\n");
			exit(1);
		}
		if (!dofill)
			fprintf(stderr,
				"WARNING: output file is not seekable, "
				"must zero-fill free space\n");
		dofill = 1;
		seekable = 0;
	} else
		seekable = 1;

	if (slice) {
		off_t	minseek;
		
		if (readmbr(slice)) {
			fprintf(stderr, "Failed to read MBR\n");
			exit(1);
		}
		minseek = sectobytes(outputminsec);
		
		if (lseek(outfd, minseek, SEEK_SET) < 0) {
			perror("Setting seek pointer to slice");
			exit(1);
		}
	}
	threadinit();
	return 0;
}
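
The seekability probe above is just an lseek() to offset 0 that is expected to fail (with ESPIPE) on pipes. A standalone sketch of the same check applied to a regular file and to a pipe; the file path and helper name are illustrative only.

#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Mirror of the probe above: can we seek to offset 0 on this fd? */
static int
is_seekable(int fd)
{
	return lseek(fd, (off_t)0, SEEK_SET) >= 0;
}

int
main(void)
{
	int pipefd[2];
	int filefd = open("/tmp/seekprobe", O_RDWR|O_CREAT|O_TRUNC, 0666);

	if (filefd < 0 || pipe(pipefd) < 0) {
		perror("setup");
		return 1;
	}
	printf("regular file seekable: %d\n", is_seekable(filefd));
	printf("pipe seekable:         %d\n", is_seekable(pipefd[1]));
	close(filefd);
	close(pipefd[0]);
	close(pipefd[1]);
	return 0;
}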
Example 4
static void
free_readbuf(readbuf_t *rbuf)
{
	assert(rbuf != NULL);

	pthread_mutex_lock(&readbuf_mutex);
	curreadbufs--;
	curreadbufmem -= sectobytes(rbuf->region.size);
	assert(curreadbufmem >= 0);
	if (readbufwanted) {
		readbufwanted = 0;
#ifdef CONDVARS_WORK
		pthread_cond_signal(&readbuf_cond);
#endif
	}
	free(rbuf);
	pthread_mutex_unlock(&readbuf_mutex);
}
Example 5
static int
inflate_subblock(char *chunkbufp)
{
	int		cc, err, count, ibsize = 0, ibleft = 0;
	z_stream	d_stream; /* inflation stream */
	blockhdr_t	*blockhdr;
	struct region	*curregion;
	off_t		offset, size;
	int		chunkbytes = SUBBLOCKSIZE;
	char		resid[SECSIZE];
	writebuf_t	*wbuf;
	
	d_stream.zalloc   = (alloc_func)0;
	d_stream.zfree    = (free_func)0;
	d_stream.opaque   = (voidpf)0;
	d_stream.next_in  = 0;
	d_stream.avail_in = 0;
	d_stream.next_out = 0;

	err = inflateInit(&d_stream);
	CHECK_ERR(err, "inflateInit");

	/*
	 * Grab the header. It is uncompressed, and holds the real
	 * image size and the magic number. Advance the pointer too.
	 */
	blockhdr    = (blockhdr_t *) chunkbufp;
	chunkbufp  += DEFAULTREGIONSIZE;
	chunkbytes -= DEFAULTREGIONSIZE;
	
	switch (blockhdr->magic) {
	case COMPRESSED_V1:
	{
		static int didwarn;

		curregion = (struct region *)
			((struct blockhdr_V1 *)blockhdr + 1);
		if (dofill && !didwarn) {
			fprintf(stderr,
				"WARNING: old image file format, "
				"may not zero all unused blocks\n");
			didwarn = 1;
		}
		break;
	}

	case COMPRESSED_V2:
	case COMPRESSED_V3:
		imageversion = 2;
		curregion = (struct region *)
			((struct blockhdr_V2 *)blockhdr + 1);
		/*
		 * Extract relocation information
		 */
		getrelocinfo(blockhdr);
		break;

	default:
		fprintf(stderr, "Bad Magic Number!\n");
		exit(1);
	}

	/*
	 * Handle any lead-off free space
	 */
	if (imageversion > 1 && curregion->start > blockhdr->firstsect) {
		offset = sectobytes(blockhdr->firstsect);
		size = sectobytes(curregion->start - blockhdr->firstsect);
		if (dofill) {
			wbuf = alloc_writebuf(offset, size, 0, 1);
			dowrite_request(wbuf);
		} else
			totaledata += size;
	}
 
	/*
	 * Start with the first region. 
	 */
	offset = sectobytes(curregion->start);
	size   = sectobytes(curregion->size);
	assert(size > 0);
	curregion++;
	blockhdr->regioncount--;

	if (debug == 1)
		fprintf(stderr, "Decompressing: %14qd --> ", offset);

	wbuf = NULL;
	while (1) {
		/*
		 * Read just up to the end of compressed data.
		 */
		count              = blockhdr->size;
		blockhdr->size     = 0;
		d_stream.next_in   = chunkbufp;
		d_stream.avail_in  = count;
		chunkbufp	  += count;
		chunkbytes	  -= count;
		assert(chunkbytes >= 0);
	inflate_again:
		assert(wbuf == NULL);
		wbuf = alloc_writebuf(offset, OUTSIZE, 1, 1);

		/*
		 * Must operate on multiples of the sector size so first we
		 * restore any residual from the last decompression.
		 */
		if (ibleft)
			memcpy(wbuf->data, resid, ibleft);

		/*
		 * Adjust the decompression params to account for the resid
		 */
		d_stream.next_out  = &wbuf->data[ibleft];
		d_stream.avail_out = OUTSIZE - ibleft;

		/*
		 * Inflate a chunk
		 */
		err = inflate(&d_stream, Z_SYNC_FLUSH);
		if (err != Z_OK && err != Z_STREAM_END) {
			fprintf(stderr, "inflate failed, err=%d\n", err);
			exit(1);
		}

		/*
		 * Figure out how much valid data is in the buffer and
		 * save off any SECSIZE residual for the next round.
		 *
		 * Yes the ibsize computation is correct, just not obvious.
		 * The obvious expression is:
		 *	ibsize = (OUTSIZE - ibleft) - avail_out + ibleft;
		 * so ibleft cancels out.
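		 *
		 * Worked example (illustrative numbers only): with
		 * SECSIZE = 512, OUTSIZE = 1048576 and avail_out = 700
		 * after the inflate, ibsize = 1047876, count =
		 * ibsize & ~511 = 1047552, and the ibleft = 324 residual
		 * bytes are saved in resid[] for the next pass.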
		 */
		ibsize = OUTSIZE - d_stream.avail_out;
		count  = ibsize & ~(SECSIZE - 1);
		ibleft = ibsize - count;
		if (ibleft)
			memcpy(resid, &wbuf->data[count], ibleft);
		wbuf->size = count;

		while (count) {
			/*
			 * Write data only as far as the end of the current
			 * region; anything beyond the region boundary is
			 * split off into a new writebuf below.
			 */
			if (count <= size) {
				dowrite_request(wbuf);
				wbuf = NULL;
				cc = count;
			} else {
				writebuf_t *wbtail;

				/*
				 * Data we decompressed belongs to physically
				 * distinct areas, we have to split the
				 * write up, meaning we have to allocate a
				 * new writebuf and copy the remaining data
				 * into it.
				 */
				wbtail = split_writebuf(wbuf, size, 1);
				dowrite_request(wbuf);
				wbuf = wbtail;
				cc = size;
			}

			if (debug == 2) {
				fprintf(stderr,
					"%12qd %8d %8d %12qd %10qd %8d %5d %8d"
					"\n",
					offset, cc, count, totaledata, size,
					ibsize, ibleft, d_stream.avail_in);
			}

			count  -= cc;
			size   -= cc;
			offset += cc;
			assert(count >= 0);
			assert(size  >= 0);

			/*
			 * Hit the end of the region. Need to figure out
			 * where the next one starts. If desired, we write
			 * a block of zeros in the empty space between this
			 * region and the next.
			 */
			if (size == 0) {
				off_t	    newoffset;
				writebuf_t *wbzero;

				/*
				 * No more regions. Must be done.
				 */
				if (!blockhdr->regioncount)
					break;

				newoffset = sectobytes(curregion->start);
				size      = sectobytes(curregion->size);
				assert(size);
				curregion++;
				blockhdr->regioncount--;
				assert((newoffset-offset) > 0);
				if (dofill) {
					wbzero = alloc_writebuf(offset,
							newoffset-offset,
							0, 1);
					dowrite_request(wbzero);
				} else
					totaledata += newoffset-offset;
				offset = newoffset;
				if (wbuf)
					wbuf->offset = newoffset;
			}
		}
		assert(wbuf == NULL);

		/*
		 * Exhausted our output buffer but still have more input in
		 * the current chunk, go back and inflate more from this chunk.
		 */
		if (d_stream.avail_in)
			goto inflate_again;

		/*
		 * All input inflated and all output written, done.
		 */
		if (err == Z_STREAM_END)
			break;

		/*
		 * We should never reach this!
		 */
		assert(0);
	}
	err = inflateEnd(&d_stream);
	CHECK_ERR(err, "inflateEnd");

	assert(wbuf == NULL);
	assert(blockhdr->regioncount == 0);
	assert(size == 0);
	assert(blockhdr->size == 0);

	/*
	 * Handle any trailing free space
	 */
	curregion--;
	if (imageversion > 1 &&
	    curregion->start + curregion->size < blockhdr->lastsect) {
		offset = sectobytes(curregion->start + curregion->size);
		size = sectobytes(blockhdr->lastsect -
				  (curregion->start + curregion->size));
		if (dofill) {
			wbuf = alloc_writebuf(offset, size, 0, 1);
			dowrite_request(wbuf);
		} else
			totaledata += size;
		offset += size;
	}
 
	if (debug == 1) {
		fprintf(stderr, "%14qd\n", offset);
	}
#ifndef FRISBEE
	else if (dots) {
		fprintf(stderr, ".");
		if (dotcol++ > 59) {
			struct timeval estamp;

			gettimeofday(&estamp, 0);
			estamp.tv_sec -= stamp.tv_sec;
			fprintf(stderr, "%4ld %13qd\n",
				estamp.tv_sec, totaledata);

			dotcol = 0;
		}
	}
#endif

	return 0;
}
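
inflate_subblock() drives zlib with the standard inflateInit()/inflate()/inflateEnd() sequence, re-pointing the output window on every pass. For readers unfamiliar with that API, a minimal round-trip sketch; it compresses a small buffer first so the inflate loop has real input, and the buffer sizes are arbitrary.

#include <stdio.h>
#include <string.h>
#include <zlib.h>

int
main(void)
{
	static const char msg[] = "zlib round-trip example";
	unsigned char zbuf[256], out[256];
	uLongf zlen = sizeof(zbuf);
	z_stream d;
	int err;

	/* Make some compressed input to work with. */
	if (compress(zbuf, &zlen, (const Bytef *)msg, sizeof(msg)) != Z_OK)
		return 1;

	memset(&d, 0, sizeof(d));
	if (inflateInit(&d) != Z_OK)
		return 1;

	d.next_in = zbuf;
	d.avail_in = zlen;
	do {
		/*
		 * Hand zlib an output window; Z_SYNC_FLUSH matches the
		 * usage in inflate_subblock() above.
		 */
		d.next_out = out;
		d.avail_out = sizeof(out);
		err = inflate(&d, Z_SYNC_FLUSH);
		if (err != Z_OK && err != Z_STREAM_END) {
			fprintf(stderr, "inflate failed, err=%d\n", err);
			inflateEnd(&d);
			return 1;
		}
		fwrite(out, 1, sizeof(out) - d.avail_out, stdout);
	} while (err != Z_STREAM_END);
	putchar('\n');

	return inflateEnd(&d) == Z_OK ? 0 : 1;
}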
Example 6
int
main(int argc, char **argv)
{
	int		i, ch;
	extern char	build_info[];
	struct timeval  estamp;

#ifdef NOTHREADS
	nothreads = 1;
#endif
	while ((ch = getopt(argc, argv, "vdhs:zp:onFD:W:C")) != -1)
		switch(ch) {
#ifdef FAKEFRISBEE
		case 'F':
			dofrisbee++;
			break;
#endif
		case 'd':
			debug++;
			break;

		case 'n':
			nothreads++;
			break;

		case 'v':
			version++;
			break;

		case 'o':
			dots++;
			break;

		case 's':
			slice = atoi(optarg);
			break;

		case 'D':
			dostype = atoi(optarg);
			break;

		case 'p':
			fillpat = strtoul(optarg, NULL, 0);
			/* fall through: -p implies zero-fill (-z) */
		case 'z':
			dofill++;
			break;

		case 'C':
			docrconly++;
			dofill++;
			seekable = 0;
			break;

#ifndef NOTHREADS
		case 'W':
			maxwritebufmem = atoi(optarg);
			if (maxwritebufmem >= 4096)
				maxwritebufmem = MAXWRITEBUFMEM;
			maxwritebufmem *= (1024 * 1024);
			break;
#endif

		case 'h':
		case '?':
		default:
			usage();
		}
	argc -= optind;
	argv += optind;

	if (version || debug) {
		fprintf(stderr, "%s\n", build_info);
		if (version)
			exit(0);
	}

	if (argc < 1 || argc > 2)
		usage();

	if (fillpat) {
		unsigned	*bp = (unsigned *) &zeros;

		for (i = 0; i < sizeof(zeros)/sizeof(unsigned); i++)
			*bp++ = fillpat;
	}

	if (strcmp(argv[0], "-")) {
		if ((infd = open(argv[0], O_RDONLY, 0666)) < 0) {
			perror("opening input file");
			exit(1);
		}
	}
	else
		infd = fileno(stdin);

	if (docrconly)
		outfd = -1;
	else if (argc == 2 && strcmp(argv[1], "-")) {
		if ((outfd =
		     open(argv[1], O_RDWR|O_CREAT|O_TRUNC, 0666)) < 0) {
			perror("opening output file");
			exit(1);
		}
	}
	else
		outfd = fileno(stdout);

	/*
	 * If the output device isn't seekable we must modify our behavior:
	 * we cannot really handle slice mode, we must always zero fill
	 * (cannot skip free space) and we cannot use pwrite.
	 */
	if (lseek(outfd, (off_t)0, SEEK_SET) < 0) {
		if (slice) {
			fprintf(stderr, "Output file is not seekable, "
				"cannot specify a slice\n");
			exit(1);
		}
		if (!dofill && !docrconly)
			fprintf(stderr,
				"WARNING: output file is not seekable, "
				"must zero-fill free space\n");
		dofill = 1;
		seekable = 0;
	} else
		seekable = 1;

	if (slice) {
		off_t	minseek;
		
		if (readmbr(slice)) {
			fprintf(stderr, "Failed to read MBR\n");
			exit(1);
		}
		minseek = sectobytes(outputminsec);
		
		if (lseek(outfd, minseek, SEEK_SET) < 0) {
			perror("Setting seek pointer to slice");
			exit(1);
		}
	}

	threadinit();
	gettimeofday(&stamp, 0);
	
#ifdef FAKEFRISBEE
	if (dofrisbee) {
		struct stat st;
		int numchunks, i;

		if (fstat(infd, &st) < 0) {
			fprintf(stderr, "Cannot stat input file\n");
			exit(1);
		}
		numchunks = st.st_size / SUBBLOCKSIZE;

		chunklist = (int *) calloc(numchunks+1, sizeof(*chunklist));
		assert(chunklist != NULL);

		for (i = 0; i < numchunks; i++)
			chunklist[i] = i;
		chunklist[i] = -1;

		srandom((long)(stamp.tv_usec^stamp.tv_sec));
		for (i = 0; i < 50 * numchunks; i++) {
			int c1 = random() % numchunks;
			int c2 = random() % numchunks;
			int t1 = chunklist[c1];
			int t2 = chunklist[c2];

			chunklist[c2] = t1;
			chunklist[c1] = t2;
		}
		nextchunk = chunklist;
	}
#endif

	while (1) {
		int	count = sizeof(chunkbuf);
		char	*bp   = chunkbuf;
		
#ifdef FAKEFRISBEE
		if (dofrisbee) {
			if (*nextchunk == -1)
				goto done;
			if (lseek(infd, (off_t)*nextchunk * SUBBLOCKSIZE,
				  SEEK_SET) < 0) {
				perror("seek failed");
				exit(1);
			}
			nextchunk++;
		}
#endif
		/*
		 * Decompress one subblock at a time. We read the entire
		 * chunk and hand it off. Since we might be reading from
		 * stdin, we have to make sure we get the entire amount.
		 */
		while (count) {
			int	cc;
			
			if ((cc = read(infd, bp, count)) <= 0) {
				if (cc == 0)
					goto done;
				perror("reading zipped image");
				exit(1);
			}
			count -= cc;
			bp    += cc;
		}
		if (inflate_subblock(chunkbuf))
			break;
	}
 done:
	close(infd);

	/* This causes the output queue to drain */
	threadquit();
	
	/* Set the MBR type if necessary */
	if (slice && dostype >= 0)
		fixmbr(slice, dostype);

	gettimeofday(&estamp, 0);
	estamp.tv_sec -= stamp.tv_sec;
	if (debug != 1 && dots) {
		while (dotcol++ <= 60)
			fprintf(stderr, " ");
		
		fprintf(stderr, "%4ld %13qd\n", estamp.tv_sec, totaledata);
	}
	else {
		fprintf(stderr, "Wrote %qd bytes (%qd actual) in %ld seconds\n",
			totaledata, totalrdata, estamp.tv_sec);
		fprintf(stderr, "%lu %lu %d\n",
			decompblocks, writeridles, rdycount);
	}
	if (debug)
		fprintf(stderr, "decompressor blocked: %lu, "
			"writer idle: %lu, writes performed: %d\n",
			decompblocks, writeridles, rdycount);
	if (docrconly)
		fprintf(stderr, "%s: CRC=%u\n", argv[0], ~crc);
	dump_writebufs();
	return 0;
}
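
The inner read loop in main() exists because a single read() on a pipe may return fewer bytes than requested. Below is a sketch of that pattern factored into a helper; read_full() is a hypothetical name, not part of this program.

#include <errno.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/*
 * Hypothetical helper: keep calling read(2) until 'count' bytes have
 * arrived, EOF is hit, or an error occurs.  Returns the number of
 * bytes actually read, or -1 on error.
 */
static ssize_t
read_full(int fd, void *buf, size_t count)
{
	char *bp = buf;
	size_t resid = count;

	while (resid > 0) {
		ssize_t cc = read(fd, bp, resid);

		if (cc < 0) {
			if (errno == EINTR)
				continue;
			return -1;
		}
		if (cc == 0)			/* EOF */
			break;
		bp += cc;
		resid -= cc;
	}
	return (ssize_t)(count - resid);
}

int
main(void)
{
	char chunk[4096];
	ssize_t got = read_full(0, chunk, sizeof(chunk));

	if (got < 0) {
		perror("read_full");
		return 1;
	}
	fprintf(stderr, "got %zd of %zu bytes\n", got, sizeof(chunk));
	return 0;
}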
Example 7
static void
dowrite_request(writebuf_t *wbuf)
{
	off_t offset, size;
	void *buf;
	
	offset = wbuf->offset;
	size = wbuf->size;
	buf = wbuf->data;
	assert(offset >= 0);
	assert(size > 0);

	/*
	 * Adjust for partition start and ensure data fits
	 * within partition boundaries.
	 */
	offset += sectobytes(outputminsec);
	assert((offset & (SECSIZE-1)) == 0);
	if (outputmaxsec > 0 && offset + size > sectobytes(outputmaxsec)) {
		if (!imagetoobigwarned) {
			fprintf(stderr, "WARNING: image too large "
				"for target slice, truncating\n");
			imagetoobigwarned = 1;
		}
		if (offset >= sectobytes(outputmaxsec)) {
			free_writebuf(wbuf);
			return;
		}
		size = sectobytes(outputmaxsec) - offset;
		wbuf->size = size;
	}
	wbuf->offset = offset;

	totaledata += size;

	if (nothreads) {
		/*
		 * A null buf means it's a request to zero.
		 * If we are not filling, just return.
		 */
		if (buf == NULL) {
			if (dofill)
				writezeros(offset, size);
		} else {
			assert(size <= OUTSIZE);

			/*
			 * Handle any relocations
			 */
			applyrelocs(offset, (size_t)size, buf);
			writedata(offset, (size_t)size, buf);
		}
		free_writebuf(wbuf);
		return;
	}

#ifndef NOTHREADS
	if (buf == NULL) {
		if (!dofill) {
			free_writebuf(wbuf);
			return;
		}
	} else {
		assert(size <= OUTSIZE);

		/*
		 * Handle any relocations
		 */
		applyrelocs(offset, (size_t)size, buf);
	}

	/*
	 * Queue it up for the writer thread
	 */
	pthread_mutex_lock(&writequeue_mutex);
	queue_enter(&writequeue, wbuf, writebuf_t *, chain);
#ifdef CONDVARS_WORK
	pthread_cond_signal(&writequeue_cond);
#endif
	pthread_mutex_unlock(&writequeue_mutex);
#endif
}
Example 8
/*
 * Read the hash info from a signature file into a hashinfo struct
 * (a header followed by its array of hash regions).  We also record the
 * maximum hash range size so we can size a static buffer for IO.
 */
static int
readhashinfo(char *hfile, struct hashinfo **hinfop, uint32_t ssect)
{
	struct hashinfo		hi, *hinfo;
	int			fd, nregbytes, cc, i;

	fd = open(hfile, O_RDONLY);
	if (fd < 0) {
		perror(hfile);
		return -1;
	}
	cc = read(fd, &hi, sizeof(hi));
	if (cc != sizeof(hi)) {
		if (cc < 0)
			perror(hfile);
		else
			fprintf(stderr, "%s: too short\n", hfile);
		close(fd);
		return -1;
	}
	if (strcmp((char *)hi.magic, HASH_MAGIC) != 0 ||
	    hi.version != HASH_VERSION) {
		fprintf(stderr, "%s: not a valid signature file\n", hfile);
		close(fd);
		return -1;
	}
	nregbytes = hi.nregions * sizeof(struct hashregion);
	hinfo = malloc(sizeof(hi) + nregbytes);
	if (hinfo == 0) {
		fprintf(stderr, "%s: not enough memory for info\n", hfile);
		close(fd);
		return -1;
	}
	*hinfo = hi;
	cc = read(fd, hinfo->regions, nregbytes);
	if (cc != nregbytes) {
		if (cc < 0)
			perror(hfile);
		else
			fprintf(stderr, "%s: incomplete region list\n", hfile);
		free(hinfo);
		close(fd);
		return -1;
	}

	for (i = 0; i < hinfo->nregions; i++) {
		struct hashregion *hreg = &hinfo->regions[i];
		if (hreg->region.size > hashdatasize)
			hashdatasize = hreg->region.size;
		hreg->region.start += ssect;
#ifdef HASHSTATS
		hashstats.orig_allocated += hreg->region.size;
#endif
	}
	close(fd);

	hashfile = hfile;
	hashdatasize = sectobytes(hashdatasize);
	hashdata = malloc(hashdatasize);
	if (hashdata == NULL) {
		fprintf(stderr, "%s: not enough memory for data buffer\n",
			hfile);
		return -1;
	}

#ifdef DEBUG
	//dumphash(hinfo);
#endif

	*hinfop = hinfo;
	return 0;
}
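
The signature file readhashinfo() parses is a fixed header followed by nregions fixed-size records. A minimal sketch of that read-header-then-read-array pattern, with hypothetical sig_header/sig_region types standing in for the real hashinfo/hashregion layout:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>

/* Hypothetical on-disk layout: fixed header, then nregions records. */
struct sig_region {
	uint32_t start;		/* first sector covered by this hash */
	uint32_t size;		/* number of sectors covered */
};
struct sig_header {
	uint32_t nregions;
};

int
main(int argc, char **argv)
{
	struct sig_header hdr;
	struct sig_region *regs;
	size_t nbytes;
	int fd;

	if (argc != 2) {
		fprintf(stderr, "usage: readsig <sigfile>\n");
		return 1;
	}
	if ((fd = open(argv[1], O_RDONLY)) < 0) {
		perror(argv[1]);
		return 1;
	}
	/* Read the fixed-size header first... */
	if (read(fd, &hdr, sizeof(hdr)) != (ssize_t)sizeof(hdr)) {
		fprintf(stderr, "%s: too short\n", argv[1]);
		close(fd);
		return 1;
	}
	/* ...then the variable-length region array it describes. */
	nbytes = hdr.nregions * sizeof(struct sig_region);
	if ((regs = malloc(nbytes)) == NULL ||
	    read(fd, regs, nbytes) != (ssize_t)nbytes) {
		fprintf(stderr, "%s: truncated region array\n", argv[1]);
		close(fd);
		return 1;
	}
	printf("%u regions\n", hdr.nregions);
	free(regs);
	close(fd);
	return 0;
}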
Example 9
/*
 * Read from infd, hash the contents and compare with the hash from the
 * signature file.  Optionally (READ_CACHE), read ahead and cache the blocks.
 */
static int
hash_and_cmp(int infd,
	     unsigned char *(*hashfunc)(const unsigned char *, size_t,
					unsigned char *),
	     int hashlen, struct hashregion *hashreg, int num_reg)
{
	unsigned char		*bp, *buf;
	size_t			count, byte_size;
	ssize_t			cc;
	off_t			byte_start, retval;
	unsigned char 		hash[HASH_MAXSIZE];
	struct region		hreg = hashreg->region;
	int			iretval;

	//printf("hash_and_cmp: in -- start = %u, size = %x, num_reg = %d.\n",
	//				hreg.start, hreg.size, num_reg);
#ifdef READ_CACHE
	static struct range	cache = { 0, 0, NULL, NULL };
	static char		*odata = NULL;
	/*
	 * We read the blocks here, trying to optimize by reading as many
	 * contiguous blocks as possible (by looking through the hash
	 * regions) and recording the cached data's range.  All subsequent
	 * calls that can be served from this cache are.  When the first
	 * request outside this data arrives, we purge the cache (since
	 * requests come sequentially) and fetch the next run of
	 * consecutive blocks.
	 */
	if (hreg.start + hreg.size <= cache.start + cache.size) {
		/*
		 * serve the request from the cache
		 */
		buf = cache.data + sectobytes((hreg.start - cache.start));

		//printf("hash_and_cmp: fetching from cache start = %d...\n",
		//		sectobytes((hreg.start - cache.start)));
	} else {
		int i;
		/*
		 * bad luck ! gotta hit the disk...
		 */
		//printf("hash_and_cmp: NOT in cache...\n");

		/*
		 * find the contiguous blocks
		 */
		cache.start = hreg.start;
		cache.size = hreg.size;
		for (i = 0; i < num_reg - 1; i++) {
			/*
			 * since there are NO overlaps in hashed blocks
			 * just check end points..
			 */
			if (hashreg[i].region.start + hashreg[i].region.size
						!= hashreg[i+1].region.start) {
				break;
			}

			/*
			 * voila ! contiguous...
			 */
			cache.size += hashreg[i+1].region.size;
		}
	
		byte_size = sectobytes(cache.size);
		byte_start = sectobytes(cache.start);

		if (cache.data) {
			free(cache.data);
		}
		cache.data = (unsigned char *) malloc(byte_size);
		if (!cache.data) {
			fprintf(stderr, "hash_and_cmp: unable to malloc !\n:");
			goto error;
		}
		bzero(cache.data, byte_size);

		//printf("hash_and_cmp: gonna fetch start = %d, size = %d\n",
		//				cache.start, cache.size);

		/*
		 * go fetch the blocks.
		 */
		retval = lseek(infd, byte_start, SEEK_SET);
	//	printf("BUG_DBG: hash_and_cmp(): retval = %ld,"
	//		" byte_start = %ld\n", retval, byte_start);
		if (retval < 0) {
			fprintf(stderr, "hash_and_cmp: lseek error !\n:");
			goto free_error;
		}

		count = byte_size;
		bp = cache.data;
		while (count) {
			TIMEOP(cc = read(infd, bp, count), time_curr_read);
			if (cc < 0) {
				perror("hash_and_cmp: read error -- ");
				goto free_error;
			}
			count -= cc;
			//printf("looping...%d %d\n", cc, count);
			bp += cc;
		}
		buf = cache.data;

	}
#else
	/*
	 * Read from the disk !
	 */
	byte_size = sectobytes(hreg.size);
	byte_start = sectobytes(hreg.start);
	assert(byte_size <= hashdatasize);

	retval = lseek(infd, byte_start, SEEK_SET);
	if (retval < 0) {
		perror("hash_and_cmp: lseek error");
		return -1;
	}

	count = byte_size;
	buf = bp = hashdata;
	while (count > 0) {
		TIMEOP(cc = read(infd, bp, count), time_curr_read);
		if (cc < 0) {
			perror("hash_and_cmp: read error");
			return -1;
		}
		if (cc == 0) {
			fprintf(stderr, "hash_and_cmp: unexpected EOF\n");
			return -1;
		}
		count -= cc;
		bp += cc;
	}
#endif

	/*
	 * Now calculate the hash and compare it.
	 */
	TIMEOP(
	    (void)(*hashfunc)(buf, sectobytes(hreg.size), hash),
	time_hash);

#if 0
	fprintf(stderr, "disk: %s\n", spewhash(hash));
	fprintf(stderr, "sig:  %s\n", spewhash(hashreg->hash));
#endif

	iretval = (memcmp(hashreg->hash, hash, hashlen) != 0);

#ifdef HASHSTATS
	hashstats.hash_compares++;
	hashstats.hash_scompares += hreg.size;
	if (!iretval) {
		hashstats.hash_identical++;
		hashstats.hash_sidentical += hreg.size;
	}
#endif

	return iretval;

#ifdef READ_CACHE
free_error:
	free(cache.data);
	cache.data = NULL;
error:
	cache.start = 0;
	cache.size = 0;
#endif
	return -1;
}
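
Stripped of the caching and region bookkeeping, hash_and_cmp() hashes a byte range and memcmp()s the digest against the one stored in the signature. A minimal sketch of that comparison using OpenSSL's one-shot MD5() (the function the hashfunc pointer resolves to for HASH_TYPE_MD5), with made-up data standing in for disk contents:

/* link with -lcrypto */
#include <openssl/md5.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	unsigned char data[512];
	unsigned char expected[MD5_DIGEST_LENGTH];
	unsigned char hash[MD5_DIGEST_LENGTH];

	/* Stand-in for one hash region worth of disk data. */
	memset(data, 0xa5, sizeof(data));

	/* Pretend this digest was read from the signature file. */
	MD5(data, sizeof(data), expected);

	/* Recompute over the "disk" contents and compare, as above. */
	MD5(data, sizeof(data), hash);
	if (memcmp(expected, hash, MD5_DIGEST_LENGTH) != 0)
		fprintf(stderr, "bad hash\n");
	else
		printf("hash matches\n");
	return 0;
}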
Example 10
/*
 * Decompress the chunk, calculating hashes
 */
static void
hashchunk(int chunkno, char *chunkbufp, struct hashinfo **hinfop)
{
	blockhdr_t *blockhdr;
	struct region *regp;
	z_stream z;
	int err, nreg;
	unsigned char hash[HASH_MAXSIZE];
	unsigned char *(*hashfunc)(const unsigned char *, unsigned long,
				   unsigned char *);
	readbuf_t *rbuf;
#ifdef TIMEIT
	u_int64_t sstamp, estamp;
#endif

	z.zalloc   = Z_NULL;
	z.zfree    = Z_NULL;
	z.opaque   = Z_NULL;
	z.next_in  = Z_NULL;
	z.avail_in = 0;
	z.next_out = Z_NULL;

	err = inflateInit(&z);
	CHECK_ERR(err, "inflateInit");
	
	memset(hash, 0, sizeof hash);

	/*
	 * Grab the header. It is uncompressed, and holds the real
	 * image size and the magic number. Advance the pointer too.
	 */
	blockhdr = (blockhdr_t *)chunkbufp;
	chunkbufp += DEFAULTREGIONSIZE;
	nregions += blockhdr->regioncount;
	z.next_in = chunkbufp;
	z.avail_in = blockhdr->size;
	
	switch (blockhdr->magic) {
	case COMPRESSED_V1:
		regp = (struct region *)((struct blockhdr_V1 *)blockhdr + 1);
		break;

	case COMPRESSED_V2:
		regp = (struct region *)((struct blockhdr_V2 *)blockhdr + 1);
		break;

	default:
		fprintf(stderr, "Bad Magic Number!\n");
		exit(1);
	}

	/*
	 * Determine the hash function
	 */
	switch (hashtype) {
	case HASH_TYPE_MD5:
	default:
		hashfunc = MD5;
		break;
	case HASH_TYPE_SHA1:
		hashfunc = SHA1;
		break;
	}

	/*
	 * Loop through all regions, decompressing and hashing data
	 * in HASHBLK_SIZE or smaller blocks.
	 */
	rbuf = alloc_readbuf(0, bytestosec(HASHBLK_SIZE), 0);
	if (rbuf == NULL) {
		fprintf(stderr, "no memory\n");
		exit(1);
	}
	for (nreg = 0; nreg < blockhdr->regioncount; nreg++) {
		uint32_t rstart, rsize, hsize;

		rstart = regp->start;
		rsize = regp->size;
		ndatabytes += sectobytes(rsize);
		while (rsize > 0) {
			if (rsize > bytestosec(HASHBLK_SIZE))
				hsize = bytestosec(HASHBLK_SIZE);
			else
				hsize = rsize;

			z.next_out = rbuf->data;
			z.avail_out = sectobytes(hsize);
#ifdef TIMEIT
			sstamp = rdtsc();
#endif
			err = inflate(&z, Z_SYNC_FLUSH);
#ifdef TIMEIT
			estamp = rdtsc();
			dcycles += (estamp - sstamp);
#endif
			if (err != Z_OK && err != Z_STREAM_END) {
				fprintf(stderr, "inflate failed, err=%d\n",
					err);
				exit(1);
			}

			/*
			 * Make sure we are still in synch
			 */
			if (z.avail_out != 0) {
				fprintf(stderr,
					"inflate failed to fill buf, %d left\n",
					z.avail_out);
				exit(1);
			}
			if (err == Z_STREAM_END && hsize != rsize) {
				fprintf(stderr,
					"inflate ran out of input, %d left\n",
					rsize - hsize);
				exit(1);
			}

			/*
			 * Compute the hash
			 */
			(void)(*hashfunc)(rbuf->data, sectobytes(hsize), hash);
			addhash(hinfop, chunkno, rstart, hsize, hash);

			rstart += hsize;
			rsize -= hsize;
		}
		regp++;
	}
	free_readbuf(rbuf);
	if (z.avail_in != 0) {
		fprintf(stderr,
			"too much input for chunk, %d left\n", z.avail_in);
		exit(1);
	}
}
Example 11
static readbuf_t *
alloc_readbuf(uint32_t start, uint32_t size, int dowait)
{
	readbuf_t *rbuf;
	size_t bufsize;

	pthread_mutex_lock(&readbuf_mutex);
	bufsize = sectobytes(size);
	if (bufsize > HASHBLK_SIZE) {
		fprintf(stderr, "%s: hash region too big (%lu bytes)\n",
			devfile, (unsigned long)bufsize);
		exit(1);
	}

	do {
		if (maxreadbufmem && curreadbufmem + bufsize > maxreadbufmem)
			rbuf = NULL;
		else
			rbuf = malloc(sizeof(*rbuf) + bufsize);

		if (rbuf == NULL) {
			if (!dowait) {
				pthread_mutex_unlock(&readbuf_mutex);
				return NULL;
			}

			readeridles++;
			readbufwanted = 1;
			/*
			 * Once again it appears that linuxthreads
			 * condition variables don't work well.
			 * We seem to sleep longer than necessary.
			 */
			do {
#ifdef CONDVARS_WORK
				pthread_cond_wait(&readbuf_cond,
						  &readbuf_mutex);
#else
				pthread_mutex_unlock(&readbuf_mutex);
				fsleep(1000);
				pthread_mutex_lock(&readbuf_mutex);
#endif
				pthread_testcancel();
			} while (readbufwanted);
		}
	} while (rbuf == NULL);

	curreadbufs++;
	curreadbufmem += bufsize;
	if (curreadbufs > maxbufsalloced)
		maxbufsalloced = curreadbufs;
	if (curreadbufmem > maxmemalloced)
		maxmemalloced = curreadbufmem;
	pthread_mutex_unlock(&readbuf_mutex);

	queue_init(&rbuf->chain);
	rbuf->region.start = start;
	rbuf->region.size = size;

	return rbuf;
}
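
alloc_readbuf() and free_readbuf() (Example 4) are the two halves of a bounded-buffer handshake: the allocator sets readbufwanted and waits on readbuf_cond, and the freeing side signals only when a waiter exists. Below is a simplified, more defensive sketch of that pattern with hypothetical names; it loops on the real predicate and broadcasts rather than signals (safe with multiple waiters), and it omits the fsleep() fallback used when condition variables are unreliable.

/* link with -lpthread */
#include <pthread.h>
#include <stdio.h>

#define MAXBUFS		4		/* arbitrary cap for the sketch */

static pthread_mutex_t	buf_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t	buf_cond = PTHREAD_COND_INITIALIZER;
static int		bufs_in_use;
static int		buf_wanted;

/* Allocator side: block until a buffer slot is available. */
static void
get_buf(void)
{
	pthread_mutex_lock(&buf_mutex);
	while (bufs_in_use >= MAXBUFS) {
		buf_wanted = 1;
		pthread_cond_wait(&buf_cond, &buf_mutex);
	}
	bufs_in_use++;
	pthread_mutex_unlock(&buf_mutex);
}

/* Freeing side: release a slot; wake waiters only if there are any. */
static void
put_buf(void)
{
	pthread_mutex_lock(&buf_mutex);
	bufs_in_use--;
	if (buf_wanted) {
		buf_wanted = 0;
		pthread_cond_broadcast(&buf_cond);
	}
	pthread_mutex_unlock(&buf_mutex);
}

static void *
worker(void *arg)
{
	int i;

	(void)arg;
	for (i = 0; i < 10000; i++) {
		get_buf();
		put_buf();
	}
	return NULL;
}

int
main(void)
{
	pthread_t t[8];
	int i;

	for (i = 0; i < 8; i++)
		pthread_create(&t[i], NULL, worker, NULL);
	for (i = 0; i < 8; i++)
		pthread_join(t[i], NULL);
	printf("done, bufs_in_use = %d\n", bufs_in_use);
	return 0;
}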
Example 12
static int
checkhash(char *name, struct hashinfo *hinfo)
{
	uint32_t i, inbad, badstart, badsize, reportbad;
	uint32_t badhashes, badchunks, lastbadchunk;
	uint64_t badhashdata;
	struct hashregion *reg;
	int hashlen, chunkno;
	unsigned char hash[HASH_MAXSIZE];
	unsigned char *(*hashfunc)(const unsigned char *, unsigned long,
				   unsigned char *);
	char *hashstr;
	readbuf_t *rbuf;
	size_t size;
#ifdef TIMEIT
	u_int64_t sstamp, estamp;
#endif

	if (startreader(name, hinfo))
		return -1;

	chunkno = lastbadchunk = -1;
	badhashes = badchunks = inbad = reportbad = 0;
	badhashdata = 0;
	badstart = badsize = ~0;
	switch (hinfo->hashtype) {
	case HASH_TYPE_MD5:
	default:
		hashlen = 16;
		hashfunc = MD5;
		hashstr = "MD5";
		break;
	case HASH_TYPE_SHA1:
		hashlen = 20;
		hashfunc = SHA1;
		hashstr = "SHA1";
		break;
	}
	fprintf(stderr, "Checking disk contents using %s digest\n", hashstr);

	for (i = 0, reg = hinfo->regions; i < hinfo->nregions; i++, reg++) {
		if (chunkno != reg->chunkno) {
			nchunks++;
			chunkno = reg->chunkno;
		}
		size = sectobytes(reg->region.size);
		rbuf = getblock(reg);
#ifdef TIMEIT
		sstamp = rdtsc();
#endif
		(void)(*hashfunc)(rbuf->data, size, hash);
#ifdef TIMEIT
		estamp = rdtsc();
		hcycles += (estamp - sstamp);
#endif
		putblock(rbuf);
		ndatabytes += size;

		if (detail > 2) {
			printf("[%u-%u]:\n", reg->region.start,
			       reg->region.start + reg->region.size - 1);
			printf("  sig  %s\n", spewhash(reg->hash));
			printf("  disk %s\n", spewhash(hash));
		}

		if (memcmp(reg->hash, hash, hashlen) == 0) {
			/*
			 * Hash is good.
			 * If we were in a bad stretch, be sure to dump info
			 */
			if (inbad)
				reportbad = 1;
		} else {
			/*
			 * Hash is bad.
			 * If not already in a bad stretch, start one.
			 * If in a bad stretch, lengthen it if contig.
			 * Otherwise, dump the info.
			 */
			badhashes++;
			if (chunkno != lastbadchunk) {
				badchunks++;
				lastbadchunk = chunkno;
			}
			badhashdata += size;
			if (!inbad) {
				inbad = 1;
				badstart = reg->region.start;
				badsize = reg->region.size;
			} else {
				if (badstart + badsize == reg->region.start)
					badsize += reg->region.size;
				else
					reportbad = 1;
			}
		}
#ifdef TIMEIT
		sstamp = rdtsc();
		ccycles += (sstamp - estamp);
#endif
		/*
		 * Report on a bad stretch
		 */
		if (reportbad) {
			if (detail)
				fprintf(stderr, "%s: bad hash [%u-%u]\n",
					name, badstart, badstart + badsize - 1);
			reportbad = inbad = 0;
		}
	}
	/*
	 * Finished on a sour note, report the final bad stretch.
	 */
	if (inbad && detail)
		fprintf(stderr, "%s: bad hash [%u-%u]\n",
			name, badstart, badstart + badsize - 1);

	stopreader();

	nhregions = hinfo->nregions;
	printf("%s: %lu chunks, %lu hashregions, %qu data bytes\n",
	       name, nchunks, nhregions, ndatabytes);
	if (badhashes)
		printf("%s: %u regions (%d chunks) had bad hashes, "
		       "%qu bytes affected\n",
		       name, badhashes, badchunks, badhashdata);
	dump_readbufs();
#ifdef TIMEIT
	printf("%qu bytes: read cycles: %qu, hash cycles: %qu, cmp cycles: %qu\n",
	       ndatabytes, rcycles, hcycles, ccycles);
#endif
	return 0;
}
Example 13
/*
 * Operate on a BSD slice
 */
int
read_bsdslice(int slice, int bsdtype, u_int32_t start, u_int32_t size,
	      char *sname, int infd)
{
	int		cc, i, rval = 0, npart, absoffset;
	union {
		struct disklabel	label;
		char			pad[BBSIZE];
	} dlabel;

	if (debug)
		fprintf(stderr, "  P%d (%sBSD Slice)\n", slice + 1,
			bsdtype == DOSPTYP_386BSD ? "Free" : "Open");
	
	if (devlseek(infd, sectobytes(start), SEEK_SET) < 0) {
		warn("Could not seek to beginning of BSD slice");
		return 1;
	}

	/*
	 * Then seek ahead to the disklabel.
	 */
	if (devlseek(infd, sectobytes(LABELSECTOR), SEEK_CUR) < 0) {
		warn("Could not seek to beginning of BSD disklabel");
		return 1;
	}

	if ((cc = devread(infd, &dlabel, sizeof(dlabel))) < 0) {
		warn("Could not read BSD disklabel");
		return 1;
	}
	if (cc != sizeof(dlabel)) {
		warnx("Could not get the entire BSD disklabel");
 		return 1;
	}

	/*
	 * Check the magic numbers.
	 */
	if (dlabel.label.d_magic  != DISKMAGIC ||
	    dlabel.label.d_magic2 != DISKMAGIC) {
#ifdef linux /* not needed in BSD, a fake disklabel is created by the kernel */
		/*
		 * If we were forced with the bsdfs option,
		 * assume this is a single partition disk like a
		 * memory or vnode disk.  We cons up a disklabel
		 * and let it rip.
		 */
		if (size == 0) {
			fprintf(stderr, "P%d: WARNING: No disklabel, "
				"assuming single partition\n", slice+1);
			dlabel.label.d_partitions[0].p_offset = 0;
			dlabel.label.d_partitions[0].p_size = 0;
			dlabel.label.d_partitions[0].p_fstype = FS_BSDFFS;
			return read_bsdpartition(infd, &dlabel.label, 0);
		}
#endif
		warnx("Wrong magic number in BSD disklabel");
 		return 1;
	}

	/*
	 * Now scan partitions.
	 *
	 * XXX space not covered by a partition winds up being compressed,
	 * we could detect this.
	 */
	npart = dlabel.label.d_npartitions;
	assert(npart >= 0 && npart <= 16);

	/*
	 * XXX partition table offsets were traditionally absolute, but
	 * at least with FBSD 8.x, they became relative to the slice.
	 * So we attempt to differentiate them here by looking for
	 * slice starts that are less than the DOS partition offset.
	 * Such a slice would indicate relative addressing.
	 */
	absoffset = 1;
	if (start != 0) {
		for (i = 0; i < npart; i++) {
			if (dlabel.label.d_partitions[i].p_size == 0 ||
			    dlabel.label.d_partitions[i].p_fstype == FS_UNUSED)
				continue;
			if (bsdtype == DOSPTYP_OPENBSD && i >= 8 && i < 16)
				continue;
			if (dlabel.label.d_partitions[i].p_offset < start) {
				fprintf(stderr, "P%d: WARNING: BSD label appears to use relative offsets, adjusting...\n", slice+1);
				absoffset = 0;
				break;
			}
		}
	}
	if (debug) {
		fprintf(stderr, "  P%d: %d ", slice+1, npart);
		if (absoffset == 0)
			fprintf(stderr, "(slice relative) ");
		fprintf(stderr, "BSD partitions\n");
	}
	for (i = 0; i < npart; i++) {
		if (dlabel.label.d_partitions[i].p_size == 0)
			continue;

		/*
		 * OpenBSD maps the extended DOS partitions as slices 8-15,
		 * skip them.
		 */
		if (bsdtype == DOSPTYP_OPENBSD && i >= 8 && i < 16) {
			if (debug)
				fprintf(stderr, "    '%c'   skipping, "
					"OpenBSD mapping of DOS partition %d\n",
					BSDPARTNAME(i), i - 6);
			continue;
		}

		/*
		 * Make relative offsets absolute.  We do this even for
		 * unused partitions so that any reloc entries created
		 * below are correct.
		 */
		if (absoffset == 0)
			dlabel.label.d_partitions[i].p_offset += start;

		if (dlabel.label.d_partitions[i].p_fstype == FS_UNUSED)
			continue;

		if (debug) {
			fprintf(stderr, "    '%c' ", BSDPARTNAME(i));

			fprintf(stderr, "start %9d, size %9d\t(%s)\n",
			   dlabel.label.d_partitions[i].p_offset,
			   dlabel.label.d_partitions[i].p_size,
			   fstypenames[dlabel.label.d_partitions[i].p_fstype]);
		}

		if (ignore[slice] & (1 << i)) {
			fprintf(stderr, "  Slice %d BSD partition '%c' ignored,"
				" NOT SAVING.\n",
				slice + 1, BSDPARTNAME(i));
			addskip(dlabel.label.d_partitions[i].p_offset,
				dlabel.label.d_partitions[i].p_size);
		} else if (forceraw[slice] & (1 << i)) {
			fprintf(stderr, "  Slice %d BSD partition '%c',"
				" forcing raw compression.\n",
				slice + 1, BSDPARTNAME(i));
		} else {
			rval = read_bsdpartition(infd, &dlabel.label, i);
			if (rval)
				return rval;
		}
	}
	
	/*
	 * Record a fixup for the partition table, adjusting the
	 * partition offsets to make them slice relative.
	 *
	 * Note that even if partitions were relative (absoffset == 0) we
	 * have converted the values in dlabel to absolute by this point.
	 */
	if (dorelocs &&
	    start != 0 && dlabel.label.d_partitions[0].p_offset == start) {
		for (i = 0; i < npart; i++) {
			if (dlabel.label.d_partitions[i].p_size == 0)
				continue;

			/*
			 * Don't mess with OpenBSD partitions 8-15 which map
			 * extended DOS partitions.  Also leave raw partition
			 * alone as it maps the entire disk (not just slice)
			 */
			if (bsdtype == DOSPTYP_OPENBSD &&
			    (i == 2 || (i >= 8 && i < 16)))
				continue;

			assert(dlabel.label.d_partitions[i].p_offset >= start);
			dlabel.label.d_partitions[i].p_offset -= start;
		}
		dlabel.label.d_checksum = 0;
		dlabel.label.d_checksum = dkcksum(&dlabel.label);

		addfixup(sectobytes(start+LABELSECTOR),
			 sectobytes(start),
			 (off_t)sizeof(dlabel.label), &dlabel,
			 bsdtype == DOSPTYP_OPENBSD ?
			 RELOC_OBSDDISKLABEL : RELOC_FBSDDISKLABEL);
	}

	return 0;
}
Example 14
static int
read_bsdcg(struct fs *fsp, struct cg *cgp, int cg, u_int32_t offset)
{
	int  i, max;
	u_int8_t *p;
	int count, j;
	unsigned long dboff, dbcount, dbstart;

	max = fsp->fs_fpg;
	p   = cg_blksfree(cgp);

	/* paranoia: make sure we stay in the buffer */
	assert(&p[max/NBBY] <= (u_int8_t *)cgp + fsp->fs_cgsize);

	/*
	 * XXX The bitmap is fragments, not FS blocks.
	 *
	 * The block bitmap lists blocks relative to the base (cgbase()) of
	 * the cylinder group. cgdmin() is the first actual datablock, but
	 * the bitmap includes all the blocks used for all the blocks
	 * comprising the cg. These include the superblock, cg, inodes,
	 * datablocks and the variable-sized padding before all of these
	 * (used to skew the offset of consecutive cgs).
	 * The "dbstart" parameter is thus the beginning of the cg, to which
	 * we add the bitmap offset. All blocks before cgdmin() will always
	 * be allocated, but we scan them anyway. 
	 */
	//assert(cgbase(fsp, cg) == cgstart(fsp, cg));
	dbstart = fsbtodb(fsp, cgbase(fsp, cg)) + offset;

	if (debug > 2)
		fprintf(stderr, "                   ");
	for (count = i = 0; i < max; i++)
		if (isset(p, i)) {
			j = i;
			while ((i+1)<max && isset(p, i+1))
				i++;

			dboff = dbstart + fsbtodb(fsp, j);
			dbcount = fsbtodb(fsp, (i-j) + 1);
			freecount += (i-j) + 1;
					
			if (debug > 2) {
				if (count)
					fprintf(stderr, ",%s",
						count % 4 ?
						" " : "\n                   ");
				fprintf(stderr, "%lu:%ld", dboff, dbcount);
			}
			addskip(dboff, dbcount);
			count++;
		}
	if (debug > 2)
		fprintf(stderr, "\n");

#ifdef DO_INODES
	/*
	 * Look for free inodes
	 */
	if (cgp->cg_cs.cs_nifree != 0) {
		int tifree = 0;
		unsigned long edboff;
		int ino;

		p = cg_inosused(cgp);
		max = fsp->fs_ipg;
		assert(&p[max/NBBY] <= (u_int8_t *)cgp + fsp->fs_cgsize);

		/*
		 * For UFS2, (cylinder-group relative) inode numbers beyond
		 * initediblk are uninitialized.  We do not process those
		 * now.  They are treated as regular free blocks below.
		 */
		if (fsp->fs_magic == FS_UFS2_MAGIC) {
			assert(cgp->cg_initediblk > 0);
			assert(cgp->cg_initediblk <= fsp->fs_ipg);
			assert((cgp->cg_initediblk % INOPB(fsp)) == 0);
			max = cgp->cg_initediblk;
		}
		ino = cg * fsp->fs_ipg;

#ifdef CLEAR_FREE_INODES
		if (metaoptimize) {
			static uint32_t ufs1_magic = FS_UFS1_MAGIC;
			static uint32_t ufs2_magic = FS_UFS2_MAGIC;
			uint32_t *magic;

			if (debug > 1)
				fprintf(stderr,
					"        \t ifree  %9d\n",
					cgp->cg_cs.cs_nifree);
			if (debug > 2)
				fprintf(stderr, "                   ");

			magic = (fsp->fs_magic == FS_UFS2_MAGIC) ?
				&ufs2_magic : &ufs1_magic;
			for (count = i = 0; i < max; i++) {
				if (isset(p, i)) {
					continue;
				}
				if (ino_to_fsbo(fsp, ino+i) == 0) {
					j = i;
					while ((i+1) < max && !isset(p, i+1))
						i++;

					dboff = fsbtodb(fsp,
							ino_to_fsba(fsp, ino+j));
					edboff = fsbtodb(fsp,
							 ino_to_fsba(fsp, ino+i));
#if 0
					fprintf(stderr, "      found free inodes %d-%d"
						" db %lu.%u to %lu.%u\n",
						ino+j, ino+i,
						dboff+offset, ino_to_fsbo(fsp, ino+j),
						edboff+offset, ino_to_fsbo(fsp, ino+i));
#endif
					tifree += (i+1 - j);
					dbcount = edboff - dboff;
					if ((i+1) == max)
						dbcount++;
					if (dbcount == 0)
						continue;

					addfixupfunc(inodefixup,
						     sectobytes(dboff+offset),
						     sectobytes(offset),
						     sectobytes(dbcount),
						     magic, sizeof(*magic),
						     RELOC_NONE);
					if (debug > 2) {
						if (count)
							fprintf(stderr, ",%s",
								count % 4 ?
								" " :
								"\n                   ");
						fprintf(stderr, "%lu:%ld",
							dboff+offset, dbcount);
					}
					count++;
				} else
					tifree++;
			}
			assert(i == max);

			if (debug > 2)
				fprintf(stderr, "\n");
		}
#endif

		/*
		 * For UFS2, deal with uninitialized inodes.
		 * These are sweet, we just add them to the skip list.
		 */
		if (fsp->fs_magic == FS_UFS2_MAGIC && max < fsp->fs_ipg) {
			i = max;
			if (debug > 1)
				fprintf(stderr,
					"        \t uninit %9d\n",
					fsp->fs_ipg - i);
			if (debug > 2)
				fprintf(stderr, "                   ");

			max = fsp->fs_ipg;
#if 1
			/*
			 * Paranoia!
			 */
			j = i;
			while ((j+1) < max) {
				assert(!isset(p, j+1));
				j++;
			}
#endif
			tifree += (max - i);
			dboff = fsbtodb(fsp, ino_to_fsba(fsp, ino+i));
			edboff = fsbtodb(fsp, ino_to_fsba(fsp, ino+max-1));
			dbcount = edboff - dboff + 1;

			if (debug > 2)
				fprintf(stderr, "%lu:%ld",
					dboff+offset, dbcount);

			addskip(dboff+offset, dbcount);
			if (debug > 2)
				fprintf(stderr, "\n");
		}

#ifdef CLEAR_FREE_INODES
		if (metaoptimize && tifree != cgp->cg_cs.cs_nifree)
			fprintf(stderr, "Uh-oh! found %d free inodes, "
				"shoulda found %d\n",
				tifree, cgp->cg_cs.cs_nifree);
#endif
	}
#endif

	return 0;
}
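
The first loop in read_bsdcg() walks the cg_blksfree bitmap looking for runs of set (free) bits and hands each run to addskip(). A standalone sketch of that run-finding scan over an arbitrary bitmap; the isset() macro here is a local stand-in for the FFS one and the bitmap contents are made up.

#include <stdio.h>

#define NBBY	8
#define isset(a, i) \
	(((const unsigned char *)(a))[(i)/NBBY] & (1 << ((i) % NBBY)))

int
main(void)
{
	/* Bits 3-5 and 9-10 set ("free"), everything else clear. */
	unsigned char map[2] = { 0x38, 0x06 };
	int max = 16, i, j;

	for (i = 0; i < max; i++) {
		if (!isset(map, i))
			continue;
		j = i;
		while (i + 1 < max && isset(map, i + 1))
			i++;
		/* Found a run of free fragments [j, i]. */
		printf("free run: start %d, length %d\n", j, i - j + 1);
	}
	return 0;
}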
Example 15
/*
 * Includes code yanked from UFS2 ffs_vfsops.c
 */
static int
read_bsdsblock(int infd, u_int32_t offset, int part, struct fs *fsp)
{
	static int sblock_try[] = SBLOCKSEARCH;
	union {
		struct fs fs;
		char pad[SBLOCKSIZE];
	} fsu;
	struct fs *fs = &fsu.fs;
	int64_t sblockloc = 0;
	int cc, i, altsblockloc = -1;

	/*
	 * Try reading the superblock in each of its possible locations.
	 */
	i = 0;
 tryagain:
	for ( ; sblock_try[i] != -1; i++) {
		off_t sbloc = sectobytes(offset) + sblock_try[i];

		if (devlseek(infd, sbloc, SEEK_SET) < 0) {
			warnx("BSD Partition '%c': "
			      "Could not seek to superblock",
			      BSDPARTNAME(part));
			return 1;
		}

		if ((cc = devread(infd, &fsu, SBLOCKSIZE)) < 0) {
			warn("BSD Partition '%c': Could not read superblock",
			     BSDPARTNAME(part));
			return 1;
		}
		if (cc != SBLOCKSIZE) {
			warnx("BSD Partition '%c': Truncated superblock",
			      BSDPARTNAME(part));
			return 1;
		}
		sblockloc = sblock_try[i];
		if ((fs->fs_magic == FS_UFS1_MAGIC ||
		     (fs->fs_magic == FS_UFS2_MAGIC &&
		      (fs->fs_sblockloc == sblockloc ||
		       (fs->fs_old_flags & FS_FLAGS_UPDATED) == 0))) &&
		    fs->fs_bsize <= MAXBSIZE &&
		    fs->fs_bsize >= sizeof(struct fs)) {
			/*
			 * Found a UFS1 superblock at something other
			 * than the UFS1 location, might be an alternate
			 * superblock that is out of date so continue
			 * looking for the primary superblock.
			 */
			if (fs->fs_magic == FS_UFS1_MAGIC &&
			    sblockloc != SBLOCK_UFS1 && altsblockloc == -1) {
				altsblockloc = i;
				continue;
			}
			break;
		}
	}
	if (sblock_try[i] == -1) {
		/*
		 * We had found a previous, valid UFS1 superblock at a
		 * non-standard location.  Go back and use that one.
		 */
		if (altsblockloc != -1) {
			i = altsblockloc;
			goto tryagain;
		}
		warnx("BSD Partition '%c': No superblock found",
		      BSDPARTNAME(part));
		return 1;
	}
	if (fs->fs_clean == 0)
		warnx("BSD Partition '%c': WARNING filesystem not clean",
		      BSDPARTNAME(part));
	if (fs->fs_pendingblocks != 0 || fs->fs_pendinginodes != 0)
		warnx("BSD Partition '%c': "
		      "WARNING filesystem has pending blocks/files",
		      BSDPARTNAME(part));

	if (debug)
		fprintf(stderr, "    '%c' UFS%d, superblock at %llu\n",
			BSDPARTNAME(part),
			fs->fs_magic == FS_UFS2_MAGIC ? 2 : 1,
			(unsigned long long)sblockloc);

	/*
	 * Copy UFS1 fields into newer, roomier UFS2 equivs that we use
	 * in our code.
	 */
	if (fs->fs_magic == FS_UFS1_MAGIC && fs->fs_maxbsize != fs->fs_bsize) {
		fs->fs_maxbsize = fs->fs_bsize;
		fs->fs_size = fs->fs_old_size;
		fs->fs_dsize = fs->fs_old_dsize;
		fs->fs_cstotal.cs_nbfree = fs->fs_old_cstotal.cs_nbfree;
		fs->fs_cstotal.cs_nffree = fs->fs_old_cstotal.cs_nffree;
	}

	*fsp = *fs;
	return 0;
}
Example 16
/*
 * BSD partition table offsets are relative to the start of the raw disk.
 * Very convenient.
 */
static int
read_bsdpartition(int infd, struct disklabel *dlabel, int part)
{
	int		i, cc, rval = 0;
	struct fs	fs;
	union {
		struct cg cg;
		char pad[MAXBSIZE];
	} cg;
	u_int32_t	size, offset, fssect;
	int32_t		sbfree;

	offset = dlabel->d_partitions[part].p_offset;
	size   = dlabel->d_partitions[part].p_size;
	
	if (dlabel->d_partitions[part].p_fstype == FS_SWAP) {
		addskip(offset, size);
		return 0;
	}

	if (dlabel->d_partitions[part].p_fstype != FS_BSDFFS) {
		warnx("BSD Partition '%c': Not a BSD Filesystem",
		      BSDPARTNAME(part));
		return 1;
	}

	if (read_bsdsblock(infd, offset, part, &fs))
		return 1;

	sbfree = (fs.fs_cstotal.cs_nbfree * fs.fs_frag) +
		fs.fs_cstotal.cs_nffree;

	if (debug) {
		fprintf(stderr, "        bfree %9lld, bsize %9d, cgsize %9d\n",
			(long long)fs.fs_cstotal.cs_nbfree,
			fs.fs_bsize, fs.fs_cgsize);
	}
	assert(fs.fs_cgsize <= MAXBSIZE);
	assert((fs.fs_cgsize % secsize) == 0);

	/*
	 * See if the filesystem is smaller than the containing partition.
	 * If so, and we are skipping such space, inform the user.
	 */
	fssect = bytestosec(fs.fs_fsize * (off_t)fs.fs_size);
	if (excludenonfs && fssect < size) {
		warnx("BSD Partition '%c': filesystem smaller than partition, "
		      "excluding [%u-%u]",
		      BSDPARTNAME(part), offset+fssect, offset+size-1);
		addskip(offset + fssect, size - fssect);
	}

	freecount = 0;
	for (i = 0; i < fs.fs_ncg; i++) {
		unsigned long	cgoff;

		cgoff = fsbtodb(&fs, cgtod(&fs, i)) + offset;

		if (devlseek(infd, sectobytes(cgoff), SEEK_SET) < 0) {
			warn("BSD Partition '%c': "
			     "Could not seek to cg %d at %lld",
			     BSDPARTNAME(part), i,
			     (long long)sectobytes(cgoff));
			return 1;
		}
		if ((cc = devread(infd, &cg, fs.fs_cgsize)) < 0) {
			warn("BSD Partition '%c': Could not read cg %d",
			     BSDPARTNAME(part), i);
			return 1;
		}
		if (cc != fs.fs_cgsize) {
			warn("BSD Partition '%c': Truncated cg %d",
			     BSDPARTNAME(part), i);
			return 1;
		}
		if (debug > 1) {
			fprintf(stderr,
				"        CG%d\t offset %9ld, bfree %6d\n",
				i, cgoff, cg.cg.cg_cs.cs_nbfree);
		}
		
		rval = read_bsdcg(&fs, &cg.cg, i, offset);
		if (rval)
			return rval;
	}

	if (rval == 0 && freecount != sbfree) {
		warnx("BSD Partition '%c': "
		      "computed free count (%d) != expected free count (%d)",
		      BSDPARTNAME(part), freecount, sbfree);
	}

	return rval;
}