Example #1
static
void
write_mrecord(int fdout, uint32_t type, hammer_ioc_mrecord_any_t mrec,
	      int bytes)
{
	char zbuf[HAMMER_HEAD_ALIGN];
	int pad;

	pad = HAMMER_HEAD_DOALIGN(bytes) - bytes;

	assert(bytes >= (int)sizeof(mrec->head));
	bzero(&mrec->head, sizeof(mrec->head));
	mrec->head.signature = HAMMER_IOC_MIRROR_SIGNATURE;
	mrec->head.type = type;
	mrec->head.rec_size = bytes;
	hammer_crc_set_mrec_head(&mrec->head, bytes);
	if (write(fdout, mrec, bytes) != bytes) {
		fprintf(stderr, "write_mrecord: error %d (%s)\n",
			errno, strerror(errno));
		exit(1);
	}
	if (pad) {
		bzero(zbuf, pad);
		if (write(fdout, zbuf, pad) != pad) {
			fprintf(stderr, "write_mrecord: error %d (%s)\n",
				errno, strerror(errno));
			exit(1);
		}
	}
}
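The padding logic above relies on HAMMER_HEAD_DOALIGN rounding a record size up to the alignment boundary, so the pad is always smaller than HAMMER_HEAD_ALIGN and zbuf never overflows. A minimal sketch of that rounding idiom, assuming a power-of-two alignment; the DEMO_* names and the value 16 are illustrative stand-ins, not taken from the HAMMER headers:

#include <assert.h>
#include <stdio.h>

/*
 * Illustrative stand-ins: the real HAMMER_HEAD_ALIGN / HAMMER_HEAD_DOALIGN
 * definitions live in the HAMMER headers; only the power-of-two rounding
 * idiom is demonstrated here.
 */
#define DEMO_HEAD_ALIGN		16
#define DEMO_HEAD_ALIGN_MASK	(DEMO_HEAD_ALIGN - 1)
#define DEMO_HEAD_DOALIGN(bytes) \
	(((bytes) + DEMO_HEAD_ALIGN_MASK) & ~DEMO_HEAD_ALIGN_MASK)

int
main(void)
{
	int bytes;

	for (bytes = 1; bytes <= 48; bytes += 13) {
		int aligned = DEMO_HEAD_DOALIGN(bytes);
		int pad = aligned - bytes;

		/*
		 * pad is always 0..DEMO_HEAD_ALIGN-1, which is why zbuf
		 * in write_mrecord() can be HAMMER_HEAD_ALIGN bytes.
		 */
		assert(pad >= 0 && pad < DEMO_HEAD_ALIGN);
		printf("bytes=%2d aligned=%2d pad=%2d\n", bytes, aligned, pad);
	}
	return 0;
}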
Example #2
/*
 * All B-Tree records within the specified key range which also conform
 * to the transaction id range are returned.  Mirroring code keeps track
 * of the last transaction id fully scanned and can efficiently pick up
 * where it left off if interrupted.
 *
 * The PFS is identified in the mirror structure.  The passed ip is just
 * some directory in the overall HAMMER filesystem and has nothing to
 * do with the PFS.
 */
int
hammer_ioc_mirror_read(hammer_transaction_t trans, hammer_inode_t ip,
		       struct hammer_ioc_mirror_rw *mirror)
{
	struct hammer_cmirror cmirror;
	struct hammer_cursor cursor;
	union hammer_ioc_mrecord_any mrec;
	hammer_btree_leaf_elm_t elm;
	const int crc_start = HAMMER_MREC_CRCOFF;
	char *uptr;
	int error;
	int data_len;
	int bytes;
	int eatdisk;
	int mrec_flags;
	u_int32_t localization;
	u_int32_t rec_crc;

	localization = (u_int32_t)mirror->pfs_id << 16;

	if ((mirror->key_beg.localization | mirror->key_end.localization) &
	    HAMMER_LOCALIZE_PSEUDOFS_MASK) {
		return(EINVAL);
	}
	if (hammer_btree_cmp(&mirror->key_beg, &mirror->key_end) > 0)
		return(EINVAL);

	mirror->key_cur = mirror->key_beg;
	mirror->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	mirror->key_cur.localization += localization;
	bzero(&mrec, sizeof(mrec));
	bzero(&cmirror, sizeof(cmirror));

	/*
	 * Make CRC errors non-fatal (at least on data), causing an EDOM
	 * error instead of EIO.
	 */
	trans->flags |= HAMMER_TRANSF_CRCDOM;

retry:
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		goto failed;
	}
	cursor.key_beg = mirror->key_cur;
	cursor.key_end = mirror->key_end;
	cursor.key_end.localization &= HAMMER_LOCALIZE_MASK;
	cursor.key_end.localization += localization;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	cursor.flags |= HAMMER_CURSOR_BACKEND;

	/*
	 * This flag filters the search to only return elements whose create
	 * or delete TID is >= mirror_tid.  The B-Tree uses the mirror_tid
	 * field stored with internal and leaf nodes to shortcut the scan.
	 */
	cursor.flags |= HAMMER_CURSOR_MIRROR_FILTERED;
	cursor.cmirror = &cmirror;
	cmirror.mirror_tid = mirror->tid_beg;

	error = hammer_btree_first(&cursor);
	while (error == 0) {
		/*
		 * Yield to more important tasks
		 */
		if (error == 0) {
			error = hammer_signal_check(trans->hmp);
			if (error)
				break;
		}

		/*
		 * An internal node can be returned in mirror-filtered
		 * mode and indicates that the scan is returning a skip
		 * range in the cursor->cmirror structure.
		 */
		uptr = (char *)mirror->ubuf + mirror->count;
		if (cursor.node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
			/*
			 * Check space
			 */
			mirror->key_cur = cmirror.skip_beg;
			bytes = sizeof(mrec.skip);
			if (mirror->count + HAMMER_HEAD_DOALIGN(bytes) >
			    mirror->size) {
				break;
			}

			/*
			 * Fill mrec
			 */
			mrec.head.signature = HAMMER_IOC_MIRROR_SIGNATURE;
			mrec.head.type = HAMMER_MREC_TYPE_SKIP;
			mrec.head.rec_size = bytes;
			mrec.skip.skip_beg = cmirror.skip_beg;
			mrec.skip.skip_end = cmirror.skip_end;
			mrec.head.rec_crc = crc32(&mrec.head.rec_size,
						 bytes - crc_start);
			error = copyout(&mrec, uptr, bytes);
			eatdisk = 0;
			goto didwrite;
		}

		/*
		 * Leaf node.  In full-history mode we could filter out
		 * elements modified outside the user-requested TID range.
		 *
		 * However, such elements must be returned so the writer
		 * can compare them against the target to determine what
		 * needs to be deleted on the target, particularly for
		 * no-history mirrors.
		 */
		KKASSERT(cursor.node->ondisk->type == HAMMER_BTREE_TYPE_LEAF);
		elm = &cursor.node->ondisk->elms[cursor.index].leaf;
		mirror->key_cur = elm->base;

		/*
		 * If the record was created after our end point we just
		 * ignore it.
		 */
		if (elm->base.create_tid > mirror->tid_end) {
			error = 0;
			bytes = 0;
			eatdisk = 1;
			goto didwrite;
		}

		/*
		 * Determine if we should generate a PASS or a REC.  PASS
		 * records are records without any data payload.  Such
		 * records will be generated if the target is already expected
		 * to have the record, allowing it to delete the gaps.
		 *
		 * A PASS record is also used to perform deletions on the
		 * target.
		 *
		 * Such deletions are needed if the master or files on the
		 * master are no-history, or if the slave is so far behind
		 * that the master has already been pruned.
		 */
		if (elm->base.create_tid < mirror->tid_beg) {
			bytes = sizeof(mrec.rec);
			if (mirror->count + HAMMER_HEAD_DOALIGN(bytes) >
			    mirror->size) {
				break;
			}

			/*
			 * Fill mrec.
			 */
			mrec.head.signature = HAMMER_IOC_MIRROR_SIGNATURE;
			mrec.head.type = HAMMER_MREC_TYPE_PASS;
			mrec.head.rec_size = bytes;
			mrec.rec.leaf = *elm;
			mrec.head.rec_crc = crc32(&mrec.head.rec_size,
						 bytes - crc_start);
			error = copyout(&mrec, uptr, bytes);
			eatdisk = 1;
			goto didwrite;
		}

		/*
		 * The core code exports the data to userland.
		 *
		 * CRC errors on data are reported but the data is passed
		 * through anyway; the user program must sanitize it.
		 *
		 * If userland just wants the btree records it can
		 * request that bulk data not be returned.  This is
		 * used during mirror-stream histogram generation.
		 */
		mrec_flags = 0;
		data_len = (elm->data_offset) ? elm->data_len : 0;
		if (data_len &&
		    (mirror->head.flags & HAMMER_IOC_MIRROR_NODATA)) {
			data_len = 0;
			mrec_flags |= HAMMER_MRECF_NODATA;
		}
		if (data_len) {
			error = hammer_btree_extract(&cursor,
						     HAMMER_CURSOR_GET_DATA);
			if (error) {
				if (error != EDOM)
					break;
				mrec_flags |= HAMMER_MRECF_CRC_ERROR |
					      HAMMER_MRECF_DATA_CRC_BAD;
			}
		}

		bytes = sizeof(mrec.rec) + data_len;
		if (mirror->count + HAMMER_HEAD_DOALIGN(bytes) > mirror->size)
			break;

		/*
		 * Construct the record for userland and copyout.
		 *
		 * The user is asking for a snapshot.  If the record was
		 * deleted beyond the user-requested ending tid, the record
		 * is not considered deleted from userland's point of view
		 * and delete_tid is cleared.
		 */
		mrec.head.signature = HAMMER_IOC_MIRROR_SIGNATURE;
		mrec.head.type = HAMMER_MREC_TYPE_REC | mrec_flags;
		mrec.head.rec_size = bytes;
		mrec.rec.leaf = *elm;

		if (elm->base.delete_tid > mirror->tid_end)
			mrec.rec.leaf.base.delete_tid = 0;
		rec_crc = crc32(&mrec.head.rec_size,
				sizeof(mrec.rec) - crc_start);
		if (data_len)
			rec_crc = crc32_ext(cursor.data, data_len, rec_crc);
		mrec.head.rec_crc = rec_crc;
		error = copyout(&mrec, uptr, sizeof(mrec.rec));
		if (data_len && error == 0) {
			error = copyout(cursor.data, uptr + sizeof(mrec.rec),
					data_len);
		}
		eatdisk = 1;

		/*
		 * eatdisk controls whether we skip the current cursor
		 * position on the next scan or not.  If doing a SKIP
		 * the cursor is already positioned properly for the next
		 * scan and eatdisk will be 0.
		 */
didwrite:
		if (error == 0) {
			mirror->count += HAMMER_HEAD_DOALIGN(bytes);
			if (eatdisk)
				cursor.flags |= HAMMER_CURSOR_ATEDISK;
			else
				cursor.flags &= ~HAMMER_CURSOR_ATEDISK;
			error = hammer_btree_iterate(&cursor);
		}
	}
	if (error == ENOENT) {
		mirror->key_cur = mirror->key_end;
		error = 0;
	}
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	if (error == EINTR) {
		mirror->head.flags |= HAMMER_IOC_HEAD_INTR;
		error = 0;
	}
failed:
	mirror->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	return(error);
}
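For context, a hedged sketch of how a userland consumer might walk the buffer filled in by this ioctl; the same stride appears in generate_histogram() later in this listing. The structure here is a simplified stand-in for the real mrecord header, and DEMO_HEAD_DOALIGN repeats the illustrative rounding macro from the sketch after Example #1:

#include <stdint.h>

/* Simplified stand-in for the real mrecord header layout. */
struct demo_mrec_head {
	uint32_t signature;
	uint32_t rec_crc;
	uint32_t rec_size;
	uint32_t type;
};

#define DEMO_HEAD_ALIGN		16	/* illustrative stand-in */
#define DEMO_HEAD_ALIGN_MASK	(DEMO_HEAD_ALIGN - 1)
#define DEMO_HEAD_DOALIGN(bytes) \
	(((bytes) + DEMO_HEAD_ALIGN_MASK) & ~DEMO_HEAD_ALIGN_MASK)

/*
 * Walk mirror-read output: records are packed back to back at aligned
 * offsets, so the stride is the aligned rec_size.  This trusts the
 * producer (the kernel) to emit sane headers.
 */
static void
demo_walk_records(const char *ubuf, uint32_t count)
{
	uint32_t off = 0;

	while (off < count) {
		const struct demo_mrec_head *head =
			(const void *)(ubuf + off);

		/* dispatch on head->type here: REC, PASS, SKIP, ... */
		off += DEMO_HEAD_DOALIGN(head->rec_size);
	}
}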
Example #3
/*
 * Copy records from userland to the target mirror.
 *
 * The PFS is identified in the mirror structure.  The passed ip is just
 * some directory in the overall HAMMER filesystem and has nothing to
 * do with the PFS.  In fact, there might not even be a root directory for
 * the PFS yet!
 */
int
hammer_ioc_mirror_write(hammer_transaction_t trans, hammer_inode_t ip,
		       struct hammer_ioc_mirror_rw *mirror)
{
	union hammer_ioc_mrecord_any mrec;
	struct hammer_cursor cursor;
	u_int32_t localization;
	int checkspace_count = 0;
	int error;
	int bytes;
	char *uptr;
	int seq;

	localization = (u_int32_t)mirror->pfs_id << 16;
	seq = trans->hmp->flusher.done;

	/*
	 * Validate the mirror structure and relocalize the tracking keys.
	 */
	if (mirror->size < 0 || mirror->size > 0x70000000)
		return(EINVAL);
	mirror->key_beg.localization &= HAMMER_LOCALIZE_MASK;
	mirror->key_beg.localization += localization;
	mirror->key_end.localization &= HAMMER_LOCALIZE_MASK;
	mirror->key_end.localization += localization;
	mirror->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	mirror->key_cur.localization += localization;

	/*
	 * Set up our tracking cursor for the loop.  The tracking cursor
	 * is used to delete records that are no longer present on the
	 * master.  The last handled record at key_cur must be skipped.
	 */
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);

	cursor.key_beg = mirror->key_cur;
	cursor.key_end = mirror->key_end;
	cursor.flags |= HAMMER_CURSOR_BACKEND;
	error = hammer_btree_first(&cursor);
	if (error == 0)
		cursor.flags |= HAMMER_CURSOR_ATEDISK;
	if (error == ENOENT)
		error = 0;

	/*
	 * Loop until our input buffer has been exhausted.
	 */
	while (error == 0 &&
		mirror->count + sizeof(mrec.head) <= mirror->size) {

		/*
		 * Don't blow out the buffer cache.  Leave room for frontend
		 * cache as well.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 */
		while (hammer_flusher_meta_halflimit(trans->hmp) ||
		       hammer_flusher_undo_exhausted(trans, 2)) {
			hammer_unlock_cursor(&cursor);
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async_one(trans->hmp);
		}

		/*
		 * If there is insufficient free space it may be due to
		 * reserved bigblocks, which flushing might fix.
		 */
		if (hammer_checkspace(trans->hmp, HAMMER_CHKSPC_MIRROR)) {
			if (++checkspace_count == 10) {
				error = ENOSPC;
				break;
			}
			hammer_unlock_cursor(&cursor);
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async(trans->hmp, NULL);
		}

		/*
		 * Acquire and validate header
		 */
		if ((bytes = mirror->size - mirror->count) > (int)sizeof(mrec))
			bytes = (int)sizeof(mrec);
		uptr = (char *)mirror->ubuf + mirror->count;
		error = copyin(uptr, &mrec, bytes);
		if (error)
			break;
		if (mrec.head.signature != HAMMER_IOC_MIRROR_SIGNATURE) {
			error = EINVAL;
			break;
		}
		if (mrec.head.rec_size < sizeof(mrec.head) ||
		    mrec.head.rec_size > sizeof(mrec) + HAMMER_XBUFSIZE ||
		    mirror->count + mrec.head.rec_size > mirror->size) {
			error = EINVAL;
			break;
		}

		switch(mrec.head.type & HAMMER_MRECF_TYPE_MASK) {
		case HAMMER_MREC_TYPE_SKIP:
			if (mrec.head.rec_size != sizeof(mrec.skip))
				error = EINVAL;
			if (error == 0)
				error = hammer_ioc_mirror_write_skip(
						&cursor, &mrec.skip,
						mirror, localization);
			break;
		case HAMMER_MREC_TYPE_REC:
			if (mrec.head.rec_size < sizeof(mrec.rec))
				error = EINVAL;
			if (error == 0)
				error = hammer_ioc_mirror_write_rec(
						&cursor, &mrec.rec,
						mirror, localization,
						uptr + sizeof(mrec.rec));
			break;
		case HAMMER_MREC_TYPE_REC_NODATA:
		case HAMMER_MREC_TYPE_REC_BADCRC:
			/*
			 * Records with bad data payloads are ignored XXX.
			 * Records with no data payload have to be skipped
			 * (they shouldn't have been written in the first
			 * place).
			 */
			if (mrec.head.rec_size < sizeof(mrec.rec))
				error = EINVAL;
			break;
		case HAMMER_MREC_TYPE_PASS:
			if (mrec.head.rec_size != sizeof(mrec.rec))
				error = EINVAL;
			if (error == 0)
				error = hammer_ioc_mirror_write_pass(
						&cursor, &mrec.rec,
						mirror, localization);
			break;
		default:
			error = EINVAL;
			break;
		}

		/*
		 * Retry the current record on deadlock, otherwise set up
		 * for the next loop iteration.
		 */
		if (error == EDEADLK) {
			while (error == EDEADLK) {
				hammer_sync_lock_sh(trans);
				hammer_recover_cursor(&cursor);
				error = hammer_cursor_upgrade(&cursor);
				hammer_sync_unlock(trans);
			}
		} else {
			if (error == EALREADY)
				error = 0;
			if (error == 0) {
				mirror->count +=
					HAMMER_HEAD_DOALIGN(mrec.head.rec_size);
			}
		}
	}
	hammer_done_cursor(&cursor);

	/*
	 * Cumulative error
	 */
	if (error) {
		mirror->head.flags |= HAMMER_IOC_HEAD_ERROR;
		mirror->head.error = error;
	}

	/*
	 * Ioctls don't update the RW data structure if an error is returned,
	 * so always return 0.
	 */
	return(0);
}
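The header checks at the top of the loop (signature, minimum and maximum rec_size, and staying inside the input buffer) can be read as a single predicate. A hedged standalone sketch; the constants here are clearly fake stand-ins, the real signature and HAMMER_XBUFSIZE come from the HAMMER headers:

#include <stddef.h>
#include <stdint.h>

#define DEMO_MIRROR_SIGNATURE	0xdeadbeefu	/* stand-in signature */
#define DEMO_XBUFSIZE		65536		/* stand-in for HAMMER_XBUFSIZE */

struct demo_mrec_head {
	uint32_t signature;
	uint32_t rec_crc;
	uint32_t rec_size;
	uint32_t type;
};

/*
 * Returns nonzero when a header copied in from userland passes the same
 * sanity checks hammer_ioc_mirror_write() applies before dispatching.
 * max_mrec stands in for sizeof(union hammer_ioc_mrecord_any).
 */
static int
demo_head_valid(const struct demo_mrec_head *head,
		size_t max_mrec, size_t count, size_t size)
{
	if (head->signature != DEMO_MIRROR_SIGNATURE)
		return 0;				/* wrong magic */
	if (head->rec_size < sizeof(*head))
		return 0;				/* impossibly small */
	if (head->rec_size > max_mrec + DEMO_XBUFSIZE)
		return 0;				/* impossibly large */
	if (count + head->rec_size > size)
		return 0;				/* runs past input buffer */
	return 1;
}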
Example #4
void
hammer_cmd_mirror_dump(char **av, int ac)
{
	char *buf = malloc(SERIALBUF_SIZE);
	struct hammer_ioc_mrecord_head pickup;
	hammer_ioc_mrecord_any_t mrec;
	int error;
	int size;
	int offset;
	int bytes;
	int header_only = 0;

	if (ac == 1 && strcmp(*av, "header") == 0)
		header_only = 1;
	else if (ac != 0)
		mirror_usage(1);

	/*
	 * Read and process the PFS header
	 */
	pickup.signature = 0;
	pickup.type = 0;

	mrec = read_mrecord(0, &error, &pickup);

	/*
	 * Dump the PFS header. mirror-dump takes its input from the output
	 * of a mirror-read so getpfs() can't be used to get a fd to be passed
	 * to dump_pfsd().
	 */
	if (header_only && mrec != NULL) {
		dump_pfsd(&mrec->pfs.pfsd, -1);
		free(mrec);
		free(buf);
		return;
	}
	free(mrec);

again:
	/*
	 * Read and process bulk records
	 */
	for (;;) {
		size = read_mrecords(0, buf, SERIALBUF_SIZE, &pickup);
		if (size <= 0)
			break;
		offset = 0;
		while (offset < size) {
			mrec = (void *)((char *)buf + offset);
			bytes = HAMMER_HEAD_DOALIGN(mrec->head.rec_size);
			if (offset + bytes > size) {
				fprintf(stderr, "Misaligned record\n");
				exit(1);
			}

			switch(mrec->head.type & HAMMER_MRECF_TYPE_MASK) {
			case HAMMER_MREC_TYPE_REC_BADCRC:
			case HAMMER_MREC_TYPE_REC:
				printf("Record lo=%08x obj=%016jx key=%016jx "
				       "rt=%02x ot=%02x",
				        mrec->rec.leaf.base.localization,
					(uintmax_t)mrec->rec.leaf.base.obj_id,
					(uintmax_t)mrec->rec.leaf.base.key,
					mrec->rec.leaf.base.rec_type,
					mrec->rec.leaf.base.obj_type);
				if (mrec->head.type ==
				    HAMMER_MREC_TYPE_REC_BADCRC) {
					printf(" (BAD CRC)");
				}
				printf("\n");
				printf("       tids %016jx:%016jx data=%d\n",
				    (uintmax_t)mrec->rec.leaf.base.create_tid,
				    (uintmax_t)mrec->rec.leaf.base.delete_tid,
				    mrec->rec.leaf.data_len);
				break;
			case HAMMER_MREC_TYPE_PASS:
				printf("Pass   lo=%08x obj=%016jx key=%016jx "
				       "rt=%02x ot=%02x\n",
				        mrec->rec.leaf.base.localization,
					(uintmax_t)mrec->rec.leaf.base.obj_id,
					(uintmax_t)mrec->rec.leaf.base.key,
					mrec->rec.leaf.base.rec_type,
					mrec->rec.leaf.base.obj_type);
				printf("       tids %016jx:%016jx data=%d\n",
				    (uintmax_t)mrec->rec.leaf.base.create_tid,
				    (uintmax_t)mrec->rec.leaf.base.delete_tid,
				    mrec->rec.leaf.data_len);
				break;
			case HAMMER_MREC_TYPE_SKIP:
				printf("Skip   lo=%08x obj=%016jx key=%016jx rt=%02x to\n"
				       "       lo=%08x obj=%016jx key=%016jx rt=%02x\n",
				       mrec->skip.skip_beg.localization,
				       (uintmax_t)mrec->skip.skip_beg.obj_id,
				       (uintmax_t)mrec->skip.skip_beg.key,
				       mrec->skip.skip_beg.rec_type,
				       mrec->skip.skip_end.localization,
				       (uintmax_t)mrec->skip.skip_end.obj_id,
				       (uintmax_t)mrec->skip.skip_end.key,
				       mrec->skip.skip_end.rec_type);
				break;
			default:
				break;
			}
			offset += bytes;
		}
	}

	/*
	 * Read and process the termination sync record.
	 */
	mrec = read_mrecord(0, &error, &pickup);
	if (mrec == NULL ||
	    (mrec->head.type != HAMMER_MREC_TYPE_SYNC &&
	     mrec->head.type != HAMMER_MREC_TYPE_IDLE)) {
		fprintf(stderr, "Mirror-dump: Did not get termination "
				"sync record\n");
	}
	free(mrec);

	/*
	 * Continue with more batches until EOF.
	 */
	mrec = read_mrecord(0, &error, &pickup);
	if (mrec) {
		free(mrec);
		goto again;
	}
	free(buf);
}
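The stream consumed above is framed as: a PFS header mrecord, then batches of bulk records, each batch closed by a SYNC or IDLE mrecord, until EOF. A compressed sketch of that outer loop, assuming the read_mrecord()/read_mrecords() helpers and types shown in Examples #6 and #7 of this listing:

/*
 * Hedged sketch of the batch framing: bulk records arrive via
 * read_mrecords(), each batch is closed by a terminator mrecord read
 * via read_mrecord(), and EOF (NULL) ends the stream.
 */
static void
demo_consume_stream(char *buf)
{
	struct hammer_ioc_mrecord_head pickup;
	hammer_ioc_mrecord_any_t mrec;
	int error;
	int size;

	pickup.signature = 0;
	pickup.type = 0;

	for (;;) {
		/* bulk records; the batch ends when read_mrecords() drains */
		while ((size = read_mrecords(0, buf, SERIALBUF_SIZE,
					     &pickup)) > 0) {
			/* walk the aligned records in buf[0..size) */
		}

		/* each batch is closed by a SYNC or IDLE mrecord */
		mrec = read_mrecord(0, &error, &pickup);
		if (mrec == NULL)
			break;		/* EOF terminates the stream */
		free(mrec);
	}
}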
Example #5
static int
generate_histogram(int fd, const char *filesystem,
		   histogram_t *histogram_ary,
		   struct hammer_ioc_mirror_rw *mirror_base,
		   int *repeatp)
{
	struct hammer_ioc_mirror_rw mirror;
	union hammer_ioc_mrecord_any *mrec;
	hammer_tid_t tid_beg;
	hammer_tid_t tid_end;
	hammer_tid_t tid;
	hammer_tid_t tidx;
	uint64_t *tid_bytes;
	uint64_t total;
	uint64_t accum;
	int chunkno;
	int i;
	int res;
	int off;
	int len;

	mirror = *mirror_base;
	tid_beg = mirror.tid_beg;
	tid_end = mirror.tid_end;
	mirror.head.flags |= HAMMER_IOC_MIRROR_NODATA;

	if (*histogram_ary == NULL) {
		*histogram_ary = malloc(sizeof(struct histogram) *
					(HIST_COUNT + 2));
	}
	if (tid_beg >= tid_end)
		return(0);

	/* needs 2 extra */
	tid_bytes = malloc(sizeof(*tid_bytes) * (HIST_COUNT + 2));
	bzero(tid_bytes, sizeof(*tid_bytes) * (HIST_COUNT + 2));

	if (*repeatp == 0) {
		fprintf(stderr, "Prescan to break up bulk transfer");
		if (VerboseOpt > 1)
			fprintf(stderr, " (%juMB chunks)",
				(uintmax_t)(SplitupOpt / (1024 * 1024)));
		fprintf(stderr, "\n");
	}

	/*
 * Note: the (tid_beg,tid_end) range is inclusive of both beg & end.
	 *
	 * Note: Estimates can be off when the mirror is way behind due
	 *	 to skips.
	 */
	total = 0;
	accum = 0;
	chunkno = 0;
	for (;;) {
		mirror.count = 0;
		if (ioctl(fd, HAMMERIOC_MIRROR_READ, &mirror) < 0) {
			fprintf(stderr, "Mirror-read %s failed: %s\n",
				filesystem, strerror(errno));
			exit(1);
		}
		if (mirror.head.flags & HAMMER_IOC_HEAD_ERROR) {
			fprintf(stderr,
				"Mirror-read %s fatal error %d\n",
				filesystem, mirror.head.error);
			exit(1);
		}
		for (off = 0;
		     off < mirror.count;
		     off += HAMMER_HEAD_DOALIGN(mrec->head.rec_size)) {
			mrec = (void *)((char *)mirror.ubuf + off);

			/*
			 * We only care about general RECs and PASS
			 * records.  We ignore SKIPs.
			 */
			switch (mrec->head.type & HAMMER_MRECF_TYPE_LOMASK) {
			case HAMMER_MREC_TYPE_REC:
			case HAMMER_MREC_TYPE_PASS:
				break;
			default:
				continue;
			}

			/*
			 * Calculate for two indices, create_tid and
			 * delete_tid.  Record data only applies to
			 * the create_tid.
			 *
			 * When tid is exactly on the boundary it really
			 * belongs to the previous entry because scans
			 * are inclusive of the ending entry.
			 */
			tid = mrec->rec.leaf.base.delete_tid;
			if (tid && tid >= tid_beg && tid <= tid_end) {
				len = HAMMER_HEAD_DOALIGN(mrec->head.rec_size);
				if (mrec->head.type ==
				    HAMMER_MREC_TYPE_REC) {
					len -= HAMMER_HEAD_DOALIGN(
						    mrec->rec.leaf.data_len);
					assert(len > 0);
				}
				i = (tid - tid_beg) * HIST_COUNT /
				    (tid_end - tid_beg);
				tidx = tid_beg + i * (tid_end - tid_beg) /
						 HIST_COUNT;
				if (tid == tidx && i)
					--i;
				assert(i >= 0 && i < HIST_COUNT);
				tid_bytes[i] += len;
				total += len;
				accum += len;
			}

			tid = mrec->rec.leaf.base.create_tid;
			if (tid && tid >= tid_beg && tid <= tid_end) {
				len = HAMMER_HEAD_DOALIGN(mrec->head.rec_size);
				if (mrec->head.type ==
				    HAMMER_MREC_TYPE_REC_NODATA) {
					len += HAMMER_HEAD_DOALIGN(
						    mrec->rec.leaf.data_len);
				}
				i = (tid - tid_beg) * HIST_COUNT /
				    (tid_end - tid_beg);
				tidx = tid_beg + i * (tid_end - tid_beg) /
						 HIST_COUNT;
				if (tid == tidx && i)
					--i;
				assert(i >= 0 && i < HIST_COUNT);
				tid_bytes[i] += len;
				total += len;
				accum += len;
			}
		}
		if (*repeatp == 0 && accum > SplitupOpt) {
			if (VerboseOpt > 1) {
				fprintf(stderr, ".");
				fflush(stderr);
			}
			++chunkno;
			score_printf(LINE2, "Prescan chunk %d", chunkno);
			accum = 0;
		}
		if (mirror.count == 0)
			break;
		mirror.key_beg = mirror.key_cur;
	}

	/*
	 * Reduce to SplitupOpt (default 4GB) chunks.  This code may
	 * use up to two additional elements.  Do the array in-place.
	 *
	 * Inefficient degenerate cases can occur if we do not accumulate
	 * at least the requested split amount, so err on the side of
	 * going over a bit.
	 */
	res = 0;
	(*histogram_ary)[res].tid = tid_beg;
	(*histogram_ary)[res].bytes = tid_bytes[0];
	for (i = 1; i < HIST_COUNT; ++i) {
		if ((*histogram_ary)[res].bytes >= SplitupOpt) {
			++res;
			(*histogram_ary)[res].tid = tid_beg +
					i * (tid_end - tid_beg) /
					HIST_COUNT;
			(*histogram_ary)[res].bytes = 0;

		}
		(*histogram_ary)[res].bytes += tid_bytes[i];
	}
	++res;
	(*histogram_ary)[res].tid = tid_end;
	(*histogram_ary)[res].bytes = -1;

	if (*repeatp == 0) {
		if (VerboseOpt > 1)
			fprintf(stderr, "\n");	/* newline after ... */
		score_printf(LINE3, "Prescan %d chunks, total %ju MBytes",
			res, (uintmax_t)total / (1024 * 1024));
		fprintf(stderr, "Prescan %d chunks, total %ju MBytes (",
			res, (uintmax_t)total / (1024 * 1024));
		for (i = 0; i < res && i < 3; ++i) {
			if (i)
				fprintf(stderr, ", ");
			fprintf(stderr, "%ju",
				(uintmax_t)(*histogram_ary)[i].bytes);
		}
		if (i < res)
			fprintf(stderr, ", ...");
		fprintf(stderr, ")\n");
	}
	assert(res <= HIST_COUNT);
	*repeatp = 1;

	free(tid_bytes);
	return(res);
}
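The bucket-index computation above charges a TID that lands exactly on a bucket boundary to the previous bucket, because scans are inclusive of the ending entry. A hedged standalone version of just that math; DEMO_HIST_COUNT is an illustrative stand-in for HIST_COUNT:

#include <assert.h>
#include <stdint.h>

#define DEMO_HIST_COUNT	100	/* stand-in for HIST_COUNT */

/*
 * Map a TID in the inclusive range [tid_beg, tid_end] to a histogram
 * bucket, mirroring the computation in generate_histogram().  A TID
 * landing exactly on a bucket boundary belongs to the previous bucket.
 */
static int
demo_bucket(uint64_t tid, uint64_t tid_beg, uint64_t tid_end)
{
	uint64_t tidx;
	int i;

	i = (int)((tid - tid_beg) * DEMO_HIST_COUNT / (tid_end - tid_beg));
	tidx = tid_beg + (uint64_t)i * (tid_end - tid_beg) / DEMO_HIST_COUNT;
	if (tid == tidx && i)		/* exact boundary: previous bucket */
		--i;
	assert(i >= 0 && i < DEMO_HIST_COUNT);
	return i;
}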
Example #6
/*
 * Read and return a single mrecord.
 */
static
hammer_ioc_mrecord_any_t
read_mrecord(int fdin, int *errorp, hammer_ioc_mrecord_head_t pickup)
{
	hammer_ioc_mrecord_any_t mrec;
	struct hammer_ioc_mrecord_head mrechd;
	size_t bytes;
	size_t n;
	size_t i;

	if (pickup && pickup->type != 0) {
		mrechd = *pickup;
		pickup->signature = 0;
		pickup->type = 0;
		n = HAMMER_MREC_HEADSIZE;
	} else {
		/*
		 * Read in the PFSD header from the sender.
		 */
		for (n = 0; n < HAMMER_MREC_HEADSIZE; n += i) {
			i = read(fdin, (char *)&mrechd + n,
				 HAMMER_MREC_HEADSIZE - n);
			if (i <= 0)
				break;
		}
		if (n == 0) {
			*errorp = 0;	/* EOF */
			return(NULL);
		}
		if (n != HAMMER_MREC_HEADSIZE) {
			fprintf(stderr, "short read of mrecord header\n");
			*errorp = EPIPE;
			return(NULL);
		}
	}
	if (mrechd.signature != HAMMER_IOC_MIRROR_SIGNATURE) {
		fprintf(stderr, "read_mrecord: bad signature\n");
		*errorp = EINVAL;
		return(NULL);
	}
	bytes = HAMMER_HEAD_DOALIGN(mrechd.rec_size);
	assert(bytes >= sizeof(mrechd));
	mrec = malloc(bytes);
	mrec->head = mrechd;

	while (n < bytes) {
		i = read(fdin, (char *)mrec + n, bytes - n);
		if (i <= 0)
			break;
		n += i;
	}
	if (n != bytes) {
		fprintf(stderr, "read_mrecord: short read on payload\n");
		*errorp = EPIPE;
		free(mrec);		/* don't leak the partial record */
		return(NULL);
	}
	if (!hammer_crc_test_mrec_head(&mrec->head, mrec->head.rec_size)) {
		fprintf(stderr, "read_mrecord: bad CRC\n");
		*errorp = EINVAL;
		free(mrec);		/* don't leak the rejected record */
		return(NULL);
	}
	*errorp = 0;
	return(mrec);
}
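The read loops above compensate for read(2) returning short counts on a pipe. A minimal sketch of that pattern as a reusable helper; the caller detects EOF or error by comparing the return value against the requested size, just as read_mrecord() does:

#include <unistd.h>

/*
 * Read exactly `bytes` bytes unless EOF (read returns 0) or an error
 * (read returns -1) intervenes.  Returns the number of bytes actually
 * read; a short return signals EOF or error to the caller.
 */
static ssize_t
demo_read_full(int fd, void *vbuf, size_t bytes)
{
	char *buf = vbuf;
	size_t n = 0;
	ssize_t i;

	while (n < bytes) {
		i = read(fd, buf + n, bytes - n);
		if (i <= 0)
			break;		/* EOF or error */
		n += (size_t)i;
	}
	return (ssize_t)n;
}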
Example #7
/*
 * Read and return multiple mrecords
 */
static int
read_mrecords(int fd, char *buf, u_int size, hammer_ioc_mrecord_head_t pickup)
{
	hammer_ioc_mrecord_any_t mrec;
	u_int count;
	size_t n;
	size_t i;
	size_t bytes;
	int type;

	count = 0;
	while (size - count >= HAMMER_MREC_HEADSIZE) {
		/*
		 * Cache the record header in case we run out of buffer
		 * space.
		 */
		fflush(stdout);
		if (pickup->signature == 0) {
			for (n = 0; n < HAMMER_MREC_HEADSIZE; n += i) {
				i = read(fd, (char *)pickup + n,
					 HAMMER_MREC_HEADSIZE - n);
				if (i <= 0)
					break;
			}
			if (n == 0)
				break;
			if (n != HAMMER_MREC_HEADSIZE) {
				fprintf(stderr, "read_mrecords: short read on pipe\n");
				exit(1);
			}
			if (pickup->signature != HAMMER_IOC_MIRROR_SIGNATURE) {
				fprintf(stderr, "read_mrecords: malformed record on pipe, "
					"bad signature\n");
				exit(1);
			}
		}
		if (pickup->rec_size < HAMMER_MREC_HEADSIZE ||
		    pickup->rec_size > sizeof(*mrec) + HAMMER_XBUFSIZE) {
			fprintf(stderr, "read_mrecords: malformed record on pipe, "
				"illegal rec_size\n");
			exit(1);
		}

		/*
		 * Stop if we have insufficient space for the record and data.
		 */
		bytes = HAMMER_HEAD_DOALIGN(pickup->rec_size);
		if (size - count < bytes)
			break;

		/*
		 * Stop if the record type is not a REC, SKIP, or PASS,
		 * which are the only types the ioctl supports.  Other types
		 * are used only by the userland protocol.
		 *
		 * Ignore all flags.
		 */
		type = pickup->type & HAMMER_MRECF_TYPE_LOMASK;
		if (type != HAMMER_MREC_TYPE_PFSD &&
		    type != HAMMER_MREC_TYPE_REC &&
		    type != HAMMER_MREC_TYPE_SKIP &&
		    type != HAMMER_MREC_TYPE_PASS) {
			break;
		}

		/*
		 * Read the remainder and clear the pickup signature.
		 */
		for (n = HAMMER_MREC_HEADSIZE; n < bytes; n += i) {
			i = read(fd, buf + count + n, bytes - n);
			if (i <= 0)
				break;
		}
		if (n != bytes) {
			fprintf(stderr, "read_mrecords: short read on pipe\n");
			exit(1);
		}

		bcopy(pickup, buf + count, HAMMER_MREC_HEADSIZE);
		pickup->signature = 0;
		pickup->type = 0;
		mrec = (void *)(buf + count);

		/*
		 * Validate the completed record
		 */
		if (!hammer_crc_test_mrec_head(&mrec->head, mrec->head.rec_size)) {
			fprintf(stderr, "read_mrecords: malformed record "
					"on pipe, bad crc\n");
			exit(1);
		}

		/*
		 * If it's a B-Tree record, validate the data CRC.
		 *
		 * NOTE: If the VFS passes us an explicitly errored mrec
		 *	 we just pass it through.
		 */
		type = mrec->head.type & HAMMER_MRECF_TYPE_MASK;

		if (type == HAMMER_MREC_TYPE_REC) {
			if (mrec->head.rec_size <
			    sizeof(mrec->rec) + mrec->rec.leaf.data_len) {
				fprintf(stderr,
					"read_mrecords: malformed record on "
					"pipe, illegal element data_len\n");
				exit(1);
			}
			if (mrec->rec.leaf.data_len &&
			    mrec->rec.leaf.data_offset &&
			    hammer_crc_test_leaf(&mrec->rec + 1, &mrec->rec.leaf) == 0) {
				fprintf(stderr,
					"read_mrecords: data_crc did not "
					"match data! obj=%016jx key=%016jx\n",
					(uintmax_t)mrec->rec.leaf.base.obj_id,
					(uintmax_t)mrec->rec.leaf.base.key);
				fprintf(stderr,
					"continuing, but there are problems\n");
			}
		}
		count += bytes;
	}
	return(count);
}
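The data CRC check above passes &mrec->rec + 1 as the payload pointer: for a REC, the variable-length data is laid out immediately after the fixed-size rec structure. A tiny illustration of that layout convention, using simplified stand-in structures:

#include <stdint.h>

/* Simplified stand-ins for the fixed-size record structures. */
struct demo_btree_leaf {
	uint32_t words[16];	/* stand-in for the B-Tree leaf element */
};

struct demo_mrec_rec {
	uint32_t head[4];	/* stand-in for the mrecord header */
	struct demo_btree_leaf leaf;
};

/* The variable-length payload starts immediately past the fixed part. */
static const void *
demo_rec_data(const struct demo_mrec_rec *rec)
{
	return (const void *)(rec + 1);
}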
Example #8
/*
 * Generate UNDO record(s) for the block of data at the specified zone1
 * or zone2 offset.
 *
 * The recovery code will execute UNDOs in reverse order, allowing overlaps.
 * All the UNDOs are executed together so if we already laid one down we
 * do not have to lay another one down for the same range.
 *
 * For HAMMER version 4+ UNDOs, a 512-byte boundary is enforced and a PAD
 * will be laid down for any unused space.  UNDO FIFO media structures
 * will implement the hdr_seq field (it used to be reserved01), and
 * both flush and recovery mechanics will be very different.
 *
 * WARNING!  See also hammer_generate_redo() in hammer_redo.c
 */
int
hammer_generate_undo(hammer_transaction_t trans,
		     hammer_off_t zone_off, void *base, int len)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_undo_t undo;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	int error;
	int bytes;
	int n;

	hmp = trans->hmp;

	/*
	 * A SYNC record may be required before we can lay down a general
	 * UNDO.  This ensures that the nominal recovery span contains
	 * at least one SYNC record telling the recovery code how far
	 * out-of-span it must go to run the REDOs.
	 */
	if ((hmp->flags & HAMMER_MOUNT_REDO_SYNC) == 0 &&
	    hmp->version >= HAMMER_VOL_VERSION_FOUR) {
		hammer_generate_redo_sync(trans);
	}

	/*
	 * Enter the offset into our undo history.  If there is an existing
	 * undo we do not have to generate a new one.
	 */
	if (hammer_enter_undo_history(hmp, zone_off, len) == EALREADY)
		return(0);

	root_volume = trans->rootvol;
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	/* no undo recursion */
	hammer_modify_volume_noundo(NULL, root_volume);
	hammer_lock_ex(&hmp->undo_lock);

	/* undo had better not roll over (loose test) */
	if (hammer_undo_space(trans) < len + HAMMER_BUFSIZE*3)
		hpanic("insufficient UNDO/REDO FIFO space for undo!");

	/*
	 * Loop until the undo for the entire range has been laid down.
	 */
	while (len) {
		/*
		 * Fetch the layout offset in the UNDO FIFO, wrap it as
		 * necessary.
		 */
		if (undomap->next_offset == undomap->alloc_offset)
			undomap->next_offset = HAMMER_ENCODE_UNDO(0);
		next_offset = undomap->next_offset;

		/*
		 * This is a tail-chasing FIFO; when we hit the start of a
		 * new buffer we don't have to read it in.
		 */
		if ((next_offset & HAMMER_BUFMASK) == 0) {
			undo = hammer_bnew(hmp, next_offset, &error, &buffer);
			hammer_format_undo(undo, hmp->undo_seqno ^ 0x40000000);
		} else {
			undo = hammer_bread(hmp, next_offset, &error, &buffer);
		}
		if (error)
			break;
		/* no undo recursion */
		hammer_modify_buffer_noundo(NULL, buffer);

		/*
		 * Calculate how big a media structure fits up to the next
		 * alignment point and how large a data payload we can
		 * accommodate.
		 *
		 * If n calculates to 0 or negative there is no room for
		 * anything but a PAD.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)next_offset & HAMMER_UNDO_MASK);
		n = bytes -
		    (int)sizeof(struct hammer_fifo_undo) -
		    (int)sizeof(struct hammer_fifo_tail);

		/*
		 * If available space is insufficient for any payload
		 * we have to lay down a PAD.
		 *
		 * The minimum PAD is 8 bytes and the head and tail will
		 * overlap each other in that case.  PADs do not have
		 * sequence numbers or CRCs.
		 *
		 * A PAD may not start on a boundary.  That is, every
		 * 512-byte block in the UNDO/REDO FIFO must begin with
		 * a record containing a sequence number.
		 */
		if (n <= 0) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			KKASSERT(((int)next_offset & HAMMER_UNDO_MASK) != 0);
			tail = (void *)((char *)undo + bytes - sizeof(*tail));
			if ((void *)undo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			undo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
			undomap->next_offset += bytes;
			hammer_modify_buffer_done(buffer);
			hammer_stats_undo += bytes;
			continue;
		}

		/*
		 * Calculate the actual payload and recalculate the size
		 * of the media structure as necessary.
		 */
		if (n > len) {
			n = len;
			bytes = HAMMER_HEAD_DOALIGN(n) +
				(int)sizeof(struct hammer_fifo_undo) +
				(int)sizeof(struct hammer_fifo_tail);
		}
		if (hammer_debug_general & 0x0080) {
			hdkprintf("undo %016jx %d %d\n",
				(intmax_t)next_offset, bytes, n);
		}

		undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
		undo->head.hdr_type = HAMMER_HEAD_TYPE_UNDO;
		undo->head.hdr_size = bytes;
		undo->head.hdr_seq = hmp->undo_seqno++;
		undo->head.hdr_crc = 0;
		undo->undo_offset = zone_off;
		undo->undo_data_bytes = n;
		bcopy(base, undo + 1, n);

		tail = (void *)((char *)undo + bytes - sizeof(*tail));
		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_UNDO;
		tail->tail_size = bytes;

		KKASSERT(bytes >= sizeof(undo->head));
		hammer_crc_set_fifo_head(&undo->head, bytes);
		undomap->next_offset += bytes;
		hammer_stats_undo += bytes;

		/*
		 * Before we finish off the buffer we have to deal with any
		 * junk between the end of the media structure we just laid
		 * down and the UNDO alignment boundary.  We do this by laying
		 * down a dummy PAD.  Even though we will probably overwrite
		 * it almost immediately we have to do this so recovery runs
		 * can iterate the UNDO space without having to depend on
		 * the indices in the volume header.
		 *
		 * This dummy PAD will be overwritten on the next undo so
		 * we do not adjust undomap->next_offset.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)undomap->next_offset & HAMMER_UNDO_MASK);
		if (bytes != HAMMER_UNDO_ALIGN) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			undo = (void *)(tail + 1);
			tail = (void *)((char *)undo + bytes - sizeof(*tail));
			if ((void *)undo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			undo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
		}
		hammer_modify_buffer_done(buffer);

		/*
		 * Adjust for loop
		 */
		len -= n;
		base = (char *)base + n;
		zone_off += n;
	}
	hammer_modify_volume_done(root_volume);
	hammer_unlock(&hmp->undo_lock);

	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(error);
}
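The sizing logic inside the loop fits a head, payload, and tail into whatever remains of the current 512-byte UNDO block, falling back to a PAD when nothing else fits. A hedged sketch of just that arithmetic; the structure sizes and constants are illustrative stand-ins:

#include <stdio.h>

#define DEMO_UNDO_ALIGN		512	/* stand-in for HAMMER_UNDO_ALIGN */
#define DEMO_UNDO_MASK		(DEMO_UNDO_ALIGN - 1)
#define DEMO_HEAD_SIZE		32	/* stand-in for sizeof(struct hammer_fifo_undo) */
#define DEMO_TAIL_SIZE		8	/* stand-in for sizeof(struct hammer_fifo_tail) */

int
main(void)
{
	int off;

	for (off = 0; off < DEMO_UNDO_ALIGN; off += 120) {
		/* room left before the next 512-byte boundary */
		int bytes = DEMO_UNDO_ALIGN - (off & DEMO_UNDO_MASK);
		/* payload capacity after head and tail are accounted for */
		int n = bytes - DEMO_HEAD_SIZE - DEMO_TAIL_SIZE;

		if (n <= 0)
			printf("off=%3d bytes=%3d -> PAD only\n", off, bytes);
		else
			printf("off=%3d bytes=%3d -> payload up to %3d\n",
			       off, bytes, n);
	}
	return 0;
}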