Example #1
void
hammer_flusher_clean_loose_ios(hammer_mount_t hmp)
{
	hammer_buffer_t buffer;
	hammer_io_t io;

	/*
	 * loose ends - buffers without bp's aren't tracked by the kernel
	 * and can build up, so clean them out.  This can occur when an
	 * IO completes on a buffer with no references left.
	 *
	 * The io_token is needed to protect the list.
	 */
	if ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
		lwkt_gettoken(&hmp->io_token);
		while ((io = RB_ROOT(&hmp->lose_root)) != NULL) {
			KKASSERT(io->mod_root == &hmp->lose_root);
			RB_REMOVE(hammer_mod_rb_tree, io->mod_root, io);
			io->mod_root = NULL;
			hammer_ref(&io->lock);
			buffer = (void *)io;
			hammer_rel_buffer(buffer, 0);
		}
		lwkt_reltoken(&hmp->io_token);
	}
}
Example #2
static inline int
_dev_validate(hammer_dedup_cache_t dcp, void *data, int *errorp)
{
	hammer_buffer_t buffer = NULL;
	void *ondisk_data;
	int result, error;

	result = error = 0;
	*errorp = 0;

	ondisk_data = hammer_bread_ext(dcp->hmp, dcp->data_offset,
	    dcp->bytes, &error, &buffer);
	if (error) {
		*errorp = 1;
		goto failed;
	}

	if (bcmp(data, ondisk_data, dcp->bytes) == 0)
		result = 1;

failed:
	if (buffer)
		hammer_rel_buffer(buffer, 0);

	return (result);
}
Example #3
/*
 * Reblock a record's data.  Both the B-Tree element and record pointers
 * to the data must be adjusted.
 */
static int
hammer_reblock_data(struct hammer_ioc_reblock *reblock,
		    hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	hammer_buffer_t data_buffer = NULL;
	hammer_off_t odata_offset;
	hammer_off_t ndata_offset;
	int error;
	void *ndata;

	error = hammer_btree_extract_data(cursor);
	if (error)
		return (error);
	ndata = hammer_alloc_data(cursor->trans, elm->leaf.data_len,
				  elm->leaf.base.rec_type,
				  &ndata_offset, &data_buffer,
				  0, &error);
	if (error)
		goto done;
	hammer_io_notmeta(data_buffer);

	/*
	 * Move the data.  Note that we must invalidate any cached
	 * data buffer in the cursor before calling blockmap_free.
	 * The blockmap_free may free up the entire big-block and
	 * will not be able to invalidate it if the cursor is holding
	 * a data buffer cached in that big-block.
	 */
	hammer_modify_buffer_noundo(cursor->trans, data_buffer);
	bcopy(cursor->data, ndata, elm->leaf.data_len);
	hammer_modify_buffer_done(data_buffer);
	hammer_cursor_invalidate_cache(cursor);

	hammer_blockmap_free(cursor->trans,
			     elm->leaf.data_offset, elm->leaf.data_len);

	hammer_modify_node(cursor->trans, cursor->node,
			   &elm->leaf.data_offset, sizeof(hammer_off_t));
	odata_offset = elm->leaf.data_offset;
	elm->leaf.data_offset = ndata_offset;
	hammer_modify_node_done(cursor->node);

	if (hammer_debug_general & 0x4000) {
		hdkprintf("%08x %016jx -> %016jx\n",
			(elm ? elm->base.localization : -1),
			(intmax_t)odata_offset,
			(intmax_t)ndata_offset);
	}
done:
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
	return (error);
}
Example #4
/*
 * Reblock a record's data.  Both the B-Tree element and record pointers
 * to the data must be adjusted.
 */
static int
hammer_reblock_data(struct hammer_ioc_reblock *reblock,
		    hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	struct hammer_buffer *data_buffer = NULL;
	hammer_off_t ndata_offset;
	int error;
	void *ndata;

	error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA |
					     HAMMER_CURSOR_GET_LEAF);
	if (error)
		return (error);
	ndata = hammer_alloc_data(cursor->trans, elm->leaf.data_len,
				  elm->leaf.base.rec_type,
				  &ndata_offset, &data_buffer,
				  0, &error);
	if (error)
		goto done;
	hammer_io_notmeta(data_buffer);

	/*
	 * Move the data.  Note that we must invalidate any cached
	 * data buffer in the cursor before calling blockmap_free.
	 * The blockmap_free may free up the entire large-block and
	 * will not be able to invalidate it if the cursor is holding
	 * a data buffer cached in that large block.
	 */
	hammer_modify_buffer(cursor->trans, data_buffer, NULL, 0);
	bcopy(cursor->data, ndata, elm->leaf.data_len);
	hammer_modify_buffer_done(data_buffer);
	hammer_cursor_invalidate_cache(cursor);

	hammer_blockmap_free(cursor->trans,
			     elm->leaf.data_offset, elm->leaf.data_len);

	hammer_modify_node(cursor->trans, cursor->node,
			   &elm->leaf.data_offset, sizeof(hammer_off_t));
	elm->leaf.data_offset = ndata_offset;
	hammer_modify_node_done(cursor->node);

done:
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
	return (error);
}
Example #5
/*
 * Reblock a record's data.  Both the B-Tree element and record pointers
 * to the data must be adjusted.
 */
static int
hammer_reblock_data(struct hammer_ioc_reblock *reblock,
		    hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	struct hammer_buffer *data_buffer = NULL;
	hammer_off_t ndata_offset;
	int error;
	void *ndata;

	error = hammer_btree_extract(cursor, HAMMER_CURSOR_GET_DATA |
					     HAMMER_CURSOR_GET_LEAF);
	if (error)
		return (error);
	ndata = hammer_alloc_data(cursor->trans, elm->leaf.data_len,
				  elm->leaf.base.rec_type,
				  &ndata_offset, &data_buffer, &error);
	if (error)
		goto done;

	/*
	 * Move the data
	 */
	hammer_modify_buffer(cursor->trans, data_buffer, NULL, 0);
	bcopy(cursor->data, ndata, elm->leaf.data_len);
	hammer_modify_buffer_done(data_buffer);

	hammer_blockmap_free(cursor->trans,
			     elm->leaf.data_offset, elm->leaf.data_len);

	hammer_modify_node(cursor->trans, cursor->node,
			   &elm->leaf.data_offset, sizeof(hammer_off_t));
	elm->leaf.data_offset = ndata_offset;
	hammer_modify_node_done(cursor->node);

done:
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
	return (error);
}
Example #6
/*
 * Write out a new record.
 */
static
int
hammer_mirror_write(hammer_cursor_t cursor,
		    struct hammer_ioc_mrecord_rec *mrec,
		    char *udata)
{
	hammer_transaction_t trans;
	hammer_buffer_t data_buffer;
	hammer_off_t ndata_offset;
	hammer_tid_t high_tid;
	void *ndata;
	int error;
	int doprop;

	trans = cursor->trans;
	data_buffer = NULL;

	/*
	 * Get the sync lock so the whole mess is atomic
	 */
	hammer_sync_lock_sh(trans);

	/*
	 * Allocate and adjust data
	 */
	if (mrec->leaf.data_len && mrec->leaf.data_offset) {
		ndata = hammer_alloc_data(trans, mrec->leaf.data_len,
					  mrec->leaf.base.rec_type,
					  &ndata_offset, &data_buffer,
					  0, &error);
		if (ndata == NULL)
			return(error);
		mrec->leaf.data_offset = ndata_offset;
		hammer_modify_buffer(trans, data_buffer, NULL, 0);
		error = copyin(udata, ndata, mrec->leaf.data_len);
		if (error == 0) {
			if (hammer_crc_test_leaf(ndata, &mrec->leaf) == 0) {
				kprintf("data crc mismatch on pipe\n");
				error = EINVAL;
			} else {
				error = hammer_mirror_localize_data(
							ndata, &mrec->leaf);
			}
		}
		hammer_modify_buffer_done(data_buffer);
	} else {
		mrec->leaf.data_offset = 0;
		error = 0;
		ndata = NULL;
	}
	if (error)
		goto failed;

	/*
	 * Do the insertion.  This can fail with an EDEADLK or EALREADY.
	 */
	cursor->flags |= HAMMER_CURSOR_INSERT;
	error = hammer_btree_lookup(cursor);
	if (error != ENOENT) {
		if (error == 0)
			error = EALREADY;
		goto failed;
	}

	error = hammer_btree_insert(cursor, &mrec->leaf, &doprop);

	/*
	 * Cursor is left on the current element, we want to skip it now.
	 */
	cursor->flags |= HAMMER_CURSOR_ATEDISK;
	cursor->flags &= ~HAMMER_CURSOR_INSERT;

	/*
	 * Track a count of active inodes.
	 */
	if (error == 0 &&
	    mrec->leaf.base.rec_type == HAMMER_RECTYPE_INODE &&
	    mrec->leaf.base.delete_tid == 0) {
		hammer_modify_volume_field(trans,
					   trans->rootvol,
					   vol0_stat_inodes);
		++trans->hmp->rootvol->ondisk->vol0_stat_inodes;
		hammer_modify_volume_done(trans->rootvol);
	}

	/*
	 * vol0_next_tid must track the highest TID stored in the filesystem.
	 * We do not need to generate undo for this update.
	 */
	high_tid = mrec->leaf.base.create_tid;
	if (high_tid < mrec->leaf.base.delete_tid)
		high_tid = mrec->leaf.base.delete_tid;
	if (trans->rootvol->ondisk->vol0_next_tid < high_tid) {
		hammer_modify_volume(trans, trans->rootvol, NULL, 0);
		trans->rootvol->ondisk->vol0_next_tid = high_tid;
		hammer_modify_volume_done(trans->rootvol);
	}

	/*
	 * WARNING!  cursor's leaf pointer may have changed after
	 *	     do_propagation returns.
	 */
	if (error == 0 && doprop)
		hammer_btree_do_propagation(cursor, NULL, &mrec->leaf);

failed:
	/*
	 * Cleanup
	 */
	if (error && mrec->leaf.data_offset) {
		hammer_blockmap_free(cursor->trans,
				     mrec->leaf.data_offset,
				     mrec->leaf.data_len);
	}
	hammer_sync_unlock(trans);
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
	return(error);
}
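
The comment above notes that the B-Tree insertion can fail with EDEADLK or EALREADY.  One plausible caller-side policy is sketched below: retry after an EDEADLK once the cursor has been repaired, and treat EALREADY (the record is already present on the target) as non-fatal.  This policy and the use of hammer_recover_cursor() are assumptions for illustration, not taken from the surrounding source; only the two error values come from the comment above.

/*
 * Hypothetical wrapper around hammer_mirror_write().  The retry and skip
 * decisions here are illustrative assumptions.
 */
static int
mirror_write_retry(hammer_cursor_t cursor,
		   struct hammer_ioc_mrecord_rec *mrec, char *udata)
{
	int error;

	for (;;) {
		error = hammer_mirror_write(cursor, mrec, udata);
		if (error == EDEADLK) {
			/* deadlock: back the cursor out and try again */
			error = hammer_recover_cursor(cursor);
			if (error)
				break;
			continue;
		}
		if (error == EALREADY) {
			/* record already exists on the target: skip it */
			error = 0;
		}
		break;
	}
	return (error);
}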
Example #7
/*
 * HAMMER version 4+ REDO support.
 *
 * REDO records are used to improve fsync() performance.  Instead of having
 * to go through a complete double-flush cycle involving at least two disk
 * synchronizations the fsync need only flush UNDO/REDO FIFO buffers through
 * the related REDO records, which is a single synchronization requiring
 * no track seeking.  If a recovery becomes necessary the recovery code
 * will generate logical data writes based on the REDO records encountered.
 * That is, the recovery code will UNDO any partial meta-data/data writes
 * at the raw disk block level and then REDO the data writes at the logical
 * level.
 */
int
hammer_generate_redo(hammer_transaction_t trans, hammer_inode_t ip,
		     hammer_off_t file_off, u_int32_t flags,
		     void *base, int len)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_redo_t redo;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	int error;
	int bytes;
	int n;

	/*
	 * Setup
	 */
	hmp = trans->hmp;

	root_volume = trans->rootvol;
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	/*
	 * No undo recursion when modifying the root volume
	 */
	hammer_modify_volume(NULL, root_volume, NULL, 0);
	hammer_lock_ex(&hmp->undo_lock);

	/* undo had better not roll over (loose test) */
	if (hammer_undo_space(trans) < len + HAMMER_BUFSIZE*3)
		panic("hammer: insufficient undo FIFO space!");

	/*
	 * Loop until the undo for the entire range has been laid down.
	 * Loop at least once (len might be 0 as a degenerate case).
	 */
	for (;;) {
		/*
		 * Fetch the layout offset in the UNDO FIFO, wrap it as
		 * necessary.
		 */
		if (undomap->next_offset == undomap->alloc_offset) {
			undomap->next_offset =
				HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
		}
		next_offset = undomap->next_offset;

		/*
		 * This is a tail-chasing FIFO, when we hit the start of a new
		 * buffer we don't have to read it in.
		 */
		if ((next_offset & HAMMER_BUFMASK) == 0) {
			redo = hammer_bnew(hmp, next_offset, &error, &buffer);
			hammer_format_undo(redo, hmp->undo_seqno ^ 0x40000000);
		} else {
			redo = hammer_bread(hmp, next_offset, &error, &buffer);
		}
		if (error)
			break;
		hammer_modify_buffer(NULL, buffer, NULL, 0);

		/*
		 * Calculate how big a media structure fits up to the next
		 * alignment point and how large a data payload we can
		 * accommodate.
		 *
		 * If n calculates to 0 or negative there is no room for
		 * anything but a PAD.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)next_offset & HAMMER_UNDO_MASK);
		n = bytes -
		    (int)sizeof(struct hammer_fifo_redo) -
		    (int)sizeof(struct hammer_fifo_tail);

		/*
		 * If available space is insufficient for any payload
		 * we have to lay down a PAD.
		 *
		 * The minimum PAD is 8 bytes and the head and tail will
		 * overlap each other in that case.  PADs do not have
		 * sequence numbers or CRCs.
		 *
		 * A PAD may not start on a boundary.  That is, every
		 * 512-byte block in the UNDO/REDO FIFO must begin with
		 * a record containing a sequence number.
		 */
		if (n <= 0) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			KKASSERT(((int)next_offset & HAMMER_UNDO_MASK) != 0);
			tail = (void *)((char *)redo + bytes - sizeof(*tail));
			if ((void *)redo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			redo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			redo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			redo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
			undomap->next_offset += bytes;
			hammer_modify_buffer_done(buffer);
			hammer_stats_redo += bytes;
			continue;
		}

		/*
		 * When generating an inode-related REDO record we track
		 * the point in the UNDO/REDO FIFO containing the inode's
		 * earliest REDO record.  See hammer_generate_redo_sync().
		 *
		 * redo_fifo_next is cleared when an inode is staged to
		 * the backend and then used to determine how to reassign
		 * redo_fifo_start after the inode flush completes.
		 */
		if (ip) {
			redo->redo_objid = ip->obj_id;
			redo->redo_localization = ip->obj_localization;
			if ((ip->flags & HAMMER_INODE_RDIRTY) == 0) {
				ip->redo_fifo_start = next_offset;
				if (RB_INSERT(hammer_redo_rb_tree,
					      &hmp->rb_redo_root, ip)) {
					panic("hammer_generate_redo: "
					      "cannot insert inode %p on "
					      "redo FIFO", ip);
				}
				ip->flags |= HAMMER_INODE_RDIRTY;
			}
			if (ip->redo_fifo_next == 0)
				ip->redo_fifo_next = next_offset;
		} else {
			redo->redo_objid = 0;
			redo->redo_localization = 0;
		}

		/*
		 * Calculate the actual payload and recalculate the size
		 * of the media structure as necessary.  If no data buffer
		 * is supplied there is no payload.
		 */
		if (base == NULL) {
			n = 0;
		} else if (n > len) {
			n = len;
		}
		bytes = ((n + HAMMER_HEAD_ALIGN_MASK) &
			 ~HAMMER_HEAD_ALIGN_MASK) +
			(int)sizeof(struct hammer_fifo_redo) +
			(int)sizeof(struct hammer_fifo_tail);
		if (hammer_debug_general & 0x0080) {
			kprintf("redo %016llx %d %d\n",
				(long long)next_offset, bytes, n);
		}

		redo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
		redo->head.hdr_type = HAMMER_HEAD_TYPE_REDO;
		redo->head.hdr_size = bytes;
		redo->head.hdr_seq = hmp->undo_seqno++;
		redo->head.hdr_crc = 0;
		redo->redo_mtime = trans->time;
		redo->redo_offset = file_off;
		redo->redo_flags = flags;

		/*
		 * Incremental payload.  If no payload we throw the entire
		 * len into redo_data_bytes and will not loop.
		 */
		if (base) {
			redo->redo_data_bytes = n;
			bcopy(base, redo + 1, n);
			len -= n;
			base = (char *)base + n;
			file_off += n;
		} else {
			redo->redo_data_bytes = len;
			file_off += len;
			len = 0;
		}

		tail = (void *)((char *)redo + bytes - sizeof(*tail));
		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_REDO;
		tail->tail_size = bytes;

		KKASSERT(bytes >= sizeof(redo->head));
		redo->head.hdr_crc = crc32(redo, HAMMER_FIFO_HEAD_CRCOFF) ^
			     crc32(&redo->head + 1, bytes - sizeof(redo->head));
		undomap->next_offset += bytes;
		hammer_stats_redo += bytes;

		/*
		 * Before we finish off the buffer we have to deal with any
		 * junk between the end of the media structure we just laid
		 * down and the UNDO alignment boundary.  We do this by laying
		 * down a dummy PAD.  Even though we will probably overwrite
		 * it almost immediately we have to do this so recovery runs
		 * can iterate the UNDO space without having to depend on
		 * the indices in the volume header.
		 *
		 * This dummy PAD will be overwritten on the next undo so
		 * we do not adjust undomap->next_offset.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)undomap->next_offset & HAMMER_UNDO_MASK);
		if (bytes != HAMMER_UNDO_ALIGN) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			redo = (void *)(tail + 1);
			tail = (void *)((char *)redo + bytes - sizeof(*tail));
			if ((void *)redo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			redo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			redo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			redo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
		}
		hammer_modify_buffer_done(buffer);
		if (len == 0)
			break;
	}
	hammer_modify_volume_done(root_volume);
	hammer_unlock(&hmp->undo_lock);

	if (buffer)
		hammer_rel_buffer(buffer, 0);

	/*
	 * Make sure the nominal undo span contains at least one REDO_SYNC,
	 * otherwise the REDO recovery will not be triggered.
	 */
	if ((hmp->flags & HAMMER_MOUNT_REDO_SYNC) == 0 &&
	    flags != HAMMER_REDO_SYNC) {
		hammer_generate_redo_sync(trans);
	}

	return(error);
}
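
The sizing logic in the loop above is easier to check in isolation.  The standalone sketch below mirrors it: compute the space left before the next FIFO alignment boundary, fall back to a PAD if even an empty record will not fit, otherwise clip the payload and round the record up to the head alignment plus head and tail overhead.  The constant values and the head/tail sizes are illustrative assumptions (the comments above only pin down the 512-byte FIFO alignment); they are not copied from the HAMMER headers.

#include <stdio.h>

#define FIFO_ALIGN	512	/* stands in for HAMMER_UNDO_ALIGN (assumed) */
#define FIFO_MASK	(FIFO_ALIGN - 1)
#define HEAD_ALIGN_MASK	31	/* stands in for HAMMER_HEAD_ALIGN_MASK (assumed) */

/*
 * Given the current FIFO offset, the remaining payload length and the sizes
 * of the media head and tail structures, decide whether a PAD must be laid
 * down and how many payload bytes (n) and total record bytes fit before the
 * next alignment boundary.  Returns 1 for a PAD, 0 for a regular record.
 */
static int
fifo_record_size(unsigned int offset, int len, int head_size, int tail_size,
		 int *bytesp, int *np)
{
	int bytes = FIFO_ALIGN - (int)(offset & FIFO_MASK);
	int n = bytes - head_size - tail_size;

	if (n <= 0) {
		/* no room for any payload: the remainder becomes a PAD */
		*bytesp = bytes;
		*np = 0;
		return (1);
	}
	if (n > len)
		n = len;
	*bytesp = ((n + HEAD_ALIGN_MASK) & ~HEAD_ALIGN_MASK) +
		  head_size + tail_size;
	*np = n;
	return (0);
}

int
main(void)
{
	int bytes, n, pad;

	/* plenty of room: 100-byte payload at the start of a 512-byte block */
	pad = fifo_record_size(0, 100, 56, 8, &bytes, &n);
	printf("pad=%d bytes=%d n=%d\n", pad, bytes, n);

	/* 32 bytes left before the boundary: only a PAD fits */
	pad = fifo_record_size(480, 100, 56, 8, &bytes, &n);
	printf("pad=%d bytes=%d n=%d\n", pad, bytes, n);
	return (0);
}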
Example #8
/*
 * Generate UNDO record(s) for the block of data at the specified zone1
 * or zone2 offset.
 *
 * The recovery code will execute UNDOs in reverse order, allowing overlaps.
 * All the UNDOs are executed together so if we already laid one down we
 * do not have to lay another one down for the same range.
 *
 * For HAMMER version 4+ UNDO a 512 byte boundary is enforced and a PAD
 * will be laid down for any unused space.  UNDO FIFO media structures
 * will implement the hdr_seq field (it used to be reserved01), and
 * both flush and recovery mechanics will be very different.
 *
 * WARNING!  See also hammer_generate_redo() in hammer_redo.c
 */
int
hammer_generate_undo(hammer_transaction_t trans,
		     hammer_off_t zone_off, void *base, int len)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_undo_t undo;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	int error;
	int bytes;
	int n;

	hmp = trans->hmp;

	/*
	 * A SYNC record may be required before we can lay down a general
	 * UNDO.  This ensures that the nominal recovery span contains
	 * at least one SYNC record telling the recovery code how far
	 * out-of-span it must go to run the REDOs.
	 */
	if ((hmp->flags & HAMMER_MOUNT_REDO_SYNC) == 0 &&
	    hmp->version >= HAMMER_VOL_VERSION_FOUR) {
		hammer_generate_redo_sync(trans);
	}

	/*
	 * Enter the offset into our undo history.  If there is an existing
	 * undo we do not have to generate a new one.
	 */
	if (hammer_enter_undo_history(hmp, zone_off, len) == EALREADY)
		return(0);

	root_volume = trans->rootvol;
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	/* no undo recursion */
	hammer_modify_volume_noundo(NULL, root_volume);
	hammer_lock_ex(&hmp->undo_lock);

	/* undo had better not roll over (loose test) */
	if (hammer_undo_space(trans) < len + HAMMER_BUFSIZE*3)
		panic("hammer: insufficient undo FIFO space!");

	/*
	 * Loop until the undo for the entire range has been laid down.
	 */
	while (len) {
		/*
		 * Fetch the layout offset in the UNDO FIFO, wrap it as
		 * necessary.
		 */
		if (undomap->next_offset == undomap->alloc_offset) {
			undomap->next_offset =
				HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
		}
		next_offset = undomap->next_offset;

		/*
		 * This is a tail-chasing FIFO, when we hit the start of a new
		 * buffer we don't have to read it in.
		 */
		if ((next_offset & HAMMER_BUFMASK) == 0) {
			undo = hammer_bnew(hmp, next_offset, &error, &buffer);
			hammer_format_undo(undo, hmp->undo_seqno ^ 0x40000000);
		} else {
			undo = hammer_bread(hmp, next_offset, &error, &buffer);
		}
		if (error)
			break;
		/* no undo recursion */
		hammer_modify_buffer_noundo(NULL, buffer);

		/*
		 * Calculate how big a media structure fits up to the next
		 * alignment point and how large a data payload we can
		 * accommodate.
		 *
		 * If n calculates to 0 or negative there is no room for
		 * anything but a PAD.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)next_offset & HAMMER_UNDO_MASK);
		n = bytes -
		    (int)sizeof(struct hammer_fifo_undo) -
		    (int)sizeof(struct hammer_fifo_tail);

		/*
		 * If available space is insufficient for any payload
		 * we have to lay down a PAD.
		 *
		 * The minimum PAD is 8 bytes and the head and tail will
		 * overlap each other in that case.  PADs do not have
		 * sequence numbers or CRCs.
		 *
		 * A PAD may not start on a boundary.  That is, every
		 * 512-byte block in the UNDO/REDO FIFO must begin with
		 * a record containing a sequence number.
		 */
		if (n <= 0) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			KKASSERT(((int)next_offset & HAMMER_UNDO_MASK) != 0);
			tail = (void *)((char *)undo + bytes - sizeof(*tail));
			if ((void *)undo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			undo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
			undomap->next_offset += bytes;
			hammer_modify_buffer_done(buffer);
			hammer_stats_undo += bytes;
			continue;
		}

		/*
		 * Calculate the actual payload and recalculate the size
		 * of the media structure as necessary.
		 */
		if (n > len) {
			n = len;
			bytes = ((n + HAMMER_HEAD_ALIGN_MASK) &
				 ~HAMMER_HEAD_ALIGN_MASK) +
				(int)sizeof(struct hammer_fifo_undo) +
				(int)sizeof(struct hammer_fifo_tail);
		}
		if (hammer_debug_general & 0x0080) {
			kprintf("undo %016llx %d %d\n",
				(long long)next_offset, bytes, n);
		}

		undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
		undo->head.hdr_type = HAMMER_HEAD_TYPE_UNDO;
		undo->head.hdr_size = bytes;
		undo->head.hdr_seq = hmp->undo_seqno++;
		undo->head.hdr_crc = 0;
		undo->undo_offset = zone_off;
		undo->undo_data_bytes = n;
		bcopy(base, undo + 1, n);

		tail = (void *)((char *)undo + bytes - sizeof(*tail));
		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_UNDO;
		tail->tail_size = bytes;

		KKASSERT(bytes >= sizeof(undo->head));
		undo->head.hdr_crc = crc32(undo, HAMMER_FIFO_HEAD_CRCOFF) ^
			     crc32(&undo->head + 1, bytes - sizeof(undo->head));
		undomap->next_offset += bytes;
		hammer_stats_undo += bytes;

		/*
		 * Before we finish off the buffer we have to deal with any
		 * junk between the end of the media structure we just laid
		 * down and the UNDO alignment boundary.  We do this by laying
		 * down a dummy PAD.  Even though we will probably overwrite
		 * it almost immediately we have to do this so recovery runs
		 * can iterate the UNDO space without having to depend on
		 * the indices in the volume header.
		 *
		 * This dummy PAD will be overwritten on the next undo so
		 * we do not adjust undomap->next_offset.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)undomap->next_offset & HAMMER_UNDO_MASK);
		if (bytes != HAMMER_UNDO_ALIGN) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			undo = (void *)(tail + 1);
			tail = (void *)((char *)undo + bytes - sizeof(*tail));
			if ((void *)undo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			undo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
		}
		hammer_modify_buffer_done(buffer);

		/*
		 * Adjust for loop
		 */
		len -= n;
		base = (char *)base + n;
		zone_off += n;
	}
	hammer_modify_volume_done(root_volume);
	hammer_unlock(&hmp->undo_lock);

	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(error);
}
Example #9
/*
 * HAMMER version 4+ conversion support.
 *
 * Convert a HAMMER version < 4 UNDO FIFO area to a 4+ UNDO FIFO area.
 * The 4+ UNDO FIFO area is backwards compatible.  The conversion is
 * needed to initialize the sequence space and place headers on the
 * new 512-byte undo boundary.
 */
int
hammer_upgrade_undo_4(hammer_transaction_t trans)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_head_t head;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	u_int32_t seqno;
	int error;
	int bytes;

	hmp = trans->hmp;

	root_volume = trans->rootvol;

	/* no undo recursion */
	hammer_lock_ex(&hmp->undo_lock);
	hammer_modify_volume_noundo(NULL, root_volume);

	/*
	 * Adjust the in-core undomap and the on-disk undomap.
	 */
	next_offset = HAMMER_ZONE_ENCODE(HAMMER_ZONE_UNDO_INDEX, 0);
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];
	undomap->next_offset = next_offset;
	undomap->first_offset = next_offset;

	undomap = &root_volume->ondisk->vol0_blockmap[HAMMER_ZONE_UNDO_INDEX];
	undomap->next_offset = next_offset;
	undomap->first_offset = next_offset;

	/*
	 * Loop over the entire UNDO space creating DUMMY entries.  Sequence
	 * numbers are assigned.
	 */
	seqno = 0;
	bytes = HAMMER_UNDO_ALIGN;

	while (next_offset != undomap->alloc_offset) {
		head = hammer_bnew(hmp, next_offset, &error, &buffer);
		if (error)
			break;
		hammer_modify_buffer_noundo(NULL, buffer);
		tail = (void *)((char *)head + bytes - sizeof(*tail));

		head->hdr_signature = HAMMER_HEAD_SIGNATURE;
		head->hdr_type = HAMMER_HEAD_TYPE_DUMMY;
		head->hdr_size = bytes;
		head->hdr_seq = seqno;
		head->hdr_crc = 0;

		tail = (void *)((char *)head + bytes - sizeof(*tail));
		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_DUMMY;
		tail->tail_size = bytes;

		head->hdr_crc = crc32(head, HAMMER_FIFO_HEAD_CRCOFF) ^
			     crc32(head + 1, bytes - sizeof(*head));
		hammer_modify_buffer_done(buffer);

		hammer_stats_undo += bytes;
		next_offset += HAMMER_UNDO_ALIGN;
		++seqno;
	}

	/*
	 * The sequence number will be the next sequence number to lay down.
	 */
	hmp->undo_seqno = seqno;
	kprintf("version upgrade seqno start %08x\n", seqno);

	hammer_modify_volume_done(root_volume);
	hammer_unlock(&hmp->undo_lock);

	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return (error);
}
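
Examples #7 through #9 all seal their FIFO records with the same split CRC: a crc32 over the head fields up to the crc slot (HAMMER_FIFO_HEAD_CRCOFF) XORed with a crc32 over everything following the head, so the crc field itself is covered by neither range.  The standalone sketch below reproduces that pattern with a made-up record layout and zlib's crc32(); the struct, its field widths and the signature value are assumptions for illustration, only the two-range XOR scheme is taken from the code above.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>
#include <zlib.h>	/* link with -lz; crc32() stands in for the kernel's */

/* hypothetical record head, loosely modeled on the FIFO heads above */
struct demo_head {
	uint16_t hdr_signature;
	uint16_t hdr_type;
	uint32_t hdr_size;
	uint32_t hdr_seq;
	uint32_t hdr_crc;	/* the first CRC range stops right here */
};

/*
 * CRC of the head fields that precede hdr_crc, XORed with the CRC of the
 * bytes that follow the head.  hdr_crc itself falls between the two ranges
 * and is never included, so it does not need to be zeroed beforehand.
 */
static uint32_t
demo_record_crc(const struct demo_head *head, int bytes)
{
	uint32_t crc_head, crc_body;

	crc_head = crc32(0, (const Bytef *)head,
			 offsetof(struct demo_head, hdr_crc));
	crc_body = crc32(0, (const Bytef *)(head + 1),
			 bytes - (int)sizeof(*head));
	return (crc_head ^ crc_body);
}

int
main(void)
{
	union {
		struct demo_head head;
		unsigned char raw[64];
	} record;
	struct demo_head *head = &record.head;

	memset(record.raw, 0, sizeof(record.raw));
	head->hdr_signature = 0xabcd;	/* arbitrary demo value */
	head->hdr_type = 1;
	head->hdr_size = sizeof(record.raw);
	head->hdr_seq = 42;
	memcpy(head + 1, "payload", 7);

	head->hdr_crc = demo_record_crc(head, sizeof(record.raw));
	printf("hdr_crc = %08x\n", (unsigned int)head->hdr_crc);
	return (0);
}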
Example #10
/*
 * Iterate over all usable L1 entries of the volume and
 * the corresponding L2 entries.
 */
static int
hammer_iterate_l1l2_entries(hammer_transaction_t trans, hammer_volume_t volume,
	int (*callback)(hammer_transaction_t, hammer_volume_t, hammer_buffer_t*,
		struct hammer_blockmap_layer1*, struct hammer_blockmap_layer2*,
		hammer_off_t, hammer_off_t, void*),
	void *data)
{
	struct hammer_mount *hmp = trans->hmp;
	hammer_blockmap_t freemap = &hmp->blockmap[HAMMER_ZONE_FREEMAP_INDEX];
	hammer_buffer_t buffer = NULL;
	int error = 0;

	hammer_off_t phys_off;
	hammer_off_t block_off;
	hammer_off_t layer1_off;
	hammer_off_t layer2_off;
	hammer_off_t aligned_buf_end_off;
	struct hammer_blockmap_layer1 *layer1;
	struct hammer_blockmap_layer2 *layer2;

	/*
	 * Calculate the usable size of the volume, which
	 * must be aligned at a bigblock (8 MB) boundary.
	 */
	aligned_buf_end_off = (HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no,
		(volume->ondisk->vol_buf_end - volume->ondisk->vol_buf_beg)
		& ~HAMMER_LARGEBLOCK_MASK64));

	/*
	 * Iterate the volume's address space in chunks of 4 TB, where each
	 * chunk consists of at least one physically available 8 MB bigblock.
	 *
	 * For each chunk we need one L1 entry and one L2 bigblock.
	 * We use the first bigblock of each chunk as L2 block.
	 */
	for (phys_off = HAMMER_ENCODE_RAW_BUFFER(volume->ondisk->vol_no, 0);
	     phys_off < aligned_buf_end_off;
	     phys_off += HAMMER_BLOCKMAP_LAYER2) {
		for (block_off = 0;
		     block_off < HAMMER_BLOCKMAP_LAYER2;
		     block_off += HAMMER_LARGEBLOCK_SIZE) {
			layer2_off = phys_off +
				HAMMER_BLOCKMAP_LAYER2_OFFSET(block_off);
			layer2 = hammer_bread(hmp, layer2_off, &error, &buffer);
			if (error)
				goto end;

			error = callback(trans, volume, &buffer, NULL,
					 layer2, phys_off, block_off, data);
			if (error)
				goto end;
		}

		layer1_off = freemap->phys_offset +
				HAMMER_BLOCKMAP_LAYER1_OFFSET(phys_off);
		layer1 = hammer_bread(hmp, layer1_off, &error, &buffer);
		if (error)
			goto end;

		error = callback(trans, volume, &buffer, layer1, NULL,
				 phys_off, 0, data);
		if (error)
			goto end;
	}

end:
	if (buffer) {
		hammer_rel_buffer(buffer, 0);
		buffer = NULL;
	}

	return error;
}
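
A caller supplies a callback matching the function-pointer signature above; the iterator invokes it once per layer2 entry (with layer1 == NULL) and once per layer1 entry (with layer2 == NULL), and passes &buffer so the callback can re-read through the same buffer cache slot.  The fragment below is a hypothetical callback that merely tallies both kinds of invocation; it assumes the HAMMER in-kernel types from the surrounding source, and the counter structure and its delivery through the data argument are illustrative assumptions; only the signature and the NULL conventions come from the iterator above.

struct l1l2_counts {
	int layer1_entries;
	int layer2_entries;
};

/*
 * Hypothetical callback for hammer_iterate_l1l2_entries().  Exactly one of
 * layer1/layer2 is non-NULL per invocation, matching the two callback call
 * sites in the iterator above.  Returning non-zero aborts the iteration.
 */
static int
count_l1l2_cb(hammer_transaction_t trans, hammer_volume_t volume,
	      hammer_buffer_t *bufferp,
	      struct hammer_blockmap_layer1 *layer1,
	      struct hammer_blockmap_layer2 *layer2,
	      hammer_off_t phys_off, hammer_off_t block_off, void *data)
{
	struct l1l2_counts *counts = data;

	if (layer1)
		++counts->layer1_entries;
	if (layer2)
		++counts->layer2_entries;
	return (0);
}

/*
 * Possible use (hypothetical):
 *
 *	struct l1l2_counts counts = { 0, 0 };
 *	error = hammer_iterate_l1l2_entries(trans, volume,
 *					    count_l1l2_cb, &counts);
 */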