Example #1
/*
 * As part of the mirror write we iterate across swaths of records
 * on the target which no longer exist on the source, and mark them
 * deleted.
 *
 * The caller has indexed the cursor and set up key_end.  We iterate
 * through to key_end.
 *
 * There is an edge case where the master has deleted a record whose
 * create_tid exactly matches our end_tid.  We cannot delete this
 * record on the slave yet because we cannot assign delete_tid == create_tid.
 * The deletion should be picked up on the next sequence, since in order
 * to have been deleted on the master a transaction must have occurred with
 * a TID greater than the create_tid of the record.
 *
 * To support incremental re-mirroring, and just for robustness, we do not
 * touch any records created at or beyond mirror->tid_end.
 */
static
int
hammer_mirror_delete_to(hammer_cursor_t cursor,
		       struct hammer_ioc_mirror_rw *mirror)
{
	hammer_btree_leaf_elm_t elm;
	int error;

	error = hammer_btree_iterate(cursor);
	while (error == 0) {
		elm = &cursor->node->ondisk->elms[cursor->index].leaf;
		KKASSERT(elm->base.btype == HAMMER_BTREE_TYPE_RECORD);
		cursor->flags |= HAMMER_CURSOR_ATEDISK;

		/*
		 * Certain records are not part of the mirroring operation
		 */
		if (hammer_mirror_nomirror(&elm->base)) {
			error = hammer_btree_iterate(cursor);
			continue;
		}

		/*
		 * Note: Records with create_tid < tid_beg must still be
		 *	 deleted, as the record may have been pruned away
		 *	 on the source.
		 */
		if (elm->base.delete_tid == 0 &&
		    elm->base.create_tid < mirror->tid_end) {
			error = hammer_delete_at_cursor(cursor,
							HAMMER_DELETE_ADJUST,
							mirror->tid_end,
							time_second,
							1, NULL);
		}
		if (error == 0)
			error = hammer_btree_iterate(cursor);
	}
	if (error == ENOENT)
		error = 0;
	return(error);
}
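
The selection rule above can be illustrated with a small stand-alone sketch.  The types and values below are simplified stand-ins, not the real HAMMER structures; the sketch only shows which records the loop would mark deleted: live records (delete_tid == 0) created strictly before tid_end, with a record whose create_tid equals tid_end deferred to the next pass.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t tid_t;

struct record {
	tid_t create_tid;
	tid_t delete_tid;	/* 0 means the record is still live */
};

static int
should_delete_on_target(const struct record *rec, tid_t tid_end)
{
	/*
	 * create_tid == tid_end is skipped: delete_tid == create_tid cannot
	 * be assigned, so that record is handled by the next mirroring pass.
	 */
	return (rec->delete_tid == 0 && rec->create_tid < tid_end);
}

int
main(void)
{
	struct record recs[] = {
		{ 100, 0 },	/* live, created before tid_end: mark deleted */
		{ 200, 0 },	/* create_tid == tid_end: deferred */
		{ 150, 180 },	/* already deleted: left alone */
	};
	tid_t tid_end = 200;
	size_t i;

	for (i = 0; i < sizeof(recs) / sizeof(recs[0]); ++i) {
		printf("record %zu: %s\n", i,
		       should_delete_on_target(&recs[i], tid_end) ?
		       "mark deleted" : "skip");
	}
	return (0);
}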
Example #2
/*
 * All B-Tree records within the specified key range which also conform
 * to the transaction id range are returned.  Mirroring code keeps track
 * of the last transaction id fully scanned and can efficiently pick up
 * where it left off if interrupted.
 *
 * The PFS is identified in the mirror structure.  The passed ip is just
 * some directory in the overall HAMMER filesystem and has nothing to
 * do with the PFS.
 */
int
hammer_ioc_mirror_read(hammer_transaction_t trans, hammer_inode_t ip,
		       struct hammer_ioc_mirror_rw *mirror)
{
	struct hammer_cmirror cmirror;
	struct hammer_cursor cursor;
	union hammer_ioc_mrecord_any mrec;
	hammer_btree_leaf_elm_t elm;
	const int crc_start = HAMMER_MREC_CRCOFF;
	char *uptr;
	int error;
	int data_len;
	int bytes;
	int eatdisk;
	int mrec_flags;
	u_int32_t localization;
	u_int32_t rec_crc;

	localization = (u_int32_t)mirror->pfs_id << 16;

	if ((mirror->key_beg.localization | mirror->key_end.localization) &
	    HAMMER_LOCALIZE_PSEUDOFS_MASK) {
		return(EINVAL);
	}
	if (hammer_btree_cmp(&mirror->key_beg, &mirror->key_end) > 0)
		return(EINVAL);

	mirror->key_cur = mirror->key_beg;
	mirror->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	mirror->key_cur.localization += localization;
	bzero(&mrec, sizeof(mrec));
	bzero(&cmirror, sizeof(cmirror));

	/*
	 * Make CRC errors non-fatal (at least on data), causing an EDOM
	 * error instead of EIO.
	 */
	trans->flags |= HAMMER_TRANSF_CRCDOM;

retry:
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		goto failed;
	}
	cursor.key_beg = mirror->key_cur;
	cursor.key_end = mirror->key_end;
	cursor.key_end.localization &= HAMMER_LOCALIZE_MASK;
	cursor.key_end.localization += localization;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	cursor.flags |= HAMMER_CURSOR_BACKEND;

	/*
	 * This flag filters the search to only return elements whose create
	 * or delete TID is >= mirror_tid.  The B-Tree uses the mirror_tid
	 * field stored with internal and leaf nodes to shortcut the scan.
	 */
	cursor.flags |= HAMMER_CURSOR_MIRROR_FILTERED;
	cursor.cmirror = &cmirror;
	cmirror.mirror_tid = mirror->tid_beg;

	error = hammer_btree_first(&cursor);
	while (error == 0) {
		/*
		 * Yield to more important tasks
		 */
		if (error == 0) {
			error = hammer_signal_check(trans->hmp);
			if (error)
				break;
		}

		/*
		 * An internal node can be returned in mirror-filtered
		 * mode and indicates that the scan is returning a skip
		 * range in the cursor->cmirror structure.
		 */
		uptr = (char *)mirror->ubuf + mirror->count;
		if (cursor.node->ondisk->type == HAMMER_BTREE_TYPE_INTERNAL) {
			/*
			 * Check space
			 */
			mirror->key_cur = cmirror.skip_beg;
			bytes = sizeof(mrec.skip);
			if (mirror->count + HAMMER_HEAD_DOALIGN(bytes) >
			    mirror->size) {
				break;
			}

			/*
			 * Fill mrec
			 */
			mrec.head.signature = HAMMER_IOC_MIRROR_SIGNATURE;
			mrec.head.type = HAMMER_MREC_TYPE_SKIP;
			mrec.head.rec_size = bytes;
			mrec.skip.skip_beg = cmirror.skip_beg;
			mrec.skip.skip_end = cmirror.skip_end;
			mrec.head.rec_crc = crc32(&mrec.head.rec_size,
						 bytes - crc_start);
			error = copyout(&mrec, uptr, bytes);
			eatdisk = 0;
			goto didwrite;
		}

		/*
		 * Leaf node.  In full-history mode we could filter out
		 * elements modified outside the user-requested TID range.
		 *
		 * However, such elements must be returned so the writer
		 * can compare them against the target to determine what
		 * needs to be deleted on the target, particularly for
		 * no-history mirrors.
		 */
		KKASSERT(cursor.node->ondisk->type == HAMMER_BTREE_TYPE_LEAF);
		elm = &cursor.node->ondisk->elms[cursor.index].leaf;
		mirror->key_cur = elm->base;

		/*
		 * If the record was created after our end point we just
		 * ignore it.
		 */
		if (elm->base.create_tid > mirror->tid_end) {
			error = 0;
			bytes = 0;
			eatdisk = 1;
			goto didwrite;
		}

		/*
		 * Determine if we should generate a PASS or a REC.  PASS
		 * records are records without any data payload.  Such
		 * records will be generated if the target is already expected
		 * to have the record, allowing it to delete the gaps.
		 *
		 * A PASS record is also used to perform deletions on the
		 * target.
		 *
		 * Such deletions are needed if the master or files on the
		 * master are no-history, or if the slave is so far behind
		 * that the master has already been pruned.
		 */
		if (elm->base.create_tid < mirror->tid_beg) {
			bytes = sizeof(mrec.rec);
			if (mirror->count + HAMMER_HEAD_DOALIGN(bytes) >
			    mirror->size) {
				break;
			}

			/*
			 * Fill mrec.
			 */
			mrec.head.signature = HAMMER_IOC_MIRROR_SIGNATURE;
			mrec.head.type = HAMMER_MREC_TYPE_PASS;
			mrec.head.rec_size = bytes;
			mrec.rec.leaf = *elm;
			mrec.head.rec_crc = crc32(&mrec.head.rec_size,
						 bytes - crc_start);
			error = copyout(&mrec, uptr, bytes);
			eatdisk = 1;
			goto didwrite;
		}

		/*
		 * The core code exports the data to userland.
		 *
		 * CRC errors on data are reported but the data is still
		 * passed through; the user program must wash it.
		 *
		 * If userland just wants the btree records it can
		 * request that bulk data not be returned.  This is
		 * used during mirror-stream histogram generation.
		 */
		mrec_flags = 0;
		data_len = (elm->data_offset) ? elm->data_len : 0;
		if (data_len &&
		    (mirror->head.flags & HAMMER_IOC_MIRROR_NODATA)) {
			data_len = 0;
			mrec_flags |= HAMMER_MRECF_NODATA;
		}
		if (data_len) {
			error = hammer_btree_extract(&cursor,
						     HAMMER_CURSOR_GET_DATA);
			if (error) {
				if (error != EDOM)
					break;
				mrec_flags |= HAMMER_MRECF_CRC_ERROR |
					      HAMMER_MRECF_DATA_CRC_BAD;
			}
		}

		bytes = sizeof(mrec.rec) + data_len;
		if (mirror->count + HAMMER_HEAD_DOALIGN(bytes) > mirror->size)
			break;

		/*
		 * Construct the record for userland and copyout.
		 *
		 * The user is asking for a snapshot; if the record was
		 * deleted beyond the user-requested ending tid, the record
		 * is not considered deleted from the point of view of
		 * userland and delete_tid is cleared.
		 */
		mrec.head.signature = HAMMER_IOC_MIRROR_SIGNATURE;
		mrec.head.type = HAMMER_MREC_TYPE_REC | mrec_flags;
		mrec.head.rec_size = bytes;
		mrec.rec.leaf = *elm;

		if (elm->base.delete_tid > mirror->tid_end)
			mrec.rec.leaf.base.delete_tid = 0;
		rec_crc = crc32(&mrec.head.rec_size,
				sizeof(mrec.rec) - crc_start);
		if (data_len)
			rec_crc = crc32_ext(cursor.data, data_len, rec_crc);
		mrec.head.rec_crc = rec_crc;
		error = copyout(&mrec, uptr, sizeof(mrec.rec));
		if (data_len && error == 0) {
			error = copyout(cursor.data, uptr + sizeof(mrec.rec),
					data_len);
		}
		eatdisk = 1;

		/*
		 * eatdisk controls whether we skip the current cursor
		 * position on the next scan or not.  If doing a SKIP
		 * the cursor is already positioned properly for the next
		 * scan and eatdisk will be 0.
		 */
didwrite:
		if (error == 0) {
			mirror->count += HAMMER_HEAD_DOALIGN(bytes);
			if (eatdisk)
				cursor.flags |= HAMMER_CURSOR_ATEDISK;
			else
				cursor.flags &= ~HAMMER_CURSOR_ATEDISK;
			error = hammer_btree_iterate(&cursor);
		}
	}
	if (error == ENOENT) {
		mirror->key_cur = mirror->key_end;
		error = 0;
	}
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	if (error == EINTR) {
		mirror->head.flags |= HAMMER_IOC_HEAD_INTR;
		error = 0;
	}
failed:
	mirror->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	return(error);
}
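
A hedged sketch of how a userland consumer might drive this scan.  The ioctl name (HAMMERIOC_MIRROR_READ), the header path, and the resume/termination test are assumptions; the field names (ubuf, size, count, key_beg/key_end/key_cur, tid_beg/tid_end, pfs_id) follow the kernel code above.  A real consumer, such as a mirror-read stream, would also parse the returned mrecords rather than just forwarding the raw bytes.

#include <sys/types.h>
#include <sys/ioctl.h>
#include <vfs/hammer/hammer_ioctl.h>	/* assumed header path */
#include <err.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>

#define MIRROR_BUFSIZE	(64 * 1024)

/*
 * Stream all mrecords for one PFS in the TID range [tid_beg, tid_end]
 * to stdout, resuming from key_cur after each buffer-full.
 */
static void
mirror_read_stream(int fd, int pfs_id,
		   hammer_tid_t tid_beg, hammer_tid_t tid_end)
{
	struct hammer_ioc_mirror_rw mirror;
	char *buf;

	if ((buf = malloc(MIRROR_BUFSIZE)) == NULL)
		err(1, "malloc");

	bzero(&mirror, sizeof(mirror));
	mirror.ubuf = buf;
	mirror.size = MIRROR_BUFSIZE;
	mirror.pfs_id = pfs_id;
	mirror.tid_beg = tid_beg;
	mirror.tid_end = tid_end;
	mirror.key_beg.obj_id = HAMMER_MIN_OBJID;
	mirror.key_beg.key = HAMMER_MIN_KEY;
	mirror.key_beg.rec_type = HAMMER_MIN_RECTYPE;
	mirror.key_end.obj_id = HAMMER_MAX_OBJID;
	mirror.key_end.key = HAMMER_MAX_KEY;
	mirror.key_end.rec_type = HAMMER_MAX_RECTYPE;

	for (;;) {
		mirror.count = 0;
		if (ioctl(fd, HAMMERIOC_MIRROR_READ, &mirror) < 0)
			err(1, "HAMMERIOC_MIRROR_READ");
		if (mirror.count)
			(void)write(STDOUT_FILENO, buf, mirror.count);

		/*
		 * The kernel advances key_cur and sets it to key_end when
		 * the scan completes; restart from key_cur otherwise (this
		 * also covers the HAMMER_IOC_HEAD_INTR case).  A byte-wise
		 * compare is good enough for this sketch.
		 */
		if (bcmp(&mirror.key_cur, &mirror.key_end,
			 sizeof(mirror.key_cur)) == 0)
			break;
		mirror.key_beg = mirror.key_cur;
	}
	free(buf);
}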
Example #3
/*
 * Rollback the specified PFS to (trunc_tid - 1), removing everything
 * greater than or equal to trunc_tid.  The PFS must not have been in no-mirror
 * mode or the MIRROR_FILTERED scan will not work properly.
 *
 * This is typically used to remove any partial syncs when upgrading a
 * slave to a master.  It can theoretically also be used to rollback
 * any PFS, including PFS#0, BUT ONLY TO POINTS THAT HAVE NOT YET BEEN
 * PRUNED, and to points that are older only if they are on a retained
 * (pruning softlink) boundary.
 *
 * Rollbacks destroy information.  If you don't mind inode numbers changing,
 * a better way would be to cpdup a snapshot back onto the master.
 */
static
int
hammer_pfs_rollback(hammer_transaction_t trans,
		    hammer_pseudofs_inmem_t pfsm,
		    hammer_tid_t trunc_tid)
{
	struct hammer_cmirror cmirror;
	struct hammer_cursor cursor;
	struct hammer_base_elm key_cur;
	int error;
	int seq;

	bzero(&cmirror, sizeof(cmirror));
	bzero(&key_cur, sizeof(key_cur));
	key_cur.localization = HAMMER_MIN_LOCALIZATION | pfsm->localization;
	key_cur.obj_id = HAMMER_MIN_OBJID;
	key_cur.key = HAMMER_MIN_KEY;
	key_cur.create_tid = 1;
	key_cur.rec_type = HAMMER_MIN_RECTYPE;

	seq = trans->hmp->flusher.done;

retry:
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		goto failed;
	}
	cursor.key_beg = key_cur;
	cursor.key_end.localization = HAMMER_MAX_LOCALIZATION |
				      pfsm->localization;
	cursor.key_end.obj_id = HAMMER_MAX_OBJID;
	cursor.key_end.key = HAMMER_MAX_KEY;
	cursor.key_end.create_tid = HAMMER_MAX_TID;
	cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	cursor.flags |= HAMMER_CURSOR_BACKEND;

	/*
	 * Do an optimized scan of only records created or modified
	 * >= trunc_tid, so we can fix up those records.  We must
	 * still check the TIDs but this greatly reduces the size of
	 * the scan.
	 */
	cursor.flags |= HAMMER_CURSOR_MIRROR_FILTERED;
	cursor.cmirror = &cmirror;
	cmirror.mirror_tid = trunc_tid;

	error = hammer_btree_first(&cursor);
	while (error == 0) {
		/*
		 * Abort the rollback if a signal is pending.
		 */
		if (error == 0) {
			error = hammer_signal_check(trans->hmp);
			if (error)
				break;
		}

		/*
		 * We only care about leaves.  Internal nodes can be returned
		 * in mirror-filtered mode (they are used to generate SKIP
		 * mrecords), but we don't need them for this code.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 */
		cursor.flags |= HAMMER_CURSOR_ATEDISK;
		if (cursor.node->ondisk->type == HAMMER_BTREE_TYPE_LEAF) {
			key_cur = cursor.node->ondisk->elms[cursor.index].base;
			error = hammer_pfs_delete_at_cursor(&cursor, trunc_tid);
		}

		while (hammer_flusher_meta_halflimit(trans->hmp) ||
		       hammer_flusher_undo_exhausted(trans, 2)) {
			hammer_unlock_cursor(&cursor);
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async_one(trans->hmp);
		}

		if (error == 0)
			error = hammer_btree_iterate(&cursor);
	}
	if (error == ENOENT)
		error = 0;
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
failed:
	return(error);
}
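
What hammer_pfs_delete_at_cursor() does to each record is not shown here; below is a plausible stand-alone model of the per-record fixups, using simplified stand-in types rather than the real HAMMER structures: a record created at or after trunc_tid is destroyed outright, while a record whose deletion was stamped at or after trunc_tid has its delete_tid cleared so it becomes live again.

#include <stdio.h>
#include <stdint.h>

typedef uint64_t tid_t;

struct record {
	tid_t create_tid;
	tid_t delete_tid;	/* 0 means the record is still live */
};

enum rb_action { RB_KEEP, RB_DESTROY, RB_UNDELETE };

static enum rb_action
rollback_action(const struct record *rec, tid_t trunc_tid)
{
	if (rec->create_tid >= trunc_tid)
		return (RB_DESTROY);	/* created after the rollback point */
	if (rec->delete_tid != 0 && rec->delete_tid >= trunc_tid)
		return (RB_UNDELETE);	/* deletion is rolled back */
	return (RB_KEEP);
}

int
main(void)
{
	static const char *names[] = { "keep", "destroy", "undelete" };
	struct record recs[] = {
		{  50,   0 },	/* old and live: keep */
		{ 120,   0 },	/* created after trunc_tid: destroy */
		{  50, 130 },	/* deleted after trunc_tid: undelete */
		{  50,  90 },	/* deleted before trunc_tid: keep */
	};
	tid_t trunc_tid = 100;
	size_t i;

	for (i = 0; i < sizeof(recs) / sizeof(recs[0]); ++i) {
		printf("record %zu: %s\n", i,
		       names[rollback_action(&recs[i], trunc_tid)]);
	}
	return (0);
}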
Example #4
/*
 * Retrieve as many snapshot ids as possible or until the array is
 * full, starting after the last transaction id passed in.  If count
 * is 0 we retrieve starting at the beginning.
 *
 * NOTE: Because the b-tree key field is signed but transaction ids
 *       are unsigned, the returned list will be signed-sorted instead
 *	 of unsigned-sorted.  The caller must still sort the aggregate
 *	 results.
 */
static
int
hammer_ioc_get_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_snapshot *snap)
{
	struct hammer_cursor cursor;
	int error;

	/*
	 * Validate structure
	 */
	if (snap->index != 0)
		return (EINVAL);
	if (snap->count > HAMMER_SNAPS_PER_IOCTL)
		return (EINVAL);

	/*
	 * Look for keys starting after the previous iteration, or at
	 * the beginning if snap->count is 0.
	 */
	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	cursor.key_beg.obj_id = HAMMER_OBJID_ROOT;
	cursor.key_beg.create_tid = 0;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	cursor.key_beg.rec_type = HAMMER_RECTYPE_SNAPSHOT;
	cursor.key_beg.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
	if (snap->count == 0)
		cursor.key_beg.key = HAMMER_MIN_KEY;
	else
		cursor.key_beg.key = (int64_t)snap->snaps[snap->count - 1].tid + 1;

	cursor.key_end = cursor.key_beg;
	cursor.key_end.key = HAMMER_MAX_KEY;
	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_END_EXCLUSIVE | HAMMER_CURSOR_ASOF;

	snap->count = 0;

	error = hammer_btree_first(&cursor);
	while (error == 0 && snap->count < HAMMER_SNAPS_PER_IOCTL) {
		error = hammer_btree_extract_leaf(&cursor);
		if (error)
			break;
		if (cursor.leaf->base.rec_type == HAMMER_RECTYPE_SNAPSHOT) {
			error = hammer_btree_extract_data(&cursor);
			if (error)
				break;
			snap->snaps[snap->count] = cursor.data->snap;

			/*
			 * The snap data tid should match the key but might
			 * not due to a bug in the HAMMER v3 conversion code.
			 *
			 * This error will work itself out over time but we
			 * have to force a match or the snapshot will not
			 * be deletable.
			 */
			if (cursor.data->snap.tid !=
			    (hammer_tid_t)cursor.leaf->base.key) {
				hkprintf("lo=%08x snapshot key "
					"0x%016jx data mismatch 0x%016jx\n",
					cursor.key_beg.localization,
					(uintmax_t)cursor.data->snap.tid,
					cursor.leaf->base.key);
				hkprintf("Probably left over from the "
					"original v3 conversion, hammer "
					"cleanup should get it eventually\n");
				snap->snaps[snap->count].tid =
					cursor.leaf->base.key;
			}
			++snap->count;
		}
		error = hammer_btree_iterate(&cursor);
	}

	if (error == ENOENT) {
		snap->head.flags |= HAMMER_IOC_SNAPSHOT_EOF;
		error = 0;
	}
	snap->head.error = error;
	hammer_done_cursor(&cursor);
	return(0);
}
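
A hedged userland sketch of paging through snapshot records with this ioctl.  The ioctl name (HAMMERIOC_GET_SNAPSHOT) and the header path are assumptions; the paging protocol follows the code above: the structure is passed back unmodified between calls, so the kernel resumes after snaps[count - 1].tid until it sets HAMMER_IOC_SNAPSHOT_EOF.

#include <sys/ioctl.h>
#include <vfs/hammer/hammer_ioctl.h>	/* assumed header path */
#include <err.h>
#include <stdio.h>
#include <stdint.h>
#include <strings.h>

static void
list_snapshots(int fd)
{
	struct hammer_ioc_snapshot snap;
	u_int32_t i;

	bzero(&snap, sizeof(snap));	/* index == 0, count == 0: start at the beginning */

	for (;;) {
		if (ioctl(fd, HAMMERIOC_GET_SNAPSHOT, &snap) < 0)
			err(1, "HAMMERIOC_GET_SNAPSHOT");
		if (snap.head.error)
			errx(1, "snapshot scan error %d", snap.head.error);

		for (i = 0; i < snap.count; ++i) {
			printf("snapshot tid 0x%016jx\n",
			       (uintmax_t)snap.snaps[i].tid);
		}
		if (snap.head.flags & HAMMER_IOC_SNAPSHOT_EOF)
			break;
		/*
		 * Leave snap.count alone: the next call resumes after the
		 * last tid returned.
		 */
	}
}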
Example #5
static
int
hammer_ioc_gethistory(hammer_transaction_t trans, hammer_inode_t ip,
		      struct hammer_ioc_history *hist)
{
	struct hammer_cursor cursor;
	hammer_btree_elm_t elm;
	int error;

	/*
	 * Validate the structure and initialize for return.
	 */
	if (hist->beg_tid > hist->end_tid)
		return(EINVAL);
	if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
		if (hist->key > hist->nxt_key)
			return(EINVAL);
	}

	hist->obj_id = ip->obj_id;
	hist->count = 0;
	hist->nxt_tid = hist->end_tid;
	hist->head.flags &= ~HAMMER_IOC_HISTORY_NEXT_TID;
	hist->head.flags &= ~HAMMER_IOC_HISTORY_NEXT_KEY;
	hist->head.flags &= ~HAMMER_IOC_HISTORY_EOF;
	hist->head.flags &= ~HAMMER_IOC_HISTORY_UNSYNCED;
	if ((ip->flags & HAMMER_INODE_MODMASK) &
	    ~(HAMMER_INODE_ATIME | HAMMER_INODE_MTIME)) {
		hist->head.flags |= HAMMER_IOC_HISTORY_UNSYNCED;
	}

	/*
	 * Setup the cursor.  We can't handle undeletable records
	 * (create_tid of 0) at the moment.  A create_tid of 0 has
	 * a special meaning and cannot be specified in the cursor.
	 */
	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	cursor.key_beg.obj_id = hist->obj_id;
	cursor.key_beg.create_tid = hist->beg_tid;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.obj_type = 0;
	if (cursor.key_beg.create_tid == HAMMER_MIN_TID)
		cursor.key_beg.create_tid = 1;

	cursor.key_end.obj_id = hist->obj_id;
	cursor.key_end.create_tid = hist->end_tid;
	cursor.key_end.delete_tid = 0;
	cursor.key_end.obj_type = 0;

	cursor.flags |= HAMMER_CURSOR_END_EXCLUSIVE;

	if (hist->head.flags & HAMMER_IOC_HISTORY_ATKEY) {
		/*
		 * key-range within the file.  For a regular file the
		 * on-disk key represents BASE+LEN, not BASE, so the
		 * first possible record containing the offset 'key'
		 * has an on-disk key of (key + 1).
		 */
		cursor.key_beg.key = hist->key;
		cursor.key_end.key = HAMMER_MAX_KEY;
		cursor.key_beg.localization = ip->obj_localization |
					      HAMMER_LOCALIZE_MISC;
		cursor.key_end.localization = ip->obj_localization |
					      HAMMER_LOCALIZE_MISC;

		switch(ip->ino_data.obj_type) {
		case HAMMER_OBJTYPE_REGFILE:
			++cursor.key_beg.key;
			cursor.key_beg.rec_type = HAMMER_RECTYPE_DATA;
			break;
		case HAMMER_OBJTYPE_DIRECTORY:
			cursor.key_beg.rec_type = HAMMER_RECTYPE_DIRENTRY;
			cursor.key_beg.localization = ip->obj_localization |
						hammer_dir_localization(ip);
			cursor.key_end.localization = ip->obj_localization |
						hammer_dir_localization(ip);
			break;
		case HAMMER_OBJTYPE_DBFILE:
			cursor.key_beg.rec_type = HAMMER_RECTYPE_DB;
			break;
		default:
			error = EINVAL;
			break;
		}
		cursor.key_end.rec_type = cursor.key_beg.rec_type;
	} else {
		/*
		 * The inode itself.
		 */
		cursor.key_beg.key = 0;
		cursor.key_end.key = 0;
		cursor.key_beg.rec_type = HAMMER_RECTYPE_INODE;
		cursor.key_end.rec_type = HAMMER_RECTYPE_INODE;
		cursor.key_beg.localization = ip->obj_localization |
					      HAMMER_LOCALIZE_INODE;
		cursor.key_end.localization = ip->obj_localization |
					      HAMMER_LOCALIZE_INODE;
	}

	error = hammer_btree_first(&cursor);
	while (error == 0) {
		elm = &cursor.node->ondisk->elms[cursor.index];

		add_history(ip, hist, elm);
		if (hist->head.flags & (HAMMER_IOC_HISTORY_NEXT_TID |
				        HAMMER_IOC_HISTORY_NEXT_KEY |
				        HAMMER_IOC_HISTORY_EOF)) {
			break;
		}
		error = hammer_btree_iterate(&cursor);
	}
	if (error == ENOENT) {
		hist->head.flags |= HAMMER_IOC_HISTORY_EOF;
		error = 0;
	}
	hammer_done_cursor(&cursor);
	return(error);
}
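
A hedged userland sketch of walking one file offset's history with this ioctl.  The ioctl name (HAMMERIOC_GETHISTORY), the header path, and the exact resume protocol are assumptions; the fields used (beg_tid, end_tid, key, nxt_key, nxt_tid, count, head.flags) and the ATKEY / NEXT_TID / NEXT_KEY / EOF flags follow the kernel code above.  Printing of the returned entries is omitted because the entry array is filled by add_history(), which is not shown here.

#include <sys/ioctl.h>
#include <vfs/hammer/hammer_ioctl.h>	/* assumed header path */
#include <err.h>
#include <stdio.h>
#include <stdint.h>
#include <strings.h>

/*
 * Walk the history of the byte offset 'key' within the file open on fd.
 */
static void
walk_history(int fd, int64_t key)
{
	struct hammer_ioc_history hist;

	bzero(&hist, sizeof(hist));
	hist.beg_tid = HAMMER_MIN_TID;
	hist.end_tid = HAMMER_MAX_TID;
	hist.key = key;
	hist.nxt_key = key + 1;
	hist.head.flags = HAMMER_IOC_HISTORY_ATKEY;

	for (;;) {
		if (ioctl(fd, HAMMERIOC_GETHISTORY, &hist) < 0)
			err(1, "HAMMERIOC_GETHISTORY");
		printf("obj 0x%016jx: %d history entries in this batch\n",
		       (uintmax_t)hist.obj_id, hist.count);

		if (hist.head.flags & HAMMER_IOC_HISTORY_EOF)
			break;
		if (hist.head.flags & HAMMER_IOC_HISTORY_NEXT_KEY)
			break;	/* only this key was requested */
		if ((hist.head.flags & HAMMER_IOC_HISTORY_NEXT_TID) == 0)
			break;
		hist.beg_tid = hist.nxt_tid;	/* more TIDs for this key */
	}
}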
Example #6
int
hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
		   struct hammer_ioc_reblock *reblock)
{
	struct hammer_cursor cursor;
	hammer_btree_elm_t elm;
	int checkspace_count;
	int error;
	int seq;
	int slop;

	/*
	 * A fill level <= 20% is considered an emergency.  free_level is
	 * inverted from fill_level.
	 */
	if (reblock->free_level >= HAMMER_LARGEBLOCK_SIZE * 8 / 10)
		slop = HAMMER_CHKSPC_EMERGENCY;
	else
		slop = HAMMER_CHKSPC_REBLOCK;

	if ((reblock->key_beg.localization | reblock->key_end.localization) &
	    HAMMER_LOCALIZE_PSEUDOFS_MASK) {
		return(EINVAL);
	}
	if (reblock->key_beg.obj_id >= reblock->key_end.obj_id)
		return(EINVAL);
	if (reblock->free_level < 0)
		return(EINVAL);

	reblock->key_cur = reblock->key_beg;
	reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	reblock->key_cur.localization += ip->obj_localization;

	checkspace_count = 0;
	seq = trans->hmp->flusher.done;
retry:
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		goto failed;
	}
	cursor.key_beg.localization = reblock->key_cur.localization;
	cursor.key_beg.obj_id = reblock->key_cur.obj_id;
	cursor.key_beg.key = HAMMER_MIN_KEY;
	cursor.key_beg.create_tid = 1;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
	cursor.key_beg.obj_type = 0;

	cursor.key_end.localization = (reblock->key_end.localization &
					HAMMER_LOCALIZE_MASK) +
				      ip->obj_localization;
	cursor.key_end.obj_id = reblock->key_end.obj_id;
	cursor.key_end.key = HAMMER_MAX_KEY;
	cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
	cursor.key_end.delete_tid = 0;
	cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
	cursor.key_end.obj_type = 0;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	cursor.flags |= HAMMER_CURSOR_BACKEND;
	cursor.flags |= HAMMER_CURSOR_NOSWAPCACHE;

	/*
	 * This flag allows the btree scan code to return internal nodes,
 * so we can reblock them in addition to the leaves.  Only specify it
	 * if we intend to reblock B-Tree nodes.
	 */
	if (reblock->head.flags & HAMMER_IOC_DO_BTREE)
		cursor.flags |= HAMMER_CURSOR_REBLOCKING;

	error = hammer_btree_first(&cursor);
	while (error == 0) {
		/*
		 * Internal or Leaf node
		 */
		KKASSERT(cursor.index < cursor.node->ondisk->count);
		elm = &cursor.node->ondisk->elms[cursor.index];
		reblock->key_cur.obj_id = elm->base.obj_id;
		reblock->key_cur.localization = elm->base.localization;

		/*
		 * Yield to more important tasks
		 */
		if ((error = hammer_signal_check(trans->hmp)) != 0)
			break;

		/*
		 * If there is insufficient free space it may be due to
		 * reserved bigblocks, which flushing might fix.
		 *
		 * We must force a retest in case the unlocked cursor is
		 * moved to the end of the leaf, or moved to an internal
		 * node.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 */
		if (hammer_checkspace(trans->hmp, slop)) {
			if (++checkspace_count == 10) {
				error = ENOSPC;
				break;
			}
			hammer_unlock_cursor(&cursor);
			cursor.flags |= HAMMER_CURSOR_RETEST;
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async(trans->hmp, NULL);
			goto skip;
		}

		/*
		 * Acquiring the sync_lock prevents the operation from
		 * crossing a synchronization boundary.
		 *
		 * NOTE: cursor.node may have changed on return.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 */
		hammer_sync_lock_sh(trans);
		error = hammer_reblock_helper(reblock, &cursor, elm);
		hammer_sync_unlock(trans);

		while (hammer_flusher_meta_halflimit(trans->hmp) ||
		       hammer_flusher_undo_exhausted(trans, 2)) {
			hammer_unlock_cursor(&cursor);
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async_one(trans->hmp);
		}

		/*
		 * Set up for iteration; our cursor flags may be modified by
		 * other threads while we are unlocked.
		 */
		cursor.flags |= HAMMER_CURSOR_ATEDISK;

		/*
		 * We allocate data buffers, which at the moment we don't
		 * track dirty levels for because we allow the kernel to write
		 * them.  But if we allocate too many we can still deadlock
		 * the buffer cache.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 *	    (The cursor's node and element may change!)
		 */
		if (bd_heatup()) {
			hammer_unlock_cursor(&cursor);
			bwillwrite(HAMMER_XBUFSIZE);
			hammer_lock_cursor(&cursor);
		}
		/* XXX vm_wait_nominal(); */
skip:
		if (error == 0) {
			error = hammer_btree_iterate(&cursor);
		}
	}
	if (error == ENOENT)
		error = 0;
	hammer_done_cursor(&cursor);
	if (error == EWOULDBLOCK) {
		hammer_flusher_sync(trans->hmp);
		goto retry;
	}
	if (error == EDEADLK)
		goto retry;
	if (error == EINTR) {
		reblock->head.flags |= HAMMER_IOC_HEAD_INTR;
		error = 0;
	}
failed:
	reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	return(error);
}
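
A hedged userland sketch of invoking the reblocker.  The ioctl name (HAMMERIOC_REBLOCK) and the header path are assumptions; the fields and flags used (key_beg/key_end/key_cur, free_level, HAMMER_IOC_DO_BTREE, HAMMER_IOC_HEAD_INTR) follow the kernel code above.  With only HAMMER_IOC_DO_BTREE set, only B-Tree nodes are repacked; free_level is a free-space threshold in bytes per big-block (the inverse of the fill level, per the comment in the kernel code).

#include <sys/ioctl.h>
#include <vfs/hammer/hammer_ioctl.h>	/* assumed header path */
#include <err.h>
#include <strings.h>

static void
reblock_btree(int fd, int free_level)
{
	struct hammer_ioc_reblock reblock;

	bzero(&reblock, sizeof(reblock));
	reblock.key_beg.obj_id = HAMMER_MIN_OBJID;
	reblock.key_end.obj_id = HAMMER_MAX_OBJID;
	reblock.free_level = free_level;
	reblock.head.flags = HAMMER_IOC_DO_BTREE;

	for (;;) {
		if (ioctl(fd, HAMMERIOC_REBLOCK, &reblock) < 0)
			err(1, "HAMMERIOC_REBLOCK");
		if ((reblock.head.flags & HAMMER_IOC_HEAD_INTR) == 0)
			break;
		/*
		 * Interrupted by a signal: resume where the kernel left
		 * off, as recorded in key_cur.
		 */
		reblock.head.flags &= ~HAMMER_IOC_HEAD_INTR;
		reblock.key_beg = reblock.key_cur;
	}
}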