Example #1
/*
 * Reblock a B-Tree internal node.  The parent must be adjusted to point to
 * the new copy of the internal node, and the node's children's parent
 * pointers must also be adjusted to point to the new copy.
 *
 * elm is a pointer to the parent element pointing at cursor->node.
 */
static int
hammer_reblock_int_node(struct hammer_ioc_reblock *reblock,
			 hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	struct hammer_node_lock lockroot;
	hammer_node_t onode;
	hammer_node_t nnode;
	int error;

	hammer_node_lock_init(&lockroot, cursor->node);
	error = hammer_btree_lock_children(cursor, 1, &lockroot, NULL);
	if (error)
		goto done;

	/*
	 * Don't supply a hint when allocating the new node.  Fills are
	 * done from the leaf upwards.
	 */
	onode = cursor->node;
	nnode = hammer_alloc_btree(cursor->trans, 0, &error);

	if (nnode == NULL)
		goto done;

	hammer_lock_ex(&nnode->lock);
	hammer_modify_node_noundo(cursor->trans, nnode);

	hammer_move_node(cursor, elm, onode, nnode);

	/*
	 * Clean up.
	 *
	 * The new node replaces the current node in the cursor.  The cursor
	 * expects it to be locked so leave it locked.  Discard onode.
	 */
	hammer_cursor_replaced_node(onode, nnode);
	hammer_delete_node(cursor->trans, onode);

	if (hammer_debug_general & 0x4000) {
		hdkprintf("%08x %016jx -> %016jx\n",
			(elm ? elm->base.localization : -1),
			(intmax_t)onode->node_offset,
			(intmax_t)nnode->node_offset);
	}
	hammer_modify_node_done(nnode);
	cursor->node = nnode;

	hammer_unlock(&onode->lock);
	hammer_rel_node(onode);

done:
	hammer_btree_unlock_children(cursor->trans->hmp, &lockroot, NULL);
	return (error);
}
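
The relocation above follows a fixed order: copy the node, repoint the parent element and the children's parent pointers at the copy, then discard the original. Below is a minimal userland sketch of that pattern using a hypothetical in-memory n-ary tree (the struct and function names are illustrative, not HAMMER's on-media B-Tree):

#include <stdlib.h>
#include <string.h>

#define MAXELMS	4

struct node {
	struct node *parent;
	int count;			/* number of live elements */
	struct node *child[MAXELMS];	/* children of an internal node */
};

/*
 * Replace *parent_slot (which currently points at onode) with a fresh
 * copy of onode and fix up the children's parent back-pointers.
 */
static struct node *
relocate_int_node(struct node **parent_slot, struct node *onode)
{
	struct node *nnode;
	int i;

	nnode = malloc(sizeof(*nnode));
	if (nnode == NULL)
		return (NULL);
	memcpy(nnode, onode, sizeof(*nnode));	/* move the node's contents */

	*parent_slot = nnode;			/* parent points at the copy */
	for (i = 0; i < nnode->count; ++i) {	/* children point back at it */
		if (nnode->child[i])
			nnode->child[i]->parent = nnode;
	}
	free(onode);				/* discard the original */
	return (nnode);
}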
Example #2
/*
 * Reblock a record's data.  Both the B-Tree element and record pointers
 * to the data must be adjusted.
 */
static int
hammer_reblock_data(struct hammer_ioc_reblock *reblock,
		    hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	hammer_buffer_t data_buffer = NULL;
	hammer_off_t odata_offset;
	hammer_off_t ndata_offset;
	int error;
	void *ndata;

	error = hammer_btree_extract_data(cursor);
	if (error)
		return (error);
	ndata = hammer_alloc_data(cursor->trans, elm->leaf.data_len,
				  elm->leaf.base.rec_type,
				  &ndata_offset, &data_buffer,
				  0, &error);
	if (error)
		goto done;
	hammer_io_notmeta(data_buffer);

	/*
	 * Move the data.  Note that we must invalidate any cached
	 * data buffer in the cursor before calling hammer_blockmap_free().
	 * The blockmap free may release the entire big-block, and it
	 * cannot invalidate a data buffer that the cursor still has
	 * cached in that big-block.
	 */
	hammer_modify_buffer_noundo(cursor->trans, data_buffer);
	bcopy(cursor->data, ndata, elm->leaf.data_len);
	hammer_modify_buffer_done(data_buffer);
	hammer_cursor_invalidate_cache(cursor);

	hammer_blockmap_free(cursor->trans,
			     elm->leaf.data_offset, elm->leaf.data_len);

	hammer_modify_node(cursor->trans, cursor->node,
			   &elm->leaf.data_offset, sizeof(hammer_off_t));
	odata_offset = elm->leaf.data_offset;
	elm->leaf.data_offset = ndata_offset;
	hammer_modify_node_done(cursor->node);

	if (hammer_debug_general & 0x4000) {
		hdkprintf("%08x %016jx -> %016jx\n",
			(elm ? elm->base.localization : -1),
			(intmax_t)odata_offset,
			(intmax_t)ndata_offset);
	}
done:
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
	return (error);
}
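
The data reblock uses the same copy-then-repoint-then-free order, just on a data payload instead of a node. A minimal sketch with plain heap buffers standing in for big-block storage (struct record here is hypothetical):

#include <stdlib.h>
#include <string.h>

struct record {
	void	*data;		/* stands in for elm->leaf.data_offset */
	size_t	 data_len;
};

/* Move rec's payload to freshly allocated storage; returns 0 on success. */
static int
relocate_record_data(struct record *rec)
{
	void *ndata;
	void *odata;

	ndata = malloc(rec->data_len);
	if (ndata == NULL)
		return (-1);
	memcpy(ndata, rec->data, rec->data_len);	/* copy to the new home */
	odata = rec->data;
	rec->data = ndata;				/* repoint the record */
	free(odata);					/* then release the old copy */
	return (0);
}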
Example #3
void
hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;

	KKASSERT(lock->refs);
	for (;;) {
		lv = lock->lockval;

		if (lv == 0) {
			nlv = 1 | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				lock->lowner = td;
				break;
			}
		} else if ((lv & HAMMER_LOCKF_EXCLUSIVE) &&
			   lock->lowner == td) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				break;
		} else {
			if (hammer_debug_locks) {
				hdkprintf("held by %p\n", lock->lowner);
			}
			nlv = lv | HAMMER_LOCKF_WANTED;
			++hammer_contention_count;
			tsleep_interlock(&lock->lockval, 0);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				tsleep(&lock->lockval, PINTERLOCKED, ident, 0);
				if (hammer_debug_locks)
					hdkprintf("try again\n");
			}
		}
	}
}
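
The function above is a recursive exclusive lock built on a single atomic lock word: a CAS acquires the lock when the word is 0, the owner recurses by bumping the count, and contenders set HAMMER_LOCKF_WANTED and sleep on the word. A compressed userland sketch of the same protocol using C11 atomics follows; it spins instead of sleeping (tsleep/tsleep_interlock are kernel facilities), omits the WANTED bookkeeping, and the flag value, names, and caller-supplied owner id are assumptions, not HAMMER's definitions.

#include <stdatomic.h>
#include <stdint.h>
#include <sched.h>

#define LOCKF_EXCLUSIVE	0x40000000u	/* assumed flag bit */

struct xlock {
	_Atomic unsigned int	lockval;	/* LOCKF_EXCLUSIVE | count */
	_Atomic uintptr_t	lowner;		/* opaque owner id */
};

/* self: any unique per-thread id, e.g. (uintptr_t)pthread_self() */
static void
xlock_ex(struct xlock *lock, uintptr_t self)
{
	unsigned int lv, nlv;

	for (;;) {
		lv = atomic_load(&lock->lockval);
		if (lv == 0) {
			/* unowned: try to take it exclusively */
			nlv = 1 | LOCKF_EXCLUSIVE;
			if (atomic_compare_exchange_strong(&lock->lockval,
							   &lv, nlv)) {
				atomic_store(&lock->lowner, self);
				break;
			}
		} else if ((lv & LOCKF_EXCLUSIVE) &&
			   atomic_load(&lock->lowner) == self) {
			/* already ours: recurse by bumping the count */
			nlv = lv + 1;
			if (atomic_compare_exchange_strong(&lock->lockval,
							   &lv, nlv))
				break;
		} else {
			/* contended: the kernel sleeps here; we just yield */
			sched_yield();
		}
	}
}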
Example #4
/*
 * Write out a new record.
 */
static
int
hammer_mirror_write(hammer_cursor_t cursor,
		    struct hammer_ioc_mrecord_rec *mrec,
		    char *udata)
{
	hammer_transaction_t trans;
	hammer_buffer_t data_buffer;
	hammer_off_t ndata_offset;
	hammer_tid_t high_tid;
	void *ndata;
	int error;
	int doprop;

	trans = cursor->trans;
	data_buffer = NULL;

	/*
	 * Get the sync lock so the whole mess is atomic
	 */
	hammer_sync_lock_sh(trans);

	/*
	 * Allocate and adjust data
	 */
	if (mrec->leaf.data_len && mrec->leaf.data_offset) {
		ndata = hammer_alloc_data(trans, mrec->leaf.data_len,
					  mrec->leaf.base.rec_type,
					  &ndata_offset, &data_buffer,
					  0, &error);
		if (ndata == NULL) {
			hammer_sync_unlock(trans);
			return(error);
		}
		mrec->leaf.data_offset = ndata_offset;
		hammer_modify_buffer_noundo(trans, data_buffer);
		error = copyin(udata, ndata, mrec->leaf.data_len);
		if (error == 0) {
			if (hammer_crc_test_leaf(ndata, &mrec->leaf) == 0) {
				hdkprintf("CRC DATA @ %016llx/%d MISMATCH ON PIPE\n",
					(long long)ndata_offset,
					mrec->leaf.data_len);
				error = EINVAL;
			} else {
				error = hammer_mirror_localize_data(
							ndata, &mrec->leaf);
			}
		}
		hammer_modify_buffer_done(data_buffer);
	} else {
		mrec->leaf.data_offset = 0;
		error = 0;
		ndata = NULL;
	}
	if (error)
		goto failed;

	/*
	 * Do the insertion.  This can fail with EDEADLK or EALREADY.
	 */
	cursor->flags |= HAMMER_CURSOR_INSERT;
	error = hammer_btree_lookup(cursor);
	if (error != ENOENT) {
		if (error == 0)
			error = EALREADY;
		goto failed;
	}

	error = hammer_btree_insert(cursor, &mrec->leaf, &doprop);

	/*
	 * The cursor is left on the current element; we want to skip it now.
	 */
	cursor->flags |= HAMMER_CURSOR_ATEDISK;
	cursor->flags &= ~HAMMER_CURSOR_INSERT;

	/*
	 * Track a count of active inodes.
	 */
	if (error == 0 &&
	    mrec->leaf.base.rec_type == HAMMER_RECTYPE_INODE &&
	    mrec->leaf.base.delete_tid == 0) {
		hammer_modify_volume_field(trans,
					   trans->rootvol,
					   vol0_stat_inodes);
		++trans->hmp->rootvol->ondisk->vol0_stat_inodes;
		hammer_modify_volume_done(trans->rootvol);
	}

	/*
	 * vol0_next_tid must track the highest TID stored in the filesystem.
	 * We do not need to generate undo for this update.
	 */
	high_tid = mrec->leaf.base.create_tid;
	if (high_tid < mrec->leaf.base.delete_tid)
		high_tid = mrec->leaf.base.delete_tid;
	if (trans->rootvol->ondisk->vol0_next_tid < high_tid) {
		hammer_modify_volume_noundo(trans, trans->rootvol);
		trans->rootvol->ondisk->vol0_next_tid = high_tid;
		hammer_modify_volume_done(trans->rootvol);
	}

	/*
	 * WARNING!  cursor's leaf pointer may have changed after
	 *	     do_propagation returns.
	 */
	if (error == 0 && doprop)
		hammer_btree_do_propagation(cursor, NULL, &mrec->leaf);

failed:
	/*
	 * Cleanup
	 */
	if (error && mrec->leaf.data_offset) {
		hammer_blockmap_free(cursor->trans,
				     mrec->leaf.data_offset,
				     mrec->leaf.data_len);
	}
	hammer_sync_unlock(trans);
	if (data_buffer)
		hammer_rel_buffer(data_buffer, 0);
	return(error);
}
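
One detail worth isolating is the vol0_next_tid update: it is a high-water mark that only ratchets upward to cover the largest TID stored by the record. A trivial sketch with stand-in types (not the real volume ondisk layout):

#include <stdint.h>

typedef uint64_t tid_t;

struct volume_hdr {
	tid_t	next_tid;	/* stands in for vol0_next_tid */
};

static void
track_high_tid(struct volume_hdr *vol, tid_t create_tid, tid_t delete_tid)
{
	tid_t high_tid;

	high_tid = create_tid;
	if (high_tid < delete_tid)
		high_tid = delete_tid;
	if (vol->next_tid < high_tid)	/* never moves backwards */
		vol->next_tid = high_tid;
}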
Example #5
/*
 * ALGORITHM VERSION 0:
 *	Return a namekey hash.   The 64 bit namekey hash consists of a 32 bit
 *	crc in the upper 32 bits and 0 in the lower 32 bits.  The caller will
 *	use the low 32 bits to generate a unique key and will scan all entries
 *	with the same upper 32 bits when issuing a lookup.
 *
 *	0hhhhhhhhhhhhhhh hhhhhhhhhhhhhhhh 0000000000000000 0000000000000000
 *
 * ALGORITHM VERSION 1:
 *
 *	This algorithm breaks the filename down into separate 32-bit CRCs,
 *	one for each filename segment, where segments are separated by a
 *	special character (dot, dash, underscore, or tilde).  The CRCs are
 *	then added together.  This allows temporary names (which typically
 *	differ from the original in only one segment) to shift the key
 *	rather than randomize it.  A full-filename 16 bit crc is also
 *	generated to deal with degenerate conditions.
 *
 *	The algorithm is designed to handle create/rename situations such
 *	that a create with an extension followed by a rename to a name
 *	without the extension only shifts the key space rather than
 *	randomizing it.
 *
 *	NOTE: The inode allocator cache can only match 10 bits so we do
 *	      not really have any room for a partial sorted name, and
 *	      numbers don't sort well in that situation anyway.
 *
 *	0mmmmmmmmmmmmmmm mmmmmmmmmmmmmmmm llllllllllllllll 0000000000000000
 *
 *
 * We strip bit 63 in order to provide a positive key, this way a seek
 * offset of 0 will represent the base of the directory.
 *
 * We usually strip bit 0 (set it to 0) in order to provide a consistent
 * iteration space for collisions.
 *
 * This function can never return 0.  We use the MSB-0 space to synthesize
 * artificial directory entries such as "." and "..".
 */
int64_t
hammer_directory_namekey(hammer_inode_t dip, const void *name, int len,
			 u_int32_t *max_iterationsp)
{
	const char *aname = name;
	int32_t crcx;
	int64_t key;
	int i;
	int j;

	switch (dip->ino_data.cap_flags & HAMMER_INODE_CAP_DIRHASH_MASK) {
	case HAMMER_INODE_CAP_DIRHASH_ALG0:
		/*
		 * Original algorithm
		 */
		key = (int64_t)(crc32(aname, len) & 0x7FFFFFFF) << 32;
		if (key == 0)
			key |= 0x100000000LL;
		*max_iterationsp = 0xFFFFFFFFU;
		break;
	case HAMMER_INODE_CAP_DIRHASH_ALG1:
		/*
		 * Filesystem version 6 or better will create directories
		 * using the ALG1 dirhash.  This hash breaks the filename
		 * up into domains separated by special characters and
		 * hashes each domain independently.
		 *
		 * A simple sub-sort using the first character of the
		 * filename in the top 5 bits is sketched below but is
		 * currently disabled.
		 */
		key = 0;

		/*
		 * m32
		 */
		crcx = 0;
		for (i = j = 0; i < len; ++i) {
			if (aname[i] == '.' ||
			    aname[i] == '-' ||
			    aname[i] == '_' ||
			    aname[i] == '~') {
				if (i != j)
					crcx += crc32(aname + j, i - j);
				j = i + 1;
			}
		}
		if (i != j)
			crcx += crc32(aname + j, i - j);

#if 0
		/*
		 * xor top 5 bits 0mmmm into low bits and steal the top 5
		 * bits as a semi sub sort using the first character of
		 * the filename.  bit 63 is always left as 0 so directory
		 * keys are positive numbers.
		 */
		crcx ^= (uint32_t)crcx >> (32 - 5);
		crcx = (crcx & 0x07FFFFFF) | ((aname[0] & 0x0F) << (32 - 5));
#endif
		crcx &= 0x7FFFFFFFU;

		key |= (uint64_t)crcx << 32;

		/*
		 * l16 - crc of entire filename
		 *
		 * This crc reduces degenerate hash collision conditions
		 */
		crcx = crc32(aname, len);
		crcx = crcx ^ (crcx << 16);
		key |= crcx & 0xFFFF0000U;

		/*
		 * Cleanup
		 */
		if ((key & 0xFFFFFFFF00000000LL) == 0)
			key |= 0x100000000LL;
		if (hammer_debug_general & 0x0400) {
			hdkprintf("0x%016llx %*.*s\n",
				(long long)key, len, len, aname);
		}
		*max_iterationsp = 0x00FFFFFF;
		break;
	case HAMMER_INODE_CAP_DIRHASH_ALG2:
	case HAMMER_INODE_CAP_DIRHASH_ALG3:
	default:
		key = 0;			/* compiler warning */
		*max_iterationsp = 1;		/* sanity */
		hpanic("bad algorithm %p", dip);
		break;
	}
	return(key);
}
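
A standalone sketch of the ALG1 key construction follows, using zlib's crc32() in place of the kernel CRC (a different polynomial and seed, so the values will not match real HAMMER keys; only the bit layout 0mmm...m llll...l 0000...0 is illustrated). Build with something like cc namekey.c -lz.

#include <stdint.h>
#include <zlib.h>

static int64_t
namekey_alg1(const char *aname, int len)
{
	uint32_t crcx;
	int64_t key = 0;
	int i, j;

	/* m32: sum of CRCs over segments split at '.', '-', '_', '~' */
	crcx = 0;
	for (i = j = 0; i < len; ++i) {
		if (aname[i] == '.' || aname[i] == '-' ||
		    aname[i] == '_' || aname[i] == '~') {
			if (i != j)
				crcx += crc32(0, (const Bytef *)(aname + j),
					      i - j);
			j = i + 1;
		}
	}
	if (i != j)
		crcx += crc32(0, (const Bytef *)(aname + j), i - j);
	crcx &= 0x7FFFFFFFU;			/* keep bit 63 clear */
	key |= (int64_t)crcx << 32;

	/* l16: fold a full-filename crc into bits 31..16 */
	crcx = crc32(0, (const Bytef *)aname, len);
	crcx = crcx ^ (crcx << 16);
	key |= crcx & 0xFFFF0000U;

	/* never return 0; the all-zero upper half is reserved */
	if ((key & 0xFFFFFFFF00000000LL) == 0)
		key |= 0x100000000LL;
	return (key);
}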
Example #6
/*
 * Flush the next sequence number until an open flush group is encountered
 * or we reach (next).  Not all sequence numbers will have flush groups
 * associated with them.  Such sequence numbers still require the UNDO/REDO
 * FIFO to be flushed, since it can take at least one additional run to
 * synchronize the FIFO, and more to also synchronize the reserve structures.
 */
static int
hammer_flusher_flush(hammer_mount_t hmp, int *nomorep)
{
	hammer_flusher_info_t info;
	hammer_flush_group_t flg;
	hammer_reserve_t resv;
	int count;
	int seq;

	/*
	 * Just in case there's a flush race on mount.  The sequence number
	 * does not change.
	 */
	if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL) {
		*nomorep = 1;
		return (hmp->flusher.done);
	}
	*nomorep = 0;

	/*
	 * Flush the next sequence number.  Sequence numbers can exist
	 * without an assigned flush group, indicating that just a FIFO flush
	 * should occur.
	 */
	seq = hmp->flusher.done + 1;
	flg = TAILQ_FIRST(&hmp->flush_group_list);
	if (flg == NULL) {
		if (seq == hmp->flusher.next) {
			*nomorep = 1;
			return (hmp->flusher.done);
		}
	} else if (seq == flg->seq) {
		if (flg->closed) {
			KKASSERT(flg->running == 0);
			flg->running = 1;
			if (hmp->fill_flush_group == flg) {
				hmp->fill_flush_group =
					TAILQ_NEXT(flg, flush_entry);
			}
		} else {
			*nomorep = 1;
			return (hmp->flusher.done);
		}
	} else {
		/*
		 * Sequence number problems can only happen if a critical
		 * filesystem error occurred which forced the filesystem into
		 * read-only mode.
		 */
		KKASSERT(flg->seq - seq > 0 || hmp->ronly >= 2);
		flg = NULL;
	}

	/*
	 * We only do one flg but we may have to loop/retry.
	 *
	 * Due to various races it is possible to come across a flush
	 * group which has not yet been closed.
	 */
	count = 0;
	while (flg && flg->running) {
		++count;
		if (hammer_debug_general & 0x0001) {
			hdkprintf("%d ttl=%d recs=%d\n",
				flg->seq, flg->total_count, flg->refs);
		}
		if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
			break;
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

		/*
		 * If the previous flush cycle just about exhausted our
		 * UNDO space we may have to do a dummy cycle to move the
		 * first_offset up before actually digging into a new cycle,
		 * or the new cycle will not have sufficient undo space.
		 */
		if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
			hammer_flusher_finalize(&hmp->flusher.trans, 0);

		KKASSERT(hmp->next_flush_group != flg);

		/*
		 * Place the flg in the flusher structure and start the
		 * slaves running.  The slaves will compete for inodes
		 * to flush.
		 *
		 * Make a per-thread copy of the transaction.
		 */
		while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			info->trans = hmp->flusher.trans;
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			wakeup(&info->runstate);
		}

		/*
		 * Wait for all slaves to finish running
		 */
		while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
			tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

		/*
		 * Do the final finalization, clean up
		 */
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hmp->flusher.tid = hmp->flusher.trans.tid;

		hammer_done_transaction(&hmp->flusher.trans);

		/*
		 * Loop up on the same flg.  If the flg is done clean it up
		 * and break out.  We only flush one flg.
		 */
		if (RB_EMPTY(&flg->flush_tree)) {
			KKASSERT(flg->refs == 0);
			TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
			kfree(flg, hmp->m_misc);
			break;
		}
		KKASSERT(TAILQ_FIRST(&hmp->flush_group_list) == flg);
	}

	/*
	 * We may have pure meta-data to flush, or we may have to finish
	 * cycling the UNDO FIFO, even if there were no flush groups.
	 */
	if (count == 0 && hammer_flusher_haswork(hmp)) {
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hammer_done_transaction(&hmp->flusher.trans);
	}

	/*
	 * Clean up any freed big-blocks (typically zone-2).
	 * resv->flg_no is typically set several flush groups ahead
	 * of the free to ensure that the freed block is not reused until
	 * the flushes which could still reference it have completed.
	 */
	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
		if (resv->flg_no - seq > 0)
			break;
		hammer_reserve_clrdelay(hmp, resv);
	}
	return (seq);
}
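
The sequence-number selection at the top of the function reduces to a small decision: flush seq = done + 1 unless the flusher is fully caught up or the oldest flush group carrying that sequence is still open. A sketch of just that decision, with simplified stand-in types and without the group-running bookkeeping or the read-only assertion:

#include <stddef.h>

struct flush_group {
	int	seq;		/* sequence number assigned to this group */
	int	closed;		/* no more work may be added to it */
};

/*
 * done: last fully flushed sequence, next: first unassigned sequence,
 * flg: oldest flush group or NULL.  Returns 1 and sets *seqp when a
 * flush pass should run (possibly a pure UNDO/REDO FIFO flush if no
 * group carries that sequence), or 0 when there is nothing more to do.
 */
static int
pick_flush_seq(int done, int next, const struct flush_group *flg, int *seqp)
{
	int seq = done + 1;

	if (flg == NULL) {
		if (seq == next)
			return (0);	/* fully caught up */
	} else if (seq == flg->seq && !flg->closed) {
		return (0);		/* oldest group is still open */
	}
	*seqp = seq;
	return (1);
}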
Example #7
/*
 * Reblock the B-Tree (leaf) node, record, and/or data if necessary.
 *
 * XXX We have no visibility into internal B-Tree nodes at the moment,
 * only leaf nodes.
 */
static int
hammer_reblock_helper(struct hammer_ioc_reblock *reblock,
		      hammer_cursor_t cursor, hammer_btree_elm_t elm)
{
	hammer_mount_t hmp;
	hammer_off_t tmp_offset;
	hammer_node_ondisk_t ondisk;
	struct hammer_btree_leaf_elm leaf;
	int error;
	int bytes;
	int cur;
	int iocflags;

	error = 0;
	hmp = cursor->trans->hmp;

	/*
	 * Reblock data.  Note that data embedded in a record is reblocked
	 * by the record reblock code.  Data processing only occurs at leaf
	 * nodes and for RECORD element types.
	 */
	if (cursor->node->ondisk->type != HAMMER_BTREE_TYPE_LEAF)
		goto skip;
	if (elm->leaf.base.btype != HAMMER_BTREE_TYPE_RECORD)
		return(EINVAL);
	tmp_offset = elm->leaf.data_offset;
	if (tmp_offset == 0)
		goto skip;

	/*
	 * If reblock->vol_no is specified we only want to reblock data
	 * in that volume and ignore everything else.
	 */
	if (reblock->vol_no != -1 &&
	    reblock->vol_no != HAMMER_VOL_DECODE(tmp_offset))
		goto skip;

	/*
	 * NOTE: Localization restrictions may also have been set up; we
	 *	 can't just set the match flags willy-nilly here.
	 */
	switch(elm->leaf.base.rec_type) {
	case HAMMER_RECTYPE_INODE:
	case HAMMER_RECTYPE_SNAPSHOT:
	case HAMMER_RECTYPE_CONFIG:
		iocflags = HAMMER_IOC_DO_INODES;
		break;
	case HAMMER_RECTYPE_EXT:
	case HAMMER_RECTYPE_FIX:
	case HAMMER_RECTYPE_PFS:
	case HAMMER_RECTYPE_DIRENTRY:
		iocflags = HAMMER_IOC_DO_DIRS;
		break;
	case HAMMER_RECTYPE_DATA:
	case HAMMER_RECTYPE_DB:
		iocflags = HAMMER_IOC_DO_DATA;
		break;
	default:
		iocflags = 0;
		break;
	}
	if (reblock->head.flags & iocflags) {
		++reblock->data_count;
		reblock->data_byte_count += elm->leaf.data_len;
		bytes = hammer_blockmap_getfree(hmp, tmp_offset, &cur, &error);
		if (hammer_debug_general & 0x4000)
			hdkprintf("D %6d/%d\n", bytes, reblock->free_level);
		/*
		 * Start data reblock if
		 * 1. there is no error
		 * 2. the data and allocator offset are not in the same
		 *    big-block, or the free level threshold is 0
		 * 3. the free bytes in the data's big-block are at least
		 *    the free level threshold (so a threshold of 0 means
		 *    reblock no matter what).
		 */
		if (error == 0 && (cur == 0 || reblock->free_level == 0) &&
		    bytes >= reblock->free_level) {
			/*
			 * This is nasty, the uncache code may have to get
			 * vnode locks and because of that we can't hold
			 * the cursor locked.
			 *
			 * WARNING: See warnings in hammer_unlock_cursor()
			 *	    function.
			 */
			leaf = elm->leaf;
			hammer_unlock_cursor(cursor);
			hammer_io_direct_uncache(hmp, &leaf);
			hammer_lock_cursor(cursor);

			/*
			 * elm may have become stale or invalid, reload it.
			 * The ondisk variable is temporary only.  Note that
			 * cursor->node and thus cursor->node->ondisk may
			 * also have changed.
			 */
			ondisk = cursor->node->ondisk;
			elm = &ondisk->elms[cursor->index];
			if (cursor->flags & HAMMER_CURSOR_RETEST) {
				hkprintf("debug: retest on reblocker uncache\n");
				error = EDEADLK;
			} else if (ondisk->type != HAMMER_BTREE_TYPE_LEAF ||
				   cursor->index >= ondisk->count) {
				hkprintf("debug: shifted on reblocker uncache\n");
				error = EDEADLK;
			} else if (bcmp(&elm->leaf, &leaf, sizeof(leaf))) {
				hkprintf("debug: changed on reblocker uncache\n");
				error = EDEADLK;
			}
			if (error == 0)
				error = hammer_cursor_upgrade(cursor);
			if (error == 0) {
				KKASSERT(cursor->index < ondisk->count);
				error = hammer_reblock_data(reblock,
							    cursor, elm);
			}
			if (error == 0) {
				++reblock->data_moves;
				reblock->data_byte_moves += elm->leaf.data_len;
			}
		}
	}

skip:
	/*
	 * Reblock a B-Tree internal or leaf node.  A leaf node is reblocked
	 * on initial entry only (element 0).  An internal node is reblocked
	 * only when entered upward from its first leaf node (also element 0;
	 * see hammer_btree_iterate(), where the cursor moves up and may
	 * return).  Further revisits of the internal node (index > 0) are
	 * ignored.
	 */
	tmp_offset = cursor->node->node_offset;

	/*
	 * If reblock->vol_no is specified we only want to reblock B-Tree
	 * nodes in that volume and ignore everything else.
	 */
	if (reblock->vol_no != -1 &&
	    reblock->vol_no != HAMMER_VOL_DECODE(tmp_offset))
		goto end;

	if (cursor->index == 0 &&
	    error == 0 && (reblock->head.flags & HAMMER_IOC_DO_BTREE)) {
		++reblock->btree_count;
		bytes = hammer_blockmap_getfree(hmp, tmp_offset, &cur, &error);
		if (hammer_debug_general & 0x4000)
			hdkprintf("B %6d/%d\n", bytes, reblock->free_level);
		/*
		 * Start node reblock if
		 * 1. there is no error
		 * 2. the node and allocator offset are not in the same
		 *    big-block, or the free level threshold is 0
		 * 3. the free bytes in the node's big-block are at least
		 *    the free level threshold (so a threshold of 0 means
		 *    reblock no matter what).
		 */
		if (error == 0 && (cur == 0 || reblock->free_level == 0) &&
		    bytes >= reblock->free_level) {
			error = hammer_cursor_upgrade(cursor);
			if (error == 0) {
				if (cursor->parent) {
					KKASSERT(cursor->parent_index <
						 cursor->parent->ondisk->count);
					elm = &cursor->parent->ondisk->elms[cursor->parent_index];
				} else {
					elm = NULL;
				}
				switch(cursor->node->ondisk->type) {
				case HAMMER_BTREE_TYPE_LEAF:
					error = hammer_reblock_leaf_node(
							reblock, cursor, elm);
					break;
				case HAMMER_BTREE_TYPE_INTERNAL:
					error = hammer_reblock_int_node(
							reblock, cursor, elm);
					break;
				default:
					hpanic("Illegal B-Tree node type");
				}
			}
			if (error == 0) {
				++reblock->btree_moves;
			}
		}
	}
end:
	hammer_cursor_downgrade(cursor);
	return(error);
}
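
Both numbered comment blocks above apply the same reblock test. Reduced to a standalone predicate for clarity (plain C, names are illustrative): bytes is the free space in the target's big-block, cur is non-zero when the target already sits in the allocator's current big-block, and free_level is the user-supplied threshold.

static int
should_reblock(int error, int bytes, int cur, int free_level)
{
	if (error)
		return (0);
	if (cur != 0 && free_level != 0)
		return (0);		/* already in the allocator's big-block */
	return (bytes >= free_level);	/* threshold 0 always passes */
}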
Example #8
/*
 * Generate UNDO record(s) for the block of data at the specified zone1
 * or zone2 offset.
 *
 * The recovery code will execute UNDOs in reverse order, allowing overlaps.
 * All the UNDOs are executed together so if we already laid one down we
 * do not have to lay another one down for the same range.
 *
 * For HAMMER version 4+ UNDO a 512 byte boundary is enforced and a PAD
 * will be laid down for any unused space.  UNDO FIFO media structures
 * will implement the hdr_seq field (it used to be reserved01), and
 * both flush and recovery mechanics will be very different.
 *
 * WARNING!  See also hammer_generate_redo() in hammer_redo.c
 */
int
hammer_generate_undo(hammer_transaction_t trans,
		     hammer_off_t zone_off, void *base, int len)
{
	hammer_mount_t hmp;
	hammer_volume_t root_volume;
	hammer_blockmap_t undomap;
	hammer_buffer_t buffer = NULL;
	hammer_fifo_undo_t undo;
	hammer_fifo_tail_t tail;
	hammer_off_t next_offset;
	int error;
	int bytes;
	int n;

	hmp = trans->hmp;

	/*
	 * A SYNC record may be required before we can lay down a general
	 * UNDO.  This ensures that the nominal recovery span contains
	 * at least one SYNC record telling the recovery code how far
	 * out-of-span it must go to run the REDOs.
	 */
	if ((hmp->flags & HAMMER_MOUNT_REDO_SYNC) == 0 &&
	    hmp->version >= HAMMER_VOL_VERSION_FOUR) {
		hammer_generate_redo_sync(trans);
	}

	/*
	 * Enter the offset into our undo history.  If there is an existing
	 * undo we do not have to generate a new one.
	 */
	if (hammer_enter_undo_history(hmp, zone_off, len) == EALREADY)
		return(0);

	root_volume = trans->rootvol;
	undomap = &hmp->blockmap[HAMMER_ZONE_UNDO_INDEX];

	/* no undo recursion */
	hammer_modify_volume_noundo(NULL, root_volume);
	hammer_lock_ex(&hmp->undo_lock);

	/* undo had better not roll over (loose test) */
	if (hammer_undo_space(trans) < len + HAMMER_BUFSIZE*3)
		hpanic("insufficient UNDO/REDO FIFO space for undo!");

	/*
	 * Loop until the undo for the entire range has been laid down.
	 */
	while (len) {
		/*
		 * Fetch the layout offset in the UNDO FIFO, wrap it as
		 * necessary.
		 */
		if (undomap->next_offset == undomap->alloc_offset)
			undomap->next_offset = HAMMER_ENCODE_UNDO(0);
		next_offset = undomap->next_offset;

		/*
		 * This is a tail-chasing FIFO; when we hit the start of a
		 * new buffer we don't have to read it in.
		 */
		if ((next_offset & HAMMER_BUFMASK) == 0) {
			undo = hammer_bnew(hmp, next_offset, &error, &buffer);
			hammer_format_undo(undo, hmp->undo_seqno ^ 0x40000000);
		} else {
			undo = hammer_bread(hmp, next_offset, &error, &buffer);
		}
		if (error)
			break;
		/* no undo recursion */
		hammer_modify_buffer_noundo(NULL, buffer);

		/*
		 * Calculate how big a media structure fits up to the next
		 * alignment point and how large a data payload we can
		 * accommodate.
		 *
		 * If n calculates to 0 or negative there is no room for
		 * anything but a PAD.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)next_offset & HAMMER_UNDO_MASK);
		n = bytes -
		    (int)sizeof(struct hammer_fifo_undo) -
		    (int)sizeof(struct hammer_fifo_tail);

		/*
		 * If available space is insufficient for any payload
		 * we have to lay down a PAD.
		 *
		 * The minimum PAD is 8 bytes and the head and tail will
		 * overlap each other in that case.  PADs do not have
		 * sequence numbers or CRCs.
		 *
		 * A PAD may not start on a boundary.  That is, every
		 * 512-byte block in the UNDO/REDO FIFO must begin with
		 * a record containing a sequence number.
		 */
		if (n <= 0) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			KKASSERT(((int)next_offset & HAMMER_UNDO_MASK) != 0);
			tail = (void *)((char *)undo + bytes - sizeof(*tail));
			if ((void *)undo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			undo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
			undomap->next_offset += bytes;
			hammer_modify_buffer_done(buffer);
			hammer_stats_undo += bytes;
			continue;
		}

		/*
		 * Calculate the actual payload and recalculate the size
		 * of the media structure as necessary.
		 */
		if (n > len) {
			n = len;
			bytes = HAMMER_HEAD_DOALIGN(n) +
				(int)sizeof(struct hammer_fifo_undo) +
				(int)sizeof(struct hammer_fifo_tail);
		}
		if (hammer_debug_general & 0x0080) {
			hdkprintf("undo %016jx %d %d\n",
				(intmax_t)next_offset, bytes, n);
		}

		undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
		undo->head.hdr_type = HAMMER_HEAD_TYPE_UNDO;
		undo->head.hdr_size = bytes;
		undo->head.hdr_seq = hmp->undo_seqno++;
		undo->head.hdr_crc = 0;
		undo->undo_offset = zone_off;
		undo->undo_data_bytes = n;
		bcopy(base, undo + 1, n);

		tail = (void *)((char *)undo + bytes - sizeof(*tail));
		tail->tail_signature = HAMMER_TAIL_SIGNATURE;
		tail->tail_type = HAMMER_HEAD_TYPE_UNDO;
		tail->tail_size = bytes;

		KKASSERT(bytes >= sizeof(undo->head));
		hammer_crc_set_fifo_head(&undo->head, bytes);
		undomap->next_offset += bytes;
		hammer_stats_undo += bytes;

		/*
		 * Before we finish off the buffer we have to deal with any
		 * junk between the end of the media structure we just laid
		 * down and the UNDO alignment boundary.  We do this by laying
		 * down a dummy PAD.  Even though we will probably overwrite
		 * it almost immediately we have to do this so recovery runs
		 * can iterate the UNDO space without having to depend on
		 * the indices in the volume header.
		 *
		 * This dummy PAD will be overwritten on the next undo so
		 * we do not adjust undomap->next_offset.
		 */
		bytes = HAMMER_UNDO_ALIGN -
			((int)undomap->next_offset & HAMMER_UNDO_MASK);
		if (bytes != HAMMER_UNDO_ALIGN) {
			KKASSERT(bytes >= sizeof(struct hammer_fifo_tail));
			undo = (void *)(tail + 1);
			tail = (void *)((char *)undo + bytes - sizeof(*tail));
			if ((void *)undo != (void *)tail) {
				tail->tail_signature = HAMMER_TAIL_SIGNATURE;
				tail->tail_type = HAMMER_HEAD_TYPE_PAD;
				tail->tail_size = bytes;
			}
			undo->head.hdr_signature = HAMMER_HEAD_SIGNATURE;
			undo->head.hdr_type = HAMMER_HEAD_TYPE_PAD;
			undo->head.hdr_size = bytes;
			/* NO CRC OR SEQ NO */
		}
		hammer_modify_buffer_done(buffer);

		/*
		 * Adjust for loop
		 */
		len -= n;
		base = (char *)base + n;
		zone_off += n;
	}
	hammer_modify_volume_done(root_volume);
	hammer_unlock(&hmp->undo_lock);

	if (buffer)
		hammer_rel_buffer(buffer, 0);
	return(error);
}
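
The per-record sizing in the loop above is pure alignment arithmetic. Here is a sketch of just that arithmetic, assuming the 512-byte UNDO alignment described in the header comment and 8-byte payload alignment for HAMMER_HEAD_DOALIGN; the macro values, struct, and function names are stand-ins, not the real media definitions.

#include <stdint.h>

#define UNDO_ALIGN	512			/* assumed FIFO alignment */
#define UNDO_MASK	(UNDO_ALIGN - 1)
#define HEAD_ALIGN	8			/* assumed record alignment */
#define DOALIGN(x)	(((x) + HEAD_ALIGN - 1) & ~(HEAD_ALIGN - 1))

struct fifo_sizes {
	int	payload;	/* data bytes in this record, 0 => PAD */
	int	recsize;	/* total media record size laid down */
};

static struct fifo_sizes
undo_fit(uint64_t next_offset, int len, int head_size, int tail_size)
{
	struct fifo_sizes s;
	int bytes, n;

	/* room up to the next alignment boundary */
	bytes = UNDO_ALIGN - ((int)next_offset & UNDO_MASK);
	n = bytes - head_size - tail_size;

	if (n <= 0) {
		s.payload = 0;		/* no room for data: lay a PAD */
		s.recsize = bytes;
		return (s);
	}
	if (n > len) {
		/* final record: shrink to the aligned payload */
		n = len;
		bytes = DOALIGN(n) + head_size + tail_size;
	}
	s.payload = n;
	s.recsize = bytes;
	return (s);
}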