Example #1
/*
 * Flush a single inode that is part of a flush group.
 *
 * Flusher errors are extremely serious, even ENOSPC shouldn't occur because
 * the front-end should have reserved sufficient space on the media.  Any
 * error other than EWOULDBLOCK will force the mount to be read-only.
 */
static
int
hammer_flusher_flush_inode(hammer_inode_t ip, void *data)
{
	hammer_flusher_info_t info = data;
	hammer_mount_t hmp = info->hmp;
	hammer_transaction_t trans = &info->trans;
	int error;

	/*
	 * Several slaves are operating on the same flush group concurrently.
	 * The SLAVEFLUSH flag prevents them from tripping over each other.
	 *
	 * NOTE: It is possible for an EWOULDBLOCK'd ip returned by one slave
	 *	 to be resynced by another, but normally such inodes are not
	 *	 revisited until the master loop gets to them.
	 */
	if (ip->flags & HAMMER_INODE_SLAVEFLUSH)
		return(0);
	ip->flags |= HAMMER_INODE_SLAVEFLUSH;
	++hammer_stats_inode_flushes;

	hammer_flusher_clean_loose_ios(hmp);
	vm_wait_nominal();
	error = hammer_sync_inode(trans, ip);

	/*
	 * EWOULDBLOCK can happen under normal operation, all other errors
	 * are considered extremely serious.  We must set WOULDBLOCK
	 * mechanics to deal with the mess left over from the abort of the
	 * previous flush.
	 */
	if (error) {
		ip->flags |= HAMMER_INODE_WOULDBLOCK;
		if (error == EWOULDBLOCK)
			error = 0;
	}
	hammer_flush_inode_done(ip, error);
	/* ip invalid */

	while (hmp->flusher.finalize_want)
		tsleep(&hmp->flusher.finalize_want, 0, "hmrsxx", 0);
	if (hammer_flusher_undo_exhausted(trans, 1)) {
		hkprintf("Warning: UNDO area too small!\n");
		hammer_flusher_finalize(trans, 1);
	} else if (hammer_flusher_meta_limit(trans->hmp)) {
		hammer_flusher_finalize(trans, 0);
	}
	return (0);
}
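
The tail of this callback is a pattern worth isolating: an expected, recoverable error (EWOULDBLOCK) is recorded as a flag on the inode and then demoted to 0, so only genuinely fatal errors propagate to the flusher. Below is a minimal user-space sketch of the same shape; the struct item, the ITEM_WOULDBLOCK flag and the sync_item() helper are illustrative stand-ins, not HAMMER code.

#include <errno.h>
#include <stdio.h>

#define ITEM_WOULDBLOCK	0x01	/* hypothetical per-item flag */

struct item {
	int flags;
};

/* Hypothetical sync step: pretend the backing store was busy. */
static int
sync_item(struct item *it)
{
	(void)it;
	return (EWOULDBLOCK);
}

/*
 * Same shape as the flusher callback: remember that the item must be
 * revisited later, but report only unexpected errors upward.
 */
static int
flush_item(struct item *it)
{
	int error = sync_item(it);

	if (error) {
		it->flags |= ITEM_WOULDBLOCK;
		if (error == EWOULDBLOCK)
			error = 0;	/* expected, not fatal */
	}
	return (error);
}

int
main(void)
{
	struct item it = { 0 };
	int error = flush_item(&it);

	printf("error=%d flags=%#x\n", error, it.flags);
	return (0);
}
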
Example #2
/*
 * Copy records from userland to the target mirror.
 *
 * The PFS is identified in the mirror structure.  The passed ip is just
 * some directory in the overall HAMMER filesystem and has nothing to
 * do with the PFS.  In fact, there might not even be a root directory for
 * the PFS yet!
 */
int
hammer_ioc_mirror_write(hammer_transaction_t trans, hammer_inode_t ip,
		       struct hammer_ioc_mirror_rw *mirror)
{
	union hammer_ioc_mrecord_any mrec;
	struct hammer_cursor cursor;
	u_int32_t localization;
	int checkspace_count = 0;
	int error;
	int bytes;
	char *uptr;
	int seq;

	localization = (u_int32_t)mirror->pfs_id << 16;
	seq = trans->hmp->flusher.done;

	/*
	 * Validate the mirror structure and relocalize the tracking keys.
	 */
	if (mirror->size < 0 || mirror->size > 0x70000000)
		return(EINVAL);
	mirror->key_beg.localization &= HAMMER_LOCALIZE_MASK;
	mirror->key_beg.localization += localization;
	mirror->key_end.localization &= HAMMER_LOCALIZE_MASK;
	mirror->key_end.localization += localization;
	mirror->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	mirror->key_cur.localization += localization;

	/*
	 * Set up our tracking cursor for the loop.  The tracking cursor
	 * is used to delete records that are no longer present on the
	 * master.  The last handled record at key_cur must be skipped.
	 */
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);

	cursor.key_beg = mirror->key_cur;
	cursor.key_end = mirror->key_end;
	cursor.flags |= HAMMER_CURSOR_BACKEND;
	error = hammer_btree_first(&cursor);
	if (error == 0)
		cursor.flags |= HAMMER_CURSOR_ATEDISK;
	if (error == ENOENT)
		error = 0;

	/*
	 * Loop until our input buffer has been exhausted.
	 */
	while (error == 0 &&
		mirror->count + sizeof(mrec.head) <= mirror->size) {

	        /*
		 * Don't blow out the buffer cache.  Leave room for frontend
		 * cache as well.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 */
		while (hammer_flusher_meta_halflimit(trans->hmp) ||
		       hammer_flusher_undo_exhausted(trans, 2)) {
			hammer_unlock_cursor(&cursor);
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async_one(trans->hmp);
		}

		/*
		 * If there is insufficient free space it may be due to
		 * reserved bigblocks, which flushing might fix.
		 */
		if (hammer_checkspace(trans->hmp, HAMMER_CHKSPC_MIRROR)) {
			if (++checkspace_count == 10) {
				error = ENOSPC;
				break;
			}
			hammer_unlock_cursor(&cursor);
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async(trans->hmp, NULL);
		}


		/*
		 * Acquire and validate header
		 */
		if ((bytes = mirror->size - mirror->count) > sizeof(mrec))
			bytes = sizeof(mrec);
		uptr = (char *)mirror->ubuf + mirror->count;
		error = copyin(uptr, &mrec, bytes);
		if (error)
			break;
		if (mrec.head.signature != HAMMER_IOC_MIRROR_SIGNATURE) {
			error = EINVAL;
			break;
		}
		if (mrec.head.rec_size < sizeof(mrec.head) ||
		    mrec.head.rec_size > sizeof(mrec) + HAMMER_XBUFSIZE ||
		    mirror->count + mrec.head.rec_size > mirror->size) {
			error = EINVAL;
			break;
		}

		switch(mrec.head.type & HAMMER_MRECF_TYPE_MASK) {
		case HAMMER_MREC_TYPE_SKIP:
			if (mrec.head.rec_size != sizeof(mrec.skip))
				error = EINVAL;
			if (error == 0)
				error = hammer_ioc_mirror_write_skip(&cursor, &mrec.skip, mirror, localization);
			break;
		case HAMMER_MREC_TYPE_REC:
			if (mrec.head.rec_size < sizeof(mrec.rec))
				error = EINVAL;
			if (error == 0)
				error = hammer_ioc_mirror_write_rec(&cursor, &mrec.rec, mirror, localization, uptr + sizeof(mrec.rec));
			break;
		case HAMMER_MREC_TYPE_REC_NODATA:
		case HAMMER_MREC_TYPE_REC_BADCRC:
			/*
			 * Records with bad data payloads are ignored XXX.
			 * Records with no data payload have to be skipped
			 * (they shouldn't have been written in the first
			 * place).
			 */
			if (mrec.head.rec_size < sizeof(mrec.rec))
				error = EINVAL;
			break;
		case HAMMER_MREC_TYPE_PASS:
			if (mrec.head.rec_size != sizeof(mrec.rec))
				error = EINVAL;
			if (error == 0)
				error = hammer_ioc_mirror_write_pass(&cursor, &mrec.rec, mirror, localization);
			break;
		default:
			error = EINVAL;
			break;
		}

		/*
		 * Retry the current record on deadlock, otherwise setup
		 * for the next loop.
		 */
		if (error == EDEADLK) {
			while (error == EDEADLK) {
				hammer_sync_lock_sh(trans);
				hammer_recover_cursor(&cursor);
				error = hammer_cursor_upgrade(&cursor);
				hammer_sync_unlock(trans);
			}
		} else {
			if (error == EALREADY)
				error = 0;
			if (error == 0) {
				mirror->count += 
					HAMMER_HEAD_DOALIGN(mrec.head.rec_size);
			}
		}
	}
	hammer_done_cursor(&cursor);

	/*
	 * cumulative error 
	 */
	if (error) {
		mirror->head.flags |= HAMMER_IOC_HEAD_ERROR;
		mirror->head.error = error;
	}

	/*
	 * ioctls don't update the RW data structure if an error is returned,
	 * always return 0.
	 */
	return(0);
}
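
The input loop above is a variable-length record parser: copy in at most sizeof(header) bytes, check the signature, validate the declared record size against what remains in the buffer, then advance by the aligned size. Here is a user-space sketch of just that framing logic; the rec_head layout, REC_SIGNATURE and REC_ALIGN() are illustrative assumptions, not the HAMMER mrecord format.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define REC_SIGNATURE	0x4d52u				/* hypothetical magic */
#define REC_ALIGN(n)	(((n) + 15) & ~(size_t)15)	/* hypothetical 16-byte alignment */

struct rec_head {		/* illustrative header, not the HAMMER layout */
	uint16_t signature;
	uint16_t type;
	uint32_t rec_size;	/* header plus payload, unaligned */
};

/* Walk a buffer of back-to-back records, validating each header first. */
static int
parse_records(const char *buf, size_t size)
{
	struct rec_head head;
	size_t count = 0;

	while (count + sizeof(head) <= size) {
		memcpy(&head, buf + count, sizeof(head));
		if (head.signature != REC_SIGNATURE)
			return (EINVAL);
		if (head.rec_size < sizeof(head) ||
		    count + head.rec_size > size)
			return (EINVAL);
		/* ... dispatch on head.type and consume the payload ... */
		count += REC_ALIGN(head.rec_size);
	}
	return (0);
}

int
main(void)
{
	char buf[32];
	struct rec_head head = { REC_SIGNATURE, 0, sizeof(head) };

	memset(buf, 0, sizeof(buf));
	memcpy(buf, &head, sizeof(head));
	printf("parse=%d\n", parse_records(buf, REC_ALIGN(head.rec_size)));
	return (0);
}
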
Example #3
/*
 * Rollback the specified PFS to (trunc_tid - 1), removing everything
 * greater or equal to trunc_tid.  The PFS must not have been in no-mirror
 * mode or the MIRROR_FILTERED scan will not work properly.
 *
 * This is typically used to remove any partial syncs when upgrading a
 * slave to a master.  It can theoretically also be used to rollback
 * any PFS, including PFS#0, BUT ONLY TO POINTS THAT HAVE NOT YET BEEN
 * PRUNED, and to points that are older only if they are on a retained
 * (pruning softlink) boundary.
 *
 * Rollbacks destroy information.  If you don't mind inode numbers changing
 * a better way would be to cpdup a snapshot back onto the master.
 */
static
int
hammer_pfs_rollback(hammer_transaction_t trans,
		    hammer_pseudofs_inmem_t pfsm,
		    hammer_tid_t trunc_tid)
{
	struct hammer_cmirror cmirror;
	struct hammer_cursor cursor;
	struct hammer_base_elm key_cur;
	int error;
	int seq;

	bzero(&cmirror, sizeof(cmirror));
	bzero(&key_cur, sizeof(key_cur));
	key_cur.localization = HAMMER_MIN_LOCALIZATION | pfsm->localization;
	key_cur.obj_id = HAMMER_MIN_OBJID;
	key_cur.key = HAMMER_MIN_KEY;
	key_cur.create_tid = 1;
	key_cur.rec_type = HAMMER_MIN_RECTYPE;

	seq = trans->hmp->flusher.done;

retry:
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		goto failed;
	}
	cursor.key_beg = key_cur;
	cursor.key_end.localization = HAMMER_MAX_LOCALIZATION |
				      pfsm->localization;
	cursor.key_end.obj_id = HAMMER_MAX_OBJID;
	cursor.key_end.key = HAMMER_MAX_KEY;
	cursor.key_end.create_tid = HAMMER_MAX_TID;
	cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	cursor.flags |= HAMMER_CURSOR_BACKEND;

	/*
	 * Do an optimized scan of only records created or modified
	 * >= trunc_tid, so we can fix up those records.  We must
	 * still check the TIDs but this greatly reduces the size of
	 * the scan.
	 */
	cursor.flags |= HAMMER_CURSOR_MIRROR_FILTERED;
	cursor.cmirror = &cmirror;
	cmirror.mirror_tid = trunc_tid;

	error = hammer_btree_first(&cursor);
	while (error == 0) {
		/*
		 * Abort the rollback.
		 */
		if (error == 0) {
			error = hammer_signal_check(trans->hmp);
			if (error)
				break;
		}

		/*
		 * We only care about leafs.  Internal nodes can be returned
		 * in mirror-filtered mode (they are used to generate SKIP
		 * mrecords), but we don't need them for this code.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 */
		cursor.flags |= HAMMER_CURSOR_ATEDISK;
		if (cursor.node->ondisk->type == HAMMER_BTREE_TYPE_LEAF) {
			key_cur = cursor.node->ondisk->elms[cursor.index].base;
			error = hammer_pfs_delete_at_cursor(&cursor, trunc_tid);
		}

		while (hammer_flusher_meta_halflimit(trans->hmp) ||
		       hammer_flusher_undo_exhausted(trans, 2)) {
			hammer_unlock_cursor(&cursor);
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async_one(trans->hmp);
		}

		if (error == 0)
			error = hammer_btree_iterate(&cursor);
	}
	if (error == ENOENT)
		error = 0;
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
failed:
	return(error);
}
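
Two details of the rollback loop generalize well: key_cur is refreshed from the last leaf element visited, and EDEADLK tears the whole cursor down and restarts from that saved key rather than unwinding in place. A stripped-down sketch of the retry skeleton follows; struct scan and scan_from() are hypothetical stand-ins for the cursor machinery.

#include <errno.h>
#include <stdio.h>

/* Hypothetical scan state standing in for a B-Tree cursor. */
struct scan {
	int resume_key;		/* last position successfully handled */
};

static int
scan_from(struct scan *s, int *attempts)
{
	/* Pretend the first pass deadlocks halfway through. */
	if ((*attempts)++ == 0) {
		s->resume_key = 50;
		return (EDEADLK);
	}
	s->resume_key = 100;
	return (0);
}

int
main(void)
{
	struct scan s = { 0 };
	int attempts = 0;
	int error;

retry:
	error = scan_from(&s, &attempts);
	if (error == EDEADLK)
		goto retry;	/* restart from s.resume_key, not from 0 */

	printf("error=%d resumed at %d after %d attempt(s)\n",
	       error, s.resume_key, attempts);
	return (0);
}
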
Example #4
int
hammer_ioc_prune(hammer_transaction_t trans, hammer_inode_t ip,
		 struct hammer_ioc_prune *prune)
{
	struct hammer_cursor cursor;
	hammer_btree_leaf_elm_t elm;
	struct hammer_ioc_prune_elm *copy_elms;
	struct hammer_ioc_prune_elm *user_elms;
	int error;
	int isdir;
	int elm_array_size;
	int seq;

	if (prune->nelms < 0 || prune->nelms > HAMMER_MAX_PRUNE_ELMS)
		return(EINVAL);
	if ((prune->key_beg.localization | prune->key_end.localization) &
	    HAMMER_LOCALIZE_PSEUDOFS_MASK) {
		return(EINVAL);
	}
	if (prune->key_beg.localization > prune->key_end.localization)
		return(EINVAL);
	if (prune->key_beg.localization == prune->key_end.localization) {
		if (prune->key_beg.obj_id > prune->key_end.obj_id)
			return(EINVAL);
		/* key-space limitations - no check needed */
	}
	if ((prune->head.flags & HAMMER_IOC_PRUNE_ALL) && prune->nelms)
		return(EINVAL);
/* 22 EINVAL */

	prune->key_cur.localization = (prune->key_end.localization &
					HAMMER_LOCALIZE_MASK) +
				      ip->obj_localization;
	prune->key_cur.obj_id = prune->key_end.obj_id;
	prune->key_cur.key = HAMMER_MAX_KEY;

	/*
	 * Copy element array from userland
	 */
	elm_array_size = sizeof(*copy_elms) * prune->nelms;
	user_elms = prune->elms;
	copy_elms = kmalloc(elm_array_size, M_TEMP, M_WAITOK);
	if ((error = copyin(user_elms, copy_elms, elm_array_size)) != 0)
		goto failed;
	prune->elms = copy_elms;

	seq = trans->hmp->flusher.done;

	/*
	 * Scan backwards.  Retries typically occur if a deadlock is detected.
	 */
retry:
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		goto failed;
	}
	cursor.key_beg.localization = (prune->key_beg.localization &
					HAMMER_LOCALIZE_MASK) +
				      ip->obj_localization;
	cursor.key_beg.obj_id = prune->key_beg.obj_id;
	cursor.key_beg.key = HAMMER_MIN_KEY;
	cursor.key_beg.create_tid = 1;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
	cursor.key_beg.obj_type = 0;

	cursor.key_end.localization = prune->key_cur.localization;
	cursor.key_end.obj_id = prune->key_cur.obj_id;
	cursor.key_end.key = prune->key_cur.key;
	cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
	cursor.key_end.delete_tid = 0;
	cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
	cursor.key_end.obj_type = 0;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	cursor.flags |= HAMMER_CURSOR_BACKEND;

	/*
	 * This flag allows the B-Tree code to clean up loose ends.  At
	 * the moment (XXX) it also means we have to hold the sync lock
	 * through the iteration.
	 */
	cursor.flags |= HAMMER_CURSOR_PRUNING;

	hammer_sync_lock_sh(trans);
	error = hammer_btree_last(&cursor);
	hammer_sync_unlock(trans);

	while (error == 0) {
		/*
		 * Check for work
		 */
		elm = &cursor.node->ondisk->elms[cursor.index].leaf;
		prune->key_cur = elm->base;

		/*
		 * Yield to more important tasks
		 */
		if ((error = hammer_signal_check(trans->hmp)) != 0)
			break;

		if (prune->stat_oldest_tid > elm->base.create_tid)
			prune->stat_oldest_tid = elm->base.create_tid;

		if (hammer_debug_general & 0x0200) {
			kprintf("check %016llx %016llx cre=%016llx del=%016llx\n",
					(long long)elm->base.obj_id,
					(long long)elm->base.key,
					(long long)elm->base.create_tid,
					(long long)elm->base.delete_tid);
		}
				
		if (prune_should_delete(prune, elm)) {
			if (hammer_debug_general & 0x0200) {
				kprintf("check %016llx %016llx: DELETE\n",
					(long long)elm->base.obj_id,
					(long long)elm->base.key);
			}

			/*
			 * NOTE: This can return EDEADLK
			 *
			 * Acquiring the sync lock guarantees that the
			 * operation will not cross a synchronization
			 * boundary (see the flusher).
			 *
			 * We don't need to track inodes or next_tid when
			 * we are destroying deleted records.
			 */
			isdir = (elm->base.rec_type == HAMMER_RECTYPE_DIRENTRY);

			hammer_sync_lock_sh(trans);
			error = hammer_delete_at_cursor(&cursor,
							HAMMER_DELETE_DESTROY,
							cursor.trans->tid,
							cursor.trans->time32,
							0, &prune->stat_bytes);
			hammer_sync_unlock(trans);
			if (error)
				break;

			if (isdir)
				++prune->stat_dirrecords;
			else
				++prune->stat_rawrecords;

			/*
			 * The current record might now be the one after
			 * the one we deleted, set ATEDISK to force us
			 * to skip it (since we are iterating backwards).
			 */
			cursor.flags |= HAMMER_CURSOR_ATEDISK;
		} else {
			/*
			 * Nothing to delete, but we may have to check other
			 * things.
			 */
			prune_check_nlinks(&cursor, elm);
			cursor.flags |= HAMMER_CURSOR_ATEDISK;
			if (hammer_debug_general & 0x0100) {
				kprintf("check %016llx %016llx: SKIP\n",
					(long long)elm->base.obj_id,
					(long long)elm->base.key);
			}
		}
		++prune->stat_scanrecords;

		/*
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 */
		while (hammer_flusher_meta_halflimit(trans->hmp) ||
		       hammer_flusher_undo_exhausted(trans, 2)) {
			hammer_unlock_cursor(&cursor);
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async_one(trans->hmp);
		}
		hammer_sync_lock_sh(trans);
		error = hammer_btree_iterate_reverse(&cursor);
		hammer_sync_unlock(trans);
	}
	if (error == ENOENT)
		error = 0;
	hammer_done_cursor(&cursor);
	if (error == EDEADLK)
		goto retry;
	if (error == EINTR) {
		prune->head.flags |= HAMMER_IOC_HEAD_INTR;
		error = 0;
	}
failed:
	prune->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	prune->elms = user_elms;
	kfree(copy_elms, M_TEMP);
	return(error);
}
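
Note how the ioctl temporarily points prune->elms at a kernel copy of the user array and restores the original pointer on every exit path, so the structure handed back to userland is unchanged. Below is a user-space sketch of that borrow-and-restore pattern; struct req, MAX_ELMS and process() are illustrative names only, with MAX_ELMS playing the role of HAMMER_MAX_PRUNE_ELMS.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define MAX_ELMS 64	/* hypothetical cap on the element count */

struct req {
	int nelms;
	int *elms;	/* caller-owned array */
};

/* Work on a private copy of the caller's array, then restore the pointer. */
static int
process(struct req *req)
{
	int *user_elms = req->elms;
	int *copy_elms;
	int i;

	if (req->nelms < 0 || req->nelms > MAX_ELMS)
		return (EINVAL);
	if (req->nelms == 0)
		return (0);

	copy_elms = malloc(sizeof(*copy_elms) * (size_t)req->nelms);
	if (copy_elms == NULL)
		return (ENOMEM);
	memcpy(copy_elms, user_elms, sizeof(*copy_elms) * (size_t)req->nelms);
	req->elms = copy_elms;

	for (i = 0; i < req->nelms; ++i)
		req->elms[i] *= 2;	/* stand-in for the real scan */

	req->elms = user_elms;		/* always hand the original back */
	free(copy_elms);
	return (0);
}

int
main(void)
{
	int elms[3] = { 1, 2, 3 };
	struct req req = { 3, elms };
	int error = process(&req);

	printf("error=%d elms still caller's? %s\n",
	       error, req.elms == elms ? "yes" : "no");
	return (0);
}
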
Example #5
int
hammer_ioc_reblock(hammer_transaction_t trans, hammer_inode_t ip,
		   struct hammer_ioc_reblock *reblock)
{
	struct hammer_cursor cursor;
	hammer_btree_elm_t elm;
	int checkspace_count;
	int error;
	int seq;
	int slop;

	/*
	 * A fill level <= 20% is considered an emergency.  free_level is
	 * inverted from fill_level.
	 */
	if (reblock->free_level >= HAMMER_LARGEBLOCK_SIZE * 8 / 10)
		slop = HAMMER_CHKSPC_EMERGENCY;
	else
		slop = HAMMER_CHKSPC_REBLOCK;

	if ((reblock->key_beg.localization | reblock->key_end.localization) &
	    HAMMER_LOCALIZE_PSEUDOFS_MASK) {
		return(EINVAL);
	}
	if (reblock->key_beg.obj_id >= reblock->key_end.obj_id)
		return(EINVAL);
	if (reblock->free_level < 0)
		return(EINVAL);

	reblock->key_cur = reblock->key_beg;
	reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	reblock->key_cur.localization += ip->obj_localization;

	checkspace_count = 0;
	seq = trans->hmp->flusher.done;
retry:
	error = hammer_init_cursor(trans, &cursor, NULL, NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		goto failed;
	}
	cursor.key_beg.localization = reblock->key_cur.localization;
	cursor.key_beg.obj_id = reblock->key_cur.obj_id;
	cursor.key_beg.key = HAMMER_MIN_KEY;
	cursor.key_beg.create_tid = 1;
	cursor.key_beg.delete_tid = 0;
	cursor.key_beg.rec_type = HAMMER_MIN_RECTYPE;
	cursor.key_beg.obj_type = 0;

	cursor.key_end.localization = (reblock->key_end.localization &
					HAMMER_LOCALIZE_MASK) +
				      ip->obj_localization;
	cursor.key_end.obj_id = reblock->key_end.obj_id;
	cursor.key_end.key = HAMMER_MAX_KEY;
	cursor.key_end.create_tid = HAMMER_MAX_TID - 1;
	cursor.key_end.delete_tid = 0;
	cursor.key_end.rec_type = HAMMER_MAX_RECTYPE;
	cursor.key_end.obj_type = 0;

	cursor.flags |= HAMMER_CURSOR_END_INCLUSIVE;
	cursor.flags |= HAMMER_CURSOR_BACKEND;
	cursor.flags |= HAMMER_CURSOR_NOSWAPCACHE;

	/*
	 * This flag allows the btree scan code to return internal nodes,
	 * so we can reblock them in addition to the leafs.  Only specify it
	 * if we intend to reblock B-Tree nodes.
	 */
	if (reblock->head.flags & HAMMER_IOC_DO_BTREE)
		cursor.flags |= HAMMER_CURSOR_REBLOCKING;

	error = hammer_btree_first(&cursor);
	while (error == 0) {
		/*
		 * Internal or Leaf node
		 */
		KKASSERT(cursor.index < cursor.node->ondisk->count);
		elm = &cursor.node->ondisk->elms[cursor.index];
		reblock->key_cur.obj_id = elm->base.obj_id;
		reblock->key_cur.localization = elm->base.localization;

		/*
		 * Yield to more important tasks
		 */
		if ((error = hammer_signal_check(trans->hmp)) != 0)
			break;

		/*
		 * If there is insufficient free space it may be due to
		 * reserved bigblocks, which flushing might fix.
		 *
		 * We must force a retest in case the unlocked cursor is
		 * moved to the end of the leaf, or moved to an internal
		 * node.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 */
		if (hammer_checkspace(trans->hmp, slop)) {
			if (++checkspace_count == 10) {
				error = ENOSPC;
				break;
			}
			hammer_unlock_cursor(&cursor);
			cursor.flags |= HAMMER_CURSOR_RETEST;
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async(trans->hmp, NULL);
			goto skip;
		}

		/*
		 * Acquiring the sync_lock prevents the operation from
		 * crossing a synchronization boundary.
		 *
		 * NOTE: cursor.node may have changed on return.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 */
		hammer_sync_lock_sh(trans);
		error = hammer_reblock_helper(reblock, &cursor, elm);
		hammer_sync_unlock(trans);

		while (hammer_flusher_meta_halflimit(trans->hmp) ||
		       hammer_flusher_undo_exhausted(trans, 2)) {
			hammer_unlock_cursor(&cursor);
			hammer_flusher_wait(trans->hmp, seq);
			hammer_lock_cursor(&cursor);
			seq = hammer_flusher_async_one(trans->hmp);
		}

		/*
		 * Setup for iteration, our cursor flags may be modified by
		 * other threads while we are unlocked.
		 */
		cursor.flags |= HAMMER_CURSOR_ATEDISK;

		/*
		 * We allocate data buffers, which atm we don't track
		 * dirty levels for because we allow the kernel to write
		 * them.  But if we allocate too many we can still deadlock
		 * the buffer cache.
		 *
		 * WARNING: See warnings in hammer_unlock_cursor() function.
		 *	    (The cursor's node and element may change!)
		 */
		if (bd_heatup()) {
			hammer_unlock_cursor(&cursor);
			bwillwrite(HAMMER_XBUFSIZE);
			hammer_lock_cursor(&cursor);
		}
		/* XXX vm_wait_nominal(); */
skip:
		if (error == 0) {
			error = hammer_btree_iterate(&cursor);
		}
	}
	if (error == ENOENT)
		error = 0;
	hammer_done_cursor(&cursor);
	if (error == EWOULDBLOCK) {
		hammer_flusher_sync(trans->hmp);
		goto retry;
	}
	if (error == EDEADLK)
		goto retry;
	if (error == EINTR) {
		reblock->head.flags |= HAMMER_IOC_HEAD_INTR;
		error = 0;
	}
failed:
	reblock->key_cur.localization &= HAMMER_LOCALIZE_MASK;
	return(error);
}
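
The low-space handling here is a bounded retry: kick the flusher, wait on its sequence number, and only after ten consecutive failures give up with ENOSPC. A minimal sketch of that back-off shape follows; space_is_low() and flush_and_wait() are hypothetical stand-ins for hammer_checkspace() and the flusher wait.

#include <errno.h>
#include <stdio.h>

static int free_chunks = 0;	/* pretend space appears as "flushes" complete */

/* Hypothetical helpers standing in for the checkspace/flusher calls. */
static int space_is_low(void)    { return (free_chunks < 3); }
static void flush_and_wait(void) { ++free_chunks; }

static int
reserve_space(void)
{
	int checkspace_count = 0;

	while (space_is_low()) {
		if (++checkspace_count == 10)
			return (ENOSPC);	/* flushing never freed enough */
		flush_and_wait();
	}
	return (0);
}

int
main(void)
{
	int error = reserve_space();

	printf("reserve=%d free_chunks=%d\n", error, free_chunks);
	return (0);
}
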
Example #6
/*
 * Flush the next sequence number until an open flush group is encountered
 * or we reach (next).  Not all sequence numbers will have flush groups
 * associated with them.  These require that the UNDO/REDO FIFO still be
 * flushed since it can take at least one additional run to synchronize
 * the FIFO, and more to also synchronize the reserve structures.
 */
static int
hammer_flusher_flush(hammer_mount_t hmp, int *nomorep)
{
	hammer_flusher_info_t info;
	hammer_flush_group_t flg;
	hammer_reserve_t resv;
	int count;
	int seq;

	/*
	 * Just in case there's a flush race on mount.  Seq number
	 * does not change.
	 */
	if (TAILQ_FIRST(&hmp->flusher.ready_list) == NULL) {
		*nomorep = 1;
		return (hmp->flusher.done);
	}
	*nomorep = 0;

	/*
	 * Flush the next sequence number.  Sequence numbers can exist
	 * without an assigned flush group, indicating that just a FIFO flush
	 * should occur.
	 */
	seq = hmp->flusher.done + 1;
	flg = TAILQ_FIRST(&hmp->flush_group_list);
	if (flg == NULL) {
		if (seq == hmp->flusher.next) {
			*nomorep = 1;
			return (hmp->flusher.done);
		}
	} else if (seq == flg->seq) {
		if (flg->closed) {
			KKASSERT(flg->running == 0);
			flg->running = 1;
			if (hmp->fill_flush_group == flg) {
				hmp->fill_flush_group =
					TAILQ_NEXT(flg, flush_entry);
			}
		} else {
			*nomorep = 1;
			return (hmp->flusher.done);
		}
	} else {
		/*
		 * Sequence number problems can only happen if a critical
		 * filesystem error occurred which forced the filesystem into
		 * read-only mode.
		 */
		KKASSERT(flg->seq - seq > 0 || hmp->ronly >= 2);
		flg = NULL;
	}

	/*
	 * We only do one flg but we may have to loop/retry.
	 *
	 * Due to various races it is possible to come across a flush
	 * group which has not yet been closed.
	 */
	count = 0;
	while (flg && flg->running) {
		++count;
		if (hammer_debug_general & 0x0001) {
			hdkprintf("%d ttl=%d recs=%d\n",
				flg->seq, flg->total_count, flg->refs);
		}
		if (hmp->flags & HAMMER_MOUNT_CRITICAL_ERROR)
			break;
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);

		/*
		 * If the previous flush cycle just about exhausted our
		 * UNDO space we may have to do a dummy cycle to move the
		 * first_offset up before actually digging into a new cycle,
		 * or the new cycle will not have sufficient undo space.
		 */
		if (hammer_flusher_undo_exhausted(&hmp->flusher.trans, 3))
			hammer_flusher_finalize(&hmp->flusher.trans, 0);

		KKASSERT(hmp->next_flush_group != flg);

		/*
		 * Place the flg in the flusher structure and start the
		 * slaves running.  The slaves will compete for inodes
		 * to flush.
		 *
		 * Make a per-thread copy of the transaction.
		 */
		while ((info = TAILQ_FIRST(&hmp->flusher.ready_list)) != NULL) {
			TAILQ_REMOVE(&hmp->flusher.ready_list, info, entry);
			info->flg = flg;
			info->runstate = 1;
			info->trans = hmp->flusher.trans;
			TAILQ_INSERT_TAIL(&hmp->flusher.run_list, info, entry);
			wakeup(&info->runstate);
		}

		/*
		 * Wait for all slaves to finish running
		 */
		while (TAILQ_FIRST(&hmp->flusher.run_list) != NULL)
			tsleep(&hmp->flusher.ready_list, 0, "hmrfcc", 0);

		/*
		 * Do the final finalization, clean up
		 */
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hmp->flusher.tid = hmp->flusher.trans.tid;

		hammer_done_transaction(&hmp->flusher.trans);

		/*
		 * Loop up on the same flg.  If the flg is done clean it up
		 * and break out.  We only flush one flg.
		 */
		if (RB_EMPTY(&flg->flush_tree)) {
			KKASSERT(flg->refs == 0);
			TAILQ_REMOVE(&hmp->flush_group_list, flg, flush_entry);
			kfree(flg, hmp->m_misc);
			break;
		}
		KKASSERT(TAILQ_FIRST(&hmp->flush_group_list) == flg);
	}

	/*
	 * We may have pure meta-data to flush, or we may have to finish
	 * cycling the UNDO FIFO, even if there were no flush groups.
	 */
	if (count == 0 && hammer_flusher_haswork(hmp)) {
		hammer_start_transaction_fls(&hmp->flusher.trans, hmp);
		hammer_flusher_finalize(&hmp->flusher.trans, 1);
		hammer_done_transaction(&hmp->flusher.trans);
	}

	/*
	 * Clean up any freed big-blocks (typically zone-2).
	 * resv->flush_group is typically set several flush groups ahead
	 * of the free to ensure that the freed block is not reused until
	 * it can no longer be reused.
	 */
	while ((resv = TAILQ_FIRST(&hmp->delay_list)) != NULL) {
		if (resv->flg_no - seq > 0)
			break;
		hammer_reserve_clrdelay(hmp, resv);
	}
	return (seq);
}
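
The master/slave handoff in this function is essentially a hand-rolled barrier: the master arms each idle slave with a per-thread copy of the work, wakes it, and then sleeps until the run list drains. Below is a user-space sketch of the same shape using pthreads; the names and the condition-variable plumbing are illustrative, not the kernel's tsleep()/wakeup() API.

#include <pthread.h>
#include <stdio.h>

#define NSLAVES 3

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  run_cv  = PTHREAD_COND_INITIALIZER;	/* wakes slaves */
static pthread_cond_t  done_cv = PTHREAD_COND_INITIALIZER;	/* wakes master */
static int runstate[NSLAVES];	/* 1 = slave has work, like info->runstate */
static int running;		/* slaves still on the "run list" */
static int stop;

static void *
slave(void *arg)
{
	int id = (int)(long)arg;

	pthread_mutex_lock(&lock);
	for (;;) {
		while (!runstate[id] && !stop)
			pthread_cond_wait(&run_cv, &lock);
		if (stop)
			break;
		pthread_mutex_unlock(&lock);
		printf("slave %d flushing its share\n", id);	/* the work */
		pthread_mutex_lock(&lock);
		runstate[id] = 0;
		if (--running == 0)
			pthread_cond_signal(&done_cv);
	}
	pthread_mutex_unlock(&lock);
	return (NULL);
}

int
main(void)
{
	pthread_t tid[NSLAVES];
	int i;

	for (i = 0; i < NSLAVES; ++i)
		pthread_create(&tid[i], NULL, slave, (void *)(long)i);

	/* Master: arm every slave, wake them, wait for the group to finish. */
	pthread_mutex_lock(&lock);
	for (i = 0; i < NSLAVES; ++i)
		runstate[i] = 1;
	running = NSLAVES;
	pthread_cond_broadcast(&run_cv);
	while (running != 0)
		pthread_cond_wait(&done_cv, &lock);
	stop = 1;
	pthread_cond_broadcast(&run_cv);
	pthread_mutex_unlock(&lock);

	for (i = 0; i < NSLAVES; ++i)
		pthread_join(tid[i], NULL);
	printf("flush group done\n");
	return (0);
}
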
Example #7
int
hammer_ioc_dedup(hammer_transaction_t trans, hammer_inode_t ip,
		 struct hammer_ioc_dedup *dedup)
{
	struct hammer_cursor cursor1, cursor2;
	int error;
	int seq;

	/*
	 * Enforce hammer filesystem version requirements
	 */
	if (trans->hmp->version < HAMMER_VOL_VERSION_FIVE) {
		kprintf("hammer: Filesystem must be upgraded to v5 "
			"before you can run dedup\n");
		return (EOPNOTSUPP); /* 95*/
	}

	/*
	 * Cursor1, return an error -> candidate goes to pass2 list
	 */
	error = hammer_init_cursor(trans, &cursor1, NULL, NULL);
	if (error)
		goto done_cursor;
	cursor1.key_beg = dedup->elm1;
	cursor1.flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(&cursor1);
	if (error)
		goto done_cursor;
	error = hammer_btree_extract(&cursor1, HAMMER_CURSOR_GET_LEAF |
						HAMMER_CURSOR_GET_DATA);
	if (error)
		goto done_cursor;

	/*
	 * Cursor2, return an error -> candidate goes to pass2 list
	 */
	error = hammer_init_cursor(trans, &cursor2, NULL, NULL);
	if (error)
		goto done_cursors;
	cursor2.key_beg = dedup->elm2;
	cursor2.flags |= HAMMER_CURSOR_BACKEND;

	error = hammer_btree_lookup(&cursor2);
	if (error)
		goto done_cursors;
	error = hammer_btree_extract(&cursor2, HAMMER_CURSOR_GET_LEAF |
						HAMMER_CURSOR_GET_DATA);
	if (error)
		goto done_cursors;

	/*
	 * Zone validation. We can't de-dup any of the other zones
	 * (BTREE or META) or bad things will happen.
	 *
	 * Return with error = 0, but set an INVALID_ZONE flag.
	 */
	error = validate_zone(cursor1.leaf->data_offset) +
			    validate_zone(cursor2.leaf->data_offset);
	if (error) {
		dedup->head.flags |= HAMMER_IOC_DEDUP_INVALID_ZONE;
		error = 0;
		goto done_cursors;
	}

	/*
	 * Comparison checks
	 *
	 * If zones don't match or data_len fields aren't the same
	 * we consider it to be a comparison failure.
	 *
	 * Return with error = 0, but set a CMP_FAILURE flag.
	 */
	if ((cursor1.leaf->data_offset & HAMMER_OFF_ZONE_MASK) !=
	    (cursor2.leaf->data_offset & HAMMER_OFF_ZONE_MASK)) {
		dedup->head.flags |= HAMMER_IOC_DEDUP_CMP_FAILURE;
		goto done_cursors;
	}
	if (cursor1.leaf->data_len != cursor2.leaf->data_len) {
		dedup->head.flags |= HAMMER_IOC_DEDUP_CMP_FAILURE;
		goto done_cursors;
	}

	/* byte-by-byte comparison to be sure */
	if (bcmp(cursor1.data, cursor2.data, cursor1.leaf->data_len)) {
		dedup->head.flags |= HAMMER_IOC_DEDUP_CMP_FAILURE;
		goto done_cursors;
	}

	/*
	 * Upgrade both cursors together to an exclusive lock
	 *
	 * Return an error -> candidate goes to pass2 list
	 */
	hammer_sync_lock_sh(trans);
	error = hammer_cursor_upgrade2(&cursor1, &cursor2);
	if (error) {
		hammer_sync_unlock(trans);
		goto done_cursors;
	}

	error = hammer_blockmap_dedup(cursor1.trans,
			cursor1.leaf->data_offset, cursor1.leaf->data_len);
	if (error) {
		if (error == ERANGE) {
			/*
			 * Return with error = 0, but set an UNDERFLOW flag
			 */
			dedup->head.flags |= HAMMER_IOC_DEDUP_UNDERFLOW;
			error = 0;
			goto downgrade_cursors;
		} else {
			/*
			 * Return an error -> block goes to pass2 list
			 */
			goto downgrade_cursors;
		}
	}

	/*
	 * cursor2's cache must be invalidated before calling
	 * hammer_blockmap_free(), otherwise it will not be able to
	 * invalidate the underlying data buffer.
	 */
	hammer_cursor_invalidate_cache(&cursor2);
	hammer_blockmap_free(cursor2.trans,
			cursor2.leaf->data_offset, cursor2.leaf->data_len);

	hammer_modify_node(cursor2.trans, cursor2.node,
			&cursor2.leaf->data_offset, sizeof(hammer_off_t));
	cursor2.leaf->data_offset = cursor1.leaf->data_offset;
	hammer_modify_node_done(cursor2.node);

downgrade_cursors:
	hammer_cursor_downgrade2(&cursor1, &cursor2);
	hammer_sync_unlock(trans);
done_cursors:
	hammer_done_cursor(&cursor2);
done_cursor:
	hammer_done_cursor(&cursor1);

	/*
	 * Avoid deadlocking the buffer cache
	 */
	seq = trans->hmp->flusher.done;
	while (hammer_flusher_meta_halflimit(trans->hmp) ||
	       hammer_flusher_undo_exhausted(trans, 2)) {
		hammer_flusher_wait(trans->hmp, seq);
		seq = hammer_flusher_async_one(trans->hmp);
	}
	return (error);
}
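
Two habits from the dedup path are worth copying: the cheap comparisons (zone, data_len) run before the byte-by-byte bcmp(), and every exit funnels through nested cleanup labels so the second cursor is only torn down if it was actually set up. Here is a user-space sketch of that staged compare plus goto-based cleanup; struct rec and the helper names are illustrative, with heap buffers standing in for the two candidate records.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct rec {			/* illustrative stand-in for a leaf record */
	size_t len;
	unsigned char *data;
};

/* Return 1 if the two records could be merged, 0 otherwise. */
static int
records_match(const struct rec *a, const struct rec *b)
{
	if (a->len != b->len)		/* cheap check first */
		return (0);
	return (memcmp(a->data, b->data, a->len) == 0);	/* then the bytes */
}

static int
dedup_pair(size_t len)
{
	struct rec a = { len, NULL };
	struct rec b = { len, NULL };
	int matched = 0;

	a.data = malloc(len);
	if (a.data == NULL)
		goto done;
	b.data = malloc(len);
	if (b.data == NULL)
		goto done_a;		/* only unwind what was set up */

	memset(a.data, 0xab, len);
	memset(b.data, 0xab, len);
	matched = records_match(&a, &b);

	free(b.data);
done_a:
	free(a.data);
done:
	return (matched);
}

int
main(void)
{
	printf("match=%d\n", dedup_pair(4096));
	return (0);
}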