Example #1
0
/*
 * Allocate an object id.
 *
 * TIDs are drawn from a per-directory-inode objid cache (dip->objid_cache).
 * Each cache entry pre-allocates a bulk run of OBJID_CACHE_BULK TIDs via
 * hammer_alloc_tid() and hands them out one per call, amortizing the cost
 * of the underlying TID allocation.
 */
hammer_tid_t
hammer_alloc_objid(hammer_mount_t hmp, hammer_inode_t dip)
{
	hammer_objid_cache_t ocp;
	hammer_tid_t tid;

	/*
	 * Ensure dip has an objid cache attached.  We loop because the
	 * allocation below can block, during which another thread may
	 * attach (or steal) a cache out from under us.
	 */
	while ((ocp = dip->objid_cache) == NULL) {
		if (hmp->objid_cache_count < OBJID_CACHE_SIZE) {
			/*
			 * Room for another cache entry: create one charged
			 * with a fresh bulk run of TIDs.
			 */
			ocp = kmalloc(sizeof(*ocp), hmp->m_misc,
				      M_WAITOK|M_ZERO);
			ocp->next_tid = hammer_alloc_tid(hmp, OBJID_CACHE_BULK);
			ocp->count = OBJID_CACHE_BULK;
			TAILQ_INSERT_HEAD(&hmp->objid_cache_list, ocp, entry);
			++hmp->objid_cache_count;
			/* may have blocked, recheck */
			if (dip->objid_cache == NULL) {
				dip->objid_cache = ocp;
				ocp->dip = dip;
			}
			/*
			 * NOTE(review): if the recheck fails the new entry
			 * stays on objid_cache_list with no owning dip and
			 * the loop retries using dip's (now attached) cache.
			 */
		} else {
			/*
			 * Cache limit reached: steal the entry at the head
			 * of the list, detaching it from its current owner
			 * (if any) and attaching it to dip.
			 */
			ocp = TAILQ_FIRST(&hmp->objid_cache_list);
			if (ocp->dip)
				ocp->dip->objid_cache = NULL;
			dip->objid_cache = ocp;
			ocp->dip = dip;
		}
	}
	/* pull the entry off the list while we consume a TID from it */
	TAILQ_REMOVE(&hmp->objid_cache_list, ocp, entry);

	/*
	 * The TID is incremented by 1 or by 16 depending what mode the
	 * mount is operating in.
	 */
	tid = ocp->next_tid;
	ocp->next_tid += (hmp->master_id < 0) ? 1 : HAMMER_MAX_MASTERS;

	if (--ocp->count == 0) {
		/* bulk run exhausted: detach from dip and free the entry */
		dip->objid_cache = NULL;
		--hmp->objid_cache_count;
		ocp->dip = NULL;
		kfree(ocp, hmp->m_misc);
	} else {
		/* still has TIDs: requeue at the tail of the list */
		TAILQ_INSERT_TAIL(&hmp->objid_cache_list, ocp, entry);
	}
	return(tid);
}
Example #2
0
/*
 * Start a transaction using a particular TID.  Used by the sync code.
 * This does not stall.
 *
 * This routine may only be called from the flusher thread.  We predispose
 * sync_lock_refs, implying serialization against the synchronization stage
 * (which the flusher is responsible for).
 */
void
hammer_start_transaction_fls(struct hammer_transaction *trans,
			     struct hammer_mount *hmp)
{
	struct timeval tv;
	int error;

	bzero(trans, sizeof(*trans));

	trans->type = HAMMER_TRANS_FLS;
	trans->hmp = hmp;
	trans->rootvol = hammer_get_root_volume(hmp, &error);
	KKASSERT(error == 0);
	trans->tid = hammer_alloc_tid(hmp, 1);
	trans->sync_lock_refs = 1;
	trans->flags = 0;

	getmicrotime(&tv);
	trans->time = (unsigned long)tv.tv_sec * 1000000ULL + tv.tv_usec;
	trans->time32 = (u_int32_t)tv.tv_sec;
}
Example #3
0
/*
 * Set the PFS hammer cleanup utility config record.  This is
 * different (newer than) the PFS config.
 *
 * Any existing CONFIG record for the root object is destroyed and a
 * new record holding config->config is created in its place.  The
 * per-ioctl error is reported via config->head.error; the function
 * itself always returns 0.
 *
 * This is kinda a hack.
 */
static
int
hammer_ioc_set_config(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_config *config)
{
	struct hammer_btree_leaf_elm leaf;
	struct hammer_cursor cursor;
	hammer_mount_t hmp = ip->hmp;
	int error;

again:
	/* restarted from scratch whenever a B-Tree operation deadlocks */
	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	/*
	 * Build the leaf element for the CONFIG record on the root object.
	 */
	bzero(&leaf, sizeof(leaf));
	leaf.base.obj_id = HAMMER_OBJID_ROOT;
	leaf.base.rec_type = HAMMER_RECTYPE_CONFIG;
	leaf.base.create_tid = hammer_alloc_tid(hmp, 1);
	leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	leaf.base.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
	leaf.base.key = 0;	/* page 0 */
	leaf.data_len = sizeof(struct hammer_config_data);

	cursor.key_beg = leaf.base;

	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF;

	/*
	 * Destroy any existing CONFIG record first.
	 */
	error = hammer_btree_lookup(&cursor);
	if (error == 0) {
		/*
		 * NOTE(review): the hammer_btree_extract_data() error is
		 * immediately overwritten by hammer_delete_at_cursor() --
		 * presumably intentional (the data is not used here), but
		 * worth confirming.
		 */
		error = hammer_btree_extract_data(&cursor);
		error = hammer_delete_at_cursor(&cursor, HAMMER_DELETE_DESTROY,
						0, 0, 0, NULL);
		if (error == EDEADLK) {
			hammer_done_cursor(&cursor);
			goto again;
		}
	}
	/* no prior record is fine */
	if (error == ENOENT)
		error = 0;
	if (error == 0) {
		/*
		 * NOTE: Must reload key_beg after an ASOF search because
		 *	 the create_tid may have been modified during the
		 *	 search.
		 */
		cursor.flags &= ~HAMMER_CURSOR_ASOF;
		cursor.key_beg = leaf.base;
		error = hammer_create_at_cursor(&cursor, &leaf,
						&config->config,
						HAMMER_CREATE_MODE_SYS);
		if (error == EDEADLK) {
			hammer_done_cursor(&cursor);
			goto again;
		}
	}
	config->head.error = error;
	hammer_done_cursor(&cursor);
	return(0);
}
Example #4
0
/*
 * Add a snapshot transaction id(s) to the list of snapshots.
 *
 * Creates one SNAPSHOT record per requested TID, keyed by the TID,
 * under the root object.  Per-ioctl errors (including EEXIST for an
 * already-recorded TID) are reported via snap->head.error; the function
 * itself returns 0 unless cursor initialization fails outright.
 *
 * NOTE: Records are created with an allocated TID.  If a flush cycle
 *	 is in progress the record may be synced in the current flush
 *	 cycle and the volume header will reflect the allocation of the
 *	 TID, but the synchronization point may not catch up to the
 *	 TID until the next flush cycle.
 */
static
int
hammer_ioc_add_snapshot(hammer_transaction_t trans, hammer_inode_t ip,
			struct hammer_ioc_snapshot *snap)
{
	hammer_mount_t hmp = ip->hmp;
	struct hammer_btree_leaf_elm leaf;
	struct hammer_cursor cursor;
	int error;

	/*
	 * Validate structure
	 */
	if (snap->count > HAMMER_SNAPS_PER_IOCTL)
		return (EINVAL);
	if (snap->index >= snap->count)
		return (EINVAL);

	hammer_lock_ex(&hmp->snapshot_lock);
again:
	/*
	 * (Re)initialize the cursor.  We restart here from scratch on
	 * EDEADLK, resuming at snap->index which tracks how many snaps
	 * have been committed so far.
	 */
	error = hammer_init_cursor(trans, &cursor, &ip->cache[0], NULL);
	if (error) {
		hammer_done_cursor(&cursor);
		return(error);
	}

	cursor.asof = HAMMER_MAX_TID;
	cursor.flags |= HAMMER_CURSOR_BACKEND | HAMMER_CURSOR_ASOF;

	/*
	 * Template leaf shared by all records; only base.key (the
	 * snapshot TID) varies per iteration.
	 */
	bzero(&leaf, sizeof(leaf));
	leaf.base.obj_id = HAMMER_OBJID_ROOT;
	leaf.base.rec_type = HAMMER_RECTYPE_SNAPSHOT;
	leaf.base.create_tid = hammer_alloc_tid(hmp, 1);
	leaf.base.btype = HAMMER_BTREE_TYPE_RECORD;
	leaf.base.localization = ip->obj_localization | HAMMER_LOCALIZE_INODE;
	leaf.data_len = sizeof(struct hammer_snapshot_data);

	while (snap->index < snap->count) {
		leaf.base.key = (int64_t)snap->snaps[snap->index].tid;
		cursor.key_beg = leaf.base;
		/* a successful lookup means this TID is already recorded */
		error = hammer_btree_lookup(&cursor);
		if (error == 0) {
			error = EEXIST;
			break;
		}

		/*
		 * NOTE: Must reload key_beg after an ASOF search because
		 *	 the create_tid may have been modified during the
		 *	 search.
		 */
		cursor.flags &= ~HAMMER_CURSOR_ASOF;
		cursor.key_beg = leaf.base;
		error = hammer_create_at_cursor(&cursor, &leaf,
						&snap->snaps[snap->index],
						HAMMER_CREATE_MODE_SYS);
		if (error == EDEADLK) {
			hammer_done_cursor(&cursor);
			goto again;
		}
		/* restore ASOF for the next iteration's lookup */
		cursor.flags |= HAMMER_CURSOR_ASOF;
		if (error)
			break;
		++snap->index;
	}
	snap->head.error = error;
	hammer_done_cursor(&cursor);
	hammer_unlock(&hmp->snapshot_lock);
	return(0);
}