Example #1
/*
 * The nvlist will be consumed by this call.
 */
static void
log_internal(nvlist_t *nvl, const char *operation, spa_t *spa,
             dmu_tx_t *tx, const char *fmt, va_list adx)
{
    char *msg;

    /*
     * If this is part of creating a pool, not everything is
     * initialized yet, so don't bother logging the internal events.
     * Likewise if the pool is not writeable.
     */
    if (tx->tx_txg == TXG_INITIAL || !spa_writeable(spa)) {
        fnvlist_free(nvl);
        return;
    }

    msg = kmem_vasprintf(fmt, adx);
    fnvlist_add_string(nvl, ZPOOL_HIST_INT_STR, msg);
    strfree(msg);

    fnvlist_add_string(nvl, ZPOOL_HIST_INT_NAME, operation);
    fnvlist_add_uint64(nvl, ZPOOL_HIST_TXG, tx->tx_txg);

    if (dmu_tx_is_syncing(tx)) {
        spa_history_log_sync(nvl, tx);
    } else {
        dsl_sync_task_nowait(spa_get_dsl(spa),
                             spa_history_log_sync, nvl, 0, ZFS_SPACE_CHECK_NONE, tx);
    }
    /* spa_history_log_sync() will free nvl */
}
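The nowait call above hands ownership of nvl to spa_history_log_sync(), which runs later in syncing context. A minimal sketch of the callback shape this contract assumes is shown below; example_history_sync is a hypothetical stand-in (the real spa_history_log_sync also writes the record to the pool's history object, elided here), and it is assumed to sit next to the code above so the usual ZFS headers are already available.

/*
 * Sketch only: the dsl_syncfunc_t contract assumed by the nowait call
 * above.  The MOS update is elided; the point is that the callback owns
 * its argument and must free it.
 */
static void
example_history_sync(void *arg, dmu_tx_t *tx)
{
	nvlist_t *nvl = arg;
	uint64_t txg = dmu_tx_get_txg(tx);

	/*
	 * Syncing-context work would go here, e.g. appending the record
	 * to the pool history object for this txg.
	 */
	(void) txg;

	/* The registrant gave up ownership; free the nvlist here. */
	fnvlist_free(nvl);
}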
Example #2
int
spa_history_log_nvl(spa_t *spa, nvlist_t *nvl)
{
    int err = 0;
    dmu_tx_t *tx;
    nvlist_t *nvarg;

    if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY || !spa_writeable(spa))
        return (SET_ERROR(EINVAL));

    tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
    err = dmu_tx_assign(tx, TXG_WAIT);
    if (err) {
        dmu_tx_abort(tx);
        return (err);
    }

    VERIFY0(nvlist_dup(nvl, &nvarg, KM_SLEEP));
    if (spa_history_zone() != NULL) {
        fnvlist_add_string(nvarg, ZPOOL_HIST_ZONE,
                           spa_history_zone());
    }
    fnvlist_add_uint64(nvarg, ZPOOL_HIST_WHO, crgetruid(CRED()));

    /* Kick this off asynchronously; errors are ignored. */
    dsl_sync_task_nowait(spa_get_dsl(spa), spa_history_log_sync,
                         nvarg, 0, ZFS_SPACE_CHECK_NONE, tx);
    dmu_tx_commit(tx);

    /* spa_history_log_sync will free nvarg */
    return (err);
}
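Because spa_history_log_nvl() works on a private copy (nvarg), its caller keeps ownership of nvl. The usage sketch below, modeled loosely on the spa_history_log() wrapper, illustrates that; example_history_log is a hypothetical name and the real wrapper's error handling may differ.

/*
 * Usage sketch: build an nvlist, log it, and free it ourselves; the
 * callee duplicates it before handing it to the sync task.
 */
int
example_history_log(spa_t *spa, const char *msg)
{
	nvlist_t *nvl = fnvlist_alloc();
	int err;

	fnvlist_add_string(nvl, ZPOOL_HIST_CMD, msg);
	err = spa_history_log_nvl(spa, nvl);
	fnvlist_free(nvl);

	return (err);
}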
Example #3
/*
 * Open-context function to add one entry to the new mapping.  The new
 * entry will be remembered and written from syncing context.
 */
static void
spa_condense_indirect_commit_entry(spa_t *spa,
    vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count)
{
	spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;

	ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst));

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;

	/*
	 * If this is the first entry committed this txg, kick off the sync
	 * task to write to the MOS on our behalf.
	 */
	if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) {
		dsl_sync_task_nowait(dmu_tx_pool(tx),
		    spa_condense_indirect_commit_sync, sci,
		    0, ZFS_SPACE_CHECK_NONE, tx);
	}

	vdev_indirect_mapping_entry_t *vime =
	    kmem_alloc(sizeof (*vime), KM_SLEEP);
	vime->vime_mapping = *vimep;
	vime->vime_obsolete_count = count;
	list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime);

	dmu_tx_commit(tx);
}
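The per-txg lists filled in above are drained by the sync task registered for that txg. The sketch below shows that syncing-context counterpart in outline only; example_condense_commit_sync and write_one_entry() are hypothetical stand-ins for the real spa_condense_indirect_commit_sync() and its mapping/MOS update.

/*
 * Sketch only: drain this txg's entry list in syncing context and
 * release the allocations made in open context.  write_one_entry() is
 * a hypothetical placeholder for the real mapping update.
 */
static void
example_condense_commit_sync(void *arg, dmu_tx_t *tx)
{
	spa_condensing_indirect_t *sci = arg;
	int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
	vdev_indirect_mapping_entry_t *vime;

	while ((vime = list_remove_head(
	    &sci->sci_new_mapping_entries[txgoff])) != NULL) {
		write_one_entry(sci, vime, tx);	/* hypothetical helper */
		kmem_free(vime, sizeof (*vime));
	}
}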
Example #4
/* Takes care of physical writing and limiting # of concurrent ZIOs. */
static int
vdev_initialize_write(vdev_t *vd, uint64_t start, uint64_t size, abd_t *data)
{
	spa_t *spa = vd->vdev_spa;

	/* Limit inflight initializing I/Os */
	mutex_enter(&vd->vdev_initialize_io_lock);
	while (vd->vdev_initialize_inflight >= zfs_initialize_limit) {
		cv_wait(&vd->vdev_initialize_io_cv,
		    &vd->vdev_initialize_io_lock);
	}
	vd->vdev_initialize_inflight++;
	mutex_exit(&vd->vdev_initialize_io_lock);

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	uint64_t txg = dmu_tx_get_txg(tx);

	spa_config_enter(spa, SCL_STATE_ALL, vd, RW_READER);
	mutex_enter(&vd->vdev_initialize_lock);

	if (vd->vdev_initialize_offset[txg & TXG_MASK] == 0) {
		uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
		*guid = vd->vdev_guid;

		/* This is the first write of this txg. */
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    vdev_initialize_zap_update_sync, guid, 2,
		    ZFS_SPACE_CHECK_RESERVED, tx);
	}

	/*
	 * We know the vdev struct will still be around since all
	 * consumers of vdev_free must stop the initialization first.
	 */
	if (vdev_initialize_should_stop(vd)) {
		mutex_enter(&vd->vdev_initialize_io_lock);
		ASSERT3U(vd->vdev_initialize_inflight, >, 0);
		vd->vdev_initialize_inflight--;
		mutex_exit(&vd->vdev_initialize_io_lock);
		spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
		mutex_exit(&vd->vdev_initialize_lock);
		dmu_tx_commit(tx);
		return (SET_ERROR(EINTR));
	}
	mutex_exit(&vd->vdev_initialize_lock);

	vd->vdev_initialize_offset[txg & TXG_MASK] = start + size;
	zio_nowait(zio_write_phys(spa->spa_txg_zio[txg & TXG_MASK], vd, start,
	    size, data, ZIO_CHECKSUM_OFF, vdev_initialize_cb, NULL,
	    ZIO_PRIORITY_INITIALIZING, ZIO_FLAG_CANFAIL, B_FALSE));
	/* vdev_initialize_cb releases SCL_STATE_ALL */

	dmu_tx_commit(tx);

	return (0);
}
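The zio completion callback is the other half of the throttle and locking above: it drops the inflight count, wakes any thread blocked in the cv_wait() loop, and releases SCL_STATE_ALL, as the comment in vdev_initialize_write() notes. The sketch below shows only that shape; the real vdev_initialize_cb also does progress and error accounting, which is elided.

/*
 * Sketch only: the completion side of the inflight throttle above.
 * Bytes-written/error bookkeeping done by the real callback is omitted.
 */
static void
example_initialize_cb(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;

	mutex_enter(&vd->vdev_initialize_io_lock);
	ASSERT3U(vd->vdev_initialize_inflight, >, 0);
	vd->vdev_initialize_inflight--;
	cv_broadcast(&vd->vdev_initialize_io_cv);
	mutex_exit(&vd->vdev_initialize_io_lock);

	/* Pairs with the spa_config_enter() taken before issuing the zio. */
	spa_config_exit(vd->vdev_spa, SCL_STATE_ALL, vd);
}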
Example #5
static void
vdev_initialize_change_state(vdev_t *vd, vdev_initializing_state_t new_state)
{
	ASSERT(MUTEX_HELD(&vd->vdev_initialize_lock));
	spa_t *spa = vd->vdev_spa;

	if (new_state == vd->vdev_initialize_state)
		return;

	/*
	 * Copy the vd's guid; it will be freed by the sync task.
	 */
	uint64_t *guid = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
	*guid = vd->vdev_guid;

	/*
	 * If we're suspending, then preserve the original start time.
	 */
	if (vd->vdev_initialize_state != VDEV_INITIALIZE_SUSPENDED) {
		vd->vdev_initialize_action_time = gethrestime_sec();
	}
	vd->vdev_initialize_state = new_state;

	dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
	VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
	dsl_sync_task_nowait(spa_get_dsl(spa), vdev_initialize_zap_update_sync,
	    guid, 2, ZFS_SPACE_CHECK_RESERVED, tx);

	switch (new_state) {
	case VDEV_INITIALIZE_ACTIVE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s activated", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_SUSPENDED:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s suspended", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_CANCELED:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s canceled", vd->vdev_path);
		break;
	case VDEV_INITIALIZE_COMPLETE:
		spa_history_log_internal(spa, "initialize", tx,
		    "vdev=%s complete", vd->vdev_path);
		break;
	default:
		panic("invalid state %llu", (unsigned long long)new_state);
	}

	dmu_tx_commit(tx);
}
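Both this function and vdev_initialize_write() hand a heap-allocated guid to vdev_initialize_zap_update_sync(), which, per the comment above, is responsible for freeing it. The sketch below shows only that ownership contract; example_initialize_zap_update_sync is a hypothetical stand-in, and the vdev lookup and ZAP update done by the real sync task are elided.

/*
 * Sketch only: the sync task consumes and frees the guid allocated by
 * its registrant; the per-txg ZAP update for the vdev is elided.
 */
static void
example_initialize_zap_update_sync(void *arg, dmu_tx_t *tx)
{
	uint64_t guid = *(uint64_t *)arg;
	uint64_t txg = dmu_tx_get_txg(tx);

	/* The callback owns the allocation made in open context. */
	kmem_free(arg, sizeof (uint64_t));

	/*
	 * Look up the vdev by guid and persist its initializing state
	 * and offset for this txg in its ZAP (elided).
	 */
	(void) guid;
	(void) txg;
}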
Example #6
/*
 * The nvlist will be consumed by this call.
 */
static void
log_internal(nvlist_t *nvl, const char *operation, spa_t *spa,
    dmu_tx_t *tx, const char *fmt, va_list adx)
{
	char *msg;
	va_list adx1;
	int size;

	/*
	 * If this is part of creating a pool, not everything is
	 * initialized yet, so don't bother logging the internal events.
	 * Likewise if the pool is not writeable.
	 */
	if (tx->tx_txg == TXG_INITIAL || !spa_writeable(spa)) {
		fnvlist_free(nvl);
		return;
	}

	va_copy(adx1, adx);
	size = vsnprintf(NULL, 0, fmt, adx1) + 1;
	msg = kmem_alloc(size, KM_PUSHPAGE);
	va_end(adx1);
	va_copy(adx1, adx);
	(void) vsprintf(msg, fmt, adx1);
	va_end(adx1);
	fnvlist_add_string(nvl, ZPOOL_HIST_INT_STR, msg);
	kmem_free(msg, size);

	fnvlist_add_string(nvl, ZPOOL_HIST_INT_NAME, operation);
	fnvlist_add_uint64(nvl, ZPOOL_HIST_TXG, tx->tx_txg);

	if (dmu_tx_is_syncing(tx)) {
		spa_history_log_sync(nvl, tx);
	} else {
		dsl_sync_task_nowait(spa_get_dsl(spa),
		    spa_history_log_sync, nvl, 0, tx);
	}
	/* spa_history_log_sync() will free nvl */
}