Example #1
/*
 * Sigh.  Inside a local zone, we don't have access to /etc/zfs/zpool.cache,
 * and we don't want to allow the local zone to see all the pools anyway.
 * So we have to invent the ZFS_IOC_CONFIG ioctl to grab the configuration
 * information for all pools visible within the zone.
 */
nvlist_t *
spa_all_configs(uint64_t *generation)
{
	nvlist_t *pools;
	spa_t *spa;

	if (*generation == spa_config_generation)
		return (NULL);

	VERIFY(nvlist_alloc(&pools, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	spa = NULL;
	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (INGLOBALZONE(curproc) ||
		    zone_dataset_visible(spa_name(spa), NULL)) {
			mutex_enter(&spa->spa_config_cache_lock);
			VERIFY(nvlist_add_nvlist(pools, spa_name(spa),
			    spa->spa_config) == 0);
			mutex_exit(&spa->spa_config_cache_lock);
		}
	}
	mutex_exit(&spa_namespace_lock);

	*generation = spa_config_generation;

	return (pools);
}
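
The generation counter lets a caller skip rebuilding the list when nothing has changed: pass in the last generation seen, and a NULL return means the cached copy is still current. A minimal, hypothetical caller sketch (cached_generation and cached_pools are illustrative names, not from the ZFS source):

/*
 * Hypothetical caller sketch: refresh a cached copy of the pool configs
 * only when the config generation has advanced.  The static variables
 * here are illustrative, not part of the original code.
 */
static uint64_t cached_generation;
static nvlist_t *cached_pools;

static void
refresh_pool_configs(void)
{
	nvlist_t *pools;

	pools = spa_all_configs(&cached_generation);
	if (pools == NULL)
		return;			/* generation unchanged; keep cache */

	nvlist_free(cached_pools);	/* nvlist_free(NULL) is a no-op */
	cached_pools = pools;
}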
Example #2
/*
 * Sigh.  Inside a local zone, we don't have access to /etc/zfs/zpool.cache,
 * and we don't want to allow the local zone to see all the pools anyway.
 * So we have to invent the ZFS_IOC_CONFIG ioctl to grab the configuration
 * information for all pools visible within the zone.
 */
nvlist_t *
spa_all_configs(uint64_t *generation)
{
	nvlist_t *pools;
	spa_t *spa = NULL;

	if (*generation == spa_config_generation)
		return (NULL);

	pools = fnvlist_alloc();

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (INGLOBALZONE(curproc) ||
		    zone_dataset_visible(spa_name(spa), NULL)) {
			mutex_enter(&spa->spa_props_lock);
			fnvlist_add_nvlist(pools, spa_name(spa),
			    spa->spa_config);
			mutex_exit(&spa->spa_props_lock);
		}
	}
	*generation = spa_config_generation;
	mutex_exit(&spa_namespace_lock);

	return (pools);
}
Example #3
/*
 * Create minors for specified pool, if pool is NULL create minors
 * for all available pools.
 */
int
zvol_create_minors(const char *pool)
{
	spa_t *spa = NULL;
	int error = 0;

	if (zvol_inhibit_dev)
		return (0);

	mutex_enter(&zvol_state_lock);
	if (pool) {
		error = dmu_objset_find_spa(NULL, pool, zvol_create_minors_cb,
		    NULL, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
	} else {
		mutex_enter(&spa_namespace_lock);
		while ((spa = spa_next(spa)) != NULL) {
			error = dmu_objset_find_spa(NULL,
			    spa_name(spa), zvol_create_minors_cb, NULL,
			    DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
			if (error)
				break;
		}
		mutex_exit(&spa_namespace_lock);
	}
	mutex_exit(&zvol_state_lock);

	return (error);
}
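
A minimal, hypothetical call-site sketch (the function name example_zvol_init is illustrative): passing NULL asks for minors on every imported pool, which is the common case at module load.

/* Hypothetical call site: create zvol minors for every imported pool. */
static int
example_zvol_init(void)
{
	int error;

	error = zvol_create_minors(NULL);	/* NULL == all pools */
	if (error)
		printf("ZFS: zvol_create_minors() failed: %d\n", error);

	return (error);
}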
Example #4
/*
 * Callback for device termination events, i.e., disks removed.
 */
bool IOkit_disk_removed_callback(void* target,
								 void* refCon,
								 IOService* newService,
								 IONotifier* notifier)
{
	OSObject *prop = 0;
	OSString* bsdnameosstr = 0;

	prop = newService->getProperty(kIOBSDNameKey, gIOServicePlane,
								   kIORegistryIterateRecursively);
	if (prop) {
		spa_t *spa = NULL;

		bsdnameosstr = OSDynamicCast(OSString, prop);
		printf("ZFS: Device removal detected: '%s'\n",
			   bsdnameosstr->getCStringNoCopy());

		for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa)) {
			vdev_t *vd;

			dprintf("ZFS: Scanning pool '%s'\n", spa_name(spa));

			vd = vdev_lookup_by_path(spa->spa_root_vdev,
			    bsdnameosstr->getCStringNoCopy());

			if (vd && vd->vdev_path) {
				printf("ZFS: Device '%s' removal requested\n",
				    vd->vdev_path);
				(void) thread_create(NULL, 0, vdev_close_thread,
				    vd, 0, &p0, TS_RUN, minclsyspri);

				vd->vdev_remove_wanted = B_TRUE;
				spa_async_request(spa, SPA_ASYNC_REMOVE);

				break;
			}
		} // for all spa

	} // if has BSDname

	return true;
}
Example #5
File: mmp.c Project: LLNL/zfs
void
mmp_signal_all_threads(void)
{
	spa_t *spa = NULL;

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa))) {
		if (spa->spa_state == POOL_STATE_ACTIVE)
			mmp_signal_thread(spa);
	}
	mutex_exit(&spa_namespace_lock);
}
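
Example #5 is the iteration idiom in its simplest form: start with spa = NULL, hold spa_namespace_lock, and call spa_next() until it returns NULL. A minimal sketch of the same walk (the pool counting is illustrative only):

/* Sketch only: count active pools with the same spa_next() walk. */
static int
count_active_pools(void)
{
	spa_t *spa = NULL;
	int count = 0;

	mutex_enter(&spa_namespace_lock);
	while ((spa = spa_next(spa)) != NULL) {
		if (spa->spa_state == POOL_STATE_ACTIVE)
			count++;
	}
	mutex_exit(&spa_namespace_lock);

	return (count);
}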
Example #6
/*
 * Synchronize all pools to disk.  This must be called with the namespace lock
 * held.
 */
void
spa_config_sync(void)
{
	spa_t *spa = NULL;
	nvlist_t *config;
	size_t buflen;
	char *buf;
	vnode_t *vp;
	int oflags = FWRITE | FTRUNC | FCREAT | FOFFMAX;
	char pathname[128];
	char pathname2[128];

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	/*
	 * Add all known pools to the configuration list, ignoring those with
	 * alternate root paths.
	 */
	spa = NULL;
	while ((spa = spa_next(spa)) != NULL) {
		mutex_enter(&spa->spa_config_cache_lock);
		if (spa->spa_config && spa->spa_name && spa->spa_root == NULL)
			VERIFY(nvlist_add_nvlist(config, spa->spa_name,
			    spa->spa_config) == 0);
		mutex_exit(&spa->spa_config_cache_lock);
	}

	/*
	 * Pack the configuration into a buffer.
	 */
	VERIFY(nvlist_size(config, &buflen, NV_ENCODE_XDR) == 0);

	buf = kmem_alloc(buflen, KM_SLEEP);

	VERIFY(nvlist_pack(config, &buf, &buflen, NV_ENCODE_XDR,
	    KM_SLEEP) == 0);

	/*
	 * Write the configuration to disk.  We need to do the traditional
	 * 'write to temporary file, sync, move over original' to make sure we
	 * always have a consistent view of the data.
	 */
	(void) snprintf(pathname, sizeof (pathname), "%s/%s", spa_config_dir,
	    ZPOOL_CACHE_TMP);

	if (vn_open(pathname, UIO_SYSSPACE, oflags, 0644, &vp, CRCREAT, 0) != 0)
		goto out;

	if (vn_rdwr(UIO_WRITE, vp, buf, buflen, 0, UIO_SYSSPACE,
	    0, RLIM64_INFINITY, kcred, NULL) == 0 &&
	    VOP_FSYNC(vp, FSYNC, kcred) == 0) {
		(void) snprintf(pathname2, sizeof (pathname2), "%s/%s",
		    spa_config_dir, ZPOOL_CACHE_FILE);
		(void) vn_rename(pathname, pathname2, UIO_SYSSPACE);
	}

	(void) VOP_CLOSE(vp, oflags, 1, 0, kcred);
	VN_RELE(vp);

out:
	(void) vn_remove(pathname, UIO_SYSSPACE, RMFILE);
	spa_config_generation++;

	kmem_free(buf, buflen);
	nvlist_free(config);
}
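
The "write to temporary file, sync, move over original" sequence is what keeps zpool.cache consistent across a crash: rename is atomic, so readers see either the old cache or the complete new one, never a partial write. A minimal userland sketch of the same pattern, assuming only POSIX file APIs (the helper name and paths are illustrative):

/* Illustrative userland sketch of the atomic-replace pattern above. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int
atomic_replace(const char *path, const char *tmppath,
    const void *buf, size_t buflen)
{
	int fd;

	fd = open(tmppath, O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd < 0)
		return (-1);

	/* For a sketch, treat any short write as a failure. */
	if (write(fd, buf, buflen) != (ssize_t)buflen || fsync(fd) != 0) {
		(void) close(fd);
		(void) unlink(tmppath);
		return (-1);
	}
	(void) close(fd);

	/* rename(2) atomically replaces the old file with the new one. */
	return (rename(tmppath, path));
}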
Example #7
/*
 * Synchronize pool configuration to disk.  This must be called with the
 * namespace lock held. Synchronizing the pool cache is typically done after
 * the configuration has been synced to the MOS. This exposes a window where
 * the MOS config will have been updated but the cache file has not. If
 * the system were to crash at that instant then the cached config may not
 * contain the correct information to open the pool and an explicit import
 * would be required.
 */
void
spa_config_sync(spa_t *target, boolean_t removing, boolean_t postsysevent)
{
	spa_config_dirent_t *dp, *tdp;
	nvlist_t *nvl;
	boolean_t ccw_failure;
	int error;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (rootdir == NULL || !(spa_mode_global & FWRITE))
		return;

	/*
	 * Iterate over all cachefiles for the pool, past or present.  When the
	 * cachefile is changed, the new one is pushed onto this list, allowing
	 * us to update previous cachefiles that no longer contain this pool.
	 */
	ccw_failure = B_FALSE;
	for (dp = list_head(&target->spa_config_list); dp != NULL;
	    dp = list_next(&target->spa_config_list, dp)) {
		spa_t *spa = NULL;
		if (dp->scd_path == NULL)
			continue;

		/*
		 * Iterate over all pools, adding any matching pools to 'nvl'.
		 */
		nvl = NULL;
		while ((spa = spa_next(spa)) != NULL) {
			nvlist_t *nvroot = NULL;
			/*
			 * Skip over our own pool if we're about to remove
			 * ourselves from the spa namespace or any pool that
			 * is readonly. Since we cannot guarantee that a
			 * readonly pool would successfully import upon reboot,
			 * we don't allow them to be written to the cache file.
			 */
			if ((spa == target && removing) ||
			    (spa_state(spa) == POOL_STATE_ACTIVE &&
			    !spa_writeable(spa)))
				continue;

			mutex_enter(&spa->spa_props_lock);
			tdp = list_head(&spa->spa_config_list);
			if (spa->spa_config == NULL ||
			    tdp->scd_path == NULL ||
			    strcmp(tdp->scd_path, dp->scd_path) != 0) {
				mutex_exit(&spa->spa_props_lock);
				continue;
			}

			if (nvl == NULL)
				VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME,
				    KM_SLEEP) == 0);

			VERIFY(nvlist_add_nvlist(nvl, spa->spa_name,
			    spa->spa_config) == 0);
			mutex_exit(&spa->spa_props_lock);

			if (nvlist_lookup_nvlist(nvl, spa->spa_name, &nvroot) == 0)
				spa_config_clean(nvroot);
		}

		error = spa_config_write(dp, nvl);
		if (error != 0)
			ccw_failure = B_TRUE;
		nvlist_free(nvl);
	}

	if (ccw_failure) {
		/*
		 * Keep trying so that configuration data is
		 * written if/when any temporary filesystem
		 * resource issues are resolved.
		 */
		if (target->spa_ccw_fail_time == 0) {
			zfs_ereport_post(FM_EREPORT_ZFS_CONFIG_CACHE_WRITE,
			    target, NULL, NULL, 0, 0);
		}
		target->spa_ccw_fail_time = gethrtime();
		spa_async_request(target, SPA_ASYNC_CONFIG_UPDATE);
	} else {
		/*
		 * Do not rate limit future attempts to update
		 * the config cache.
		 */
		target->spa_ccw_fail_time = 0;
	}

	/*
	 * Remove any config entries older than the current one.
	 */
	dp = list_head(&target->spa_config_list);
	while ((tdp = list_next(&target->spa_config_list, dp)) != NULL) {
		list_remove(&target->spa_config_list, tdp);
		if (tdp->scd_path != NULL)
			spa_strfree(tdp->scd_path);
		kmem_free(tdp, sizeof (spa_config_dirent_t));
	}

	spa_config_generation++;

	if (postsysevent)
		spa_event_notify(target, NULL, ESC_ZFS_CONFIG_SYNC);
}
Example #8
File: spa_config.c Project: koplover/zfs
/*
 * Synchronize pool configuration to disk.  This must be called with the
 * namespace lock held. Synchronizing the pool cache is typically done after
 * the configuration has been synced to the MOS. This exposes a window where
 * the MOS config will have been updated but the cache file has not. If
 * the system were to crash at that instant then the cached config may not
 * contain the correct information to open the pool and an explicit import
 * would be required.
 */
void
spa_config_sync(spa_t *target, boolean_t removing, boolean_t postsysevent)
{
	spa_config_dirent_t *dp, *tdp;
	nvlist_t *nvl;
	char *pool_name;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (rootdir == NULL || !(spa_mode_global & FWRITE))
		return;

	/*
	 * Iterate over all cachefiles for the pool, past or present.  When the
	 * cachefile is changed, the new one is pushed onto this list, allowing
	 * us to update previous cachefiles that no longer contain this pool.
	 */
	for (dp = list_head(&target->spa_config_list); dp != NULL;
	    dp = list_next(&target->spa_config_list, dp)) {
		spa_t *spa = NULL;
		if (dp->scd_path == NULL)
			continue;

		/*
		 * Iterate over all pools, adding any matching pools to 'nvl'.
		 */
		nvl = NULL;
		while ((spa = spa_next(spa)) != NULL) {
			/*
			 * Skip over our own pool if we're about to remove
			 * ourselves from the spa namespace or any pool that
			 * is readonly. Since we cannot guarantee that a
			 * readonly pool would successfully import upon reboot,
			 * we don't allow them to be written to the cache file.
			 */
			if ((spa == target && removing) ||
			    !spa_writeable(spa))
				continue;

			mutex_enter(&spa->spa_props_lock);
			tdp = list_head(&spa->spa_config_list);
			if (spa->spa_config == NULL ||
			    tdp->scd_path == NULL ||
			    strcmp(tdp->scd_path, dp->scd_path) != 0) {
				mutex_exit(&spa->spa_props_lock);
				continue;
			}

			if (nvl == NULL)
				VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME,
				    KM_SLEEP) == 0);

			if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME) {
				VERIFY0(nvlist_lookup_string(spa->spa_config,
					ZPOOL_CONFIG_POOL_NAME, &pool_name));
			} else
				pool_name = spa_name(spa);

			VERIFY(nvlist_add_nvlist(nvl, pool_name,
			    spa->spa_config) == 0);
			mutex_exit(&spa->spa_props_lock);
		}

		spa_config_write(dp, nvl);
		nvlist_free(nvl);
	}

	/*
	 * Remove any config entries older than the current one.
	 */
	dp = list_head(&target->spa_config_list);
	while ((tdp = list_next(&target->spa_config_list, dp)) != NULL) {
		list_remove(&target->spa_config_list, tdp);
		if (tdp->scd_path != NULL)
			spa_strfree(tdp->scd_path);
		kmem_free(tdp, sizeof (spa_config_dirent_t));
	}

	spa_config_generation++;

	if (postsysevent)
		spa_event_notify(target, NULL, FM_EREPORT_ZFS_CONFIG_SYNC);
}
Example #9
/*
 * Synchronize pool configuration to disk.  This must be called with the
 * namespace lock held.
 */
void
spa_config_sync(spa_t *target, boolean_t removing, boolean_t postsysevent)
{
	spa_config_dirent_t *dp, *tdp;
	nvlist_t *nvl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (rootdir == NULL || !(spa_mode_global & FWRITE))
		return;

	/*
	 * Iterate over all cachefiles for the pool, past or present.  When the
	 * cachefile is changed, the new one is pushed onto this list, allowing
	 * us to update previous cachefiles that no longer contain this pool.
	 */
	for (dp = list_head(&target->spa_config_list); dp != NULL;
	    dp = list_next(&target->spa_config_list, dp)) {
		spa_t *spa = NULL;
		if (dp->scd_path == NULL)
			continue;

		/*
		 * Iterate over all pools, adding any matching pools to 'nvl'.
		 */
		nvl = NULL;
		while ((spa = spa_next(spa)) != NULL) {
			if (spa == target && removing)
				continue;

			mutex_enter(&spa->spa_props_lock);
			tdp = list_head(&spa->spa_config_list);
			if (spa->spa_config == NULL ||
			    tdp->scd_path == NULL ||
			    strcmp(tdp->scd_path, dp->scd_path) != 0) {
				mutex_exit(&spa->spa_props_lock);
				continue;
			}

			if (nvl == NULL)
				VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME,
				    KM_SLEEP) == 0);

			VERIFY(nvlist_add_nvlist(nvl, spa->spa_name,
			    spa->spa_config) == 0);
			mutex_exit(&spa->spa_props_lock);
		}

		spa_config_write(dp, nvl);
		nvlist_free(nvl);
	}

	/*
	 * Remove any config entries older than the current one.
	 */
	dp = list_head(&target->spa_config_list);
	while ((tdp = list_next(&target->spa_config_list, dp)) != NULL) {
		list_remove(&target->spa_config_list, tdp);
		if (tdp->scd_path != NULL)
			spa_strfree(tdp->scd_path);
		kmem_free(tdp, sizeof (spa_config_dirent_t));
	}

	spa_config_generation++;

	if (postsysevent)
		spa_event_notify(target, NULL, FM_EREPORT_ZFS_CONFIG_SYNC);
}
Example #10
/*
 * Synchronize pool configuration to disk.  This must be called with the
 * namespace lock held.
 */
void
spa_config_sync(spa_t *target, boolean_t removing, boolean_t postsysevent)
{
	spa_t *spa = NULL;
	spa_config_dirent_t *dp, *tdp;
	nvlist_t *nvl;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	/*
	 * Iterate over all cachefiles for the pool, past or present.  When the
	 * cachefile is changed, the new one is pushed onto this list, allowing
	 * us to update previous cachefiles that no longer contain this pool.
	 */
	for (dp = list_head(&target->spa_config_list); dp != NULL;
	    dp = list_next(&target->spa_config_list, dp)) {
		spa = NULL;
		if (dp->scd_path == NULL)
			continue;

		/*
		 * Iterate over all pools, adding any matching pools to 'nvl'.
		 */
		nvl = NULL;
		while ((spa = spa_next(spa)) != NULL) {
			if (spa->spa_config == NULL || spa->spa_name == NULL)
				continue;

			if (spa == target && removing)
				continue;

#ifdef __APPLE__
			/* OS X - Omit disk based pools */
			if (vdev_contains_disks(spa->spa_root_vdev))
				continue;
#endif
			tdp = list_head(&spa->spa_config_list);
			ASSERT(tdp != NULL);
			if (tdp->scd_path == NULL ||
			    strcmp(tdp->scd_path, dp->scd_path) != 0)
				continue;

			if (nvl == NULL)
				VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME,
				    KM_SLEEP) == 0);

			VERIFY(nvlist_add_nvlist(nvl, spa->spa_name,
			    spa->spa_config) == 0);
		}

		spa_config_write(dp, nvl);
		nvlist_free(nvl);
	}

	/*
	 * Remove any config entries older than the current one.
	 */
	dp = list_head(&target->spa_config_list);
	while ((tdp = list_next(&target->spa_config_list, dp)) != NULL) {
		list_remove(&target->spa_config_list, tdp);
		if (tdp->scd_path != NULL)
			spa_strfree(tdp->scd_path);
		kmem_free(tdp, sizeof (spa_config_dirent_t));
	}

	spa_config_generation++;

	if (postsysevent)
		spa_event_notify(target, NULL, ESC_ZFS_CONFIG_SYNC);
}