Example #1
0
/*
 * Convert a device path/nvlist pair to an nvp_list_t
 * Used to parse the nvlist format when reading
 * /etc/devices/devid_cache
 */
static nvp_list_t *
devid_nvl2nvp(nvlist_t *nvl, char *name)
{
	nvp_devid_t *nvp;
	ddi_devid_t devid;
	uint_t nbytes;

	nvp = kmem_zalloc(sizeof (nvp_devid_t), KM_SLEEP);
	nvp->nvp_devpath = i_ddi_strdup(name, KM_SLEEP);
	nvp->nvp_devid = NULL;

	NVP_DEVID_DEBUG_PATH((nvp->nvp_devpath));

	/*
	 * If the sublist carries a devid for this path, validate it
	 * and keep a private copy; otherwise leave nvp_devid NULL.
	 */
	if (nvlist_lookup_byte_array(nvl, DP_DEVID_ID,
	    (uchar_t **)&devid, &nbytes) == 0) {
		if (ddi_devid_valid(devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "%s: invalid devid\n", nvp->nvp_devpath));
		} else {
			ASSERT(nbytes == ddi_devid_sizeof(devid));
			nvp->nvp_devid = kmem_alloc(nbytes, KM_SLEEP);
			(void) bcopy(devid, nvp->nvp_devid, nbytes);
			NVP_DEVID_DEBUG_DEVID((nvp->nvp_devid));
		}
	}

	return (NVPLIST(nvp));
}
Example #2
0
/*
 * Unpack a device path/nvlist pair to internal data list format.
 * Used to decode the nvlist format into the internal representation
 * when reading /etc/devices/devname_cache.
 * Note that the expiration counts are optional, for compatibility
 * with earlier instances of the cache.  If not present, the
 * expire counts are initialized to defaults.
 *
 * Returns 0 on success, -1 if the sublist name does not match or
 * the path array is missing.
 */
static int
sdev_ncache_unpack_nvlist(nvf_handle_t fd, nvlist_t *nvl, char *name)
{
	nvp_devname_t *np;
	char	**strs;
	int	*cnts;
	uint_t	nstrs, ncnts;
	int	rval, i;

	ASSERT(fd == sdevfd_handle);
	ASSERT(RW_WRITE_HELD(nvf_lock(fd)));

	/* name of the sublist must match what we created */
	if (strcmp(name, DP_DEVNAME_ID) != 0) {
		return (-1);
	}

	np = kmem_zalloc(sizeof (nvp_devname_t), KM_SLEEP);

	rval = nvlist_lookup_string_array(nvl,
	    DP_DEVNAME_NCACHE_ID, &strs, &nstrs);
	if (rval) {
		kmem_free(np, sizeof (nvp_devname_t));
		return (-1);
	}

	/* duplicate the paths out of the nvlist-owned storage */
	np->nvp_npaths = nstrs;
	np->nvp_paths = kmem_zalloc(nstrs * sizeof (char *), KM_SLEEP);
	for (i = 0; i < nstrs; i++) {
		np->nvp_paths[i] = i_ddi_strdup(strs[i], KM_SLEEP);
	}
	/* default every expire count; overridden below if present */
	np->nvp_expirecnts = kmem_zalloc(nstrs * sizeof (int), KM_SLEEP);
	for (i = 0; i < nstrs; i++) {
		np->nvp_expirecnts[i] = sdev_nc_expirecnt;
	}

	rval = nvlist_lookup_int32_array(nvl,
	    DP_DEVNAME_NC_EXPIRECNT_ID, &cnts, &ncnts);
	if (rval == 0) {
		ASSERT(ncnts == nstrs);
		/*
		 * Clamp defensively and iterate only over the clamped
		 * count: looping to nstrs here would read past the end
		 * of cnts[] if a corrupt cache supplied fewer counts
		 * than paths.
		 */
		ncnts = min(ncnts, nstrs);
		for (i = 0; i < ncnts; i++) {
			np->nvp_expirecnts[i] = cnts[i];
		}
	}

	list_insert_tail(nvf_list(sdevfd_handle), np);

	return (0);
}
Example #3
0
/*
 * Prepare to perform an update of the neg. cache backing store.
 * Snapshots the in-memory negative cache into a single
 * nvp_devname_t, queues it on the nvf handle's list, and wakes
 * the flush daemon to write it out.
 */
static void
sdev_ncache_write(void)
{
	sdev_nc_list_t	*ncl = sdev_ncache;
	nvp_devname_t	*np;
	sdev_nc_node_t	*lp;
	int		n, i;

	/* writes disabled: clear the in-progress flag and bail */
	if (sdev_cache_write_disable) {
		mutex_enter(&ncl->ncl_mutex);
		ncl->ncl_flags &= ~NCL_LIST_WRITING;
		mutex_exit(&ncl->ncl_mutex);
		return;
	}

	/* proper lock ordering here is essential */
	rw_enter(nvf_lock(sdevfd_handle), RW_WRITER);
	sdev_ncache_list_free(sdevfd_handle);

	/* read-lock the cache while copying entries out of it */
	rw_enter(&ncl->ncl_lock, RW_READER);
	n = ncl->ncl_nentries;
	ASSERT(n <= sdev_nc_max_entries);

	np = kmem_zalloc(sizeof (nvp_devname_t), KM_SLEEP);
	np->nvp_npaths = n;
	np->nvp_paths = kmem_zalloc(n * sizeof (char *), KM_SLEEP);
	np->nvp_expirecnts = kmem_zalloc(n * sizeof (int), KM_SLEEP);

	/* copy each cached name and its expire count into the snapshot */
	i = 0;
	for (lp = list_head(&ncl->ncl_list); lp;
	    lp = list_next(&ncl->ncl_list, lp)) {
		np->nvp_paths[i] = i_ddi_strdup(lp->ncn_name, KM_SLEEP);
		np->nvp_expirecnts[i] = lp->ncn_expirecnt;
		sdcmn_err5(("    %s %d\n",
		    np->nvp_paths[i], np->nvp_expirecnts[i]));
		i++;
	}

	rw_exit(&ncl->ncl_lock);

	/* queue the snapshot for write-out and wake the flush daemon */
	nvf_mark_dirty(sdevfd_handle);
	list_insert_tail(nvf_list(sdevfd_handle), np);
	rw_exit(nvf_lock(sdevfd_handle));

	nvf_wake_daemon();
}
Example #4
0
/*
 * Convert a device path/nvlist pair to an nvp_list_t
 * Used to parse the nvlist format when reading
 * /etc/devices/devname_cache
 *
 * Returns the new list element, or NULL if the sublist name does
 * not match or the path array is missing.
 */
static nvp_list_t *
sdev_nvl2nvp(nvlist_t *nvl, char *name)
{
	nvp_devname_t *np;
	char	**strs;
	int	*cnts;
	uint_t	nstrs, ncnts;
	int	rval, i;

	/* name of the sublist must match what we created */
	if (strcmp(name, DP_DEVNAME_ID) != 0) {
		return (NULL);
	}

	np = kmem_zalloc(sizeof (nvp_devname_t), KM_SLEEP);

	rval = nvlist_lookup_string_array(nvl,
	    DP_DEVNAME_NCACHE_ID, &strs, &nstrs);
	if (rval) {
		kmem_free(np, sizeof (nvp_devname_t));
		return (NULL);
	}

	/* duplicate the paths out of the nvlist-owned storage */
	np->nvp_npaths = nstrs;
	np->nvp_paths = kmem_zalloc(nstrs * sizeof (char *), KM_SLEEP);
	for (i = 0; i < nstrs; i++) {
		np->nvp_paths[i] = i_ddi_strdup(strs[i], KM_SLEEP);
	}
	/* default every expire count; overridden below if present */
	np->nvp_expirecnts = kmem_zalloc(nstrs * sizeof (int), KM_SLEEP);
	for (i = 0; i < nstrs; i++) {
		np->nvp_expirecnts[i] = 4; /* XXX sdev_nc_expirecnt */
	}

	/* expire counts are optional, for older cache instances */
	rval = nvlist_lookup_int32_array(nvl,
	    DP_DEVNAME_NC_EXPIRECNT_ID, &cnts, &ncnts);
	if (rval == 0) {
		ASSERT(ncnts == nstrs);
		/*
		 * Clamp to the smaller count (was max() with a loop to
		 * nstrs, which could read past the end of cnts[] on a
		 * corrupt cache where ncnts < nstrs).
		 */
		ncnts = min(ncnts, nstrs);
		for (i = 0; i < ncnts; i++) {
			np->nvp_expirecnts[i] = cnts[i];
		}
	}

	return (NVPLIST(np));
}
Example #5
0
/*
 * Allocate and initialize a new device address map.
 *
 * name:		map name (kstat unique)
 * mode:		style of address reports: per-address or fullset
 * map_opts:		map options
 * stable_usec:		# of quiescent microseconds before report/map is stable
 *
 * activate_arg:	address provider activation-callout private
 * activate_cb:		address provider activation callback handler
 * deactivate_cb:	address provider deactivation callback handler
 *
 * config_arg:		configuration-callout private
 * configure_cb:	class configuration callout
 * unconfig_cb:		class unconfiguration callout
 *
 * damapp:		pointer to map handle (return)
 *
 * Returns:	DAM_SUCCESS
 *		DAM_EINVAL	Invalid argument(s)
 *		DAM_FAILURE	General failure
 */
int
damap_create(char *name, damap_rptmode_t mode, int map_opts,
    int stable_usec, void *activate_arg, damap_activate_cb_t activate_cb,
    damap_deactivate_cb_t deactivate_cb,
    void *config_arg, damap_configure_cb_t configure_cb,
    damap_unconfig_cb_t unconfig_cb,
    damap_t **damapp)
{
	dam_t *mapp;

	/* a name and both class callouts are mandatory */
	if (name == NULL || configure_cb == NULL || unconfig_cb == NULL)
		return (DAM_EINVAL);

	mapp = kmem_zalloc(sizeof (*mapp), KM_SLEEP);

	/* basic map identity and reporting parameters */
	mapp->dam_name = i_ddi_strdup(name, KM_SLEEP);
	mapp->dam_size = 0;
	mapp->dam_rptmode = mode;
	mapp->dam_options = map_opts;
	mapp->dam_stable_ticks = drv_usectohz(stable_usec);

	/* provider activation and class configuration callouts */
	mapp->dam_activate_arg = activate_arg;
	mapp->dam_activate_cb = (activate_cb_t)activate_cb;
	mapp->dam_deactivate_cb = (deactivate_cb_t)deactivate_cb;
	mapp->dam_config_arg = config_arg;
	mapp->dam_configure_cb = (configure_cb_t)configure_cb;
	mapp->dam_unconfig_cb = (unconfig_cb_t)unconfig_cb;

	/* synchronization primitives and address bitsets */
	mutex_init(&mapp->dam_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&mapp->dam_sync_cv, NULL, CV_DRIVER, NULL);
	bitset_init(&mapp->dam_active_set);
	bitset_init(&mapp->dam_stable_set);
	bitset_init(&mapp->dam_report_set);

	*damapp = (damap_t *)mapp;

	DTRACE_PROBE5(damap__create,
	    char *, mapp->dam_name, damap_t *, mapp,
	    damap_rptmode_t, mode, int, map_opts, int, stable_usec);

	return (DAM_SUCCESS);
}
Example #6
0
/*
 * Search the devid cache, returning dev_t list for all
 * device paths mapping to the device identified by the
 * given devid.
 *
 * Primary interface used by ddi_lyr_devid_to_devlist()
 *
 * On success returns DDI_SUCCESS with a sorted, duplicate-free
 * dev_t array in *retdevts (*retndevts entries), allocated here
 * for the caller.  Returns DDI_FAILURE if the devid is invalid
 * or no matching dev_t is found.
 */
int
e_devid_cache_to_devt_list(ddi_devid_t devid, char *minor_name,
	int *retndevts, dev_t **retdevts)
{
	char		*path, **paths;
	int		i, j, n;
	dev_t		*devts, *udevts;
	dev_t		tdevt;
	int		ndevts, undevts, ndevts_alloced;
	dev_info_t	*devi, **devis;
	int		ndevis, npaths, nalloced;
	ddi_devid_t	match_devid;

	DEVID_LOG_FIND(("find", devid, NULL));

	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
	if (ddi_devid_valid(devid) != DDI_SUCCESS) {
		DEVID_LOG_ERR(("invalid devid", devid, NULL));
		return (DDI_FAILURE);
	}

	nalloced = 128;

	/*
	 * Collect the dev_info and path lists for this devid from the
	 * cache, growing the arrays and retrying until they are large
	 * enough.  Note: the cache lock is still held on the break path.
	 */
	for (;;) {
		paths = kmem_zalloc(nalloced * sizeof (char *), KM_SLEEP);
		devis = kmem_zalloc(nalloced * sizeof (dev_info_t *), KM_SLEEP);

		rw_enter(&dcfd->nvf_lock, RW_READER);
		n = e_devid_cache_devi_path_lists(devid, nalloced,
			&ndevis, devis, &npaths, paths);
		if (n <= nalloced)
			break;
		rw_exit(&dcfd->nvf_lock);
		/* arrays too small: drop holds, free, retry with more room */
		for (i = 0; i < ndevis; i++)
			ndi_rele_devi(devis[i]);
		kmem_free(paths, nalloced * sizeof (char *));
		kmem_free(devis, nalloced * sizeof (dev_info_t *));
		nalloced = n + 128;
	}

	/* duplicate the path strings before dropping the cache lock */
	for (i = 0; i < npaths; i++) {
		path = i_ddi_strdup(paths[i], KM_SLEEP);
		paths[i] = path;
	}
	rw_exit(&dcfd->nvf_lock);

	if (ndevis == 0 && npaths == 0) {
		DEVID_LOG_ERR(("no devid found", devid, NULL));
		kmem_free(paths, nalloced * sizeof (char *));
		kmem_free(devis, nalloced * sizeof (dev_info_t *));
		return (DDI_FAILURE);
	}

	/*
	 * Accumulate dev_t's for the requested minor name.  If the
	 * result buffer overflows, grow it and start over from scratch.
	 */
	ndevts_alloced = 128;
restart:
	ndevts = 0;
	devts = kmem_alloc(ndevts_alloced * sizeof (dev_t), KM_SLEEP);
	for (i = 0; i < ndevis; i++) {
		ASSERT(!DEVI_IS_ATTACHING(devis[i]));
		ASSERT(!DEVI_IS_DETACHING(devis[i]));
		e_devid_minor_to_devlist(devis[i], minor_name,
			ndevts_alloced, &ndevts, devts);
		if (ndevts > ndevts_alloced) {
			kmem_free(devts, ndevts_alloced * sizeof (dev_t));
			ndevts_alloced += 128;
			goto restart;
		}
	}
	/* resolve the remaining cached paths and re-verify their devid */
	for (i = 0; i < npaths; i++) {
		DEVID_LOG_LOOKUP((CE_CONT, "lookup %s\n", paths[i]));
		devi = e_ddi_hold_devi_by_path(paths[i], 0);
		if (devi == NULL) {
			DEVID_LOG_STALE(("stale device reference",
			    devid, paths[i]));
			continue;
		}
		/*
		 * Verify the newly attached device registered a matching devid
		 */
		if (i_ddi_devi_get_devid(DDI_DEV_T_ANY, devi,
		    &match_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "%s: no devid registered on attach\n",
			    paths[i]));
			ddi_release_devi(devi);
			continue;
		}

		if (ddi_devid_compare(devid, match_devid) != 0) {
			DEVID_LOG_STALE(("new devid registered",
			    devid, paths[i]));
			ddi_release_devi(devi);
			ddi_devid_free(match_devid);
			continue;
		}
		ddi_devid_free(match_devid);

		e_devid_minor_to_devlist(devi, minor_name,
			ndevts_alloced, &ndevts, devts);
		ddi_release_devi(devi);
		if (ndevts > ndevts_alloced) {
			kmem_free(devts,
			    ndevts_alloced * sizeof (dev_t));
			ndevts_alloced += 128;
			goto restart;
		}
	}

	/* drop hold from e_devid_cache_devi_path_lists */
	for (i = 0; i < ndevis; i++) {
		ndi_rele_devi(devis[i]);
	}
	/* free the duplicated path strings and the work arrays */
	for (i = 0; i < npaths; i++) {
		kmem_free(paths[i], strlen(paths[i]) + 1);
	}
	kmem_free(paths, nalloced * sizeof (char *));
	kmem_free(devis, nalloced * sizeof (dev_info_t *));

	if (ndevts == 0) {
		DEVID_LOG_ERR(("no devid found", devid, NULL));
		kmem_free(devts, ndevts_alloced * sizeof (dev_t));
		return (DDI_FAILURE);
	}

	/*
	 * Build the final list of sorted dev_t's with duplicates collapsed so
	 * returned results are consistent. This prevents implementation
	 * artifacts from causing unnecessary changes in SVM namespace.
	 */
	/* bubble sort */
	for (i = 0; i < (ndevts - 1); i++) {
		for (j = 0; j < ((ndevts - 1) - i); j++) {
			if (devts[j + 1] < devts[j]) {
				tdevt = devts[j];
				devts[j] = devts[j + 1];
				devts[j + 1] = tdevt;
			}
		}
	}

	/* determine number of unique values */
	for (undevts = ndevts, i = 1; i < ndevts; i++) {
		if (devts[i - 1] == devts[i])
			undevts--;
	}

	/* allocate unique */
	udevts = kmem_alloc(undevts * sizeof (dev_t), KM_SLEEP);

	/* copy unique (ndevts >= 1 here, so devts[0] is valid) */
	udevts[0] = devts[0];
	for (i = 1, j = 1; i < ndevts; i++) {
		if (devts[i - 1] != devts[i])
			udevts[j++] = devts[i];
	}
	ASSERT(j == undevts);

	kmem_free(devts, ndevts_alloced * sizeof (dev_t));

	*retndevts = undevts;
	*retdevts = udevts;

	return (DDI_SUCCESS);
}
Example #7
0
/*
 * For a given path to a boot device,
 * load that driver and all its parents.
 *
 * Returns the result of load_parent_drivers(), or -1 on failure.
 * (Previous code returned NULL from this int-returning function
 * and could dereference NULL when the path lacked '/' or '@'.)
 */
static int
load_bootpath_drivers(char *bootpath)
{
	dev_info_t	*dip;
	char		*pathcopy;
	int		pathcopy_len;
	int		rval;
	char		*p;

	if (bootpath == NULL || *bootpath == 0)
		return (-1);

	BMDPRINTF(("load_bootpath_drivers: %s\n", bootpath));

	/* work on a private copy; we truncate it in place below */
	pathcopy = i_ddi_strdup(bootpath, KM_SLEEP);
	pathcopy_len = strlen(pathcopy) + 1;

	dip = path_to_devinfo(pathcopy);

#if defined(__i386) || defined(__amd64)
	/*
	 * i386 does not provide stub nodes for all boot devices,
	 * but we should be able to find the node for the parent,
	 * and the leaf of the boot path should be the driver name,
	 * which we go ahead and load here.
	 */
	if (dip == NULL) {
		char	*leaf;

		/*
		 * Find last slash to build the full path to the
		 * parent of the leaf boot device
		 */
		p = strrchr(pathcopy, '/');
		if (p == NULL) {
			/* malformed bootpath: no component separator */
			kmem_free(pathcopy, pathcopy_len);
			return (-1);
		}
		*p++ = 0;

		/*
		 * Now isolate the driver name of the leaf device,
		 * stripping any unit-address suffix if present
		 */
		leaf = p;
		p = strchr(leaf, '@');
		if (p != NULL)
			*p = 0;

		BMDPRINTF(("load_bootpath_drivers: parent=%s leaf=%s\n",
		    bootpath, leaf));

		dip = path_to_devinfo(pathcopy);
		rval = load_boot_driver(leaf, NULL);
		if (rval == -1) {
			kmem_free(pathcopy, pathcopy_len);
			return (-1);
		}
	}
#endif

	if (dip == NULL) {
		cmn_err(CE_WARN, "can't bind driver for boot path <%s>",
		    bootpath);
		kmem_free(pathcopy, pathcopy_len);
		return (-1);
	}

	/*
	 * Load IP over IB driver when netbooting over IB.
	 * As per IB 1275 binding, IP over IB is represented as
	 * service on the top of the HCA node. So, there is no
	 * PROM node and generic framework cannot pre-load
	 * IP over IB driver based on the bootpath. The following
	 * code preloads IP over IB driver when doing netboot over
	 * InfiniBand.
	 */
	if (netboot_over_ib(bootpath) &&
	    modloadonly("drv", "ibd") == -1) {
		cmn_err(CE_CONT, "ibd: cannot load platform driver\n");
		kmem_free(pathcopy, pathcopy_len);
		return (-1);
	}

	/* get rid of minor node at end of copy (if not already done above) */
	p = strrchr(pathcopy, '/');
	if (p) {
		p = strchr(p, ':');
		if (p)
			*p = 0;
	}

	rval = load_parent_drivers(dip, pathcopy);
	kmem_free(pathcopy, pathcopy_len);
	return (rval);
}