Example #1
/*
 * Initialize memory controller's data structure and status.
 */
static int
fipe_mc_init(dev_info_t *dip)
{
	ddi_acc_handle_t handle;

	bzero(&fipe_mc_ctrl, sizeof (fipe_mc_ctrl));

	/* Take one hold on dip; it is released in fipe_mc_fini(). */
	ndi_hold_devi(dip);

	/* Set up the PCI configuration access handle. */
	if (pci_config_setup(dip, &handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "!fipe: failed to setup pcicfg handler in mc_init.");
		ndi_rele_devi(dip);
		return (-1);
	}

	/* Save original configuration. */
	fipe_mc_ctrl.mc_thrtctrl = pci_config_get8(handle, FIPE_MC_THRTCTRL);
	fipe_mc_ctrl.mc_thrtlow = pci_config_get8(handle, FIPE_MC_THRTLOW);
	fipe_mc_ctrl.mc_gblact = pci_config_get8(handle, FIPE_MC_GBLACT);
	fipe_mc_ctrl.mc_dip = dip;
	fipe_mc_ctrl.mc_pci_hdl = handle;
	fipe_mc_ctrl.mc_initialized = B_TRUE;

	return (0);
}
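
The hold taken above is deliberately kept until fipe_mc_fini() (example #3). When a hold is instead scoped to a single function, it must be balanced on every exit path. A minimal sketch of that discipline; my_use_device() and my_do_work() are hypothetical names, not part of the source:

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>

static int my_do_work(dev_info_t *);	/* hypothetical worker */

static int
my_use_device(dev_info_t *dip)
{
	ndi_hold_devi(dip);		/* prevent detach while we work */
	if (my_do_work(dip) != 0) {
		ndi_rele_devi(dip);	/* balance the hold on error */
		return (-1);
	}
	ndi_rele_devi(dip);		/* balance the hold on success */
	return (0);
}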
Example #2
int
iommulib_iommu_unregister(iommulib_handle_t handle)
{
	uint32_t unitid;
	dev_info_t *dip;
	int instance;
	const char *driver;
	iommulib_unit_t *unitp = (iommulib_unit_t *)handle;
	const char *f = "iommulib_iommu_unregister";

	ASSERT(unitp);

	mutex_enter(&iommulib_lock);
	mutex_enter(&unitp->ilu_lock);

	unitid = unitp->ilu_unitid;
	dip = unitp->ilu_dip;
	driver = ddi_driver_name(dip);
	instance = ddi_get_instance(dip);

	if (unitp->ilu_ref != 0) {
		mutex_exit(&unitp->ilu_lock);
		mutex_exit(&iommulib_lock);
		cmn_err(CE_WARN, "%s: %s%d: IOMMULIB handle is busy. Cannot "
		    "unregister IOMMULIB unitid %u",
		    f, driver, instance, unitid);
		return (DDI_FAILURE);
	}
	unitp->ilu_unitid = 0;
	ASSERT(unitp->ilu_ref == 0);

	if (unitp->ilu_prev == NULL) {
		iommulib_list = unitp->ilu_next;
	} else {
		unitp->ilu_prev->ilu_next = unitp->ilu_next;
	}
	if (unitp->ilu_next != NULL)
		unitp->ilu_next->ilu_prev = unitp->ilu_prev;

	iommulib_num_units--;

	mutex_exit(&unitp->ilu_lock);

	mutex_destroy(&unitp->ilu_lock);
	kmem_free(unitp, sizeof (iommulib_unit_t));

	mutex_exit(&iommulib_lock);

	cmn_err(CE_WARN, "%s: %s%d: IOMMULIB handle (unitid=%u) successfully "
	    "unregistered", f, driver, instance, unitid);

	ndi_rele_devi(dip);

	return (DDI_SUCCESS);
}
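
Since iommulib_iommu_unregister() drops a hold with ndi_rele_devi(), the matching registration must have taken one. A hedged pairing sketch, assuming the iommulib_iommu_register() signature from illumos's iommulib.h; my_register(), my_unregister(), and my_handle are hypothetical:

#include <sys/iommulib.h>

static iommulib_handle_t my_handle;	/* hypothetical state */

static int
my_register(dev_info_t *dip, iommulib_ops_t *ops)
{
	/* Registration takes its own hold on dip ... */
	return (iommulib_iommu_register(dip, ops, &my_handle));
}

static int
my_unregister(void)
{
	/* ... which iommulib_iommu_unregister() drops on success. */
	return (iommulib_iommu_unregister(my_handle));
}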
Example #3
/*
 * Restore memory controller's configuration and release resources.
 */
static void
fipe_mc_fini(void)
{
	if (fipe_mc_ctrl.mc_initialized) {
		fipe_mc_restore();
		pci_config_teardown(&fipe_mc_ctrl.mc_pci_hdl);
		ndi_rele_devi(fipe_mc_ctrl.mc_dip);
		fipe_mc_ctrl.mc_initialized = B_FALSE;
	}
	bzero(&fipe_mc_ctrl, sizeof (fipe_mc_ctrl));
}
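
A plausible calling context for the init/fini pair, sketched under the assumption that the driver wires them into its attach(9E)/detach(9E) entry points; fipe_attach() and fipe_detach() are hypothetical names:

static int
fipe_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);
	return (fipe_mc_init(dip) == 0 ? DDI_SUCCESS : DDI_FAILURE);
}

/*ARGSUSED*/
static int
fipe_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);
	fipe_mc_fini();		/* restores config and drops the hold */
	return (DDI_SUCCESS);
}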
Example #4
static void
fipe_ioat_fini(void)
{
	/* Release the hold taken by ddi_find_devinfo(). */
	if (fipe_ioat_ctrl.ioat_dev_info != NULL) {
		ndi_rele_devi(fipe_ioat_ctrl.ioat_dev_info);
		fipe_ioat_ctrl.ioat_dev_info = NULL;
	}

	if (fipe_ioat_ctrl.ioat_buf_start != NULL) {
		ASSERT(fipe_ioat_ctrl.ioat_buf_size != 0);
		kmem_free(fipe_ioat_ctrl.ioat_buf_start,
		    fipe_ioat_ctrl.ioat_buf_size);
	}

	mutex_destroy(&fipe_ioat_ctrl.ioat_lock);
	bzero(&fipe_ioat_ctrl, sizeof (fipe_ioat_ctrl));
}
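
The comment above refers to the hold taken by ddi_find_devinfo(); the pairing looks like this sketch (my_find_ioat() and the "ioat" node name are illustrative):

static void
my_find_ioat(void)
{
	/* ddi_find_devinfo() returns the dip with a hold taken. */
	dev_info_t *dip = ddi_find_devinfo("ioat", -1, 0);

	if (dip != NULL) {
		/* ... use dip ... */
		ndi_rele_devi(dip);	/* balance the lookup's hold */
	}
}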
Example #5
/*
 * Free resources allocated in fipe_ioat_alloc.
 */
static void
fipe_ioat_free(void)
{
	int idx = 0;
	dcopy_cmd_t *cmds = fipe_ioat_ctrl.ioat_cmds;

	mutex_enter(&fipe_ioat_ctrl.ioat_lock);

	/* Cancel timeout to avoid race condition. */
	if (fipe_ioat_ctrl.ioat_timerid != 0) {
		fipe_ioat_ctrl.ioat_cancel = B_TRUE;
		mutex_exit(&fipe_ioat_ctrl.ioat_lock);
		(void) untimeout(fipe_ioat_ctrl.ioat_timerid);
		mutex_enter(&fipe_ioat_ctrl.ioat_lock);
		fipe_ioat_ctrl.ioat_timerid = 0;
		fipe_ioat_ctrl.ioat_cancel = B_FALSE;
	}

	/* Free ioat resources. */
	if (fipe_ioat_ctrl.ioat_ready) {
		if (cmds[0] != NULL) {
			dcopy_cmd_free(&cmds[0]);
		}
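		/*
		 * Free only the first command found on the ring; dcopy
		 * frees the commands linked after it (see the matching
		 * comment in fipe_ioat_alloc, example #13).
		 */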
		for (idx = 1; idx <= FIPE_IOAT_CMD_NUM; idx++) {
			if (cmds[idx] != NULL) {
				dcopy_cmd_free(&cmds[idx]);
				break;
			}
		}
		bzero(fipe_ioat_ctrl.ioat_cmds,
		    sizeof (fipe_ioat_ctrl.ioat_cmds));
		dcopy_free(&fipe_ioat_ctrl.ioat_handle);
		fipe_ioat_ctrl.ioat_handle = NULL;
		fipe_ioat_ctrl.ioat_ready = B_FALSE;
	}

	/* Release the hold taken by ddi_find_devinfo(). */
	if (fipe_ioat_ctrl.ioat_dev_info != NULL) {
		ndi_rele_devi(fipe_ioat_ctrl.ioat_dev_info);
		fipe_ioat_ctrl.ioat_dev_info = NULL;
	}

	mutex_exit(&fipe_ioat_ctrl.ioat_lock);
}
Example #6
int
iommulib_nexus_unregister(iommulib_nexhandle_t handle)
{
	dev_info_t *dip;
	int instance;
	const char *driver;
	iommulib_nex_t *nexp = (iommulib_nex_t *)handle;
	const char *f = "iommulib_nexus_unregister";

	ASSERT(nexp);

	if (nexp->nex_ref != 0)
		return (DDI_FAILURE);

	mutex_enter(&iommulib_nexus_lock);

	dip = nexp->nex_dip;
	driver = ddi_driver_name(dip);
	instance = ddi_get_instance(dip);

	/* A future enhancement would be to add ref-counts */

	if (nexp->nex_prev == NULL) {
		iommulib_nexus_list = nexp->nex_next;
	} else {
		nexp->nex_prev->nex_next = nexp->nex_next;
	}

	if (nexp->nex_next != NULL)
		nexp->nex_next->nex_prev = nexp->nex_prev;

	mutex_exit(&iommulib_nexus_lock);

	kmem_free(nexp, sizeof (iommulib_nex_t));

	cmn_err(CE_NOTE, "!%s: %s%d: NEXUS (%s) handle successfully "
	    "unregistered from IOMMULIB", f, driver, instance,
	    ddi_node_name(dip));

	ndi_rele_devi(dip);

	return (DDI_SUCCESS);
}
Example #7
/* return path of first usb serial device */
static char *
plat_usbser_path(void)
{
	extern dev_info_t *usbser_first_device(void);

	dev_info_t *us_dip;
	static char *us_path = NULL;

	if (us_path)
		return (us_path);

	us_dip = usbser_first_device();
	if (us_dip == NULL)
		return (NULL);

	us_path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(us_dip, us_path);
	ndi_rele_devi(us_dip);	/* held from usbser_first_device */
	return (us_path);
}
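
A hypothetical consumer of plat_usbser_path(); note that the returned buffer is a cached static allocation, so callers must not free it:

static void
my_show_usbser(void)
{
	char *path = plat_usbser_path();

	if (path != NULL)
		cmn_err(CE_CONT, "usb serial device at %s\n", path);
	/* path points at a cached static buffer; do not kmem_free it. */
}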
Example #8
/*
 * Get the PROM name for a non-client dip node.  If an alternate
 * node for dip exists (it must be a PROM node), return it through
 * alt_dipp.
 */
static int
i_devi_to_promname(dev_info_t *dip, char *prom_path, dev_info_t **alt_dipp)
{
	dev_info_t *pdip, *cdip, *idip;
	char *unit_address, *nodename;
	major_t major;
	int depth, old_depth = 0;
	struct parinfo *parinfo = NULL;
	struct parinfo *info;
	int ret = 0;

	if (MDI_CLIENT(dip))
		return (EINVAL);

	if (ddi_pathname_obp(dip, prom_path) != NULL) {
		return (0);
	}
	/*
	 * ddi_pathname_obp() returned NULL, but the OBP path may still
	 * differ from the devfs path name, so use a stack of parents to
	 * compose the path name string layer by layer.
	 */

	/* find the closest ancestor which is a prom node */
	pdip = dip;
	parinfo = kmem_alloc(OBP_STACKDEPTH * sizeof (*parinfo),
	    KM_SLEEP);
	for (depth = 0; ndi_dev_is_prom_node(pdip) == 0; depth++) {
		if (depth == OBP_STACKDEPTH) {
			ret = EINVAL;
			/* must not have been an obp node */
			goto out;
		}
		pdip = get_parent(pdip, &parinfo[depth]);
	}
	old_depth = depth;
	ASSERT(pdip);	/* at least root is prom node */
	if (pdip)
		(void) ddi_pathname(pdip, prom_path);

	ndi_hold_devi(pdip);

	for (depth = old_depth; depth > 0; depth--) {
		info = &parinfo[depth - 1];
		idip = info->dip;
		nodename = ddi_node_name(idip);
		unit_address = ddi_get_name_addr(idip);

		if (pdip) {
			major = ddi_driver_major(idip);
			cdip = find_alternate_node(pdip, major);
			ndi_rele_devi(pdip);
			if (cdip) {
				nodename = ddi_node_name(cdip);
			}
		}

		/*
		 * node name + unitaddr to the prom_path
		 */
		(void) strcat(prom_path, "/");
		(void) strcat(prom_path, nodename);
		if (unit_address && (*unit_address)) {
			(void) strcat(prom_path, "@");
			(void) strcat(prom_path, unit_address);
		}
		pdip = cdip;
	}

	if (pdip) {
		ndi_rele_devi(pdip); /* hold from find_alternate_node */
	}
	/*
	 * Now pdip is the alternate node which is same hierarchy as dip
	 * if it exists.
	 */
	*alt_dipp = pdip;
out:
	if (parinfo) {
		/* release holds from get_parent() */
		for (depth = old_depth; depth > 0; depth--) {
			info = &parinfo[depth - 1];
			if (info && info->pdip)
				ndi_rele_devi(info->pdip);
		}
		kmem_free(parinfo, OBP_STACKDEPTH * sizeof (*parinfo));
	}
	return (ret);
}
Example #9
/*
 * Translate a devfs pathname to one that will be acceptable
 * to the prom.  In most cases, no translation is needed.
 * For systems supporting generically named devices, the prom
 * may support nodes such as 'disk' that do not have any unit
 * address information (i.e. target,lun info).  If this is the
 * case, the ddi framework will reject the node as invalid and
 * populate the devinfo tree with nodes from the .conf file
 * (e.g. sd).  In this case, the names that show up in /devices
 * are sd - since the prom only knows about 'disk' nodes, this
 * routine detects the situation and does the conversion.
 * There are also cases such as pluto where the disk node in the
 * prom is named "SUNW,ssd" but in /devices the name is "ssd".
 *
 * If MPxIO is enabled, the translation involves following
 * pathinfo nodes to the "best" parent.
 *
 * Return 0 on success with the new device string in ret_buf.
 * Otherwise return an appropriate error code, since we may be
 * called from the openprom driver.
 */
int
i_devname_to_promname(char *dev_name, char *ret_buf, size_t len)
{
	dev_info_t *dip, *pdip, *cdip, *alt_dip = NULL;
	mdi_pathinfo_t *pip = NULL;
	char *dev_path, *prom_path;
	char *unit_address, *minorname, *nodename;
	major_t major;
	char *rptr, *optr, *offline;
	size_t olen, rlen;
	int circ;
	int ret = 0;

	/* do some sanity checks */
	if ((dev_name == NULL) || (ret_buf == NULL) ||
	    (strlen(dev_name) > MAXPATHLEN)) {
		return (EINVAL);
	}

	/*
	 * Convert to a /devices name. Fail the translation if
	 * the name doesn't exist.
	 */
	dev_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	if (resolve_devfs_name(dev_name, dev_path) != 0 ||
	    strncmp(dev_path, "/devices/", 9) != 0) {
		kmem_free(dev_path, MAXPATHLEN);
		return (EINVAL);
	}
	dev_name = dev_path + sizeof ("/devices") - 1;

	bzero(ret_buf, len);

	if (prom_finddevice(dev_name) != OBP_BADNODE) {
		/* we are done */
		(void) snprintf(ret_buf, len, "%s", dev_name);
		kmem_free(dev_path, MAXPATHLEN);
		return (0);
	}

	/*
	 * if we get here, then some portion of the device path is
	 * not understood by the prom.  We need to look for alternate
	 * names (e.g. replace ssd by disk) and mpxio enabled devices.
	 */
	dip = e_ddi_hold_devi_by_path(dev_name, 0);
	if (dip == NULL) {
		cmn_err(CE_NOTE, "cannot find dip for %s", dev_name);
		kmem_free(dev_path, MAXPATHLEN);
		return (EINVAL);
	}

	prom_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	rlen = len;
	rptr = ret_buf;

	if (!MDI_CLIENT(dip)) {
		ret = i_devi_to_promname(dip, prom_path, &alt_dip);
		if (ret == 0) {
			minorname = strrchr(dev_name, ':');
			if (minorname && (minorname[1] != '\0')) {
				(void) strcat(prom_path, minorname);
			}
			(void) snprintf(rptr, rlen, "%s", prom_path);
		}
	} else {
		/*
		 * If we get here, dip is a vhci client.
		 */
		offline = kmem_zalloc(len, KM_SLEEP); /* offline paths */
		olen = len;
		optr = offline;
		/*
		 * The following code assumes that the phci client is at leaf
		 * level.
		 */
		ndi_devi_enter(dip, &circ);
		while ((pip = mdi_get_next_phci_path(dip, pip)) != NULL) {
			/*
			 * walk all paths associated to the client node
			 */
			bzero(prom_path, MAXPATHLEN);

			/*
			 * replace with mdi_hold_path() when mpxio goes into
			 * genunix
			 */
			MDI_PI_LOCK(pip);
			MDI_PI_HOLD(pip);
			MDI_PI_UNLOCK(pip);

			if (mdi_pi_pathname_obp(pip, prom_path) != NULL) {
				/*
				 * This pathinfo node has its own OBP path.
				 */
				goto minor_pathinfo;
			}

			pdip = mdi_pi_get_phci(pip);
			ndi_hold_devi(pdip);

			/*
			 * First get the OBP path name of the phci node.
			 * NOTE: if an alternate node for pdip exists,
			 * i_devi_to_promname() returns it through its
			 * third argument.
			 */
			(void) i_devi_to_promname(pdip, prom_path, &alt_dip);
			if (alt_dip != NULL) {
				ndi_rele_devi(pdip);
				pdip = alt_dip;
				ndi_hold_devi(pdip);
			}

			nodename = ddi_node_name(dip);
			unit_address = MDI_PI(pip)->pi_addr;

			major = ddi_driver_major(dip);
			cdip = find_alternate_node(pdip, major);

			if (cdip) {
				nodename = ddi_node_name(cdip);
			}
			/*
			 * node name + unitaddr to the prom_path
			 */
			(void) strcat(prom_path, "/");
			(void) strcat(prom_path, nodename);
			if (unit_address && (*unit_address)) {
				(void) strcat(prom_path, "@");
				(void) strcat(prom_path, unit_address);
			}
			if (cdip) {
				/* hold from find_alternate_node */
				ndi_rele_devi(cdip);
			}
			ndi_rele_devi(pdip);
minor_pathinfo:
			minorname = strrchr(dev_name, ':');
			if (minorname && (minorname[1] != '\0')) {
				(void) strcat(prom_path, minorname);
			}

			if (MDI_PI_IS_ONLINE(pip)) {
				(void) snprintf(rptr, rlen, "%s", prom_path);
				rlen -= strlen(rptr) + 1;
				rptr += strlen(rptr) + 1;
				if (rlen <= 0) /* drop paths we can't store */
					break;
			} else {	/* path is offline */
				(void) snprintf(optr, olen, "%s", prom_path);
				olen -= strlen(optr) + 1;
				if (olen > 0) /* drop paths we can't store */
					optr += strlen(optr) + 1;
			}
			MDI_PI_LOCK(pip);
			MDI_PI_RELE(pip);
			if (MDI_PI(pip)->pi_ref_cnt == 0)
				cv_broadcast(&MDI_PI(pip)->pi_ref_cv);
			MDI_PI_UNLOCK(pip);
		}
		ndi_devi_exit(dip, circ);
		ret = 0;
		if (rlen > 0) {
			/* now add as much of offline to ret_buf as possible */
			bcopy(offline, rptr, rlen);
		}
		kmem_free(offline, len);
	}
	/* release hold from e_ddi_hold_devi_by_path() */
	ndi_rele_devi(dip);
	ret_buf[len - 1] = '\0';
	ret_buf[len - 2] = '\0';
	kmem_free(dev_path, MAXPATHLEN);
	kmem_free(prom_path, MAXPATHLEN);

	return (ret);
}
Example #10
File: devctl.c  Project: andreiw/polaris
/*
 * Search the devid cache, returning dev_t list for all
 * device paths mapping to the device identified by the
 * given devid.
 *
 * Primary interface used by ddi_lyr_devid_to_devlist()
 */
int
e_devid_cache_to_devt_list(ddi_devid_t devid, char *minor_name,
	int *retndevts, dev_t **retdevts)
{
	char		*path, **paths;
	int		i, j, n;
	dev_t		*devts, *udevts;
	dev_t		tdevt;
	int		ndevts, undevts, ndevts_alloced;
	dev_info_t	*devi, **devis;
	int		ndevis, npaths, nalloced;
	ddi_devid_t	match_devid;

	DEVID_LOG_FIND(("find", devid, NULL));

	ASSERT(ddi_devid_valid(devid) == DDI_SUCCESS);
	if (ddi_devid_valid(devid) != DDI_SUCCESS) {
		DEVID_LOG_ERR(("invalid devid", devid, NULL));
		return (DDI_FAILURE);
	}

	nalloced = 128;

	for (;;) {
		paths = kmem_zalloc(nalloced * sizeof (char *), KM_SLEEP);
		devis = kmem_zalloc(nalloced * sizeof (dev_info_t *), KM_SLEEP);

		rw_enter(&dcfd->nvf_lock, RW_READER);
		n = e_devid_cache_devi_path_lists(devid, nalloced,
			&ndevis, devis, &npaths, paths);
		if (n <= nalloced)
			break;
		rw_exit(&dcfd->nvf_lock);
		for (i = 0; i < ndevis; i++)
			ndi_rele_devi(devis[i]);
		kmem_free(paths, nalloced * sizeof (char *));
		kmem_free(devis, nalloced * sizeof (dev_info_t *));
		nalloced = n + 128;
	}

	for (i = 0; i < npaths; i++) {
		path = i_ddi_strdup(paths[i], KM_SLEEP);
		paths[i] = path;
	}
	rw_exit(&dcfd->nvf_lock);

	if (ndevis == 0 && npaths == 0) {
		DEVID_LOG_ERR(("no devid found", devid, NULL));
		kmem_free(paths, nalloced * sizeof (char *));
		kmem_free(devis, nalloced * sizeof (dev_info_t *));
		return (DDI_FAILURE);
	}

	ndevts_alloced = 128;
restart:
	ndevts = 0;
	devts = kmem_alloc(ndevts_alloced * sizeof (dev_t), KM_SLEEP);
	for (i = 0; i < ndevis; i++) {
		ASSERT(!DEVI_IS_ATTACHING(devis[i]));
		ASSERT(!DEVI_IS_DETACHING(devis[i]));
		e_devid_minor_to_devlist(devis[i], minor_name,
			ndevts_alloced, &ndevts, devts);
		if (ndevts > ndevts_alloced) {
			kmem_free(devts, ndevts_alloced * sizeof (dev_t));
			ndevts_alloced += 128;
			goto restart;
		}
	}
	for (i = 0; i < npaths; i++) {
		DEVID_LOG_LOOKUP((CE_CONT, "lookup %s\n", paths[i]));
		devi = e_ddi_hold_devi_by_path(paths[i], 0);
		if (devi == NULL) {
			DEVID_LOG_STALE(("stale device reference",
			    devid, paths[i]));
			continue;
		}
		/*
		 * Verify the newly attached device registered a matching devid
		 */
		if (i_ddi_devi_get_devid(DDI_DEV_T_ANY, devi,
		    &match_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "%s: no devid registered on attach\n",
			    paths[i]));
			ddi_release_devi(devi);
			continue;
		}

		if (ddi_devid_compare(devid, match_devid) != 0) {
			DEVID_LOG_STALE(("new devid registered",
			    devid, paths[i]));
			ddi_release_devi(devi);
			ddi_devid_free(match_devid);
			continue;
		}
		ddi_devid_free(match_devid);

		e_devid_minor_to_devlist(devi, minor_name,
			ndevts_alloced, &ndevts, devts);
		ddi_release_devi(devi);
		if (ndevts > ndevts_alloced) {
			kmem_free(devts,
			    ndevts_alloced * sizeof (dev_t));
			ndevts_alloced += 128;
			goto restart;
		}
	}

	/* drop hold from e_devid_cache_devi_path_lists */
	for (i = 0; i < ndevis; i++) {
		ndi_rele_devi(devis[i]);
	}
	for (i = 0; i < npaths; i++) {
		kmem_free(paths[i], strlen(paths[i]) + 1);
	}
	kmem_free(paths, nalloced * sizeof (char *));
	kmem_free(devis, nalloced * sizeof (dev_info_t *));

	if (ndevts == 0) {
		DEVID_LOG_ERR(("no devid found", devid, NULL));
		kmem_free(devts, ndevts_alloced * sizeof (dev_t));
		return (DDI_FAILURE);
	}

	/*
	 * Build the final list of sorted dev_t's with duplicates collapsed so
	 * returned results are consistent. This prevents implementation
	 * artifacts from causing unnecessary changes in SVM namespace.
	 */
	/* bubble sort */
	for (i = 0; i < (ndevts - 1); i++) {
		for (j = 0; j < ((ndevts - 1) - i); j++) {
			if (devts[j + 1] < devts[j]) {
				tdevt = devts[j];
				devts[j] = devts[j + 1];
				devts[j + 1] = tdevt;
			}
		}
	}

	/* determine number of unique values */
	for (undevts = ndevts, i = 1; i < ndevts; i++) {
		if (devts[i - 1] == devts[i])
			undevts--;
	}

	/* allocate unique */
	udevts = kmem_alloc(undevts * sizeof (dev_t), KM_SLEEP);

	/* copy unique */
	udevts[0] = devts[0];
	for (i = 1, j = 1; i < ndevts; i++) {
		if (devts[i - 1] != devts[i])
			udevts[j++] = devts[i];
	}
	ASSERT(j == undevts);

	kmem_free(devts, ndevts_alloced * sizeof (dev_t));

	*retndevts = undevts;
	*retdevts = udevts;

	return (DDI_SUCCESS);
}
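
As the header notes, ddi_lyr_devid_to_devlist() is the primary consumer. A hedged caller sketch, assuming the standard ddi_lyr_devid_to_devlist()/ddi_lyr_free_devlist() interfaces; my_devid_lookup() is a hypothetical name:

static void
my_devid_lookup(ddi_devid_t devid, char *minor_name)
{
	dev_t	*devs;
	int	ndevs, i;

	if (ddi_lyr_devid_to_devlist(devid, minor_name,
	    &ndevs, &devs) != DDI_SUCCESS)
		return;
	for (i = 0; i < ndevs; i++)
		cmn_err(CE_CONT, "dev_t 0x%lx\n", (ulong_t)devs[i]);
	ddi_lyr_free_devlist(devs, ndevs);
}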
Example #11
File: devctl.c  Project: andreiw/polaris
/*
 * Search for cached entries matching a devid
 * Return two lists:
 *	a list of dev_info nodes, for those devices in the attached state
 *	a list of pathnames whose instances registered the given devid
 * If the lists passed in are not sufficient to return the matching
 * references, return the size of lists required.
 * The dev_info nodes are returned with a hold that the caller must release.
 */
static int
e_devid_cache_devi_path_lists(ddi_devid_t devid, int retmax,
	int *retndevis, dev_info_t **retdevis, int *retnpaths, char **retpaths)
{
	nvp_devid_t *np;
	int ndevis, npaths;
	dev_info_t *dip, *pdip;
	int circ;
	int maxdevis = 0;
	int maxpaths = 0;

	ndevis = 0;
	npaths = 0;
	for (np = NVF_DEVID_LIST(dcfd); np; np = NVP_DEVID_NEXT(np)) {
		if (np->nvp_devid == NULL)
			continue;
		if (ddi_devid_valid(np->nvp_devid) != DDI_SUCCESS) {
			DEVIDERR((CE_CONT,
			    "find: invalid devid %s\n",
			    np->nvp_devpath));
			continue;
		}
		if (ddi_devid_compare(devid, np->nvp_devid) == 0) {
			DEVID_DEBUG2((CE_CONT,
			    "find: devid match: %s 0x%x\n",
			    np->nvp_devpath, np->nvp_flags));
			DEVID_LOG_MATCH(("find", devid, np->nvp_devpath));
			DEVID_LOG_PATHS((CE_CONT, "%s\n", np->nvp_devpath));

			/*
			 * Check if we have a cached devinfo reference for this
			 * devid.  Place a hold on it to prevent detach.
			 * Otherwise, use the path instead.
			 * Note: returns with a hold on each dev_info
			 * node in the list.
			 */
			dip = NULL;
			if (np->nvp_flags & NVP_DEVID_DIP) {
				pdip = ddi_get_parent(np->nvp_dip);
				if (ndi_devi_tryenter(pdip, &circ)) {
					dip = np->nvp_dip;
					ndi_hold_devi(dip);
					ndi_devi_exit(pdip, circ);
					ASSERT(!DEVI_IS_ATTACHING(dip));
					ASSERT(!DEVI_IS_DETACHING(dip));
				} else {
					DEVID_LOG_DETACH((CE_CONT,
					    "may be detaching: %s\n",
					    np->nvp_devpath));
				}
			}

			if (dip) {
				if (ndevis < retmax) {
					retdevis[ndevis++] = dip;
				} else {
					ndi_rele_devi(dip);
				}
				maxdevis++;
			} else {
				if (npaths < retmax)
					retpaths[npaths++] = np->nvp_devpath;
				maxpaths++;
			}
		}
	}

	*retndevis = ndevis;
	*retnpaths = npaths;
	return (maxdevis > maxpaths ? maxdevis : maxpaths);
}
Example #12
static void
dr_resume_devices(dev_info_t *start, dr_sr_handle_t *srh)
{
	dr_handle_t	*handle;
	dev_info_t	*dip, *next, *last = NULL;
	major_t		major;
	char		*bn;
	int		circ;

	major = (major_t)-1;

	/* attach in reverse device tree order */
	while (last != start) {
		dip = start;
		next = ddi_get_next_sibling(dip);
		while (next != last && dip != srh->sr_failed_dip) {
			dip = next;
			next = ddi_get_next_sibling(dip);
		}
		if (dip == srh->sr_failed_dip) {
			/* release hold acquired in dr_suspend_devices() */
			srh->sr_failed_dip = NULL;
			ndi_rele_devi(dip);
		} else if (dr_is_real_device(dip) &&
				srh->sr_failed_dip == NULL) {

			if ((bn = ddi_binding_name(dip)) != NULL) {
				major = ddi_name_to_major(bn);
			} else {
				bn = "<null>";
			}
			if (!dr_bypass_device(bn) &&
				!drmach_verify_sr(dip, 0)) {
				char	d_name[40], d_alias[40], *d_info;

				d_name[0] = 0;
				d_info = ddi_get_name_addr(dip);
				if (d_info == NULL)
					d_info = "<null>";

				if (!dr_resolve_devname(dip, d_name,
								d_alias)) {
					if (d_alias[0] != 0) {
						prom_printf("\tresuming "
							"%s@%s (aka %s)\n",
							d_name, d_info,
							d_alias);
					} else {
						prom_printf("\tresuming "
							"%s@%s\n",
							d_name, d_info);
					}
				} else {
					prom_printf("\tresuming %s@%s\n",
						bn, d_info);
				}

				if (devi_attach(dip, DDI_RESUME) !=
							DDI_SUCCESS) {
					/*
					 * Print a console warning,
					 * set an e_code of ESBD_RESUME,
					 * and save the driver major
					 * number in the e_rsc.
					 */
					prom_printf("\tFAILED to resume %s@%s",
					    d_name[0] ? d_name : bn, d_info);

					srh->sr_err_idx =
						dr_add_int(srh->sr_err_ints,
						srh->sr_err_idx, DR_MAX_ERR_INT,
						(uint64_t)major);

					handle = srh->sr_dr_handlep;

					dr_op_err(CE_IGNORE, handle,
					    ESBD_RESUME, "%s@%s",
					    d_name[0] ? d_name : bn, d_info);
				}
			}
		}

		/* Hold parent busy while walking its children */
		ndi_devi_enter(dip, &circ);
		dr_resume_devices(ddi_get_child(dip), srh);
		ndi_devi_exit(dip, circ);
		last = dip;
	}
}
Example #13
/*ARGSUSED*/
static void
fipe_ioat_alloc(void *arg)
{
	int idx, flags, rc = 0;
	uint64_t physaddr;
	boolean_t fatal = B_FALSE;
	dcopy_query_t info;
	dcopy_handle_t handle;
	dcopy_cmd_t cmds[FIPE_IOAT_CMD_NUM + 1];

	mutex_enter(&fipe_ioat_ctrl.ioat_lock);
	/*
	 * fipe_ioat_alloc() is called in DEVICE ATTACH context when loaded.
	 * In DEVICE ATTACH context, it can't call ddi_walk_devs(), so just
	 * schedule a timer and exit.
	 */
	if (fipe_ioat_ctrl.ioat_try_alloc == B_FALSE) {
		fipe_ioat_ctrl.ioat_try_alloc = B_TRUE;
		mutex_exit(&fipe_ioat_ctrl.ioat_lock);
		goto out_error;
	}

	/*
	 * Check whether device has been initialized or if it encountered
	 * some permanent error.
	 */
	if (fipe_ioat_ctrl.ioat_ready || fipe_ioat_ctrl.ioat_failed ||
	    fipe_ioat_ctrl.ioat_cancel) {
		fipe_ioat_ctrl.ioat_timerid = 0;
		mutex_exit(&fipe_ioat_ctrl.ioat_lock);
		return;
	}

	if (fipe_ioat_ctrl.ioat_dev_info == NULL) {
		/* Find dev_info_t for IOAT engine. */
		ddi_walk_devs(ddi_root_node(), fipe_search_ioat_dev, NULL);
		if (fipe_ioat_ctrl.ioat_dev_info == NULL) {
			cmn_err(CE_NOTE,
			    "!fipe: no IOAT hardware found, disable pm.");
			mutex_exit(&fipe_ioat_ctrl.ioat_lock);
			fatal = B_TRUE;
			goto out_error;
		}
	}
	mutex_exit(&fipe_ioat_ctrl.ioat_lock);

	/* Check, allocate and initialize IOAT resources with lock released. */
	dcopy_query(&info);
	if (info.dq_version < DCOPY_QUERY_V0) {
		/* Permanent error, give up. */
		cmn_err(CE_WARN, "!fipe: IOAT driver version mismatch.");
		fatal = B_TRUE;
		goto out_error;
	} else if (info.dq_num_channels == 0) {
		/* IOAT driver hasn't been loaded, keep trying. */
		goto out_error;
	}

	/* Allocate IOAT channel. */
	rc = dcopy_alloc(DCOPY_NOSLEEP, &handle);
	if (rc == DCOPY_NORESOURCES) {
		/* Resource temporarily not available, keep trying. */
		goto out_error;
	} else if (rc != DCOPY_SUCCESS) {
		/* Permanent error, give up. */
		cmn_err(CE_WARN, "!fipe: failed to allocate IOAT channel.");
		fatal = B_TRUE;
		goto out_error;
	}

	/*
	 * Allocate multiple IOAT commands and organize them into a ring that
	 * loops forever. The number of commands is determined by the IOAT
	 * descriptor size and the memory interleave pattern.
	 * cmd[0] is used to break the loop and disable IOAT operation.
	 * cmd[1 .. FIPE_IOAT_CMD_NUM] are grouped into a ring and cmd[1] is
	 * the list head.
	 */
	bzero(cmds, sizeof (cmds));
	physaddr = fipe_ioat_ctrl.ioat_buf_physaddr;
	for (idx = FIPE_IOAT_CMD_NUM; idx >= 0; idx--) {
		/* Allocate IOAT commands. */
		if (idx == 0 || idx == FIPE_IOAT_CMD_NUM) {
			flags = DCOPY_NOSLEEP;
		} else {
			/*
			 * To link commands into a list, the initial value of
			 * cmd needs to be set to the next cmd on the list.
			 */
			flags = DCOPY_NOSLEEP | DCOPY_ALLOC_LINK;
			cmds[idx] = cmds[idx + 1];
		}
		rc = dcopy_cmd_alloc(handle, flags, &cmds[idx]);
		if (rc == DCOPY_NORESOURCES) {
			goto out_freecmd;
		} else if (rc != DCOPY_SUCCESS) {
			/* Permanent error, give up. */
			cmn_err(CE_WARN,
			    "!fipe: failed to allocate IOAT command.");
			fatal = B_TRUE;
			goto out_freecmd;
		}

		/* Disable src/dst snoop to improve CPU cache efficiency. */
		cmds[idx]->dp_flags = DCOPY_CMD_NOSRCSNP | DCOPY_CMD_NODSTSNP;
		/* Specially handle commands on the list. */
		if (idx != 0) {
			/* Disable IOAT status. */
			cmds[idx]->dp_flags |= DCOPY_CMD_NOSTAT;
			/* Disable waiting for resources. */
			cmds[idx]->dp_flags |= DCOPY_CMD_NOWAIT;
			if (idx == 1) {
				/* The list head, chain command into loop. */
				cmds[idx]->dp_flags |= DCOPY_CMD_LOOP;
			} else {
				/* Queue all other commands except head. */
				cmds[idx]->dp_flags |= DCOPY_CMD_QUEUE;
			}
		}
		cmds[idx]->dp_cmd = DCOPY_CMD_COPY;
		cmds[idx]->dp.copy.cc_source = physaddr;
		cmds[idx]->dp.copy.cc_dest = physaddr + FIPE_MC_MEMORY_OFFSET;
		if (idx == 0) {
			/*
			 * Command 0 is used to cancel memory copy by breaking
			 * the ring created in fipe_ioat_trigger().
			 * For efficiency, use the smallest memory copy size.
			 */
			cmds[idx]->dp.copy.cc_size = 1;
		} else {
			cmds[idx]->dp.copy.cc_size = FIPE_MC_MEMORY_SIZE;
		}
	}

	/* Update IOAT control status if it hasn't been initialized yet. */
	mutex_enter(&fipe_ioat_ctrl.ioat_lock);
	if (!fipe_ioat_ctrl.ioat_ready && !fipe_ioat_ctrl.ioat_cancel) {
		fipe_ioat_ctrl.ioat_handle = handle;
		for (idx = 0; idx <= FIPE_IOAT_CMD_NUM; idx++) {
			fipe_ioat_ctrl.ioat_cmds[idx] = cmds[idx];
		}
		fipe_ioat_ctrl.ioat_ready = B_TRUE;
		fipe_ioat_ctrl.ioat_failed = B_FALSE;
		fipe_ioat_ctrl.ioat_timerid = 0;
		mutex_exit(&fipe_ioat_ctrl.ioat_lock);
		return;
	}
	mutex_exit(&fipe_ioat_ctrl.ioat_lock);
	/* Initialized by another thread, fall through to free resources. */

out_freecmd:
	if (cmds[0] != NULL) {
		dcopy_cmd_free(&cmds[0]);
	}
	/* Only the head needs to be freed; dcopy frees all commands on the list. */
	for (idx = 1; idx <= FIPE_IOAT_CMD_NUM; idx++) {
		if (cmds[idx] != NULL) {
			dcopy_cmd_free(&cmds[idx]);
			break;
		}
	}
	dcopy_free(&handle);

out_error:
	mutex_enter(&fipe_ioat_ctrl.ioat_lock);
	fipe_ioat_ctrl.ioat_timerid = 0;
	if (!fipe_ioat_ctrl.ioat_ready && !fipe_ioat_ctrl.ioat_cancel) {
		if (fatal) {
			/* Mark permanent error and give up. */
			fipe_ioat_ctrl.ioat_failed = B_TRUE;
			/* Release the hold taken by ddi_find_devinfo(). */
			if (fipe_ioat_ctrl.ioat_dev_info != NULL) {
				ndi_rele_devi(fipe_ioat_ctrl.ioat_dev_info);
				fipe_ioat_ctrl.ioat_dev_info = NULL;
			}
		} else {
			/*
			 * Schedule another timer to keep on trying.
			 * timeout() should always succeed, no need to check
			 * the return value.
			 */
			fipe_ioat_ctrl.ioat_timerid = timeout(fipe_ioat_alloc,
			    NULL, drv_usectohz(FIPE_IOAT_RETRY_INTERVAL));
		}
	}
	mutex_exit(&fipe_ioat_ctrl.ioat_lock);
}
Example #14
/*ARGSUSED*/
static void
fipe_ioat_alloc(void *arg)
{
	int rc = 0, nregs;
	dev_info_t *dip;
	ddi_device_acc_attr_t attr;
	boolean_t fatal = B_FALSE;

	mutex_enter(&fipe_ioat_ctrl.ioat_lock);
	/*
	 * fipe_ioat_alloc() is called in DEVICE ATTACH context when loaded.
	 * In DEVICE ATTACH context, it can't call ddi_walk_devs(), so just
	 * schedule a timer and exit.
	 */
	if (fipe_ioat_ctrl.ioat_try_alloc == B_FALSE) {
		fipe_ioat_ctrl.ioat_try_alloc = B_TRUE;
		goto out_error;
	}

	/* Check whether it has been initialized or hit a permanent error. */
	if (fipe_ioat_ctrl.ioat_ready || fipe_ioat_ctrl.ioat_failed ||
	    fipe_ioat_ctrl.ioat_cancel) {
		fipe_ioat_ctrl.ioat_timerid = 0;
		mutex_exit(&fipe_ioat_ctrl.ioat_lock);
		return;
	}

	if (fipe_ioat_ctrl.ioat_dev_info == NULL) {
		/* Find dev_info_t for IOAT engine. */
		ddi_walk_devs(ddi_root_node(), fipe_search_ioat_dev, NULL);
		if (fipe_ioat_ctrl.ioat_dev_info == NULL) {
			cmn_err(CE_NOTE,
			    "!fipe: no IOAT hardware found, disable pm.");
			fatal = B_TRUE;
			goto out_error;
		}
	}

	/* Map in IOAT control register window. */
	ASSERT(fipe_ioat_ctrl.ioat_dev_info != NULL);
	ASSERT(fipe_ioat_ctrl.ioat_reg_mapped == B_FALSE);
	dip = fipe_ioat_ctrl.ioat_dev_info;
	if (ddi_dev_nregs(dip, &nregs) != DDI_SUCCESS || nregs < 2) {
		cmn_err(CE_WARN, "!fipe: ioat has not enough register bars.");
		fatal = B_TRUE;
		goto out_error;
	}
	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	rc = ddi_regs_map_setup(dip, 1,
	    (caddr_t *)&fipe_ioat_ctrl.ioat_reg_addr,
	    0, 0, &attr, &fipe_ioat_ctrl.ioat_reg_handle);
	if (rc != DDI_SUCCESS) {
		cmn_err(CE_WARN, "!fipe: failed to map IOAT registeres.");
		fatal = B_TRUE;
		goto out_error;
	}

	/* Mark IOAT status. */
	fipe_ioat_ctrl.ioat_reg_mapped = B_TRUE;
	fipe_ioat_ctrl.ioat_ready = B_TRUE;
	fipe_ioat_ctrl.ioat_failed = B_FALSE;
	fipe_ioat_ctrl.ioat_timerid = 0;
	mutex_exit(&fipe_ioat_ctrl.ioat_lock);

	return;

out_error:
	fipe_ioat_ctrl.ioat_timerid = 0;
	if (!fipe_ioat_ctrl.ioat_ready && !fipe_ioat_ctrl.ioat_cancel) {
		if (fatal) {
			/* Mark permanent error and give up. */
			fipe_ioat_ctrl.ioat_failed = B_TRUE;
			/* Release the hold taken by ddi_find_devinfo(). */
			if (fipe_ioat_ctrl.ioat_dev_info != NULL) {
				ndi_rele_devi(fipe_ioat_ctrl.ioat_dev_info);
				fipe_ioat_ctrl.ioat_dev_info = NULL;
			}
		} else {
			/*
			 * Schedule another timer to keep on trying.
			 * timeout() should always succeed, no need to check
			 * return.
			 */
			fipe_ioat_ctrl.ioat_timerid = timeout(fipe_ioat_alloc,
			    NULL, drv_usectohz(FIPE_IOAT_RETRY_INTERVAL));
		}
	}
	mutex_exit(&fipe_ioat_ctrl.ioat_lock);
}
Example #15
/*
 * Build the reserved ISA irq list, and store it in the table pointed to by
 * reserved_irqs_table. The caller is responsible for allocating this table
 * with a minimum of MAX_ISA_IRQ + 1 entries.
 *
 * The routine looks in the device tree at the subtree rooted at /isa
 * for each of the devices under that node, if an interrupts property
 * is present, its values are used to "reserve" irqs so that later ACPI
 * configuration won't choose those irqs.
 *
 * In addition, if acpi_irq_check_elcr is set, the routine uses the ELCR
 * register to identify reserved IRQs.
 */
void
build_reserved_irqlist(uchar_t *reserved_irqs_table)
{
	dev_info_t *isanode = ddi_find_devinfo("isa", -1, 0);
	dev_info_t *isa_child = 0;
	int i;
	uint_t	elcrval;

	/* Initialize the reserved ISA IRQs: */
	for (i = 0; i <= MAX_ISA_IRQ; i++)
		reserved_irqs_table[i] = 0;

	if (acpi_irq_check_elcr) {

		elcrval = (inb(ELCR_PORT2) << 8) | (inb(ELCR_PORT1));
		if (ELCR_EDGE(elcrval, 0) && ELCR_EDGE(elcrval, 1) &&
		    ELCR_EDGE(elcrval, 2) && ELCR_EDGE(elcrval, 8) &&
		    ELCR_EDGE(elcrval, 13)) {
			/* valid ELCR */
			for (i = 0; i <= MAX_ISA_IRQ; i++)
				if (!ELCR_LEVEL(elcrval, i))
					reserved_irqs_table[i] = 1;
		}
	}

	/* always check the isa devinfo nodes */

	if (isanode != 0) { /* Found ISA */
		uint_t intcnt;		/* Interrupt count */
		int *intrs;		/* Interrupt values */

		/* Load first child: */
		isa_child = ddi_get_child(isanode);
		while (isa_child != 0) { /* Iterate over /isa children */
			/* if child has any interrupts, save them */
			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, isa_child,
			    DDI_PROP_DONTPASS, "interrupts", &intrs, &intcnt)
			    == DDI_PROP_SUCCESS) {
				/*
				 * iterate over child interrupt list, adding
				 * them to the reserved irq list
				 */
				while (intcnt-- > 0) {
					/*
					 * Each value MUST be <= MAX_ISA_IRQ
					 */

					if ((intrs[intcnt] > MAX_ISA_IRQ) ||
					    (intrs[intcnt] < 0))
						continue;

					reserved_irqs_table[intrs[intcnt]] = 1;
				}
				ddi_prop_free(intrs);
			}
			isa_child = ddi_get_next_sibling(isa_child);
		}
		/* The isa node was held by ddi_find_devinfo, so release it */
		ndi_rele_devi(isanode);
	}

	/*
	 * Reserve IRQ14 & IRQ15 for IDE.  It shouldn't be hard-coded
	 * here but there's no other way to find the irqs for
	 * legacy-mode ata (since it's hard-coded in pci-ide also).
	 */
	reserved_irqs_table[14] = 1;
	reserved_irqs_table[15] = 1;
}
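
Per the contract in the header comment, the caller supplies a table of at least MAX_ISA_IRQ + 1 entries; a minimal hypothetical sketch:

static boolean_t
my_irq_is_reserved(int irq)
{
	uchar_t reserved[MAX_ISA_IRQ + 1];

	ASSERT(irq >= 0 && irq <= MAX_ISA_IRQ);
	build_reserved_irqlist(reserved);
	return (reserved[irq] != 0 ? B_TRUE : B_FALSE);
}
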
static void
sbdp_resume_devices(dev_info_t *start, sbdp_sr_handle_t *srh)
{
	int circ;
	dev_info_t	*dip, *next, *last = NULL;
	char		*bn;
	sbd_error_t	*sep;

	sep = &srh->sep;

	/* attach in reverse device tree order */
	while (last != start) {
		dip = start;
		next = ddi_get_next_sibling(dip);
		while (next != last && dip != SR_FAILED_DIP(srh)) {
			dip = next;
			next = ddi_get_next_sibling(dip);
		}
		if (dip == SR_FAILED_DIP(srh)) {
			/* Release hold acquired in sbdp_suspend_devices() */
			ndi_rele_devi(dip);
			SR_FAILED_DIP(srh) = NULL;
		} else if (sbdp_is_real_device(dip) &&
				SR_FAILED_DIP(srh) == NULL) {

			if (DEVI(dip)->devi_binding_name != NULL) {
				bn = ddi_binding_name(dip);
			} else {
				bn = "<null>";
			}
#ifdef DEBUG
			if (!sbdp_bypass_device(bn)) {
#else
			{
#endif
				char	d_name[40], d_alias[40], *d_info;

				d_name[0] = 0;
				d_info = ddi_get_name_addr(dip);
				if (d_info == NULL)
					d_info = "<null>";

				if (!sbdp_resolve_devname(dip, d_name,
								d_alias)) {
					if (d_alias[0] != 0) {
						SBDP_DBG_QR("\tresuming "
							"%s@%s (aka %s)\n",
							d_name, d_info,
							d_alias);
					} else {
						SBDP_DBG_QR("\tresuming "
							"%s@%s\n",
							d_name, d_info);
					}
				} else {
					SBDP_DBG_QR("\tresuming %s@%s\n",
						bn, d_info);
				}

				if (devi_attach(dip, DDI_RESUME) !=
							DDI_SUCCESS) {
					/*
					 * Print a console warning,
					 * set an errno of ESGT_RESUME,
					 * and save the driver major
					 * number in the e_str.
					 */

					(void) sprintf(sbdp_get_err_buf(sep),
					    "%s@%s",
					    d_name[0] ? d_name : bn, d_info);
					SBDP_DBG_QR("\tFAILED to resume "
						"%s\n", sbdp_get_err_buf(sep));
					sbdp_set_err(sep,
					    ESGT_RESUME, NULL);
				}
			}
		}
		ndi_devi_enter(dip, &circ);
		sbdp_resume_devices(ddi_get_child(dip), srh);
		ndi_devi_exit(dip, circ);
		last = dip;
	}
}

/*
 * True if thread is virtually stopped.  Similar to CPR_VSTOPPED
 * but from DR point of view.  These user threads are waiting in
 * the kernel.  Once they return from kernel, they will process
 * the stop signal and stop.
 */
#define	SBDP_VSTOPPED(t)			\
	((t)->t_state == TS_SLEEP &&		\
	(t)->t_wchan != NULL &&			\
	(t)->t_astflag &&		\
	((t)->t_proc_flag & TP_CHKPT))


static int
sbdp_stop_user_threads(sbdp_sr_handle_t *srh)
{
	int		count;
	char		cache_psargs[PSARGSZ];
	kthread_id_t	cache_tp;
	uint_t		cache_t_state;
	int		bailout;
	sbd_error_t	*sep;
	kthread_id_t 	tp;

	extern void add_one_utstop();
	extern void utstop_timedwait(clock_t);
	extern void utstop_init(void);

#define	SBDP_UTSTOP_RETRY	4
#define	SBDP_UTSTOP_WAIT	hz

	if (sbdp_skip_user_threads)
		return (DDI_SUCCESS);

	sep = &srh->sep;
	ASSERT(sep);

	utstop_init();

	/* we need to try a few times to get past fork, etc. */
	for (count = 0; count < SBDP_UTSTOP_RETRY; count++) {
		/* walk the entire threadlist */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			mutex_enter(&p->p_lock);
			thread_lock(tp);

			if (tp->t_state == TS_STOPPED) {
				/* add another reason to stop this thread */
				tp->t_schedflag &= ~TS_RESUME;
			} else {
				tp->t_proc_flag |= TP_CHKPT;

				thread_unlock(tp);
				mutex_exit(&p->p_lock);
				add_one_utstop();
				mutex_enter(&p->p_lock);
				thread_lock(tp);

				aston(tp);

				if (ISWAKEABLE(tp) || ISWAITING(tp)) {
					setrun_locked(tp);
				}
			}

			/* grab thread if needed */
			if (tp->t_state == TS_ONPROC && tp->t_cpu != CPU)
				poke_cpu(tp->t_cpu->cpu_id);


			thread_unlock(tp);
			mutex_exit(&p->p_lock);
		}
		mutex_exit(&pidlock);


		/* let everything catch up */
		utstop_timedwait(count * count * SBDP_UTSTOP_WAIT);


		/* now, walk the threadlist again to see if we are done */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next, bailout = 0;
			tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			/*
			 * If this thread didn't stop, and we don't allow
			 * unstopped blocked threads, bail.
			 */
			thread_lock(tp);
			if (!CPR_ISTOPPED(tp) &&
			    !(sbdp_allow_blocked_threads &&
			    SBDP_VSTOPPED(tp))) {

				/* nope, cache the details for later */
				bcopy(p->p_user.u_psargs, cache_psargs,
					sizeof (cache_psargs));
				cache_tp = tp;
				cache_t_state = tp->t_state;
				bailout = 1;
			}
			thread_unlock(tp);
		}
		mutex_exit(&pidlock);

		/* were all the threads stopped? */
		if (!bailout)
			break;
	}

	/* were we unable to stop all threads after a few tries? */
	if (bailout) {
		cmn_err(CE_NOTE, "process: %s id: %p state: %x\n",
			cache_psargs, cache_tp, cache_t_state);

		(void) sprintf(sbdp_get_err_buf(sep), "%s", cache_psargs);
		sbdp_set_err(sep, ESGT_UTHREAD, NULL);
		return (ESRCH);
	}

	return (DDI_SUCCESS);
}