Example No. 1
void
npe_enable_htmsi_children(dev_info_t *dip)
{
	dev_info_t *cdip = ddi_get_child(dip);
	ddi_acc_handle_t cfg_hdl;

	if (!npe_enable_htmsi_flag)
		return;

	/*
	 * Hypertransport MSI remapping only applies to AMD CPUs using
	 * Hypertransport (K8 and above) and not other platforms with non-AMD
	 * CPUs that may be using Hypertransport internally in the chipset(s)
	 */
	if (!(cpuid_getvendor(CPU) == X86_VENDOR_AMD &&
	    cpuid_getfamily(CPU) >= 0xf))
		return;

	for (; cdip != NULL; cdip = ddi_get_next_sibling(cdip)) {
		if (pci_config_setup(cdip, &cfg_hdl) != DDI_SUCCESS) {
			cmn_err(CE_NOTE, "!npe_enable_htmsi_children: "
			    "pci_config_setup failed for %s",
			    ddi_node_name(cdip));
			return;
		}

		(void) npe_enable_htmsi(cfg_hdl);
		pci_config_teardown(&cfg_hdl);
	}
}
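
All of the examples below share one idiom: fetch the first child with ddi_get_child() and walk its siblings with ddi_get_next_sibling(). The following is a minimal sketch of that pattern, assuming the parent should be held busy with ndi_devi_enter()/ndi_devi_exit() as Example No. 13 does; the function name and loop body are placeholders, not code from any of these drivers.

static void
walk_children_sketch(dev_info_t *pdip)
{
	dev_info_t *cdip;
	int circ;

	/* Hold the parent busy so children cannot come or go mid-walk. */
	ndi_devi_enter(pdip, &circ);

	for (cdip = ddi_get_child(pdip); cdip != NULL;
	    cdip = ddi_get_next_sibling(cdip)) {
		/* ... per-child work goes here ... */
	}

	ndi_devi_exit(pdip, circ);
}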
Example No. 2
static int
fco_child(dev_info_t *ap, fco_handle_t rp, fc_ci_t *cp)
{
	fc_phandle_t h;
	dev_info_t *dip;

	if (fc_cell2int(cp->nargs) != 1)
		return (fc_syntax_error(cp, "nargs must be 1"));

	if (fc_cell2int(cp->nresults) < 1)
		return (fc_syntax_error(cp, "nresults must be > 0"));

	/*
	 * Make sure this is a handle we gave out ...
	 */
	h = fc_cell2phandle(fc_arg(cp, 0));
	if ((dip = fc_phandle_to_dip(fc_handle_to_phandle_head(rp), h)) == NULL)
		return (fc_priv_error(cp, "unknown handle"));

	/*
	 * Find the child and if there is one, return it ...
	 */
	dip = ddi_get_child(dip);
	h = 0;
	if (dip != NULL)
		h = fc_dip_to_phandle(fc_handle_to_phandle_head(rp), dip);

	cp->nresults = fc_int2cell(1);
	fc_result(cp, 0) = fc_phandle2cell(h);
	return (fc_success_op(ap, rp, cp));
}
Example No. 3
dev_info_t *
i_ddi_create_branch(dev_info_t *pdip, int nid)
{
	char *buf;
	dev_info_t *dip = NULL;

	if (pdip == NULL || nid == OBP_NONODE || nid == OBP_BADNODE)
		return (NULL);

	buf = kmem_alloc(OBP_MAXPROPNAME, KM_SLEEP);

	if (getlongprop_buf(nid, OBP_NAME, buf, OBP_MAXPROPNAME) > 0) {
		if (check_status(nid, buf, pdip) == DDI_SUCCESS)
			dip = ddi_add_child(pdip, buf, nid, -1);
	}

	kmem_free(buf, OBP_MAXPROPNAME);

	if (dip == NULL)
		return (NULL);

	/*
	 * Don't create any siblings of the branch root, just
	 * children.
	 */
	(void) get_neighbors(dip, DDI_WALK_PRUNESIB);

	di_dfs(ddi_get_child(dip), get_neighbors, 0);

	return (dip);
}
Example No. 4
/*
 * ppb_save_config_regs
 *
 * This routine saves the state of the configuration registers of all
 * the child nodes of each PBM.
 *
 * used by: ppb_detach() on suspends
 *
 * return value: none
 */
static void
ppb_save_config_regs(ppb_devstate_t *ppb_p)
{
	int i;
	dev_info_t *dip;
	ddi_acc_handle_t config_handle;

	for (i = 0, dip = ddi_get_child(ppb_p->dip); dip != NULL;
	    i++, dip = ddi_get_next_sibling(dip)) {

		if (pci_config_setup(dip, &config_handle) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: can't config space for %s%d\n",
			    ddi_driver_name(ppb_p->dip),
			    ddi_get_instance(ppb_p->dip),
			    ddi_driver_name(dip),
			    ddi_get_instance(dip));
			continue;
		}

		ppb_p->config_state[i].dip = dip;
		ppb_p->config_state[i].command =
		    pci_config_get16(config_handle, PCI_CONF_COMM);
		pci_config_teardown(&config_handle);
	}
	ppb_p->config_state_index = i;
}
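
The comment above notes that this routine is used by ppb_detach() on suspend. The matching resume path would replay the saved array rather than re-walking the tree; the following is only a sketch reconstructed from the fields used above (config_state[] and config_state_index), not the driver's actual restore routine.

static void
ppb_restore_config_regs_sketch(ppb_devstate_t *ppb_p)
{
	int i;
	dev_info_t *dip;
	ddi_acc_handle_t config_handle;

	for (i = 0; i < ppb_p->config_state_index; i++) {
		dip = ppb_p->config_state[i].dip;
		if (pci_config_setup(dip, &config_handle) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: can't setup config space",
			    ddi_driver_name(dip), ddi_get_instance(dip));
			continue;
		}

		/* Put back the command register saved at suspend time. */
		pci_config_put16(config_handle, PCI_CONF_COMM,
		    ppb_p->config_state[i].command);
		pci_config_teardown(&config_handle);
	}
}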
Example No. 5
/*
 * Starting from the root node suspend all devices in the device tree.
 * Assumes that all devices have already been marked busy.
 */
static int
sbdp_suspend_devices_(dev_info_t *dip, sbdp_sr_handle_t *srh)
{
	major_t	major;
	char	*dname;

	for (; dip != NULL; dip = ddi_get_next_sibling(dip)) {
		char	d_name[40], d_alias[40], *d_info;

		if (sbdp_suspend_devices_(ddi_get_child(dip), srh)) {
			return (ENXIO);
		}

		if (!sbdp_is_real_device(dip))
			continue;

		major = (major_t)-1;
		if ((dname = DEVI(dip)->devi_binding_name) != NULL)
			major = ddi_name_to_major(dname);

#ifdef DEBUG
		if (sbdp_bypass_device(dname)) {
			SBDP_DBG_QR("bypassed suspend of %s (major# %d)\n",
			    dname, major);
			continue;
		}
#endif

		if ((d_info = ddi_get_name_addr(dip)) == NULL)
			d_info = "<null>";

		d_name[0] = 0;
		if (sbdp_resolve_devname(dip, d_name, d_alias) == 0) {
			if (d_alias[0] != 0) {
				SBDP_DBG_QR("\tsuspending %s@%s (aka %s)\n",
					d_name, d_info, d_alias);
			} else {
				SBDP_DBG_QR("\tsuspending %s@%s\n",
					d_name, d_info);
			}
		} else {
			SBDP_DBG_QR("\tsuspending %s@%s\n", dname, d_info);
		}

		if (devi_detach(dip, DDI_SUSPEND) != DDI_SUCCESS) {
			(void) sprintf(sbdp_get_err_buf(&srh->sep),
			    "%d", major);

			sbdp_set_err(&srh->sep, ESGT_SUSPEND, NULL);
			ndi_hold_devi(dip);
			SR_FAILED_DIP(srh) = dip;
			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}
Example No. 6
void
load_platform_drivers(void)
{
	dev_info_t 		*dip;		/* dip of the isa driver */
	int			simba_present = 0;
	dev_info_t		*root_child_node;


	/*
	 * Install the ISA driver. This is required for the southbridge IDE
	 * workaround, which resets the IDE channel during an IDE bus reset.
	 * Panic the system if the ISA driver cannot be loaded or its PCI
	 * config space cannot be accessed, since the register used to reset
	 * the IDE channel lives in ISA config space.
	 */
	root_child_node = ddi_get_child(ddi_root_node());

	while (root_child_node != NULL) {
		if (strcmp(ddi_node_name(root_child_node), "pci") == 0) {
			root_child_node = ddi_get_child(root_child_node);
			if (strcmp(ddi_node_name(root_child_node), "pci") == 0)
				simba_present = 1;
			break;
		}
		root_child_node = ddi_get_next_sibling(root_child_node);
	}


	if (simba_present)
		dip = e_ddi_hold_devi_by_path(PLATFORM_ISA_PATHNAME_WITH_SIMBA,
		    0);
	else
		dip = e_ddi_hold_devi_by_path(PLATFORM_ISA_PATHNAME, 0);

	if (dip == NULL) {
		cmn_err(CE_PANIC, "Could not install the isa driver\n");
		return;
	}

	if (pci_config_setup(dip, &platform_isa_handle) != DDI_SUCCESS) {
		cmn_err(CE_PANIC, "Could not get the config space of isa\n");
		return;
	}
}
Example No. 7
void
pcmu_child_cfg_save(dev_info_t *dip)
{
	dev_info_t *cdip;
	int ret = DDI_SUCCESS;

	/*
	 * Save the state of the configuration headers of child
	 * nodes.
	 */

	for (cdip = ddi_get_child(dip); cdip != NULL;
	    cdip = ddi_get_next_sibling(cdip)) {

		/*
		 * Not interested in children who are not already
		 * init'ed.  They will be set up in pcmu_init_child().
		 */
		if (i_ddi_node_state(cdip) < DS_INITIALIZED) {
			PCMU_DBG2(PCMU_DBG_DETACH, dip, "DDI_SUSPEND: skipping "
			    "%s%d not in CF1\n", ddi_driver_name(cdip),
			    ddi_get_instance(cdip));

			continue;
		}

		/*
		 * Only save config registers if not already saved by child.
		 */
		if (ddi_prop_exists(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
		    SAVED_CONFIG_REGS) == 1) {

			continue;
		}

		/*
		 * The nexus needs to save config registers.  Create a property
		 * so it knows to restore on resume.
		 */
		ret = ndi_prop_create_boolean(DDI_DEV_T_NONE, cdip,
		    "nexus-saved-config-regs");

		if (ret != DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "%s%d can't update prop %s",
			    ddi_driver_name(cdip), ddi_get_instance(cdip),
			    "nexus-saved-config-regs");
		}

		(void) pci_save_config_regs(cdip);
	}
}
Example No. 8
/*
 * If the bridge is empty, disable it
 */
int
npe_disable_empty_bridges_workaround(dev_info_t *child)
{
	/*
	 * Do not bind drivers to empty bridges.
	 * Fail above, if the bridge is found to be hotplug capable
	 */
	if (ddi_driver_major(child) == ddi_name_to_major("pcieb") &&
	    ddi_get_child(child) == NULL &&
	    ddi_prop_get_int(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "pci-hotplug-type", INBAND_HPC_NONE) == INBAND_HPC_NONE)
		return (1);

	return (0);
}
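
A usage sketch: a child-initialization path could consult this workaround and refuse to bind a driver to an empty bridge. The surrounding function below is an assumption for illustration, not the driver's actual code.

static int
npe_initchild_sketch(dev_info_t *child)
{
	/* Refuse to bind a driver to an empty, non-hotplug bridge. */
	if (npe_disable_empty_bridges_workaround(child) == 1)
		return (DDI_FAILURE);

	/* ... normal child initialization would continue here ... */
	return (DDI_SUCCESS);
}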
Example No. 9
void
npe_enable_htmsi_children(dev_info_t *dip)
{
	dev_info_t *cdip = ddi_get_child(dip);
	ddi_acc_handle_t cfg_hdl;

	for (; cdip != NULL; cdip = ddi_get_next_sibling(cdip)) {
		if (pci_config_setup(cdip, &cfg_hdl) != DDI_SUCCESS) {
			cmn_err(CE_NOTE, "!npe_enable_htmsi_children: "
			    "pci_config_setup failed for %s",
			    ddi_node_name(cdip));
			return;
		}

		(void) npe_enable_htmsi(cfg_hdl);
		pci_config_teardown(&cfg_hdl);
	}
}
Example No. 10
void
pcmu_child_cfg_restore(dev_info_t *dip)
{
	dev_info_t *cdip;

	/*
	 * Restore config registers for children that did not save
	 * their own registers.  Children's power states are UNKNOWN after
	 * a resume, since it is possible for the PM framework to call
	 * resume without an actual power cycle (i.e., if suspend fails).
	 */
	for (cdip = ddi_get_child(dip); cdip != NULL;
	    cdip = ddi_get_next_sibling(cdip)) {

		/*
		 * Not interested in children who are not already
		 * init'ed.  They will be set up by pcmu_init_child().
		 */
		if (i_ddi_node_state(cdip) < DS_INITIALIZED) {
			PCMU_DBG2(PCMU_DBG_DETACH, dip,
			    "DDI_RESUME: skipping %s%d not in CF1\n",
			    ddi_driver_name(cdip), ddi_get_instance(cdip));
			continue;
		}

		/*
		 * Only restore config registers if saved by nexus.
		 */
		if (ddi_prop_exists(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
		    "nexus-saved-config-regs") == 1) {
			(void) pci_restore_config_regs(cdip);

			PCMU_DBG2(PCMU_DBG_PWR, dip,
			    "DDI_RESUME: nexus restoring %s%d config regs\n",
			    ddi_driver_name(cdip), ddi_get_instance(cdip));

			if (ndi_prop_remove(DDI_DEV_T_NONE, cdip,
			    "nexus-saved-config-regs") != DDI_PROP_SUCCESS) {
				cmn_err(CE_WARN, "%s%d can't remove prop %s",
				    ddi_driver_name(cdip),
				    ddi_get_instance(cdip),
				    "nexus-saved-config-regs");
			}
		}
	}
}
Example No. 11
int
npe_restore_htconfig_children(dev_info_t *dip)
{
	dev_info_t *cdip = ddi_get_child(dip);
	int rval = DDI_SUCCESS;

	for (; cdip != NULL; cdip = ddi_get_next_sibling(cdip)) {
		if (ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
		    "htconfig-saved", 0) == 0)
			continue;

		if (pci_restore_config_regs(cdip) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "Failed to restore HT config "
			    "regs for %s\n", ddi_node_name(cdip));
			rval = DDI_FAILURE;
		}
	}

	return (rval);
}
Example No. 12
/*
 * Save config regs for driverless HyperTransport devices of class
 * memory controller or host bridge.
 */
int
npe_save_htconfig_children(dev_info_t *dip)
{
	dev_info_t *cdip = ddi_get_child(dip);
	ddi_acc_handle_t cfg_hdl;
	uint16_t ptr;
	int rval = DDI_SUCCESS;
	uint8_t cl, scl;

	for (; cdip != NULL; cdip = ddi_get_next_sibling(cdip)) {
		if (ddi_driver_major(cdip) != DDI_MAJOR_T_NONE)
			continue;

		if (pci_config_setup(cdip, &cfg_hdl) != DDI_SUCCESS)
			return (DDI_FAILURE);

		cl = pci_config_get8(cfg_hdl, PCI_CONF_BASCLASS);
		scl = pci_config_get8(cfg_hdl, PCI_CONF_SUBCLASS);

		if (((cl == PCI_CLASS_MEM && scl == PCI_MEM_RAM) ||
		    (cl == PCI_CLASS_BRIDGE && scl == PCI_BRIDGE_HOST)) &&
		    pci_htcap_locate(cfg_hdl, 0, 0, &ptr) == DDI_SUCCESS) {

			if (pci_save_config_regs(cdip) != DDI_SUCCESS) {
				cmn_err(CE_WARN, "Failed to save HT config "
				    "regs for %s\n", ddi_node_name(cdip));
				rval = DDI_FAILURE;

			} else if (ddi_prop_update_int(DDI_DEV_T_NONE, cdip,
			    "htconfig-saved", 1) != DDI_SUCCESS) {
				cmn_err(CE_WARN, "Failed to set htconfig-saved "
				    "property for %s\n", ddi_node_name(cdip));
				rval = DDI_FAILURE;
			}
		}

		pci_config_teardown(&cfg_hdl);
	}

	return (rval);
}
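
This routine and npe_restore_htconfig_children() in Example No. 11 are meant to pair up across a suspend/resume cycle. The sketch below shows one plausible calling arrangement; the function names and surrounding control flow are assumptions, not the npe driver's actual attach/detach code.

static int
npe_suspend_sketch(dev_info_t *dip)
{
	/* Save HT config state before the rest of DDI_SUSPEND handling. */
	if (npe_save_htconfig_children(dip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/* ... remainder of DDI_SUSPEND handling ... */
	return (DDI_SUCCESS);
}

static int
npe_resume_sketch(dev_info_t *dip)
{
	/* ... remainder of DDI_RESUME handling ... */

	/* Replay the registers saved by npe_save_htconfig_children(). */
	return (npe_restore_htconfig_children(dip));
}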
Example No. 13
/*
 * Check for a possible substitute node.  This routine searches the
 * children of parent_dip, looking for a node that:
 *	1. is a prom node
 *	2. binds to the same major number
 * There is no need to verify that the unit-address information matches,
 * since it is likely that the substitute node will have none (e.g. disk);
 * that would be the reason the framework rejected it in the first place.
 *
 * Assumes parent_dip is held.
 */
static dev_info_t *
find_alternate_node(dev_info_t *parent_dip, major_t major)
{
	int circ;
	dev_info_t *child_dip;

	/* lock down parent to keep children from being removed */
	ndi_devi_enter(parent_dip, &circ);
	for (child_dip = ddi_get_child(parent_dip); child_dip != NULL;
	    child_dip = ddi_get_next_sibling(child_dip)) {

		/* look for obp node with matching major */
		if ((ndi_dev_is_prom_node(child_dip) != 0) &&
		    (ddi_driver_major(child_dip) == major)) {
			ndi_hold_devi(child_dip);
			break;
		}
	}
	ndi_devi_exit(parent_dip, circ);
	return (child_dip);
}
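
Note that the dip returned here, if any, has been held with ndi_hold_devi(), so the caller is expected to drop that hold with ndi_rele_devi() when it is done. A minimal usage sketch follows (the surrounding function is hypothetical):

static void
use_alternate_node_sketch(dev_info_t *parent_dip, major_t major)
{
	dev_info_t *alt_dip;

	if ((alt_dip = find_alternate_node(parent_dip, major)) != NULL) {
		/* ... inspect or rebind alt_dip here ... */
		ndi_rele_devi(alt_dip);	/* drop the hold taken above */
	}
}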
Example No. 14
static void
px_set_mps(px_t *px_p)
{
	dev_info_t	*dip;
	pcie_bus_t	*bus_p;
	int		max_supported;

	dip = px_p->px_dip;
	bus_p = PCIE_DIP2BUS(dip);

	bus_p->bus_mps = -1;

	if (pcie_root_port(dip) == DDI_FAILURE) {
		if (px_lib_get_root_complex_mps(px_p, dip,
		    &max_supported) < 0) {

			DBG(DBG_MPS, dip, "MPS:  Can not get RC MPS\n");
			return;
		}

		DBG(DBG_MPS, dip, "MPS: Root Complex MPS Cap of = %x\n",
		    max_supported);

		if (pcie_max_mps < max_supported)
			max_supported = pcie_max_mps;

		(void) pcie_get_fabric_mps(dip, ddi_get_child(dip),
		    &max_supported);

		bus_p->bus_mps = max_supported;

		(void) px_lib_set_root_complex_mps(px_p, dip, bus_p->bus_mps);

		DBG(DBG_MPS, dip, "MPS: Root Complex MPS Set to = %x\n",
		    bus_p->bus_mps);
	}
}
Example No. 15
int
xen_suspend_devices(dev_info_t *dip)
{
	int error;
	char buf[XPV_BUFSIZE];

	SUSPEND_DEBUG("xen_suspend_devices\n");

	for (; dip != NULL; dip = ddi_get_next_sibling(dip)) {
		if (xen_suspend_devices(ddi_get_child(dip)))
			return (ENXIO);
		if (ddi_get_driver(dip) == NULL)
			continue;
		SUSPEND_DEBUG("Suspending device %s\n", ddi_deviname(dip, buf));
		ASSERT((DEVI(dip)->devi_cpr_flags & DCF_CPR_SUSPENDED) == 0);


		if (!i_ddi_devi_attached(dip)) {
			error = DDI_FAILURE;
		} else {
			error = devi_detach(dip, DDI_SUSPEND);
		}

		if (error == DDI_SUCCESS) {
			DEVI(dip)->devi_cpr_flags |= DCF_CPR_SUSPENDED;
		} else {
			SUSPEND_DEBUG("WARNING: Unable to suspend device %s\n",
			    ddi_deviname(dip, buf));
			cmn_err(CE_WARN, "Unable to suspend device %s.",
			    ddi_deviname(dip, buf));
			cmn_err(CE_WARN, "Device is busy or does not "
			    "support suspend/resume.");
			return (ENXIO);
		}
	}
	return (0);
}
Example No. 16
/*
 * Top level routine to direct suspend/resume of a domain.
 */
void
xen_suspend_domain(void)
{
	extern void rtcsync(void);
	extern void ec_resume(void);
	extern kmutex_t ec_lock;
	struct xen_add_to_physmap xatp;
	ulong_t flags;
	int err;

	cmn_err(CE_NOTE, "Domain suspending for save/migrate");

	SUSPEND_DEBUG("xen_suspend_domain\n");

	/*
	 * We only want to suspend the PV devices, since the emulated devices
	 * are suspended by saving the emulated device state.  The PV devices
	 * are all children of the xpvd nexus device.  So we search the
	 * device tree for the xpvd node to use as the root of the tree to
	 * be suspended.
	 */
	if (xpvd_dip == NULL)
		ddi_walk_devs(ddi_root_node(), check_xpvd, NULL);

	/*
	 * suspend interrupts and devices
	 */
	if (xpvd_dip != NULL)
		(void) xen_suspend_devices(ddi_get_child(xpvd_dip));
	else
		cmn_err(CE_WARN, "No PV devices found to suspend");
	SUSPEND_DEBUG("xenbus_suspend\n");
	xenbus_suspend();

	mutex_enter(&cpu_lock);

	/*
	 * Suspend on vcpu 0
	 */
	thread_affinity_set(curthread, 0);
	kpreempt_disable();

	if (ncpus > 1)
		pause_cpus(NULL, NULL);
	/*
	 * We can grab the ec_lock as it's a spinlock with a high SPL. Hence
	 * any holder would have dropped it to get through pause_cpus().
	 */
	mutex_enter(&ec_lock);

	/*
	 * From here on in, we can't take locks.
	 */

	flags = intr_clear();

	SUSPEND_DEBUG("HYPERVISOR_suspend\n");
	/*
	 * At this point we suspend and sometime later resume.
	 * Note that this call may return with an indication of a cancelled
	 * suspend; for now, no matter what the return is, we do a full
	 * resume of all suspended drivers, etc.
	 */
	(void) HYPERVISOR_shutdown(SHUTDOWN_suspend);

	/*
	 * Point HYPERVISOR_shared_info to the proper place.
	 */
	xatp.domid = DOMID_SELF;
	xatp.idx = 0;
	xatp.space = XENMAPSPACE_shared_info;
	xatp.gpfn = xen_shared_info_frame;
	if ((err = HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) != 0)
		panic("Could not set shared_info page. error: %d", err);

	SUSPEND_DEBUG("gnttab_resume\n");
	gnttab_resume();

	SUSPEND_DEBUG("ec_resume\n");
	ec_resume();

	intr_restore(flags);

	if (ncpus > 1)
		start_cpus();

	mutex_exit(&ec_lock);
	mutex_exit(&cpu_lock);

	/*
	 * Now we can take locks again.
	 */

	rtcsync();

	SUSPEND_DEBUG("xenbus_resume\n");
	xenbus_resume();
	SUSPEND_DEBUG("xen_resume_devices\n");
	if (xpvd_dip != NULL)
		(void) xen_resume_devices(ddi_get_child(xpvd_dip), 0);

	thread_affinity_clear(curthread);
	kpreempt_enable();

	SUSPEND_DEBUG("finished xen_suspend_domain\n");

	cmn_err(CE_NOTE, "domain restore/migrate completed");
}
Example No. 17
static void
sbdp_resume_devices(dev_info_t *start, sbdp_sr_handle_t *srh)
{
	int circ;
	dev_info_t	*dip, *next, *last = NULL;
	char		*bn;
	sbd_error_t	*sep;

	sep = &srh->sep;

	/* attach in reverse device tree order */
	while (last != start) {
		dip = start;
		next = ddi_get_next_sibling(dip);
		while (next != last && dip != SR_FAILED_DIP(srh)) {
			dip = next;
			next = ddi_get_next_sibling(dip);
		}
		if (dip == SR_FAILED_DIP(srh)) {
			/* Release hold acquired in sbdp_suspend_devices() */
			ndi_rele_devi(dip);
			SR_FAILED_DIP(srh) = NULL;
		} else if (sbdp_is_real_device(dip) &&
				SR_FAILED_DIP(srh) == NULL) {

			if (DEVI(dip)->devi_binding_name != NULL) {
				bn = ddi_binding_name(dip);
			} else {
				bn = "<null>";
			}
#ifdef DEBUG
			if (!sbdp_bypass_device(bn)) {
#else
			{
#endif
				char	d_name[40], d_alias[40], *d_info;

				d_name[0] = 0;
				d_info = ddi_get_name_addr(dip);
				if (d_info == NULL)
					d_info = "<null>";

				if (!sbdp_resolve_devname(dip, d_name,
								d_alias)) {
					if (d_alias[0] != 0) {
						SBDP_DBG_QR("\tresuming "
							"%s@%s (aka %s)\n",
							d_name, d_info,
							d_alias);
					} else {
						SBDP_DBG_QR("\tresuming "
							"%s@%s\n",
							d_name, d_info);
					}
				} else {
					SBDP_DBG_QR("\tresuming %s@%s\n",
						bn, d_info);
				}

				if (devi_attach(dip, DDI_RESUME) !=
							DDI_SUCCESS) {
					/*
					 * Print a console warning,
					 * set an errno of ESGT_RESUME,
					 * and save the driver major
					 * number in the e_str.
					 */

					(void) sprintf(sbdp_get_err_buf(sep),
					    "%s@%s",
					    d_name[0] ? d_name : bn, d_info);
					SBDP_DBG_QR("\tFAILED to resume "
						"%s\n", sbdp_get_err_buf(sep));
					sbdp_set_err(sep,
					    ESGT_RESUME, NULL);
				}
			}
		}
		ndi_devi_enter(dip, &circ);
		sbdp_resume_devices(ddi_get_child(dip), srh);
		ndi_devi_exit(dip, circ);
		last = dip;
	}
}

/*
 * True if thread is virtually stopped.  Similar to CPR_VSTOPPED
 * but from DR point of view.  These user threads are waiting in
 * the kernel.  Once they return from kernel, they will process
 * the stop signal and stop.
 */
#define	SBDP_VSTOPPED(t)			\
	((t)->t_state == TS_SLEEP &&		\
	(t)->t_wchan != NULL &&			\
	(t)->t_astflag &&		\
	((t)->t_proc_flag & TP_CHKPT))


static int
sbdp_stop_user_threads(sbdp_sr_handle_t *srh)
{
	int		count;
	char		cache_psargs[PSARGSZ];
	kthread_id_t	cache_tp;
	uint_t		cache_t_state;
	int		bailout;
	sbd_error_t	*sep;
	kthread_id_t 	tp;

	extern void add_one_utstop();
	extern void utstop_timedwait(clock_t);
	extern void utstop_init(void);

#define	SBDP_UTSTOP_RETRY	4
#define	SBDP_UTSTOP_WAIT	hz

	if (sbdp_skip_user_threads)
		return (DDI_SUCCESS);

	sep = &srh->sep;
	ASSERT(sep);

	utstop_init();

	/* we need to try a few times to get past fork, etc. */
	for (count = 0; count < SBDP_UTSTOP_RETRY; count++) {
		/* walk the entire threadlist */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next; tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			mutex_enter(&p->p_lock);
			thread_lock(tp);

			if (tp->t_state == TS_STOPPED) {
				/* add another reason to stop this thread */
				tp->t_schedflag &= ~TS_RESUME;
			} else {
				tp->t_proc_flag |= TP_CHKPT;

				thread_unlock(tp);
				mutex_exit(&p->p_lock);
				add_one_utstop();
				mutex_enter(&p->p_lock);
				thread_lock(tp);

				aston(tp);

				if (ISWAKEABLE(tp) || ISWAITING(tp)) {
					setrun_locked(tp);
				}
			}

			/* grab thread if needed */
			if (tp->t_state == TS_ONPROC && tp->t_cpu != CPU)
				poke_cpu(tp->t_cpu->cpu_id);


			thread_unlock(tp);
			mutex_exit(&p->p_lock);
		}
		mutex_exit(&pidlock);


		/* let everything catch up */
		utstop_timedwait(count * count * SBDP_UTSTOP_WAIT);


		/* now, walk the threadlist again to see if we are done */
		mutex_enter(&pidlock);
		for (tp = curthread->t_next, bailout = 0;
			tp != curthread; tp = tp->t_next) {
			proc_t *p = ttoproc(tp);

			/* handle kernel threads separately */
			if (p->p_as == &kas || p->p_stat == SZOMB)
				continue;

			/*
			 * If this thread didn't stop, and we don't allow
			 * unstopped blocked threads, bail.
			 */
			thread_lock(tp);
			if (!CPR_ISTOPPED(tp) &&
			    !(sbdp_allow_blocked_threads &&
			    SBDP_VSTOPPED(tp))) {

				/* nope, cache the details for later */
				bcopy(p->p_user.u_psargs, cache_psargs,
					sizeof (cache_psargs));
				cache_tp = tp;
				cache_t_state = tp->t_state;
				bailout = 1;
			}
			thread_unlock(tp);
		}
		mutex_exit(&pidlock);

		/* were all the threads stopped? */
		if (!bailout)
			break;
	}

	/* were we unable to stop all threads after a few tries? */
	if (bailout) {
		cmn_err(CE_NOTE, "process: %s id: %p state: %x\n",
			cache_psargs, cache_tp, cache_t_state);

		(void) sprintf(sbdp_get_err_buf(sep), "%s", cache_psargs);
		sbdp_set_err(sep, ESGT_UTHREAD, NULL);
		return (ESRCH);
	}

	return (DDI_SUCCESS);
}
Example No. 18
/*
 * Setup resource map for the pci bus node based on the "available"
 * property and "bus-range" property.
 */
int
pci_resource_setup(dev_info_t *dip)
{
	pci_regspec_t *regs;
	int rlen, rcount, i;
	char bus_type[16] = "(unknown)";
	int len;
	struct busnum_ctrl ctrl;
	struct bus_range pci_bus_range;
	int circular_count;
	int rval = NDI_SUCCESS;

	/*
	 * If this is a pci bus node then look for "available" property
	 * to find the available resources on this bus.
	 */
	len = sizeof (bus_type);
	if (ddi_prop_op(DDI_DEV_T_ANY, dip, PROP_LEN_AND_VAL_BUF,
	    DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "device_type",
	    (caddr_t)&bus_type, &len) != DDI_SUCCESS)
		return (NDI_FAILURE);

	/* it is not a pci/pci-ex bus type */
	if ((strcmp(bus_type, "pci") != 0) && (strcmp(bus_type, "pciex") != 0))
		return (NDI_FAILURE);

	/*
	 * The pci-hotplug project addresses adding the call
	 * to pci_resource_setup from pci nexus driver.
	 * However that project would initially be only for x86,
	 * so for sparc pcmcia-pci support we still need to call
	 * pci_resource_setup in pcic driver. Once all pci nexus drivers
	 * are updated to call pci_resource_setup this portion of the
	 * code would really become an assert to make sure this
	 * function is not called for the same dip twice.
	 */
	{
		if (ra_map_exist(dip, NDI_RA_TYPE_MEM) == NDI_SUCCESS) {
			return (NDI_FAILURE);
		}
	}


	/*
	 * Create empty resource maps first.
	 *
	 * NOTE: If all the allocated resources are already assigned to
	 * device(s) in the hot plug slot, then the "available" property may
	 * not be present. But a subsequent hot plug operation may unconfigure
	 * the device in the slot and try to free up its resources. So, at
	 * the minimum, we should create empty maps here.
	 */
	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_MEM) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_IO) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_BUSNUM) == NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	if (ndi_ra_map_setup(dip, NDI_RA_TYPE_PCI_PREFETCH_MEM) ==
	    NDI_FAILURE) {
		return (NDI_FAILURE);
	}

	/* read the "available" property if it is available */
	if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "available", (caddr_t)&regs, &rlen) == DDI_SUCCESS) {
		/*
		 * create the available resource list for both memory and
		 * io space
		 */
		rcount = rlen / sizeof (pci_regspec_t);
		for (i = 0; i < rcount; i++) {
			switch (PCI_REG_ADDR_G(regs[i].pci_phys_hi)) {
			case PCI_REG_ADDR_G(PCI_ADDR_MEM32):
				(void) ndi_ra_free(dip,
				    (uint64_t)regs[i].pci_phys_low,
				    (uint64_t)regs[i].pci_size_low,
				    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
				    NDI_RA_TYPE_PCI_PREFETCH_MEM :
				    NDI_RA_TYPE_MEM, 0);
				break;
			case PCI_REG_ADDR_G(PCI_ADDR_MEM64):
				(void) ndi_ra_free(dip,
				    ((uint64_t)(regs[i].pci_phys_mid) << 32) |
				    ((uint64_t)(regs[i].pci_phys_low)),
				    ((uint64_t)(regs[i].pci_size_hi) << 32) |
				    ((uint64_t)(regs[i].pci_size_low)),
				    (regs[i].pci_phys_hi & PCI_REG_PF_M) ?
				    NDI_RA_TYPE_PCI_PREFETCH_MEM :
				    NDI_RA_TYPE_MEM, 0);
				break;
			case PCI_REG_ADDR_G(PCI_ADDR_IO):
				(void) ndi_ra_free(dip,
				    (uint64_t)regs[i].pci_phys_low,
				    (uint64_t)regs[i].pci_size_low,
				    NDI_RA_TYPE_IO, 0);
				break;
			case PCI_REG_ADDR_G(PCI_ADDR_CONFIG):
				break;
			default:
				cmn_err(CE_WARN,
				    "pci_resource_setup: bad addr type: %x\n",
				    PCI_REG_ADDR_G(regs[i].pci_phys_hi));
				break;
			}
		}
		kmem_free((caddr_t)regs, rlen);
	}

	/*
	 * update resource map for available bus numbers if the node
	 * has available-bus-range or bus-range property.
	 */
	len = sizeof (struct bus_range);
	if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "available-bus-range", (caddr_t)&pci_bus_range, &len) ==
	    DDI_SUCCESS) {
		/*
		 * Add bus numbers in the range to the free list.
		 */
		(void) ndi_ra_free(dip, (uint64_t)pci_bus_range.lo,
		    (uint64_t)pci_bus_range.hi - (uint64_t)pci_bus_range.lo +
		    1, NDI_RA_TYPE_PCI_BUSNUM, 0);
	} else {
		/*
		 * We don't have an available-bus-range property. If, instead,
		 * we have a bus-range property, we add all the bus numbers
		 * in that range to the free list, but we must then scan
		 * for pci-pci bridges on this bus to find out if any of
		 * those bus numbers are already in use. If so, we can
		 * reclaim them.
		 */
		len = sizeof (struct bus_range);
		if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, "bus-range", (caddr_t)&pci_bus_range,
		    &len) == DDI_SUCCESS) {
			if (pci_bus_range.lo != pci_bus_range.hi) {
				/*
				 * Add bus numbers other than the secondary
				 * bus number to the free list.
				 */
				(void) ndi_ra_free(dip,
				    (uint64_t)pci_bus_range.lo + 1,
				    (uint64_t)pci_bus_range.hi -
				    (uint64_t)pci_bus_range.lo,
				    NDI_RA_TYPE_PCI_BUSNUM, 0);

				/* scan for pci-pci bridges */
				ctrl.rv = DDI_SUCCESS;
				ctrl.dip = dip;
				ctrl.range = &pci_bus_range;
				ndi_devi_enter(dip, &circular_count);
				ddi_walk_devs(ddi_get_child(dip),
				    claim_pci_busnum, (void *)&ctrl);
				ndi_devi_exit(dip, circular_count);
				if (ctrl.rv != DDI_SUCCESS) {
					/* failed to create the map */
					(void) ndi_ra_map_destroy(dip,
					    NDI_RA_TYPE_PCI_BUSNUM);
					rval = NDI_FAILURE;
				}
			}
		}
	}

#ifdef BUSRA_DEBUG
	if (busra_debug) {
		(void) ra_dump_all(NULL, dip);
	}
#endif

	return (rval);
}
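
A usage sketch: a pci/pciex nexus attach path might set up the resource maps once and tear them down again on detach (the busra code also provides a matching pci_resource_destroy()). The function below is an illustrative assumption, not any particular nexus driver's code.

static int
pci_nexus_attach_sketch(dev_info_t *dip)
{
	/* Populate the MEM/IO/busnum maps from "available"/"bus-range". */
	if (pci_resource_setup(dip) != NDI_SUCCESS)
		return (DDI_FAILURE);

	/* ... rest of attach; pci_resource_destroy(dip) on detach ... */
	return (DDI_SUCCESS);
}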
Example No. 19
int
xen_resume_devices(dev_info_t *start, int resume_failed)
{
	dev_info_t *dip, *next, *last = NULL;
	int did_suspend;
	int error = resume_failed;
	char buf[XPV_BUFSIZE];

	SUSPEND_DEBUG("xen_resume_devices\n");

	while (last != start) {
		dip = start;
		next = ddi_get_next_sibling(dip);
		while (next != last) {
			dip = next;
			next = ddi_get_next_sibling(dip);
		}

		/*
		 * Since cpr is the only one that uses this field and the
		 * device itself hasn't resumed yet, there is no need to
		 * take a lock, even though kernel threads are active by now.
		 */
		did_suspend = DEVI(dip)->devi_cpr_flags & DCF_CPR_SUSPENDED;
		if (did_suspend)
			DEVI(dip)->devi_cpr_flags &= ~DCF_CPR_SUSPENDED;

		/*
		 * There may be background attaches happening on devices
		 * that were not originally suspended by cpr, so resume
		 * only devices that were suspended by cpr. Also, stop
		 * resuming after the first resume failure, but traverse
		 * the entire tree to clear the suspend flag.
		 */
		if (did_suspend && !error) {
			SUSPEND_DEBUG("Resuming device %s\n",
			    ddi_deviname(dip, buf));
			/*
			 * If a device suspended by cpr gets detached during
			 * the resume process (for example, due to hotplugging)
			 * before cpr gets around to issuing it a DDI_RESUME,
			 * we'll have problems.
			 */
			if (!i_ddi_devi_attached(dip)) {
				cmn_err(CE_WARN, "Skipping %s, device "
				    "not ready for resume",
				    ddi_deviname(dip, buf));
			} else {
				if (devi_attach(dip, DDI_RESUME) !=
				    DDI_SUCCESS) {
					error = ENXIO;
				}
			}
		}

		if (error == ENXIO) {
			cmn_err(CE_WARN, "Unable to resume device %s",
			    ddi_deviname(dip, buf));
		}

		error = xen_resume_devices(ddi_get_child(dip), error);
		last = dip;
	}

	return (error);
}
Example No. 20
/*
 * Build the reserved ISA irq list, and store it in the table pointed to by
 * reserved_irqs_table. The caller is responsible for allocating this table
 * with a minimum of MAX_ISA_IRQ + 1 entries.
 *
 * The routine looks in the device tree at the subtree rooted at /isa.
 * For each of the devices under that node, if an "interrupts" property
 * is present, its values are used to "reserve" irqs so that later ACPI
 * configuration won't choose those irqs.
 *
 * In addition, if acpi_irq_check_elcr is set, the ELCR register is used
 * to identify reserved IRQs.
 */
void
build_reserved_irqlist(uchar_t *reserved_irqs_table)
{
	dev_info_t *isanode = ddi_find_devinfo("isa", -1, 0);
	dev_info_t *isa_child = 0;
	int i;
	uint_t	elcrval;

	/* Initialize the reserved ISA IRQs: */
	for (i = 0; i <= MAX_ISA_IRQ; i++)
		reserved_irqs_table[i] = 0;

	if (acpi_irq_check_elcr) {

		elcrval = (inb(ELCR_PORT2) << 8) | (inb(ELCR_PORT1));
		if (ELCR_EDGE(elcrval, 0) && ELCR_EDGE(elcrval, 1) &&
		    ELCR_EDGE(elcrval, 2) && ELCR_EDGE(elcrval, 8) &&
		    ELCR_EDGE(elcrval, 13)) {
			/* valid ELCR */
			for (i = 0; i <= MAX_ISA_IRQ; i++)
				if (!ELCR_LEVEL(elcrval, i))
					reserved_irqs_table[i] = 1;
		}
	}

	/* always check the isa devinfo nodes */

	if (isanode != 0) { /* Found ISA */
		uint_t intcnt;		/* Interrupt count */
		int *intrs;		/* Interrupt values */

		/* Load first child: */
		isa_child = ddi_get_child(isanode);
		while (isa_child != 0) { /* Iterate over /isa children */
			/* if child has any interrupts, save them */
			if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, isa_child,
			    DDI_PROP_DONTPASS, "interrupts", &intrs, &intcnt)
			    == DDI_PROP_SUCCESS) {
				/*
				 * iterate over child interrupt list, adding
				 * them to the reserved irq list
				 */
				while (intcnt-- > 0) {
					/*
					 * Each value MUST be <= MAX_ISA_IRQ
					 */

					if ((intrs[intcnt] > MAX_ISA_IRQ) ||
					    (intrs[intcnt] < 0))
						continue;

					reserved_irqs_table[intrs[intcnt]] = 1;
				}
				ddi_prop_free(intrs);
			}
			isa_child = ddi_get_next_sibling(isa_child);
		}
		/* The isa node was held by ddi_find_devinfo, so release it */
		ndi_rele_devi(isanode);
	}

	/*
	 * Reserve IRQ14 & IRQ15 for IDE.  It shouldn't be hard-coded
	 * here but there's no other way to find the irqs for
	 * legacy-mode ata (since it's hard-coded in pci-ide also).
	 */
	reserved_irqs_table[14] = 1;
	reserved_irqs_table[15] = 1;
}
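
A usage sketch: the caller supplies a table of at least MAX_ISA_IRQ + 1 entries, as the comment above requires, and then treats any entry marked 1 as off-limits for ACPI interrupt assignment. The surrounding function is hypothetical.

static void
reserve_isa_irqs_sketch(void)
{
	uchar_t reserved_irqs[MAX_ISA_IRQ + 1];
	int irq;

	build_reserved_irqlist(reserved_irqs);

	for (irq = 0; irq <= MAX_ISA_IRQ; irq++) {
		if (reserved_irqs[irq])
			continue;	/* reserved for ISA devices or IDE */
		/* ... irq is available for ACPI to assign ... */
	}
}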
Example No. 21
void
load_platform_drivers(void)
{
	extern int		watchdog_available;
	extern int		watchdog_enable;
	dev_info_t 		*dip;		/* dip of the isa driver */
	int			simba_present = 0;
	dev_info_t		*root_child_node;
	major_t	major;

	if (ddi_install_driver("power") != DDI_SUCCESS)
		cmn_err(CE_WARN, "Failed to install \"power\" driver.");

	/*
	 * Install the ISA driver. This is required for the southbridge IDE
	 * workaround, which resets the IDE channel during an IDE bus reset.
	 * Panic the system if the ISA driver cannot be loaded or its PCI
	 * config space cannot be accessed, since the register used to reset
	 * the IDE channel lives in ISA config space.
	 */
	root_child_node = ddi_get_child(ddi_root_node());

	while (root_child_node != NULL) {
		if (strcmp(ddi_node_name(root_child_node), "pci") == 0) {
			root_child_node = ddi_get_child(root_child_node);
			if (strcmp(ddi_node_name(root_child_node), "pci") == 0)
				simba_present = 1;
			break;
		}
		root_child_node = ddi_get_next_sibling(root_child_node);
	}

	if (simba_present)
		dip = e_ddi_hold_devi_by_path(PLATFORM_ISA_PATHNAME_WITH_SIMBA,
		    0);
	else
		dip = e_ddi_hold_devi_by_path(PLATFORM_ISA_PATHNAME, 0);

	if (dip == NULL) {
		cmn_err(CE_PANIC, "Could not install the isa driver\n");
		return;
	}

	if (pci_config_setup(dip, &platform_isa_handle) != DDI_SUCCESS) {
		cmn_err(CE_PANIC, "Could not get the config space of isa\n");
		return;
	}

	/*
	 * Load the blade support chip driver.
	 */

	if (((major = ddi_name_to_major(BSC_DRV)) == -1) ||
		(ddi_hold_installed_driver(major) == NULL)) {
		cmn_err(CE_WARN, "%s: failed to load", BSC_DRV);
	} else {

		bsc_drv_func_ptr = (void (*)(struct bscv_idi_info *))
		    modgetsymvalue(BSC_DRV_FUNC, 0);

		if (bsc_drv_func_ptr == NULL) {
			cmn_err(CE_WARN, "load_platform_defaults: %s()"
			" not found; signatures will not be updated\n",
			BSC_DRV_FUNC);
			watchdog_available = 0;
			if (watchdog_enable) {
				cmn_err(CE_WARN, "load_platform_defaults: %s()"
			" not found; BSC OS watchdog service not available\n",
				BSC_DRV_FUNC);
			}
		}
	}
}
Example No. 22
static void
dr_resume_devices(dev_info_t *start, dr_sr_handle_t *srh)
{
	dr_handle_t	*handle;
	dev_info_t	*dip, *next, *last = NULL;
	major_t		major;
	char		*bn;
	int		circ;

	major = (major_t)-1;

	/* attach in reverse device tree order */
	while (last != start) {
		dip = start;
		next = ddi_get_next_sibling(dip);
		while (next != last && dip != srh->sr_failed_dip) {
			dip = next;
			next = ddi_get_next_sibling(dip);
		}
		if (dip == srh->sr_failed_dip) {
			/* release hold acquired in dr_suspend_devices() */
			srh->sr_failed_dip = NULL;
			ndi_rele_devi(dip);
		} else if (dr_is_real_device(dip) &&
				srh->sr_failed_dip == NULL) {

			if ((bn = ddi_binding_name(dip)) != NULL) {
				major = ddi_name_to_major(bn);
			} else {
				bn = "<null>";
			}
			if (!dr_bypass_device(bn) &&
				!drmach_verify_sr(dip, 0)) {
				char	d_name[40], d_alias[40], *d_info;

				d_name[0] = 0;
				d_info = ddi_get_name_addr(dip);
				if (d_info == NULL)
					d_info = "<null>";

				if (!dr_resolve_devname(dip, d_name,
								d_alias)) {
					if (d_alias[0] != 0) {
						prom_printf("\tresuming "
							"%s@%s (aka %s)\n",
							d_name, d_info,
							d_alias);
					} else {
						prom_printf("\tresuming "
							"%s@%s\n",
							d_name, d_info);
					}
				} else {
					prom_printf("\tresuming %s@%s\n",
						bn, d_info);
				}

				if (devi_attach(dip, DDI_RESUME) !=
							DDI_SUCCESS) {
					/*
					 * Print a console warning,
					 * set an e_code of ESBD_RESUME,
					 * and save the driver major
					 * number in the e_rsc.
					 */
					prom_printf("\tFAILED to resume %s@%s",
					    d_name[0] ? d_name : bn, d_info);

					srh->sr_err_idx =
						dr_add_int(srh->sr_err_ints,
						srh->sr_err_idx, DR_MAX_ERR_INT,
						(uint64_t)major);

					handle = srh->sr_dr_handlep;

					dr_op_err(CE_IGNORE, handle,
					    ESBD_RESUME, "%s@%s",
					    d_name[0] ? d_name : bn, d_info);
				}
			}
		}

		/* Hold parent busy while walking its children */
		ndi_devi_enter(dip, &circ);
		dr_resume_devices(ddi_get_child(dip), srh);
		ndi_devi_exit(dip, circ);
		last = dip;
	}
}
Example No. 23
/*
 * The "dip" argument's parent (if it exists) must be held busy.
 */
static int
dr_suspend_devices(dev_info_t *dip, dr_sr_handle_t *srh)
{
	dr_handle_t	*handle;
	major_t		major;
	char		*dname;
	int		circ;

	/*
	 * If dip is the root node, it has no siblings and it is
	 * always held. If dip is not the root node, dr_suspend_devices()
	 * will be invoked with the parent held busy.
	 */
	for (; dip != NULL; dip = ddi_get_next_sibling(dip)) {
		char	d_name[40], d_alias[40], *d_info;

		ndi_devi_enter(dip, &circ);
		if (dr_suspend_devices(ddi_get_child(dip), srh)) {
			ndi_devi_exit(dip, circ);
			return (ENXIO);
		}
		ndi_devi_exit(dip, circ);

		if (!dr_is_real_device(dip))
			continue;

		major = (major_t)-1;
		if ((dname = ddi_binding_name(dip)) != NULL)
			major = ddi_name_to_major(dname);

		if (dr_bypass_device(dname)) {
			PR_QR(" bypassed suspend of %s (major# %d)\n", dname,
				major);
			continue;
		}

		if (drmach_verify_sr(dip, 1)) {
			PR_QR(" bypassed suspend of %s (major# %d)\n", dname,
				major);
			continue;
		}

		if ((d_info = ddi_get_name_addr(dip)) == NULL)
			d_info = "<null>";

		d_name[0] = 0;
		if (dr_resolve_devname(dip, d_name, d_alias) == 0) {
			if (d_alias[0] != 0) {
				prom_printf("\tsuspending %s@%s (aka %s)\n",
					d_name, d_info, d_alias);
			} else {
				prom_printf("\tsuspending %s@%s\n",
					d_name, d_info);
			}
		} else {
			prom_printf("\tsuspending %s@%s\n", dname, d_info);
		}

		if (devi_detach(dip, DDI_SUSPEND) != DDI_SUCCESS) {
			prom_printf("\tFAILED to suspend %s@%s\n",
				d_name[0] ? d_name : dname, d_info);

			srh->sr_err_idx = dr_add_int(srh->sr_err_ints,
				srh->sr_err_idx, DR_MAX_ERR_INT,
				(uint64_t)major);

			ndi_hold_devi(dip);
			srh->sr_failed_dip = dip;

			handle = srh->sr_dr_handlep;
			dr_op_err(CE_IGNORE, handle, ESBD_SUSPEND, "%s@%s",
				d_name[0] ? d_name : dname, d_info);

			return (DDI_FAILURE);
		}
	}

	return (DDI_SUCCESS);
}
Example No. 24
uint64_t *
get_intr_mapping_reg(int upaid, int slave)
{
	int affin_upaid;
	dev_info_t *affin_dip;
	uint64_t *addr = intr_map_reg[upaid];

	/* If we're a UPA master, or we have a valid mapping register. */
	if (!slave || addr != NULL)
		return (addr);

	/*
	 * We only get here if we're a UPA slave-only device whose interrupt
	 * mapping register has not been set.
	 * We need to try to install the nexus whose physical address
	 * space is where the slave's mapping register resides.  That nexus
	 * should call set_intr_mapping_reg() in its xxattach() to register
	 * the mapping register with the system.
	 */

	/*
	 * We don't know whether a single- or multi-interrupt proxy is
	 * fielding our UPA slave interrupt, so we must check both cases.
	 * Start out by assuming the multi-interrupt case.
	 * We assume that single- and multi-interrupters do not overlap
	 * in UPA portid space.
	 */

	affin_upaid = upaid | 3;

	/*
	 * We start looking for the multi-interrupter affinity node.
	 * We know it's ONLY a child of the root node since the root
	 * node defines UPA space.
	 */
	for (affin_dip = ddi_get_child(ddi_root_node()); affin_dip;
	    affin_dip = ddi_get_next_sibling(affin_dip))
		if (ddi_prop_get_int(DDI_DEV_T_ANY, affin_dip,
		    DDI_PROP_DONTPASS, "upa-portid", -1) == affin_upaid)
			break;

	if (affin_dip) {
		if (i_ddi_attach_node_hierarchy(affin_dip) == DDI_SUCCESS) {
			/* try again to get the mapping register. */
			addr = intr_map_reg[upaid];
		}
	}

	/*
	 * If we still don't have a mapping register, try the
	 * single-interrupter case.
	 */
	if (addr == NULL) {

		affin_upaid = upaid | 1;

		for (affin_dip = ddi_get_child(ddi_root_node()); affin_dip;
		    affin_dip = ddi_get_next_sibling(affin_dip))
			if (ddi_prop_get_int(DDI_DEV_T_ANY, affin_dip,
			    DDI_PROP_DONTPASS, "upa-portid", -1) == affin_upaid)
				break;

		if (affin_dip) {
			if (i_ddi_attach_node_hierarchy(affin_dip)
			    == DDI_SUCCESS) {
				/* try again to get the mapping register. */
				addr = intr_map_reg[upaid];
			}
		}
	}
	return (addr);
}
Example No. 25
/*
 * cnex_find_chan_dip -- Find the dip of the device that corresponds
 *	to the specified channel. Below are the details on how the dip
 *	is derived.
 *
 *	- In the MD, the cfg-handle is expected to be unique for
 *	  virtual-device nodes that have the same 'name' property value.
 *	  This value is expected to be the same as that of "reg" property
 *	  of the corresponding OBP device node.
 *
 *	- The value of the 'name' property of a virtual-device node
 *	  in the MD is expected to be the same for the corresponding
 *	  OBP device node.
 *
 *	- Find the virtual-device node corresponding to a channel-endpoint
 *	  by walking backwards. Then obtain the values for the 'name' and
 *	  'cfg-handle' properties.
 *
 *	- Walk all the children of the cnex, find a matching dip which
 *	  has the same 'name' and 'reg' property values.
 *
 *	- The channels that have no corresponding device driver are
 *	  treated as if they correspond to the cnex driver, that is,
 *	  the cnex dip is returned for them. This means the cnex acts
 *	  as an umbrella device driver. Note that this is for
 *	  'intrstat' statistics purposes only; as a result, 'intrstat'
 *	  shows cnex as the device servicing the interrupts that
 *	  correspond to these channels.
 *
 *	  For now, only one such case is known, that is, the channels that
 *	  are used by the "domain-services".
 */
static dev_info_t *
cnex_find_chan_dip(dev_info_t *dip, uint64_t chan_id,
    md_t *mdp, mde_cookie_t mde)
{
	int listsz;
	int num_nodes;
	int num_devs;
	uint64_t cfghdl;
	char *md_name;
	mde_cookie_t *listp;
	dev_info_t *cdip = NULL;

	num_nodes = md_node_count(mdp);
	ASSERT(num_nodes > 0);
	listsz = num_nodes * sizeof (mde_cookie_t);
	listp = (mde_cookie_t *)kmem_zalloc(listsz, KM_SLEEP);

	num_devs = md_scan_dag(mdp, mde, md_find_name(mdp, "virtual-device"),
	    md_find_name(mdp, "back"), listp);
	ASSERT(num_devs <= 1);
	if (num_devs <= 0) {
		DWARN("cnex_find_chan_dip:channel(0x%llx): "
		    "No virtual-device found\n", chan_id);
		goto fdip_exit;
	}
	if (md_get_prop_str(mdp, listp[0], "name", &md_name) != 0) {
		DWARN("cnex_find_chan_dip:channel(0x%llx): "
		    "name property not found\n", chan_id);
		goto fdip_exit;
	}

	D1("cnex_find_chan_dip: channel(0x%llx): virtual-device "
	    "name property value = %s\n", chan_id, md_name);

	if (md_get_prop_val(mdp, listp[0], "cfg-handle", &cfghdl) != 0) {
		DWARN("cnex_find_chan_dip:channel(0x%llx): virtual-device's "
		    "cfg-handle property not found\n", chan_id);
		goto fdip_exit;
	}

	D1("cnex_find_chan_dip:channel(0x%llx): virtual-device cfg-handle "
	    " property value = 0x%x\n", chan_id, cfghdl);

	for (cdip = ddi_get_child(dip); cdip != NULL;
	    cdip = ddi_get_next_sibling(cdip)) {

		int *cnex_regspec;
		uint32_t reglen;
		char	*dev_name;

		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS, "name",
		    &dev_name) != DDI_PROP_SUCCESS) {
			DWARN("cnex_find_chan_dip: name property not"
			    " found for dip(0x%p)\n", cdip);
			continue;
		}
		if (strcmp(md_name, dev_name) != 0) {
			ddi_prop_free(dev_name);
			continue;
		}
		ddi_prop_free(dev_name);
		if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
		    DDI_PROP_DONTPASS, "reg",
		    &cnex_regspec, &reglen) != DDI_SUCCESS) {
			DWARN("cnex_find_chan_dip: reg property not"
			    " found for dip(0x%p)\n", cdip);
			continue;
		}
		if (*cnex_regspec == cfghdl) {
			D1("cnex_find_chan_dip:channel(0x%llx): found "
			    "dip(0x%p) drvname=%s\n", chan_id, cdip,
			    ddi_driver_name(cdip));
			ddi_prop_free(cnex_regspec);
			break;
		}
		ddi_prop_free(cnex_regspec);
	}

fdip_exit:
	if (cdip == NULL) {
		/*
		 * If a virtual-device node exists but no dip found,
		 * then for now print a DEBUG error message only.
		 */
		if (num_devs > 0) {
			DERR("cnex_find_chan_dip:channel(0x%llx): "
			    "No device found\n", chan_id);
		}

		/* If no dip was found, return cnex device's dip. */
		cdip = dip;
	}

	kmem_free(listp, listsz);
	D1("cnex_find_chan_dip:channel(0x%llx): returning dip=0x%p\n",
	    chan_id, cdip);
	return (cdip);
}