Example #1
/*
 * Validate that the device in hand is indeed virtio network device
 */
static int
virtio_validate_pcidev(dev_info_t *dip)
{
	ddi_acc_handle_t	pcihdl;
	int			rc;

	rc = pci_config_setup(dip, &pcihdl);
	if (rc != DDI_SUCCESS) {
		return (DDI_FAILURE);
	}

	if (pci_config_get16(pcihdl, PCI_CONF_VENID) != VIRTIO_PCI_VENDOR) {
		cmn_err(CE_WARN, "Incorrect PCI vendor id");
		rc = DDI_FAILURE;
	}

	uint16_t devid = pci_config_get16(pcihdl, PCI_CONF_DEVID);

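	/*
	 * Device IDs outside the [VIRTIO_PCI_DEVID_MIN, VIRTIO_PCI_DEVID_MAX]
	 * range do not belong to a legacy virtio device.
	 */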
	if ((devid < VIRTIO_PCI_DEVID_MIN) || (devid > VIRTIO_PCI_DEVID_MAX)) {
		cmn_err(CE_WARN, "Incorrect PCI device id");
		rc = DDI_FAILURE;
	}

	if (pci_config_get16(pcihdl, PCI_CONF_REVID) != VIRTIO_PCI_REV_ABIV0) {
		cmn_err(CE_WARN, "Unsupported virtio ABI detected");
		rc = DDI_FAILURE;
	}

	pci_config_teardown(&pcihdl);
	return (rc);
}
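
Every example in this listing follows the same discipline: obtain a configuration-space access handle with pci_config_setup(9F), use the pci_config_get/put routines against that handle, and release it with pci_config_teardown(9F) on every exit path. The minimal sketch below distills that pattern; it is not taken from any of the drivers quoted here, and the function name and the expected_vendor parameter are purely illustrative.

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/pci.h>

static int
example_check_vendor(dev_info_t *dip, uint16_t expected_vendor)
{
	ddi_acc_handle_t hdl;
	int rc = DDI_SUCCESS;

	if (pci_config_setup(dip, &hdl) != DDI_SUCCESS)
		return (DDI_FAILURE);

	if (pci_config_get16(hdl, PCI_CONF_VENID) != expected_vendor)
		rc = DDI_FAILURE;

	/* Release the handle on success and failure alike. */
	pci_config_teardown(&hdl);
	return (rc);
}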
Example #2
/*
 * ppb_save_config_regs
 *
 * This routine saves the state of the configuration registers of all
 * the child nodes of each PBM.
 *
 * used by: ppb_detach() on suspend
 *
 * return value: none
 */
static void
ppb_save_config_regs(ppb_devstate_t *ppb_p)
{
	int i;
	dev_info_t *dip;
	ddi_acc_handle_t config_handle;

	for (i = 0, dip = ddi_get_child(ppb_p->dip); dip != NULL;
	    i++, dip = ddi_get_next_sibling(dip)) {

		if (pci_config_setup(dip, &config_handle) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: can't setup config space for %s%d\n",
			    ddi_driver_name(ppb_p->dip),
			    ddi_get_instance(ppb_p->dip),
			    ddi_driver_name(dip),
			    ddi_get_instance(dip));
			continue;
		}

		ppb_p->config_state[i].dip = dip;
		ppb_p->config_state[i].command =
		    pci_config_get16(config_handle, PCI_CONF_COMM);
		pci_config_teardown(&config_handle);
	}
	ppb_p->config_state_index = i;
}
Example #3
/*
 * detach entry point:
 */
static int
pmubus_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance = ddi_get_instance(dip);
	pmubus_devstate_t *pmubusp = ddi_get_soft_state(per_pmubus_state,
	    instance);

	switch (cmd) {
	case DDI_DETACH:
		mutex_destroy(&pmubusp->pmubus_reg_access_lock);

		/* Tear down our register mappings */
		pci_config_teardown(&pmubusp->pmubus_reghdl);

		/* Free our ranges property */
		kmem_free(pmubusp->pmubus_rangep, pmubusp->pmubus_rnglen);

		/* Free the register property */
		kmem_free(pmubusp->pmubus_regp, pmubusp->pmubus_reglen);

		ddi_soft_state_free(per_pmubus_state, instance);
		break;

	case DDI_SUSPEND:
	default:
		break;
	}

	return (DDI_SUCCESS);
}
Example #4
void
npe_enable_htmsi_children(dev_info_t *dip)
{
	dev_info_t *cdip = ddi_get_child(dip);
	ddi_acc_handle_t cfg_hdl;

	if (!npe_enable_htmsi_flag)
		return;

	/*
	 * Hypertransport MSI remapping only applies to AMD CPUs using
	 * Hypertransport (K8 and above) and not other platforms with non-AMD
	 * CPUs that may be using Hypertransport internally in the chipset(s)
	 */
	if (!(cpuid_getvendor(CPU) == X86_VENDOR_AMD &&
	    cpuid_getfamily(CPU) >= 0xf))
		return;

	for (; cdip != NULL; cdip = ddi_get_next_sibling(cdip)) {
		if (pci_config_setup(cdip, &cfg_hdl) != DDI_SUCCESS) {
			cmn_err(CE_NOTE, "!npe_enable_htmsi_children: "
			    "pci_config_setup failed for %s",
			    ddi_node_name(cdip));
			return;
		}

		(void) npe_enable_htmsi(cfg_hdl);
		pci_config_teardown(&cfg_hdl);
	}
}
Example #5
/*
 * pcmu_init_child
 *
 * This function is called from our control ops routine on a
 * DDI_CTLOPS_INITCHILD request.  It builds and sets the device's
 * parent private data area.
 *
 * used by: pcmu_ctlops()
 *
 * return value: DDI_SUCCESS or DDI_FAILURE
 */
int
pcmu_init_child(pcmu_t *pcmu_p, dev_info_t *child)
{
	char name[10];
	ddi_acc_handle_t config_handle;
	uint8_t bcr;
	uint8_t header_type;

	if (name_child(child, name, 10) != DDI_SUCCESS)
		return (DDI_FAILURE);
	ddi_set_name_addr(child, name);

	PCMU_DBG2(PCMU_DBG_PWR, ddi_get_parent(child),
	    "INITCHILD: config regs setup for %s@%s\n",
	    ddi_node_name(child), ddi_get_name_addr(child));

	/*
	 * Map the child's configuration space for initialization.
	 * We assume that OBP has done the following in the device's
	 * config space:
	 *
	 *	Set the latency-timer register to a value appropriate
	 *	for the devices on the bus (based on the other devices'
	 *	MIN_GNT and MAX_LAT registers).
	 *
	 *	Set the fast back-to-back enable bit in the command
	 *	register if it's supported and all devices on the bus
	 *	have the capability.
	 *
	 */
	if (pci_config_setup(child, &config_handle) != DDI_SUCCESS) {
		ddi_set_name_addr(child, NULL);
		return (DDI_FAILURE);
	}

	/*
	 * Determine the configuration header type.
	 */
	header_type = pci_config_get8(config_handle, PCI_CONF_HEADER);
	PCMU_DBG2(PCMU_DBG_INIT_CLD, pcmu_p->pcmu_dip, "%s: header_type=%x\n",
	    ddi_driver_name(child), header_type);

	/*
	 * If the device has a bus control register then program it
	 * based on the settings in the command register.
	 */
	if ((header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		bcr = pci_config_get8(config_handle, PCI_BCNF_BCNTRL);
		if (pcmu_command_default & PCI_COMM_PARITY_DETECT)
			bcr |= PCI_BCNF_BCNTRL_PARITY_ENABLE;
		if (pcmu_command_default & PCI_COMM_SERR_ENABLE)
			bcr |= PCI_BCNF_BCNTRL_SERR_ENABLE;
		bcr |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
		pci_config_put8(config_handle, PCI_BCNF_BCNTRL, bcr);
	}

	pci_config_teardown(&config_handle);
	return (DDI_SUCCESS);
}
Example #6
/*
 * intercept certain interrupt services to handle special cases
 */
static int
ppb_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	ddi_acc_handle_t cfg_hdl;
	int rv = DDI_SUCCESS;

	if (intr_op != DDI_INTROP_SUPPORTED_TYPES)
		return (i_ddi_intr_ops(pdip, rdip, intr_op, hdlp, result));

	DDI_INTR_NEXDBG((CE_CONT,
	    "ppb_intr_ops: pdip 0x%p, rdip 0x%p, op %x handle 0x%p\n",
	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));

	/* Fixed interrupt is supported by default */
	*(int *)result = DDI_INTR_TYPE_FIXED;

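	/*
	 * ppb_support_msi is a tri-state tunable: -1 never allows MSI
	 * through this bridge, 1 always allows it, and any other value
	 * allows it only when the bridge has a usable HyperTransport
	 * MSI mapping capability (checked below).
	 */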
	if (ppb_support_msi == -1) {
		DDI_INTR_NEXDBG((CE_CONT,
		    "ppb_intr_ops: MSI is not allowed\n"));
		goto OUT;
	}

	if (ppb_support_msi == 1) {
		DDI_INTR_NEXDBG((CE_CONT,
		    "ppb_intr_ops: MSI is always allowed\n"));
		rv = i_ddi_intr_ops(pdip, rdip, intr_op, hdlp, result);
		goto OUT;
	}

	if (pci_config_setup(pdip, &cfg_hdl) != DDI_SUCCESS) {
		DDI_INTR_NEXDBG((CE_CONT,
		    "ppb_intr_ops: pci_config_setup() failed\n"));
		goto OUT;
	}

	/*
	 * check for hypertransport msi mapping capability
	 */
	if (ppb_ht_msimap_check(cfg_hdl)) {
		DDI_INTR_NEXDBG((CE_CONT,
		    "ppb_intr_ops: HT MSI mapping enabled\n"));
		rv = i_ddi_intr_ops(pdip, rdip, intr_op, hdlp, result);
	}

	/*
	 * if we add failure conditions after pci_config_setup, move this to
	 * OUT and use an extra flag to indicate the need to teardown cfg_hdl
	 */
	pci_config_teardown(&cfg_hdl);

OUT:
	DDI_INTR_NEXDBG((CE_CONT,
	    "ppb_intr_ops: rdip 0x%p, returns supported types: 0x%x\n",
	    (void *)rdip, *(int *)result));
	return (rv);
}
Example #7
/*
 * Restore memory controller's configuration and release resources.
 */
static void
fipe_mc_fini(void)
{
	if (fipe_mc_ctrl.mc_initialized) {
		fipe_mc_restore();
		pci_config_teardown(&fipe_mc_ctrl.mc_pci_hdl);
		ndi_rele_devi(fipe_mc_ctrl.mc_dip);
		fipe_mc_ctrl.mc_initialized = B_FALSE;
	}
	bzero(&fipe_mc_ctrl, sizeof (fipe_mc_ctrl));
}
Example #8
/*
 * audioixp_unmap_regs()
 *
 * Description:
 *	This routine unmaps control registers.
 *
 * Arguments:
 *	audioixp_state_t	*state		The device's state structure
 */
static void
audioixp_unmap_regs(audioixp_state_t *statep)
{
	if (statep->regsh) {
		ddi_regs_map_free(&statep->regsh);
	}

	if (statep->pcih) {
		pci_config_teardown(&statep->pcih);
	}
}
Example #9
/*
 * Examine devinfo node to determine if it is a PCI-PCI bridge
 *
 * Returns:
 *	0 if not a bridge or error
 *	1 if a bridge
 */
static int
psm_is_pci_bridge(dev_info_t *dip)
{
	ddi_acc_handle_t cfg_handle;
	int rv = 0;

	if (pci_config_setup(dip, &cfg_handle) == DDI_SUCCESS) {
		rv = ((pci_config_get8(cfg_handle, PCI_CONF_BASCLASS) ==
		    PCI_CLASS_BRIDGE) && (pci_config_get8(cfg_handle,
		    PCI_CONF_SUBCLASS) == PCI_BRIDGE_PCI));
		pci_config_teardown(&cfg_handle);
	}

	return (rv);
}
Example #10
static int
agp_target_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	agp_target_softstate_t *softstate;
	int instance;
	int status;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(dip);

	if (ddi_soft_state_zalloc(agptarget_glob_soft_handle, instance) !=
	    DDI_SUCCESS)
		return (DDI_FAILURE);

	softstate = ddi_get_soft_state(agptarget_glob_soft_handle, instance);
	mutex_init(&softstate->tsoft_lock, NULL, MUTEX_DRIVER, NULL);
	softstate->tsoft_dip = dip;
	status = pci_config_setup(dip, &softstate->tsoft_pcihdl);
	if (status != DDI_SUCCESS) {
		ddi_soft_state_free(agptarget_glob_soft_handle, instance);
		return (DDI_FAILURE);
	}

	softstate->tsoft_devid = pci_config_get32(softstate->tsoft_pcihdl,
	    PCI_CONF_VENID);
	softstate->tsoft_acaptr = agp_target_cap_find(softstate->tsoft_pcihdl);
	if (softstate->tsoft_acaptr == 0) {
		/* Make a correction for some Intel chipsets */
		if ((softstate->tsoft_devid & VENDOR_ID_MASK) ==
		    INTEL_VENDOR_ID) {
			softstate->tsoft_acaptr = AGP_CAP_OFF_DEF;
		} else {
			/* don't leak the config handle or the soft state */
			pci_config_teardown(&softstate->tsoft_pcihdl);
			ddi_soft_state_free(agptarget_glob_soft_handle,
			    instance);
			return (DDI_FAILURE);
		}
	}

	status = ddi_create_minor_node(dip, AGPTARGET_NAME, S_IFCHR,
	    INST2NODENUM(instance), DDI_NT_AGP_TARGET, 0);

	if (status != DDI_SUCCESS) {
		pci_config_teardown(&softstate->tsoft_pcihdl);
		ddi_soft_state_free(agptarget_glob_soft_handle, instance);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
Example #11
void
npe_enable_htmsi_children(dev_info_t *dip)
{
	dev_info_t *cdip = ddi_get_child(dip);
	ddi_acc_handle_t cfg_hdl;

	for (; cdip != NULL; cdip = ddi_get_next_sibling(cdip)) {
		if (pci_config_setup(cdip, &cfg_hdl) != DDI_SUCCESS) {
			cmn_err(CE_NOTE, "!npe_enable_htmsi_children: "
			    "pci_config_setup failed for %s",
			    ddi_node_name(cdip));
			/* don't use or tear down an unmapped handle */
			continue;
		}

		(void) npe_enable_htmsi(cfg_hdl);
		pci_config_teardown(&cfg_hdl);
	}
}
Example #12
/*ARGSUSED*/
static int
agp_target_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int instance;
	agp_target_softstate_t *softstate;

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(dip);

	softstate = ddi_get_soft_state(agptarget_glob_soft_handle, instance);

	ddi_remove_minor_node(dip, AGPTARGET_NAME);
	pci_config_teardown(&softstate->tsoft_pcihdl);
	mutex_destroy(&softstate->tsoft_lock);
	ddi_soft_state_free(agptarget_glob_soft_handle, instance);
	return (DDI_SUCCESS);
}
Example #13
/*
 * Save the config regs of HyperTransport devices that have no driver
 * attached and whose class is memory controller or host bridge.
 */
int
npe_save_htconfig_children(dev_info_t *dip)
{
	dev_info_t *cdip = ddi_get_child(dip);
	ddi_acc_handle_t cfg_hdl;
	uint16_t ptr;
	int rval = DDI_SUCCESS;
	uint8_t cl, scl;

	for (; cdip != NULL; cdip = ddi_get_next_sibling(cdip)) {
		if (ddi_driver_major(cdip) != DDI_MAJOR_T_NONE)
			continue;

		if (pci_config_setup(cdip, &cfg_hdl) != DDI_SUCCESS)
			return (DDI_FAILURE);

		cl = pci_config_get8(cfg_hdl, PCI_CONF_BASCLASS);
		scl = pci_config_get8(cfg_hdl, PCI_CONF_SUBCLASS);

		if (((cl == PCI_CLASS_MEM && scl == PCI_MEM_RAM) ||
		    (cl == PCI_CLASS_BRIDGE && scl == PCI_BRIDGE_HOST)) &&
		    pci_htcap_locate(cfg_hdl, 0, 0, &ptr) == DDI_SUCCESS) {

			if (pci_save_config_regs(cdip) != DDI_SUCCESS) {
				cmn_err(CE_WARN, "Failed to save HT config "
				    "regs for %s\n", ddi_node_name(cdip));
				rval = DDI_FAILURE;

			} else if (ddi_prop_update_int(DDI_DEV_T_NONE, cdip,
			    "htconfig-saved", 1) != DDI_SUCCESS) {
				cmn_err(CE_WARN, "Failed to set htconfig-saved "
				    "property for %s\n", ddi_node_name(cdip));
				rval = DDI_FAILURE;
			}
		}

		pci_config_teardown(&cfg_hdl);
	}

	return (rval);
}
Example #14
/*
 * ppb_restore_config_regs
 *
 * This routine restores the state of the configuration registers of all
 * the child nodes of each PBM.
 *
 * used by: ppb_attach() on resume
 *
 * return value: none
 */
static void
ppb_restore_config_regs(ppb_devstate_t *ppb_p)
{
	int i;
	dev_info_t *dip;
	ddi_acc_handle_t config_handle;

	for (i = 0; i < ppb_p->config_state_index; i++) {
		dip = ppb_p->config_state[i].dip;
		if (pci_config_setup(dip, &config_handle) != DDI_SUCCESS) {
			cmn_err(CE_WARN, "%s%d: can't setup config space for %s%d\n",
			    ddi_driver_name(ppb_p->dip),
			    ddi_get_instance(ppb_p->dip),
			    ddi_driver_name(dip),
			    ddi_get_instance(dip));
			continue;
		}
		pci_config_put16(config_handle, PCI_CONF_COMM,
		    ppb_p->config_state[i].command);
		pci_config_teardown(&config_handle);
	}
}
Example #15
/*
 * Remove PM state for nexus.
 */
static void
ppb_pwr_teardown(ppb_devstate_t *ppb, dev_info_t *dip)
{
	int low_lvl;

	/*
	 * Determine the lowest power level supported.
	 */
	if (ppb->ppb_pwr_p->pwr_flags & PCI_PWR_B3_CAPABLE) {
		low_lvl = PM_LEVEL_B3;
	} else {
		low_lvl = PM_LEVEL_B2;
	}

	if (pm_lower_power(dip, PCI_PM_COMP_0, low_lvl) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d failed to lower power",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}

	pci_config_teardown(&ppb->ppb_conf_hdl);
	mutex_destroy(&ppb->ppb_pwr_p->pwr_mutex);
	kmem_free(ppb->ppb_pwr_p, sizeof (pci_pwr_t));

	if (ddi_prop_remove(DDI_DEV_T_NONE, dip, "pm-components") !=
	    DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "%s%d unable to remove prop pm-components",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}

	if (ddi_prop_remove(DDI_DEV_T_NONE, dip,
	    "pm-want-child-notification?") != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d unable to remove prop pm-want-child-notification?",
		    ddi_driver_name(dip), ddi_get_instance(dip));
	}
}
Example #16
/*
 * The VGA device could be under a subtractive PCI bridge on some systems.
 * Though the PCI_BCNF_BCNTRL_VGA_ENABLE bit is not set on such a subtractive
 * PCI bridge, the bridge can still forward VGA accesses if no other agent
 * claims them.
 * The vga_enable element in param acts as a flag: if it is not set, skip
 * checking the PCI_BCNF_BCNTRL_VGA_ENABLE bit of the PCI bridges during
 * the search.
 */
static int
find_fb_dev(dev_info_t *dip, void *param)
{
	struct find_fb_dev_param *p = param;
	char *dev_type;
	dev_info_t *pdip;
	char *parent_type;

	if (dip == ddi_root_node())
		return (DDI_WALK_CONTINUE);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "device_type", &dev_type) != DDI_SUCCESS)
		return (DDI_WALK_PRUNECHILD);

	if ((strcmp(dev_type, "isa") == 0) || (strcmp(dev_type, "eisa") == 0)) {
		ddi_prop_free(dev_type);
		return (DDI_WALK_CONTINUE);
	}

	if ((strcmp(dev_type, "pci") == 0) ||
	    (strcmp(dev_type, "pciex") == 0)) {
		ddi_acc_handle_t pci_conf;
		uint16_t data16;
		char *nodename;

		ddi_prop_free(dev_type);

		if (!p->vga_enable)
			return (DDI_WALK_CONTINUE);

		nodename = ddi_node_name(dip);

		/*
		 * If the node is not a PCI-to-PCI bridge, continue traversing
		 * (it could be the root node), otherwise, check for the
		 * VGAEnable bit to be set in the Bridge Control Register.
		 */
		if (strcmp(nodename, "pci") == 0) {
			if (is_pci_bridge(dip) == B_FALSE)
				return (DDI_WALK_CONTINUE);
		}

		if (i_ddi_attach_node_hierarchy(dip) != DDI_SUCCESS)
			return (DDI_WALK_PRUNECHILD);

		if (pci_config_setup(dip, &pci_conf) != DDI_SUCCESS)
			return (DDI_WALK_PRUNECHILD);

		data16 = pci_config_get16(pci_conf, PCI_BCNF_BCNTRL);
		pci_config_teardown(&pci_conf);

		if (data16 & PCI_BCNF_BCNTRL_VGA_ENABLE)
			return (DDI_WALK_CONTINUE);

		return (DDI_WALK_PRUNECHILD);
	}

	if (strcmp(dev_type, "display") != 0) {
		ddi_prop_free(dev_type);
		return (DDI_WALK_CONTINUE);
	}

	ddi_prop_free(dev_type);

	if ((pdip = ddi_get_parent(dip)) == NULL)
		return (DDI_WALK_PRUNECHILD);

	if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
	    "device_type", &parent_type) != DDI_SUCCESS)
		return (DDI_WALK_PRUNECHILD);

	if ((strcmp(parent_type, "isa") == 0) ||
	    (strcmp(parent_type, "eisa") == 0)) {
		p->found_dip = dip;
		ddi_prop_free(parent_type);
		return (DDI_WALK_TERMINATE);
	}

	if ((strcmp(parent_type, "pci") == 0) ||
	    (strcmp(parent_type, "pciex") == 0)) {
		ddi_acc_handle_t pci_conf;
		uint16_t data16;

		ddi_prop_free(parent_type);

		if (i_ddi_attach_node_hierarchy(dip) != DDI_SUCCESS)
			return (DDI_WALK_PRUNECHILD);

		if (pci_config_setup(dip, &pci_conf) != DDI_SUCCESS)
			return (DDI_WALK_PRUNECHILD);

		data16 = pci_config_get16(pci_conf, PCI_CONF_COMM);
		pci_config_teardown(&pci_conf);

		if (!(data16 & PCI_COMM_IO))
			return (DDI_WALK_PRUNECHILD);

		p->found_dip = dip;
		return (DDI_WALK_TERMINATE);
	}

	ddi_prop_free(parent_type);
	return (DDI_WALK_PRUNECHILD);
}
Example #17
/*
 * Controller specific initialization
 */
uint_t
sil3xxx_init_controller(dev_info_t *dip,
	/* LINTED */
	ushort_t vendor_id, ushort_t device_id)
{
	ddi_acc_handle_t  pci_conf_handle; /* pci config space handle */
	uint8_t cache_lnsz, frrc = 0;
	uint32_t fifo_cnt_ctl;
	int ports, i;

#ifdef	DEBUG
	/* LINTED */
	ushort_t sfiscfg_val;
#endif

	/*
	 * Sil3114, Sil3512, Sil3112
	 * We want to perform this initialization only once per entire
	 * pciide controller (all channels)
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, ddi_get_parent(dip),
		DDI_PROP_DONTPASS, "sil3xxx-initialized")) {
		return (TRUE);
	}

	if (pci_config_setup(ddi_get_parent(dip), &pci_conf_handle) !=
	    DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "sil3xxx_init_controller: Can't do pci_config_setup\n");
		return (FALSE);
	}

	/*
	 * Sil3114/3512/3112 incorrectly change between MR and back to
	 * MRM for same transaction, which violates the PCI spec and can
	 * lead to incorrect data reads.  The workaround
	 * is to set bits 2:0 in the FIFO count and control register so
	 * that its value, a multiple of 32 bytes starting at 32, not 0,
 * is greater than or equal to the cacheline size, a multiple of 4
	 * bytes.  This will prevent any reads until the FIFO free space
	 * is greater than a cacheline size, ensuring only MRM is issued.
	 */

	cache_lnsz = pci_config_get8(pci_conf_handle, PCI_CONF_CACHE_LINESZ);

	/*
	 * The cache line is specified in 32-bit words, so multiply by 4
	 * to get bytes.  Then divide by 32 bytes, the granularity of the
	 * FIFO control bits 2:0.  Add 1 if there is any remainder to
	 * account for a partial 32-byte block, then subtract 1 since for
	 * FIFO controls bits 2:0, 0 corresponds to 32, 1 corresponds to
	 * 64, and so on.  The calculation is expanded for clarity.
	 */
	if (cache_lnsz != 0) {
		frrc = (cache_lnsz * 4 / 32) +
			(((cache_lnsz * 4) % 32) ? 1 : 0) - 1;
	}
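	/*
	 * Worked example: a cache line size of 0x10 words is 64 bytes;
	 * 64 / 32 = 2 with no remainder, so frrc = 2 + 0 - 1 = 1, and an
	 * encoding of 1 in bits 2:0 selects the 64-byte FIFO threshold.
	 */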

	if (device_id == SIL3114_DEVICE_ID) {
		ports = 4;
	} else {
		ports = 2;
	}

	/*
	 * The following BAR5 registers are accessed via an indirect register
	 * in the PCI configuration space rather than mapping BAR5.
	 */
	for (i = 0; i < ports; i++) {
		GET_BAR5_INDIRECT(pci_conf_handle, fifocntctl[i],
		    fifo_cnt_ctl);
		fifo_cnt_ctl = (fifo_cnt_ctl & ~0x7) | (frrc & 0x7);
		PUT_BAR5_INDIRECT(pci_conf_handle, fifocntctl[i],
		    fifo_cnt_ctl);
		/*
		 * Correct default setting for FIS0cfg
		 */
#ifdef	DEBUG
		GET_BAR5_INDIRECT(pci_conf_handle, sfiscfg[i],
			sfiscfg_val);
		ADBG_WARN(("sil3xxx_init_controller: old val SFISCfg "
			"ch%d: %x\n", i, sfiscfg_val));
#endif
		PUT_BAR5_INDIRECT(pci_conf_handle, sfiscfg[i],
			SFISCFG_ERRATA);
#ifdef	DEBUG
		GET_BAR5_INDIRECT(pci_conf_handle, sfiscfg[i],
			sfiscfg_val);
		ADBG_WARN(("sil3xxx_init_controller: new val SFISCfg "
			"ch%d: %x\n", i, sfiscfg_val));
#endif
	}

	/* Now tear down the pci config setup */
	pci_config_teardown(&pci_conf_handle);

	/* Create property indicating that initialization was done */
	(void) ddi_prop_update_int(DDI_DEV_T_NONE, ddi_get_parent(dip),
		"sil3xxx-initialized", 1);

	return (TRUE);
}
Example #18
/*ARGSUSED*/
static int
ppb_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	dev_info_t *root = ddi_root_node();
	int instance;
	ppb_devstate_t *ppb;
	dev_info_t *pdip;
	ddi_acc_handle_t config_handle;
	char *bus;
	int ret;

	switch (cmd) {
	case DDI_ATTACH:

		/*
		 * Make sure the "device_type" property exists.
		 */
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
		    "device_type", "pci");

		/*
		 * Allocate and get soft state structure.
		 */
		instance = ddi_get_instance(devi);
		if (ddi_soft_state_zalloc(ppb_state, instance) != DDI_SUCCESS)
			return (DDI_FAILURE);
		ppb = ddi_get_soft_state(ppb_state, instance);
		ppb->dip = devi;

		/*
		 * don't enable ereports if immediate child of npe
		 */
		if (strcmp(ddi_driver_name(ddi_get_parent(devi)), "npe") == 0)
			ppb->ppb_fmcap = DDI_FM_ERRCB_CAPABLE |
			    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
		else
			ppb->ppb_fmcap = DDI_FM_EREPORT_CAPABLE |
			    DDI_FM_ERRCB_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
			    DDI_FM_DMACHK_CAPABLE;

		ddi_fm_init(devi, &ppb->ppb_fmcap, &ppb->ppb_fm_ibc);
		mutex_init(&ppb->ppb_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_init(&ppb->ppb_err_mutex, NULL, MUTEX_DRIVER,
		    (void *)ppb->ppb_fm_ibc);
		mutex_init(&ppb->ppb_peek_poke_mutex, NULL, MUTEX_DRIVER,
		    (void *)ppb->ppb_fm_ibc);

		if (ppb->ppb_fmcap & (DDI_FM_ERRCB_CAPABLE |
		    DDI_FM_EREPORT_CAPABLE))
			pci_ereport_setup(devi);
		if (ppb->ppb_fmcap & DDI_FM_ERRCB_CAPABLE)
			ddi_fm_handler_register(devi, ppb_fm_callback, NULL);

		if (pci_config_setup(devi, &config_handle) != DDI_SUCCESS) {
			if (ppb->ppb_fmcap & DDI_FM_ERRCB_CAPABLE)
				ddi_fm_handler_unregister(devi);
			if (ppb->ppb_fmcap & (DDI_FM_ERRCB_CAPABLE |
			    DDI_FM_EREPORT_CAPABLE))
				pci_ereport_teardown(devi);
			ddi_fm_fini(devi);
			ddi_soft_state_free(ppb_state, instance);
			return (DDI_FAILURE);
		}

		ppb->parent_bus = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;
		for (pdip = ddi_get_parent(devi); pdip && (pdip != root) &&
		    (ppb->parent_bus != PCIE_PCIECAP_DEV_TYPE_PCIE_DEV);
		    pdip = ddi_get_parent(pdip)) {
			if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
			    DDI_PROP_DONTPASS, "device_type", &bus) !=
			    DDI_PROP_SUCCESS)
				break;

			if (strcmp(bus, "pciex") == 0)
				ppb->parent_bus =
				    PCIE_PCIECAP_DEV_TYPE_PCIE_DEV;

			ddi_prop_free(bus);
		}

		if (ppb_support_ht_msimap == 1)
			(void) ppb_ht_msimap_set(config_handle,
			    HT_MSIMAP_ENABLE);
		else if (ppb_support_ht_msimap == -1)
			(void) ppb_ht_msimap_set(config_handle,
			    HT_MSIMAP_DISABLE);

		pci_config_teardown(&config_handle);

		/*
		 * Initialize hotplug support on this bus.
		 */
		if (ppb->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV)
			ret = pcie_init(devi, NULL);
		else
			ret = pcihp_init(devi);

		if (ret != DDI_SUCCESS) {
			cmn_err(CE_WARN,
			    "pci: Failed to setup hotplug framework");
			(void) ppb_detach(devi, DDI_DETACH);
			return (ret);
		}

		ddi_report_dev(devi);
		return (DDI_SUCCESS);

	case DDI_RESUME:

		/*
		 * Get the soft state structure for the bridge.
		 */
		ppb = ddi_get_soft_state(ppb_state, ddi_get_instance(devi));
		ppb_restore_config_regs(ppb);
		return (DDI_SUCCESS);

	default:
		break;
	}
	return (DDI_FAILURE);
}
Example #19
/*
 * Autoconfiguration entry points.
 */
int
efe_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	ddi_acc_handle_t pci;
	int types;
	int count;
	int actual;
	uint_t pri;
	efe_t *efep;
	mac_register_t *macp = NULL;	/* checked in the failure path */

	switch (cmd) {
	case DDI_ATTACH:
		break;

	case DDI_RESUME:
		efep = ddi_get_driver_private(dip);
		return (efe_resume(efep));

	default:
		return (DDI_FAILURE);
	}

	/*
	 * PCI configuration.
	 */
	if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
		efe_error(dip, "unable to setup PCI configuration!");
		return (DDI_FAILURE);
	}

	pci_config_put16(pci, PCI_CONF_COMM,
	    pci_config_get16(pci, PCI_CONF_COMM) | PCI_COMM_MAE | PCI_COMM_ME);

	pci_config_teardown(&pci);

	if (ddi_intr_get_supported_types(dip, &types)
	    != DDI_SUCCESS || !(types & DDI_INTR_TYPE_FIXED)) {
		efe_error(dip, "fixed interrupts not supported!");
		return (DDI_FAILURE);
	}

	if (ddi_intr_get_nintrs(dip, DDI_INTR_TYPE_FIXED, &count)
	    != DDI_SUCCESS || count != 1) {
		efe_error(dip, "no fixed interrupts available!");
		return (DDI_FAILURE);
	}

	/*
	 * Initialize soft state.
	 */
	efep = kmem_zalloc(sizeof (efe_t), KM_SLEEP);
	ddi_set_driver_private(dip, efep);

	efep->efe_dip = dip;

	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&efep->efe_regs, 0, 0,
	    &efe_regs_acc_attr, &efep->efe_regs_acch) != DDI_SUCCESS) {
		efe_error(dip, "unable to setup register mapping!");
		goto failure;
	}

	efep->efe_rx_ring = efe_ring_alloc(efep->efe_dip, RXDESCL);
	if (efep->efe_rx_ring == NULL) {
		efe_error(efep->efe_dip, "unable to allocate rx ring!");
		goto failure;
	}

	efep->efe_tx_ring = efe_ring_alloc(efep->efe_dip, TXDESCL);
	if (efep->efe_tx_ring == NULL) {
		efe_error(efep->efe_dip, "unable to allocate tx ring!");
		goto failure;
	}

	if (ddi_intr_alloc(dip, &efep->efe_intrh, DDI_INTR_TYPE_FIXED, 0,
	    count, &actual, DDI_INTR_ALLOC_STRICT) != DDI_SUCCESS ||
	    actual != count) {
		efe_error(dip, "unable to allocate fixed interrupt!");
		goto failure;
	}

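	/*
	 * Reject interrupts that would run at high level; the handler
	 * relies on ordinary mutexes, which cannot be used above
	 * lock level.
	 */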
	if (ddi_intr_get_pri(efep->efe_intrh, &pri) != DDI_SUCCESS ||
	    pri >= ddi_intr_get_hilevel_pri()) {
		efe_error(dip, "unable to get valid interrupt priority!");
		goto failure;
	}

	mutex_init(&efep->efe_intrlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pri));

	mutex_init(&efep->efe_txlock, NULL, MUTEX_DRIVER,
	    DDI_INTR_PRI(pri));

	/*
	 * Initialize device.
	 */
	mutex_enter(&efep->efe_intrlock);
	mutex_enter(&efep->efe_txlock);

	efe_reset(efep);

	mutex_exit(&efep->efe_txlock);
	mutex_exit(&efep->efe_intrlock);

	/* Use factory address as default */
	efe_getaddr(efep, efep->efe_macaddr);

	/*
	 * Enable the ISR.
	 */
	if (ddi_intr_add_handler(efep->efe_intrh, efe_intr, efep, NULL)
	    != DDI_SUCCESS) {
		efe_error(dip, "unable to add interrupt handler!");
		goto failure;
	}

	if (ddi_intr_enable(efep->efe_intrh) != DDI_SUCCESS) {
		efe_error(dip, "unable to enable interrupt!");
		goto failure;
	}

	/*
	 * Allocate MII resources.
	 */
	if ((efep->efe_miih = mii_alloc(efep, dip, &efe_mii_ops)) == NULL) {
		efe_error(dip, "unable to allocate mii resources!");
		goto failure;
	}

	/*
	 * Allocate MAC resources.
	 */
	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
		efe_error(dip, "unable to allocate mac resources!");
		goto failure;
	}

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = efep;
	macp->m_dip = dip;
	macp->m_src_addr = efep->efe_macaddr;
	macp->m_callbacks = &efe_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	if (mac_register(macp, &efep->efe_mh) != 0) {
		efe_error(dip, "unable to register with mac!");
		goto failure;
	}
	mac_free(macp);

	ddi_report_dev(dip);

	return (DDI_SUCCESS);

failure:
	if (macp != NULL) {
		mac_free(macp);
	}

	if (efep->efe_miih != NULL) {
		mii_free(efep->efe_miih);
	}

	if (efep->efe_intrh != NULL) {
		(void) ddi_intr_disable(efep->efe_intrh);
		(void) ddi_intr_remove_handler(efep->efe_intrh);
		(void) ddi_intr_free(efep->efe_intrh);
	}

	mutex_destroy(&efep->efe_txlock);
	mutex_destroy(&efep->efe_intrlock);

	if (efep->efe_tx_ring != NULL) {
		efe_ring_free(&efep->efe_tx_ring);
	}
	if (efep->efe_rx_ring != NULL) {
		efe_ring_free(&efep->efe_rx_ring);
	}

	if (efep->efe_regs_acch != NULL) {
		ddi_regs_map_free(&efep->efe_regs_acch);
	}

	kmem_free(efep, sizeof (efe_t));

	return (DDI_FAILURE);
}
Example #20
static void
gfxp_check_for_console(dev_info_t *devi, struct vgatext_softc *softc,
	int pci_pcie_bus)
{
	ddi_acc_handle_t pci_conf;
	dev_info_t *pdevi;
	uint16_t data16;

	/*
	 * Based on Section 11.3, "PCI Display Subsystem Initialization",
	 * of the 1.1 PCI-to-PCI Bridge Architecture Specification
	 * determine if this is the boot console device.  First, see
	 * if the SBIOS has turned on PCI I/O for this device.  Then if
	 * this is PCI/PCI-E, verify the parent bridge has VGAEnable set.
	 */

	if (pci_config_setup(devi, &pci_conf) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    MYNAME
		    ": can't get PCI conf handle");
		return;
	}

	data16 = pci_config_get16(pci_conf, PCI_CONF_COMM);
	if (data16 & PCI_COMM_IO)
		softc->flags |= GFXP_FLAG_CONSOLE;

	pci_config_teardown(&pci_conf);

	/* If IO not enabled or ISA/EISA, just return */
	if (!(softc->flags & GFXP_FLAG_CONSOLE) || !pci_pcie_bus)
		return;

	/*
	 * Check for VGA Enable in the Bridge Control register for all
	 * PCI/PCIEX parents.  If not set all the way up the chain,
	 * this cannot be the boot console.
	 */

	pdevi = ddi_get_parent(devi);
	while (pdevi) {
		int	error;
		ddi_acc_handle_t ppci_conf;
		char	*parent_type = NULL;

		error = ddi_prop_lookup_string(DDI_DEV_T_ANY, pdevi,
		    DDI_PROP_DONTPASS, "device_type", &parent_type);
		if (error != DDI_SUCCESS) {
			return;
		}

		/* Verify still on the PCI/PCIEX parent tree */
		if (!STREQ(parent_type, "pci") &&
		    !STREQ(parent_type, "pciex")) {
			ddi_prop_free(parent_type);
			return;
		}

		ddi_prop_free(parent_type);
		parent_type = NULL;

		if (pci_config_setup(pdevi, &ppci_conf) != DDI_SUCCESS) {
			/* No registers on root node, done with check */
			return;
		}

		data16 = pci_config_get16(ppci_conf, PCI_BCNF_BCNTRL);
		pci_config_teardown(&ppci_conf);

		if (!(data16 & PCI_BCNF_BCNTRL_VGA_ENABLE)) {
			softc->flags &= ~GFXP_FLAG_CONSOLE;
			return;
		}

		pdevi = ddi_get_parent(pdevi);
	}
}
Example #21
/*
 * Removes a px_ih_t from the ino's linked list.
 * Uses the hardware mutex to lock out interrupt threads.
 * Side effect: the interrupt belonging to that ino is turned off on return;
 * if we are sharing the PX slot with other inos, the caller needs
 * to turn it back on.
 */
int
px_ib_ino_rem_intr(px_t *px_p, px_ino_pil_t *ipil_p, px_ih_t *ih_p)
{
    px_ino_t	*ino_p = ipil_p->ipil_ino_p;
    devino_t	ino = ino_p->ino_ino;
    sysino_t	sysino = ino_p->ino_sysino;
    dev_info_t	*dip = px_p->px_dip;
    px_ih_t		*ih_lst = ipil_p->ipil_ih_head;
    int		i, ret = DDI_SUCCESS;

    ASSERT(MUTEX_HELD(&ino_p->ino_ib_p->ib_ino_lst_mutex));

    DBG(DBG_IB, px_p->px_dip, "px_ib_ino_rem_intr ino=%x\n",
        ino_p->ino_ino);

    /* Wait on pending interrupt */
    if ((ret = px_ib_intr_pend(dip, sysino)) != DDI_SUCCESS) {
        cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: pending "
                "sysino 0x%lx(ino 0x%x) timeout",
                ddi_driver_name(dip), ddi_get_instance(dip),
                sysino, ino);
    }

    /*
     * If the interrupt was previously blocked (left in pending state)
     * because of jabber we need to clear the pending state in case the
     * jabber has gone away.
     */
    if (ret == DDI_SUCCESS &&
            ino_p->ino_unclaimed_intrs > px_unclaimed_intr_max) {
        cmn_err(CE_WARN, "%s%d: px_ib_ino_rem_intr: "
                "ino 0x%x has been unblocked",
                ddi_driver_name(dip), ddi_get_instance(dip), ino);

        ino_p->ino_unclaimed_intrs = 0;
        ret = px_lib_intr_setstate(dip, sysino, INTR_IDLE_STATE);
    }

    if (ret != DDI_SUCCESS) {
        DBG(DBG_IB, dip, "px_ib_ino_rem_intr: failed, "
            "ino 0x%x sysino 0x%x\n", ino, sysino);

        return (ret);
    }

    if (ipil_p->ipil_ih_size == 1) {
        if (ih_lst != ih_p)
            goto not_found;

        /* No need to set head/tail as ino_p will be freed */
        goto reset;
    }

    /* Search the link list for ih_p */
    for (i = 0; (i < ipil_p->ipil_ih_size) &&
            (ih_lst->ih_next != ih_p); i++, ih_lst = ih_lst->ih_next)
        ;

    if (ih_lst->ih_next != ih_p)
        goto not_found;

    /* Remove ih_p from the link list and maintain the head/tail */
    ih_lst->ih_next = ih_p->ih_next;

    if (ipil_p->ipil_ih_head == ih_p)
        ipil_p->ipil_ih_head = ih_p->ih_next;
    if (ipil_p->ipil_ih_tail == ih_p)
        ipil_p->ipil_ih_tail = ih_lst;

    ipil_p->ipil_ih_start = ipil_p->ipil_ih_head;

reset:
    if (ih_p->ih_config_handle)
        pci_config_teardown(&ih_p->ih_config_handle);
    if (ih_p->ih_ksp != NULL)
        kstat_delete(ih_p->ih_ksp);

    kmem_free(ih_p, sizeof (px_ih_t));
    ipil_p->ipil_ih_size--;

    return (ret);

not_found:
    DBG(DBG_R_INTX, ino_p->ino_ib_p->ib_px_p->px_dip,
        "ino_p=%x does not have ih_p=%x\n", ino_p, ih_p);

    return (DDI_FAILURE);
}
Example #22
/**
 * Attach entry point, to attach a device to the system or resume it.
 *
 * @param   pDip            The module structure instance.
 * @param   enmCmd          Attach type (ddi_attach_cmd_t)
 *
 * @return  corresponding Solaris error code.
 */
static int VBoxGuestSolarisAttach(dev_info_t *pDip, ddi_attach_cmd_t enmCmd)
{
    LogFlow((DEVICE_NAME "::Attach\n"));
    switch (enmCmd)
    {
        case DDI_ATTACH:
        {
            if (g_pDip)
            {
                LogRel((DEVICE_NAME "::Attach: Only one instance supported.\n"));
                return DDI_FAILURE;
            }

            int instance = ddi_get_instance(pDip);

            /*
             * Enable resources for PCI access.
             */
            ddi_acc_handle_t PciHandle;
            int rc = pci_config_setup(pDip, &PciHandle);
            if (rc == DDI_SUCCESS)
            {
                /*
                 * Map the register address space.
                 */
                caddr_t baseAddr;
                ddi_device_acc_attr_t deviceAttr;
                deviceAttr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
                deviceAttr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
                deviceAttr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
                deviceAttr.devacc_attr_access = DDI_DEFAULT_ACC;
                rc = ddi_regs_map_setup(pDip, 1, &baseAddr, 0, 0, &deviceAttr, &g_PciIOHandle);
                if (rc == DDI_SUCCESS)
                {
                    /*
                     * Read size of the MMIO region.
                     */
                    g_uIOPortBase = (uintptr_t)baseAddr;
                    rc = ddi_dev_regsize(pDip, 2, &g_cbMMIO);
                    if (rc == DDI_SUCCESS)
                    {
                        rc = ddi_regs_map_setup(pDip, 2, &g_pMMIOBase, 0, g_cbMMIO, &deviceAttr,
                                        &g_PciMMIOHandle);
                        if (rc == DDI_SUCCESS)
                        {
                            /*
                             * Add IRQ of VMMDev.
                             */
                            rc = VBoxGuestSolarisAddIRQ(pDip);
                            if (rc == DDI_SUCCESS)
                            {
                                /*
                                 * Call the common device extension initializer.
                                 */
                                rc = VBoxGuestInitDevExt(&g_DevExt, g_uIOPortBase, g_pMMIOBase, g_cbMMIO,
#if ARCH_BITS == 64
                                                         VBOXOSTYPE_Solaris_x64,
#else
                                                         VBOXOSTYPE_Solaris,
#endif
                                                         VMMDEV_EVENT_MOUSE_POSITION_CHANGED);
                                if (RT_SUCCESS(rc))
                                {
                                    rc = ddi_create_minor_node(pDip, DEVICE_NAME, S_IFCHR, instance, DDI_PSEUDO, 0);
                                    if (rc == DDI_SUCCESS)
                                    {
                                        g_pDip = pDip;
                                        pci_config_teardown(&PciHandle);
                                        return DDI_SUCCESS;
                                    }

                                    LogRel((DEVICE_NAME "::Attach: ddi_create_minor_node failed.\n"));
                                    VBoxGuestDeleteDevExt(&g_DevExt);
                                }
                                else
                                    LogRel((DEVICE_NAME "::Attach: VBoxGuestInitDevExt failed.\n"));
                                VBoxGuestSolarisRemoveIRQ(pDip);
                            }
                            else
                                LogRel((DEVICE_NAME "::Attach: VBoxGuestSolarisAddIRQ failed.\n"));
                            ddi_regs_map_free(&g_PciMMIOHandle);
                        }
                        else
                            LogRel((DEVICE_NAME "::Attach: ddi_regs_map_setup for MMIO region failed.\n"));
                    }
                    else
                        LogRel((DEVICE_NAME "::Attach: ddi_dev_regsize for MMIO region failed.\n"));
                    ddi_regs_map_free(&g_PciIOHandle);
                }
                else
                    LogRel((DEVICE_NAME "::Attach: ddi_regs_map_setup for IOport failed.\n"));
                pci_config_teardown(&PciHandle);
            }
            else
                LogRel((DEVICE_NAME "::Attach: pci_config_setup failed rc=%d.\n", rc));
            return DDI_FAILURE;
        }

        case DDI_RESUME:
        {
            /** @todo implement resume for guest driver. */
            return DDI_SUCCESS;
        }

        default:
            return DDI_FAILURE;
    }
}
Example #23
static int
acebus_config(ebus_devstate_t *ebus_p)
{
	ddi_acc_handle_t conf_handle;
	uint16_t comm;
#ifdef	ACEBUS_HOTPLUG
	int tcr_reg;
	caddr_t csr_io;
	ddi_device_acc_attr_t csr_attr = {   /* CSR map attributes */
		DDI_DEVICE_ATTR_V0,
		DDI_STRUCTURE_LE_ACC,
		DDI_STRICTORDER_ACC
	};
	ddi_acc_handle_t csr_handle;
#endif

	/*
	 * Make sure the master enable and memory access enable
	 * bits are set in the config command register.
	 */
	if (pci_config_setup(ebus_p->dip, &conf_handle) != DDI_SUCCESS)
		return (0);

	comm = pci_config_get16(conf_handle, PCI_CONF_COMM);
#ifdef DEBUG
	DBG1(D_ATTACH, ebus_p, "command register was 0x%x\n", comm);
#endif
	comm |= (PCI_COMM_ME|PCI_COMM_MAE|PCI_COMM_SERR_ENABLE|
	    PCI_COMM_PARITY_DETECT);
	pci_config_put16(conf_handle, PCI_CONF_COMM, comm);
#ifdef DEBUG
	DBG1(D_MAP, ebus_p, "command register is now 0x%x\n",
	    pci_config_get16(conf_handle, PCI_CONF_COMM));
#endif
	pci_config_put8(conf_handle, PCI_CONF_CACHE_LINESZ,
	    (uchar_t)acebus_cache_line_size);
	pci_config_put8(conf_handle, PCI_CONF_LATENCY_TIMER,
	    (uchar_t)acebus_latency_timer);
	pci_config_teardown(&conf_handle);

#ifdef	ACEBUS_HOTPLUG
	if (acebus_update_props(ebus_p) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: Could not update special properties.",
		    ddi_driver_name(ebus_p->dip),
		    ddi_get_instance(ebus_p->dip));
		return (0);
	}

	if (ddi_regs_map_setup(ebus_p->dip, CSR_IO_RINDEX,
	    (caddr_t *)&csr_io, 0, CSR_SIZE, &csr_attr,
	    &csr_handle) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "%s%d: Could not map Ebus CSR.",
		    ddi_driver_name(ebus_p->dip),
		    ddi_get_instance(ebus_p->dip));
		/* csr_handle is not valid here; fail instead of using it */
		return (0);
	}
#ifdef	DEBUG
	if (acebus_debug_flags) {
		DBG3(D_ATTACH, ebus_p, "tcr[123] = %x,%x,%x\n",
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    TCR1_OFF)),
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    TCR2_OFF)),
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    TCR3_OFF)));
		DBG2(D_ATTACH, ebus_p, "pmd-aux=%x, freq-aux=%x\n",
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    PMD_AUX_OFF)),
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    FREQ_AUX_OFF)));
#ifdef ACEBUS_DEBUG
		for (comm = 0; comm < 4; comm++)
			prom_printf("dcsr%d=%x, dacr%d=%x, dbcr%d=%x\n", comm,
			    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
			    0x700000+(0x2000*comm))), comm,
			    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
			    0x700000+(0x2000*comm)+4)), comm,
			    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
			    0x700000+(0x2000*comm)+8)));
#endif
	} /* acebus_debug_flags */
#endif
	/* If TCR registers are not initialized, initialize them here */
	tcr_reg = ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
	    TCR1_OFF));
	if ((tcr_reg == 0) || (tcr_reg == -1))
		ddi_put32(csr_handle, (uint32_t *)((caddr_t)csr_io + TCR1_OFF),
		    TCR1_REGVAL);
	tcr_reg = ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
	    TCR2_OFF));
	if ((tcr_reg == 0) || (tcr_reg == -1))
		ddi_put32(csr_handle, (uint32_t *)((caddr_t)csr_io + TCR2_OFF),
		    TCR2_REGVAL);
	tcr_reg = ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
	    TCR3_OFF));
	if ((tcr_reg == 0) || (tcr_reg == -1))
		ddi_put32(csr_handle, (uint32_t *)((caddr_t)csr_io + TCR3_OFF),
		    TCR3_REGVAL);
#ifdef	DEBUG
	if (acebus_debug_flags) {
		DBG3(D_ATTACH, ebus_p, "wrote tcr[123] = %x,%x,%x\n",
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    TCR1_OFF)),
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    TCR2_OFF)),
		    ddi_get32(csr_handle, (uint32_t *)((caddr_t)csr_io +
		    TCR3_OFF)));
	}
#endif

	ddi_regs_map_free(&csr_handle);
#endif	/* ACEBUS_HOTPLUG */
	return (1);	/* return success */
}
Example #24
int
pcn_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	pcn_t			*pcnp;
	mac_register_t		*macp;
	const pcn_type_t	*pcn_type;
	int			instance = ddi_get_instance(dip);
	int			rc;
	ddi_acc_handle_t	pci;
	uint16_t		venid;
	uint16_t		devid;
	uint16_t		svid;
	uint16_t		ssid;

	switch (cmd) {
	case DDI_RESUME:
		return (pcn_ddi_resume(dip));

	case DDI_ATTACH:
		break;

	default:
		return (DDI_FAILURE);
	}

	if (ddi_slaveonly(dip) == DDI_SUCCESS) {
		pcn_error(dip, "slot does not support PCI bus-master");
		return (DDI_FAILURE);
	}

	if (ddi_intr_hilevel(dip, 0) != 0) {
		pcn_error(dip, "hilevel interrupts not supported");
		return (DDI_FAILURE);
	}

	if (pci_config_setup(dip, &pci) != DDI_SUCCESS) {
		pcn_error(dip, "unable to setup PCI config handle");
		return (DDI_FAILURE);
	}

	venid = pci_config_get16(pci, PCI_CONF_VENID);
	devid = pci_config_get16(pci, PCI_CONF_DEVID);
	svid = pci_config_get16(pci, PCI_CONF_SUBVENID);
	ssid = pci_config_get16(pci, PCI_CONF_SUBSYSID);

	if ((pcn_type = pcn_match(venid, devid)) == NULL) {
		pci_config_teardown(&pci);
		pcn_error(dip, "Unable to identify PCI card");
		return (DDI_FAILURE);
	}

	if (ddi_prop_update_string(DDI_DEV_T_NONE, dip, "model",
	    pcn_type->pcn_name) != DDI_PROP_SUCCESS) {
		pci_config_teardown(&pci);
		pcn_error(dip, "Unable to create model property");
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(pcn_ssp, instance) != DDI_SUCCESS) {
		pcn_error(dip, "Unable to allocate soft state");
		pci_config_teardown(&pci);
		return (DDI_FAILURE);
	}

	pcnp = ddi_get_soft_state(pcn_ssp, instance);
	pcnp->pcn_dip = dip;
	pcnp->pcn_instance = instance;
	pcnp->pcn_extphyaddr = -1;

	if (ddi_get_iblock_cookie(dip, 0, &pcnp->pcn_icookie) != DDI_SUCCESS) {
		pcn_error(pcnp->pcn_dip, "ddi_get_iblock_cookie failed");
		ddi_soft_state_free(pcn_ssp, instance);
		pci_config_teardown(&pci);
		return (DDI_FAILURE);
	}


	mutex_init(&pcnp->pcn_xmtlock, NULL, MUTEX_DRIVER, pcnp->pcn_icookie);
	mutex_init(&pcnp->pcn_intrlock, NULL, MUTEX_DRIVER, pcnp->pcn_icookie);
	mutex_init(&pcnp->pcn_reglock, NULL, MUTEX_DRIVER, pcnp->pcn_icookie);

	/*
	 * Enable bus master, IO space, and memory space accesses
	 */
	pci_config_put16(pci, PCI_CONF_COMM,
	    pci_config_get16(pci, PCI_CONF_COMM) | PCI_COMM_ME | PCI_COMM_MAE);

	pci_config_teardown(&pci);

	if (ddi_regs_map_setup(dip, 1, (caddr_t *)&pcnp->pcn_regs, 0, 0,
	    &pcn_devattr, &pcnp->pcn_regshandle)) {
		pcn_error(dip, "ddi_regs_map_setup failed");
		goto fail;
	}

	if (pcn_set_chipid(pcnp, (uint32_t)ssid << 16 | (uint32_t)svid) !=
	    DDI_SUCCESS) {
		goto fail;
	}

	if ((pcnp->pcn_mii = mii_alloc(pcnp, dip, &pcn_mii_ops)) == NULL)
		goto fail;

	/* XXX: need to set based on device */
	mii_set_pauseable(pcnp->pcn_mii, B_FALSE, B_FALSE);

	if ((pcn_allocrxring(pcnp) != DDI_SUCCESS) ||
	    (pcn_alloctxring(pcnp) != DDI_SUCCESS)) {
		pcn_error(dip, "unable to allocate DMA resources");
		goto fail;
	}

	pcnp->pcn_promisc = B_FALSE;

	mutex_enter(&pcnp->pcn_intrlock);
	mutex_enter(&pcnp->pcn_xmtlock);
	rc = pcn_initialize(pcnp, B_TRUE);
	mutex_exit(&pcnp->pcn_xmtlock);
	mutex_exit(&pcnp->pcn_intrlock);
	if (rc != DDI_SUCCESS)
		goto fail;

	if (ddi_add_intr(dip, 0, NULL, NULL, pcn_intr, (caddr_t)pcnp) !=
	    DDI_SUCCESS) {
		pcn_error(dip, "unable to add interrupt");
		goto fail;
	}

	pcnp->pcn_flags |= PCN_INTR_ENABLED;

	if ((macp = mac_alloc(MAC_VERSION)) == NULL) {
		pcn_error(pcnp->pcn_dip, "mac_alloc failed");
		goto fail;
	}

	macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
	macp->m_driver = pcnp;
	macp->m_dip = dip;
	macp->m_src_addr = pcnp->pcn_addr;
	macp->m_callbacks = &pcn_m_callbacks;
	macp->m_min_sdu = 0;
	macp->m_max_sdu = ETHERMTU;
	macp->m_margin = VLAN_TAGSZ;

	if (mac_register(macp, &pcnp->pcn_mh) == DDI_SUCCESS) {
		mac_free(macp);
		return (DDI_SUCCESS);
	}

	mac_free(macp);

	return (DDI_SUCCESS);

fail:
	pcn_teardown(pcnp);
	return (DDI_FAILURE);
}
Example #25
static int
ppb_initchild(dev_info_t *child)
{
	struct ddi_parent_private_data *pdptr;
	ppb_devstate_t *ppb;
	char name[MAXNAMELEN];
	ddi_acc_handle_t config_handle;
	ushort_t command_preserve, command;

	ppb = (ppb_devstate_t *)ddi_get_soft_state(ppb_state,
	    ddi_get_instance(ddi_get_parent(child)));

	if (ppb_name_child(child, name, MAXNAMELEN) != DDI_SUCCESS)
		return (DDI_FAILURE);
	ddi_set_name_addr(child, name);

	/*
	 * Pseudo nodes indicate a prototype node with per-instance
	 * properties to be merged into the real h/w device node.
	 * The interpretation of the unit-address is DD[,F]
	 * where DD is the device id and F is the function.
	 */
	if (ndi_dev_is_persistent_node(child) == 0) {
		extern int pci_allow_pseudo_children;

		ddi_set_parent_data(child, NULL);

		/*
		 * Try to merge the properties from this prototype
		 * node into real h/w nodes.
		 */
		if (ndi_merge_node(child, ppb_name_child) == DDI_SUCCESS) {
			/*
			 * Merged ok - return failure to remove the node.
			 */
			ddi_set_name_addr(child, NULL);
			return (DDI_FAILURE);
		}

		/* workaround for ddivs to run under PCI */
		if (pci_allow_pseudo_children)
			return (DDI_SUCCESS);

		/*
		 * The child was not merged into a h/w node,
		 * but there's not much we can do with it other
		 * than return failure to cause the node to be removed.
		 */
		cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
		    ddi_driver_name(child), ddi_get_name_addr(child),
		    ddi_driver_name(child));
		ddi_set_name_addr(child, NULL);
		return (DDI_NOT_WELL_FORMED);
	}

	ddi_set_parent_data(child, NULL);

	/*
	 * PCIe FMA specific
	 *
	 * Note: parent_data for parent is created only if this is PCI-E
	 * platform, for which, SG take a different route to handle device
	 * errors.
	 */
	if (ppb->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
		if (pcie_init_cfghdl(child) != DDI_SUCCESS)
			return (DDI_FAILURE);
		pcie_init_dom(child);
	}

	/* transfer select properties from PROM to kernel */
	if (ddi_getprop(DDI_DEV_T_NONE, child, DDI_PROP_DONTPASS,
	    "interrupts", -1) != -1) {
		pdptr = kmem_zalloc((sizeof (struct ddi_parent_private_data) +
		    sizeof (struct intrspec)), KM_SLEEP);
		pdptr->par_intr = (struct intrspec *)(pdptr + 1);
		pdptr->par_nintr = 1;
		ddi_set_parent_data(child, pdptr);
	} else
		ddi_set_parent_data(child, NULL);

	if (pci_config_setup(child, &config_handle) != DDI_SUCCESS) {
		pcie_fini_dom(child);
		return (DDI_FAILURE);
	}

	/*
	 * Support for the "command-preserve" property.
	 */
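	/*
	 * Bits named in "command-preserve" keep whatever value firmware
	 * left in them, the fast back-to-back enable bit is never cleared
	 * here, and all remaining bits are forced to ppb_command_default.
	 */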
	command_preserve = ddi_prop_get_int(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS, "command-preserve", 0);
	command = pci_config_get16(config_handle, PCI_CONF_COMM);
	command &= (command_preserve | PCI_COMM_BACK2BACK_ENAB);
	command |= (ppb_command_default & ~command_preserve);
	pci_config_put16(config_handle, PCI_CONF_COMM, command);

	pci_config_teardown(&config_handle);
	return (DDI_SUCCESS);
}
Example #26
int
pcmu_add_intr(dev_info_t *dip, dev_info_t *rdip, ddi_intr_handle_impl_t *hdlp)
{
	pcmu_t *pcmu_p = get_pcmu_soft_state(ddi_get_instance(dip));
	pcmu_ib_t *pib_p = pcmu_p->pcmu_ib_p;
	ih_t *ih_p;
	pcmu_ib_ino_t ino;
	pcmu_ib_ino_info_t *ino_p; /* pulse interrupts have no ino */
	pcmu_ib_mondo_t mondo;
	uint32_t cpu_id;
	int ret;

	ino = PCMU_IB_MONDO_TO_INO(hdlp->ih_vector);

	PCMU_DBG3(PCMU_DBG_A_INTX, dip, "pcmu_add_intr: rdip=%s%d ino=%x\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip), ino);

	if (ino > pib_p->pib_max_ino) {
		PCMU_DBG1(PCMU_DBG_A_INTX, dip, "ino %x is invalid\n", ino);
		return (DDI_INTR_NOTFOUND);
	}

	if ((mondo = PCMU_IB_INO_TO_MONDO(pcmu_p->pcmu_ib_p, ino)) == 0)
		goto fail1;

	ino = PCMU_IB_MONDO_TO_INO(mondo);

	mutex_enter(&pib_p->pib_ino_lst_mutex);
	ih_p = pcmu_ib_alloc_ih(rdip, hdlp->ih_inum,
	    hdlp->ih_cb_func, hdlp->ih_cb_arg1, hdlp->ih_cb_arg2);

	if (ino_p = pcmu_ib_locate_ino(pib_p, ino)) {	/* sharing ino */
		uint32_t intr_index = hdlp->ih_inum;
		if (pcmu_ib_ino_locate_intr(ino_p, rdip, intr_index)) {
			PCMU_DBG1(PCMU_DBG_A_INTX, dip,
			    "dup intr #%d\n", intr_index);
			goto fail3;
		}

		/*
		 * add default weight(0) to the cpu that we are
		 * already targeting
		 */
		cpu_id = ino_p->pino_cpuid;
		intr_dist_cpuid_add_device_weight(cpu_id, rdip, 0);
		pcmu_ib_ino_add_intr(pcmu_p, ino_p, ih_p);
		goto ino_done;
	}

	ino_p = pcmu_ib_new_ino(pib_p, ino, ih_p);
	hdlp->ih_vector = mondo;

	PCMU_DBG2(PCMU_DBG_A_INTX, dip, "pcmu_add_intr:  pil=0x%x mondo=0x%x\n",
	    hdlp->ih_pri, hdlp->ih_vector);

	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp,
	    (ddi_intr_handler_t *)pcmu_intr_wrapper, (caddr_t)ino_p, NULL);

	ret = i_ddi_add_ivintr(hdlp);

	/*
	 * Restore original interrupt handler
	 * and arguments in interrupt handle.
	 */
	DDI_INTR_ASSIGN_HDLR_N_ARGS(hdlp, ih_p->ih_handler,
	    ih_p->ih_handler_arg1, ih_p->ih_handler_arg2);

	if (ret != DDI_SUCCESS) {
		goto fail4;
	}
	/* Save the pil for this ino */
	ino_p->pino_pil = hdlp->ih_pri;

	/* clear and enable interrupt */
	PCMU_IB_INO_INTR_CLEAR(ino_p->pino_clr_reg);

	/* select cpu for sharing and removal */
	cpu_id = pcmu_intr_dist_cpuid(pib_p, ino_p);
	ino_p->pino_cpuid = cpu_id;
	ino_p->pino_established = 1;
	intr_dist_cpuid_add_device_weight(cpu_id, rdip, 0);

	cpu_id = u2u_translate_tgtid(pib_p->pib_pcmu_p,
	    cpu_id, ino_p->pino_map_reg);
	*ino_p->pino_map_reg = ib_get_map_reg(mondo, cpu_id);
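	/* read the mapping register back to flush the store above */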
	*ino_p->pino_map_reg;
ino_done:
	mutex_exit(&pib_p->pib_ino_lst_mutex);
done:
	PCMU_DBG2(PCMU_DBG_A_INTX, dip, "done! Interrupt 0x%x pil=%x\n",
	    hdlp->ih_vector, hdlp->ih_pri);
	return (DDI_SUCCESS);
fail4:
	pcmu_ib_delete_ino(pib_p, ino_p);
fail3:
	if (ih_p->ih_config_handle)
		pci_config_teardown(&ih_p->ih_config_handle);
	mutex_exit(&pib_p->pib_ino_lst_mutex);
	kmem_free(ih_p, sizeof (ih_t));
fail1:
	PCMU_DBG2(PCMU_DBG_A_INTX, dip, "Failed! Interrupt 0x%x pil=%x\n",
	    hdlp->ih_vector, hdlp->ih_pri);
	return (DDI_FAILURE);
}
Example #27
/*
 * attach(9E) -- Attach a device to the system
 *
 * Called once for each board successfully probed.
 */
static int
SMCG_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	gld_mac_info_t	*macinfo;
	Adapter_Struc	*pAd;
	smcg_t		*smcg;
	int		rc;
	ddi_acc_handle_t pcihandle;

#ifdef	DEBUG
	if (SMCG_debug & SMCGDDI)
		cmn_err(CE_CONT, SMCG_NAME "_attach(0x%p)", (void *)devinfo);
#endif

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	/*
	 * Allocate gld_mac_info_t and Lower MAC Adapter_Struc structures
	 */
	if ((macinfo = gld_mac_alloc(devinfo)) == NULL)
		return (DDI_FAILURE);
	if ((pAd = kmem_zalloc(sizeof (Adapter_Struc), KM_NOSLEEP)) == NULL) {
		gld_mac_free(macinfo);
		return (DDI_FAILURE);
	}
	if ((smcg = kmem_zalloc(sizeof (smcg_t), KM_NOSLEEP)) == NULL) {
		gld_mac_free(macinfo);
		kmem_free(pAd, sizeof (Adapter_Struc));
		return (DDI_FAILURE);
	}

	pAd->pc_bus = SMCG_PCI_BUS;

	/* create pci handle for UM_PCI_Service */
	if (pci_config_setup(devinfo, (ddi_acc_handle_t *)&pcihandle)
	    != DDI_SUCCESS) {
		goto attach_fail_cleanup;
	}

	/*
	 * Query the LMAC for the device information
	 */
	pAd->pcihandle = (void *) pcihandle;
	rc = LM_GetCnfg(pAd);

	pci_config_teardown((ddi_acc_handle_t *)&pcihandle);
	pAd->pcihandle = NULL;

	if (rc != ADAPTER_AND_CONFIG) {
		cmn_err(CE_WARN,
		    SMCG_NAME "_attach: LM_GetCnfg failed (0x%x)", rc);
		goto attach_fail_cleanup;
	}

	/*
	 * Initialize pointers to device specific functions which will be
	 * used by the generic layer.
	 */
	macinfo->gldm_reset   = SMCG_reset;
	macinfo->gldm_start   = SMCG_start_board;
	macinfo->gldm_stop    = SMCG_stop_board;
	macinfo->gldm_set_mac_addr   = SMCG_set_mac_addr;
	macinfo->gldm_set_multicast = SMCG_set_multicast;
	macinfo->gldm_set_promiscuous = SMCG_set_promiscuous;
	macinfo->gldm_get_stats   = SMCG_get_stats;
	macinfo->gldm_send    = SMCG_send;
	macinfo->gldm_intr    = SMCG_intr;
	macinfo->gldm_ioctl   = NULL;

	/*
	 * Initialize board characteristics needed by the generic layer.
	 */
	macinfo->gldm_ident = SMCG_IDENT;
	macinfo->gldm_type = DL_ETHER;
	macinfo->gldm_minpkt = 0;	/* assumes we pad ourselves */
	macinfo->gldm_maxpkt = SMCGMAXPKT;
	macinfo->gldm_addrlen = ETHERADDRL;
	macinfo->gldm_saplen = -2;
	macinfo->gldm_ppa = ddi_get_instance(devinfo);

	pAd->receive_mask = ACCEPT_BROADCAST;
	pAd->max_packet_size = SMMAXPKT;

	macinfo->gldm_broadcast_addr = SMCG_broadcastaddr;

	/* Get the board's vendor-assigned hardware network address. */
	LM_Get_Addr(pAd);
	macinfo->gldm_vendor_addr = (unsigned char *)pAd->node_address;

	/* Link macinfo, smcg, and LMAC Adapter Structs */
	macinfo->gldm_private = (caddr_t)smcg;
	pAd->sm_private = (void *)smcg;
	smcg->smcg_pAd = pAd;
	smcg->smcg_macinfo = macinfo;

	pAd->ptr_rx_CRC_errors = &smcg->rx_CRC_errors;
	pAd->ptr_rx_too_big = &smcg->rx_too_big;
	pAd->ptr_rx_lost_pkts = &smcg->rx_lost_pkts;
	pAd->ptr_rx_align_errors = &smcg->rx_align_errors;
	pAd->ptr_rx_overruns = &smcg->rx_overruns;
	pAd->ptr_tx_deferred = &smcg->tx_deferred;
	pAd->ptr_tx_total_collisions = &smcg->tx_total_collisions;
	pAd->ptr_tx_max_collisions = &smcg->tx_max_collisions;
	pAd->ptr_tx_one_collision = &smcg->tx_one_collision;
	pAd->ptr_tx_mult_collisions = &smcg->tx_mult_collisions;
	pAd->ptr_tx_ow_collision = &smcg->tx_ow_collision;
	pAd->ptr_tx_CD_heartbeat = &smcg->tx_CD_heartbeat;
	pAd->ptr_tx_carrier_lost = &smcg->tx_carrier_lost;
	pAd->ptr_tx_underruns = &smcg->tx_underruns;
	pAd->ptr_ring_OVW = &smcg->ring_OVW;

	macinfo->gldm_devinfo = smcg->smcg_devinfo = devinfo;

	pAd->num_of_tx_buffs = ddi_getprop(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, "num-tx-bufs", SMTRANSMIT_BUFS);
	if (pAd->num_of_tx_buffs > SMCG_MAX_TXDESCS) {
		pAd->num_of_tx_buffs = SMCG_MAX_TXDESCS;
		cmn_err(CE_WARN, SMCG_NAME
		    "Max number_of_tx_buffs is %d", SMCG_MAX_TXDESCS);
	}
	if (pAd->num_of_tx_buffs < 2) {
		pAd->num_of_tx_buffs = 2;
	}
	pAd->num_of_rx_buffs = ddi_getprop(DDI_DEV_T_ANY, devinfo,
	    DDI_PROP_DONTPASS, "num-rx-bufs", SMRECEIVE_BUFS);
	if (pAd->num_of_rx_buffs > SMCG_MAX_RXDESCS) {
		pAd->num_of_rx_buffs = SMCG_MAX_RXDESCS;
		cmn_err(CE_WARN, SMCG_NAME
		    "Max number_of_rx_buffs is %d", SMCG_MAX_RXDESCS);
	}
	if (pAd->num_of_rx_buffs < 2) {
		pAd->num_of_rx_buffs = 2;
	}

	if (ddi_get_iblock_cookie(devinfo, 0, &macinfo->gldm_cookie)
		!= DDI_SUCCESS)
		goto attach_fail_cleanup;

	/*
	 * rbuf_lock	Protects receive data structures
	 * txbuf_lock	Protects transmit data structures
	 * lm_lock	Protects all calls to LMAC layer
	 * rlist_lock	Protects receive buffer list
	 * Note: Locks should be acquired in the above order.
	 */
	mutex_init(&smcg->rbuf_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&smcg->txbuf_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&smcg->lm_lock, NULL, MUTEX_DRIVER, NULL);
	mutex_init(&smcg->rlist_lock, NULL, MUTEX_DRIVER, NULL);

	/*
	 * SMCG_dma_alloc is called before it is possible to get
	 * any interrupts, send or receive packets... Therefore I'm
	 * not going to take rlist_lock for it.
	 */
	if (SMCG_dma_alloc(smcg) != DDI_SUCCESS)
		goto attach_fail_cleanup1;

#ifdef SAFE
	LM_Reset_Adapter(pAd);
#endif
	/* Add the interrupt handler */
	if (ddi_add_intr(devinfo, 0,  NULL, NULL, gld_intr, (caddr_t)macinfo)
	    != DDI_SUCCESS) {
		SMCG_dma_unalloc(smcg);
		goto attach_fail_cleanup1;
	}

	/*
	 * Register ourselves with the GLD interface
	 *
	 * gld_register will:
	 *	link us with the GLD system;
	 *	create the minor node.
	 */
	if (gld_register(devinfo, SMCG_NAME, macinfo) != DDI_SUCCESS) {
		ddi_remove_intr(devinfo, 0, macinfo->gldm_cookie);
		SMCG_dma_unalloc(smcg);
		goto attach_fail_cleanup1;
	}

	return (DDI_SUCCESS);

attach_fail_cleanup1:
	mutex_destroy(&smcg->rbuf_lock);
	mutex_destroy(&smcg->txbuf_lock);
	mutex_destroy(&smcg->lm_lock);
	mutex_destroy(&smcg->rlist_lock);

attach_fail_cleanup:
	kmem_free(pAd, sizeof (Adapter_Struc));
	kmem_free(smcg, sizeof (smcg_t));
	gld_mac_free(macinfo);
	return (DDI_FAILURE);
}
Exemplo n.º 28
static int
ppb_initchild(dev_info_t *child)
{
	char name[MAXNAMELEN];
	ddi_acc_handle_t config_handle;
	ushort_t command_preserve, command;
	uint_t n;
	ushort_t bcr;
	uchar_t header_type;
	uchar_t min_gnt, latency_timer;
	ppb_devstate_t *ppb;

	/*
	 * Name the child
	 */
	if (ppb_name_child(child, name, MAXNAMELEN) != DDI_SUCCESS)
		return (DDI_FAILURE);

	ddi_set_name_addr(child, name);
	ddi_set_parent_data(child, NULL);

	/*
	 * Pseudo nodes indicate a prototype node with per-instance
	 * properties to be merged into the real h/w device node.
	 * The interpretation of the unit-address is DD[,F]
	 * where DD is the device id and F is the function.
	 */
	if (ndi_dev_is_persistent_node(child) == 0) {
		extern int pci_allow_pseudo_children;

		/*
		 * Try to merge the properties from this prototype
		 * node into real h/w nodes.
		 */
		if (ndi_merge_node(child, ppb_name_child) == DDI_SUCCESS) {
			/*
			 * Merged ok - return failure to remove the node.
			 */
			ppb_removechild(child);
			return (DDI_FAILURE);
		}

		/* workaround for ddivs to run under PCI */
		if (pci_allow_pseudo_children)
			return (DDI_SUCCESS);

		/*
		 * The child was not merged into a h/w node,
		 * but there's not much we can do with it other
		 * than return failure to cause the node to be removed.
		 */
		cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
		    ddi_driver_name(child), ddi_get_name_addr(child),
		    ddi_driver_name(child));
		ppb_removechild(child);
		return (DDI_NOT_WELL_FORMED);
	}

	ppb = (ppb_devstate_t *)ddi_get_soft_state(ppb_state,
	    ddi_get_instance(ddi_get_parent(child)));

	ddi_set_parent_data(child, NULL);

	/*
	 * If hardware is PM capable, set up the power info structure.
	 * This also ensures that the bus will not be off (0 MHz); otherwise
	 * the system panics during a bus access.
	 */
	if (PM_CAPABLE(ppb->ppb_pwr_p)) {
		/*
		 * Create a pwr_info struct for child.  Bus will be
		 * at full speed after creating info.
		 */
		pci_pwr_create_info(ppb->ppb_pwr_p, child);
#ifdef DEBUG
		ASSERT(ppb->ppb_pwr_p->current_lvl == PM_LEVEL_B0);
#endif
	}

	/*
	 * If configuration registers were previously saved by
	 * child (before it entered D3), then let the child do the
	 * restore to set up the config regs as it'll first need to
	 * power the device out of D3.
	 */
	if (ddi_prop_exists(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "config-regs-saved-by-child") == 1) {
		DEBUG2(DBG_PWR, ddi_get_parent(child),
		    "INITCHILD: config regs to be restored by child"
		    " for %s@%s\n", ddi_node_name(child),
		    ddi_get_name_addr(child));

		return (DDI_SUCCESS);
	}

	DEBUG2(DBG_PWR, ddi_get_parent(child),
	    "INITCHILD: config regs setup for %s@%s\n",
	    ddi_node_name(child), ddi_get_name_addr(child));

	if (pci_config_setup(child, &config_handle) != DDI_SUCCESS) {
		if (PM_CAPABLE(ppb->ppb_pwr_p)) {
			pci_pwr_rm_info(ppb->ppb_pwr_p, child);
		}

		return (DDI_FAILURE);
	}

	/*
	 * Determine the configuration header type.
	 */
	header_type = pci_config_get8(config_handle, PCI_CONF_HEADER);

	/*
	 * Support for the "command-preserve" property.
	 */
	command_preserve = ddi_prop_get_int(DDI_DEV_T_ANY, child,
	    DDI_PROP_DONTPASS, "command-preserve", 0);
	command = pci_config_get16(config_handle, PCI_CONF_COMM);
	command &= (command_preserve | PCI_COMM_BACK2BACK_ENAB);
	command |= (ppb_command_default & ~command_preserve);
	pci_config_put16(config_handle, PCI_CONF_COMM, command);

	/*
	 * If the device has a bus control register then program it
	 * based on the settings in the command register.
	 */
	if ((header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
		bcr = pci_config_get8(config_handle, PCI_BCNF_BCNTRL);
		if (ppb_command_default & PCI_COMM_PARITY_DETECT)
			bcr |= PCI_BCNF_BCNTRL_PARITY_ENABLE;
		if (ppb_command_default & PCI_COMM_SERR_ENABLE)
			bcr |= PCI_BCNF_BCNTRL_SERR_ENABLE;
		bcr |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
		pci_config_put8(config_handle, PCI_BCNF_BCNTRL, bcr);
	}

	/*
	 * Initialize cache-line-size configuration register if needed.
	 */
	if (ppb_set_cache_line_size_register &&
	    ddi_getprop(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "cache-line-size", 0) == 0) {
		pci_config_put8(config_handle, PCI_CONF_CACHE_LINESZ,
		    ppb->ppb_cache_line_size);
		n = pci_config_get8(config_handle, PCI_CONF_CACHE_LINESZ);
		if (n != 0) {
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, child,
			    "cache-line-size", n);
		}
	}

	/*
	 * Initialize latency timer configuration registers if needed.
	 */
	if (ppb_set_latency_timer_register &&
	    ddi_getprop(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "latency-timer", 0) == 0) {

		if ((header_type & PCI_HEADER_TYPE_M) == PCI_HEADER_ONE) {
			latency_timer = ppb->ppb_latency_timer;
			pci_config_put8(config_handle, PCI_BCNF_LATENCY_TIMER,
			    ppb->ppb_latency_timer);
		} else {
			min_gnt = pci_config_get8(config_handle,
			    PCI_CONF_MIN_G);
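			/*
			 * MIN_GNT is specified in units of 0.25 usec while
			 * the latency timer counts PCI clocks; at 33 MHz a
			 * clock is roughly 30 ns, so multiplying by 8
			 * converts the device's burst-length hint into an
			 * approximate clock-tick value.
			 */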
			latency_timer = min_gnt * 8;
		}
		pci_config_put8(config_handle, PCI_CONF_LATENCY_TIMER,
		    latency_timer);
		n = pci_config_get8(config_handle, PCI_CONF_LATENCY_TIMER);
		if (n != 0) {
			(void) ndi_prop_update_int(DDI_DEV_T_NONE, child,
			    "latency-timer", n);
		}
	}

	/*
	 * SPARC PCIe FMA specific
	 *
	 * Note: parent_data for the parent is created only if this is a sparc
	 * PCI-E platform, for which SG takes a different route to handle
	 * device errors.
	 */
	if (ppb->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
		if (pcie_init_cfghdl(child) != DDI_SUCCESS) {
			pci_config_teardown(&config_handle);
			return (DDI_FAILURE);
		}
		pcie_init_dom(child);
	}

	/*
	 * Check to see if the XMITS/PCI-X workaround applies.
	 */
	n = ddi_getprop(DDI_DEV_T_ANY, child, DDI_PROP_NOTPROM,
	    "pcix-update-cmd-reg", -1);

	if (n != -1) {
		extern void pcix_set_cmd_reg(dev_info_t *child, uint16_t value);
		DEBUG1(DBG_INIT_CLD, child, "Turning on XMITS NCPQ "
		    "Workaround: value = %x\n", n);
		pcix_set_cmd_reg(child, n);
	}
	pci_config_teardown(&config_handle);
	return (DDI_SUCCESS);
}
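
The subtle step in ppb_initchild() is the command-register merge: bits named in the child's command-preserve property keep their current values, every other bit comes from ppb_command_default, and fast back-to-back is never forced on behind the device's back. The sketch below restates that merge as a standalone function purely for illustration; ppb_merge_command() is not part of the driver.

/*
 * Illustrative sketch of the command-register merge used above.
 * "current" is the value read from PCI_CONF_COMM, "preserve" the
 * command-preserve property value, and "def" the driver default
 * (ppb_command_default).
 */
static uint16_t
ppb_merge_command(uint16_t current, uint16_t preserve, uint16_t def)
{
	uint16_t command;

	/* keep only the preserved bits (plus back-to-back) of the old value */
	command = current & (preserve | PCI_COMM_BACK2BACK_ENAB);

	/* every non-preserved bit comes from the driver default */
	command |= (def & ~preserve);

	return (command);
}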
Example No. 29
/*ARGSUSED*/
static int
ppb_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	dev_info_t *root = ddi_root_node();
	int instance;
	ppb_devstate_t *ppb;
	dev_info_t *pdip;
	ddi_acc_handle_t config_handle;
	char *bus;

	switch (cmd) {
	case DDI_ATTACH:

		/*
		 * Make sure the "device_type" property exists.
		 */
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, devi,
		    "device_type", "pci");

		/*
		 * Allocate and get soft state structure.
		 */
		instance = ddi_get_instance(devi);
		if (ddi_soft_state_zalloc(ppb_state, instance) != DDI_SUCCESS)
			return (DDI_FAILURE);
		ppb = (ppb_devstate_t *)ddi_get_soft_state(ppb_state, instance);
		ppb->dip = devi;
		mutex_init(&ppb->ppb_mutex, NULL, MUTEX_DRIVER, NULL);
		ppb->ppb_soft_state = PCI_SOFT_STATE_CLOSED;
		if (pci_config_setup(devi, &config_handle) != DDI_SUCCESS) {
			mutex_destroy(&ppb->ppb_mutex);
			ddi_soft_state_free(ppb_state, instance);
			return (DDI_FAILURE);
		}
		ppb_pwr_setup(ppb, devi);

		if (PM_CAPABLE(ppb->ppb_pwr_p)) {
			mutex_enter(&ppb->ppb_pwr_p->pwr_mutex);

			/*
			 * Before reading config registers, make sure power is
			 * on, and remains on.
			 */
			ppb->ppb_pwr_p->pwr_fp++;

			pci_pwr_change(ppb->ppb_pwr_p,
			    ppb->ppb_pwr_p->current_lvl,
			    pci_pwr_new_lvl(ppb->ppb_pwr_p));
		}

		ppb->ppb_cache_line_size =
		    pci_config_get8(config_handle, PCI_CONF_CACHE_LINESZ);
		ppb->ppb_latency_timer =
		    pci_config_get8(config_handle, PCI_CONF_LATENCY_TIMER);

		/*
		 * Check whether the "ranges" property is present.
		 * Otherwise create the ranges property by reading
		 * the configuration registers
		 */
		if (ddi_prop_exists(DDI_DEV_T_ANY, devi, DDI_PROP_DONTPASS,
		    "ranges") == 0) {
			ppb_create_ranges_prop(devi, config_handle);
		}

		pci_config_teardown(&config_handle);

		if (PM_CAPABLE(ppb->ppb_pwr_p)) {
			ppb->ppb_pwr_p->pwr_fp--;

			pci_pwr_change(ppb->ppb_pwr_p,
			    ppb->ppb_pwr_p->current_lvl,
			    pci_pwr_new_lvl(ppb->ppb_pwr_p));

			mutex_exit(&ppb->ppb_pwr_p->pwr_mutex);
		}

		ppb->parent_bus = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;
		for (pdip = ddi_get_parent(ppb->dip); pdip && (pdip != root) &&
		    (ppb->parent_bus != PCIE_PCIECAP_DEV_TYPE_PCIE_DEV);
		    pdip = ddi_get_parent(pdip)) {
			if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
			    DDI_PROP_DONTPASS, "device_type", &bus) !=
			    DDI_PROP_SUCCESS)
				break;

			if (strcmp(bus, "pciex") == 0)
				ppb->parent_bus =
				    PCIE_PCIECAP_DEV_TYPE_PCIE_DEV;

			ddi_prop_free(bus);
		}

		/*
		 * Initialize hotplug support on this bus.
		 */
		if (ppb->parent_bus == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) {
			if (pcie_init(devi, NULL) != DDI_SUCCESS) {
				(void) ppb_detach(devi, DDI_DETACH);
				return (DDI_FAILURE);
			}
		} else {
			ppb_init_hotplug(ppb);
		}

		DEBUG1(DBG_ATTACH, devi,
		    "ppb_attach(): this nexus %s hotplug slots\n",
		    ppb->hotplug_capable == B_TRUE ? "has":"has no");

		ppb_fm_init(ppb);
		ddi_report_dev(devi);

		return (DDI_SUCCESS);

	case DDI_RESUME:
		/*
		 * Get the soft state structure for the bridge.
		 */
		ppb = (ppb_devstate_t *)
		    ddi_get_soft_state(ppb_state, ddi_get_instance(devi));

		pci_pwr_resume(devi, ppb->ppb_pwr_p);

		return (DDI_SUCCESS);
	}
	return (DDI_FAILURE);
}
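
In the DDI_ATTACH case above, the fabric above the bridge is classified by walking up the devinfo tree until a node advertises device_type "pciex" or the root nexus is reached. The same test is sketched below as a hypothetical predicate, using only the DDI property calls already present in the example; ppb_under_pcie() is an illustrative name.

/*
 * Hypothetical helper: returns B_TRUE if some ancestor of dip (below
 * the root nexus) carries device_type = "pciex", i.e. the bridge sits
 * under a PCI Express fabric.
 */
static boolean_t
ppb_under_pcie(dev_info_t *dip)
{
	dev_info_t *root = ddi_root_node();
	dev_info_t *pdip;
	char *bus;
	boolean_t found = B_FALSE;

	for (pdip = ddi_get_parent(dip); pdip != NULL && pdip != root &&
	    found == B_FALSE; pdip = ddi_get_parent(pdip)) {
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
		    DDI_PROP_DONTPASS, "device_type", &bus) !=
		    DDI_PROP_SUCCESS)
			break;

		if (strcmp(bus, "pciex") == 0)
			found = B_TRUE;

		ddi_prop_free(bus);
	}

	return (found);
}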
Example No. 30
/*
 * If bridge is PM capable, set up PM state for nexus.
 */
static void
ppb_pwr_setup(ppb_devstate_t *ppb, dev_info_t *pdip)
{
	char *comp_array[5];
	int i;
	ddi_acc_handle_t conf_hdl;
	uint8_t pmcsr_bse;
	uint16_t pmcap;

	/*
	 * Determine if bridge is PM capable.  If not, leave ppb_pwr_p NULL
	 * and return.
	 */
	if (pci_config_setup(pdip, &ppb->ppb_conf_hdl) != DDI_SUCCESS) {

		return;
	}

	conf_hdl = ppb->ppb_conf_hdl;

	/*
	 * Locate and store the power management cap_ptr for future references.
	 */
	if ((PCI_CAP_LOCATE(conf_hdl, PCI_CAP_ID_PM, &ppb->ppb_pm_cap_ptr))
	    == DDI_FAILURE) {
		DEBUG0(DBG_PWR, pdip, "bridge does not support PM. PCI"
		    " PM data structure not found in config header\n");
		pci_config_teardown(&conf_hdl);

		return;
	}

	/*
	 * Allocate PM state structure for ppb.
	 */
	ppb->ppb_pwr_p = (pci_pwr_t *)
	    kmem_zalloc(sizeof (pci_pwr_t), KM_SLEEP);
	ppb->ppb_pwr_p->pwr_fp = 0;

	pmcsr_bse = PCI_CAP_GET8(conf_hdl, NULL, ppb->ppb_pm_cap_ptr,
	    PCI_PMCSR_BSE);

	pmcap = PCI_CAP_GET16(conf_hdl, NULL, ppb->ppb_pm_cap_ptr,
	    PCI_PMCAP);

	if (pmcap == PCI_CAP_EINVAL16 || pmcsr_bse == PCI_CAP_EINVAL8) {
		pci_config_teardown(&conf_hdl);
		return;
	}

	if (pmcap & PCI_PMCAP_D1) {
		DEBUG0(DBG_PWR, pdip, "setup: B1 state supported\n");
		ppb->ppb_pwr_p->pwr_flags |= PCI_PWR_B1_CAPABLE;
	} else {
		DEBUG0(DBG_PWR, pdip, "setup: B1 state NOT supported\n");
	}
	if (pmcap & PCI_PMCAP_D2) {
		DEBUG0(DBG_PWR, pdip, "setup: B2 state supported\n");
		ppb->ppb_pwr_p->pwr_flags |= PCI_PWR_B2_CAPABLE;
	} else {
		DEBUG0(DBG_PWR, pdip, "setup: B2 via D2 NOT supported\n");
	}

	if (pmcsr_bse & PCI_PMCSR_BSE_BPCC_EN) {
		DEBUG0(DBG_PWR, pdip,
		    "setup: bridge power/clock control enabled\n");
	} else {
		DEBUG0(DBG_PWR, pdip,
		    "setup: bridge power/clock control disabled\n");

		kmem_free(ppb->ppb_pwr_p, sizeof (pci_pwr_t));
		ppb->ppb_pwr_p = NULL;
		pci_config_teardown(&conf_hdl);

		return;
	}

	/*
	 * PCI states D0 and D3 always are supported for normal PCI
	 * devices.  D1 and D2 are optional which are checked for above.
	 * Bridge function states D0-D3 correspond to secondary bus states
	 * B0-B3, EXCEPT if PCI_PMCSR_BSE_B2_B3 is set.  In this case, setting
	 * the bridge function to D3 will set the bridge bus to state B2 instead
	 * of B3.  D2 will not correspond to B2 (and in fact, probably
	 * won't be D2 capable).  Implicitly, this means that if
	 * PCI_PMCSR_BSE_B2_B3 is set, the bus will not be B3 capable.
	 */
	if (pmcsr_bse & PCI_PMCSR_BSE_B2_B3) {
		ppb->ppb_pwr_p->pwr_flags |= PCI_PWR_B2_CAPABLE;
		DEBUG0(DBG_PWR, pdip, "B2 supported via D3\n");
	} else {
		ppb->ppb_pwr_p->pwr_flags |= PCI_PWR_B3_CAPABLE;
		DEBUG0(DBG_PWR, pdip, "B3 supported via D3\n");
	}

	ppb->ppb_pwr_p->pwr_dip = pdip;
	mutex_init(&ppb->ppb_pwr_p->pwr_mutex, NULL, MUTEX_DRIVER, NULL);

	i = 0;
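	/*
	 * Build the pm-components strings from lowest power to highest:
	 * the numeric prefixes are the power levels the PM framework will
	 * hand back later, so level 0 is the deepest bus state this bridge
	 * supports and the last entry is always full power (B0).
	 */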
	comp_array[i++] = "NAME=PCI bridge PM";
	if (ppb->ppb_pwr_p->pwr_flags & PCI_PWR_B3_CAPABLE) {
		comp_array[i++] = "0=Clock/Power Off (B3)";
	}
	if (ppb->ppb_pwr_p->pwr_flags & PCI_PWR_B2_CAPABLE) {
		comp_array[i++] = "1=Clock Off (B2)";
	}
	if (ppb->ppb_pwr_p->pwr_flags & PCI_PWR_B1_CAPABLE) {
		comp_array[i++] = "2=Bus Inactive (B1)";
	}
	comp_array[i++] = "3=Full Power (B0)";

	/*
	 * Create the pm-components property if it does not already exist.
	 */
	if (ddi_prop_update_string_array(DDI_DEV_T_NONE, pdip,
	    "pm-components", comp_array, i) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d pm-components prop update failed",
		    ddi_driver_name(pdip), ddi_get_instance(pdip));
		pci_config_teardown(&conf_hdl);
		mutex_destroy(&ppb->ppb_pwr_p->pwr_mutex);
		kmem_free(ppb->ppb_pwr_p, sizeof (pci_pwr_t));
		ppb->ppb_pwr_p = NULL;

		return;
	}

	if (ddi_prop_create(DDI_DEV_T_NONE, pdip, DDI_PROP_CANSLEEP,
	    "pm-want-child-notification?", NULL, NULL) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN,
		    "%s%d fail to create pm-want-child-notification? prop",
		    ddi_driver_name(pdip), ddi_get_instance(pdip));

		(void) ddi_prop_remove(DDI_DEV_T_NONE, pdip, "pm-components");
		pci_config_teardown(&conf_hdl);
		mutex_destroy(&ppb->ppb_pwr_p->pwr_mutex);
		kmem_free(ppb->ppb_pwr_p, sizeof (pci_pwr_t));
		ppb->ppb_pwr_p = NULL;

		return;
	}

	ppb->ppb_pwr_p->current_lvl =
	    pci_pwr_current_lvl(ppb->ppb_pwr_p);
}
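
The long comment in the middle of ppb_pwr_setup() carries the key mapping: bridge function states D0-D3 track secondary-bus states B0-B3, except that PCI_PMCSR_BSE_B2_B3 makes D3 stop at B2 (clock off, power retained) instead of B3. Below is a minimal sketch of that one decision, assuming only the constants already used above; the helper itself is hypothetical.

/*
 * Hypothetical helper: report which secondary-bus capability the D3
 * function state provides.  With PCI_PMCSR_BSE_B2_B3 set in the PMCSR
 * bridge-support-extensions byte, D3 leaves the bus in B2 (clock
 * stopped, power retained); otherwise D3 takes it to B3 (clock and
 * power removed).
 */
static uint32_t
ppb_d3_bus_capability(uint8_t pmcsr_bse)
{
	return ((pmcsr_bse & PCI_PMCSR_BSE_B2_B3) ?
	    PCI_PWR_B2_CAPABLE : PCI_PWR_B3_CAPABLE);
}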