/* * Called from vt devname part. Checks if dip is attached. If it is, * return its major number. */ major_t vt_wc_attached(void) { major_t maj = (major_t)-1; mutex_enter(&vc_lock); if (wc_dip) maj = ddi_driver_major(wc_dip); mutex_exit(&vc_lock); return (maj); }
/*
 * If the bridge is empty, disable it
 */
int
npe_disable_empty_bridges_workaround(dev_info_t *child)
{
	/*
	 * Do not bind drivers to empty bridges.
	 * Fail above, if the bridge is found to be hotplug capable.
	 *
	 * Guard-clause form of the original conjunction; the three
	 * conditions are evaluated in the same order as before.
	 */
	if (ddi_driver_major(child) != ddi_name_to_major("pcieb"))
		return (0);

	if (ddi_get_child(child) != NULL)
		return (0);

	if (ddi_prop_get_int(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "pci-hotplug-type", INBAND_HPC_NONE) != INBAND_HPC_NONE)
		return (0);

	return (1);
}
/*
 * This call causes us to *forget* the instance number we've generated
 * for a given device if it was not permanent.
 *
 * dip:  devinfo node whose provisional instance assignment is dropped
 * addr: unit-address used to locate the node in the instance tree
 */
void
e_ddi_free_instance(dev_info_t *dip, char *addr)
{
	char *name;
	in_node_t *np;
	in_node_t *ap;		/* ancestor node */
	major_t major;
	struct devnames *dnp;
	in_drv_t *dp;		/* in_drv entry */

	/*
	 * Allow implementation override
	 */
	if (impl_free_instance(dip) == DDI_SUCCESS)
		return;

	/*
	 * If this is a pseudo-device, no instance number
	 * was assigned.
	 */
	if (is_pseudo_device(dip)) {
		return;
	}

	name = (char *)ddi_driver_name(dip);
	major = ddi_driver_major(dip);
	ASSERT(major != DDI_MAJOR_T_NONE);
	dnp = &devnamesp[major];

	/*
	 * Only one thread is allowed to change the state of the instance
	 * number assignments on the system at any given time.
	 */
	e_ddi_enter_instance();

	/* Locate the instance node and its driver entry for this dip. */
	np = in_devwalk(dip, &ap, addr);
	ASSERT(np);
	dp = in_drvwalk(np, name);
	ASSERT(dp);

	/* Only provisional (not yet persisted) assignments are removed. */
	if (dp->ind_state == IN_PROVISIONAL) {
		in_removedrv(dnp, dp);
	}

	/* If no drivers remain on the node, prune the node itself. */
	if (np->in_drivers == NULL) {
		in_removenode(dnp, np, ap);
	}
	e_ddi_exit_instance();
}
/*
 * save config regs for HyperTransport devices without drivers of classes:
 * memory controller and hostbridge
 *
 * Walks the immediate children of dip; for each driverless child whose
 * class/subclass is RAM memory or host bridge AND which has an HT
 * capability, saves its config registers and marks it with the
 * "htconfig-saved" property.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE if any step fails.  Note that on
 * pci_config_setup() failure we return immediately without visiting the
 * remaining siblings.
 */
int
npe_save_htconfig_children(dev_info_t *dip)
{
	dev_info_t *cdip = ddi_get_child(dip);
	ddi_acc_handle_t cfg_hdl;
	uint16_t ptr;
	int rval = DDI_SUCCESS;
	uint8_t cl, scl;

	for (; cdip != NULL; cdip = ddi_get_next_sibling(cdip)) {
		/* skip children that have a driver bound */
		if (ddi_driver_major(cdip) != DDI_MAJOR_T_NONE)
			continue;

		if (pci_config_setup(cdip, &cfg_hdl) != DDI_SUCCESS)
			return (DDI_FAILURE);

		cl = pci_config_get8(cfg_hdl, PCI_CONF_BASCLASS);
		scl = pci_config_get8(cfg_hdl, PCI_CONF_SUBCLASS);

		/* only RAM / host-bridge devices with an HT capability */
		if (((cl == PCI_CLASS_MEM && scl == PCI_MEM_RAM) ||
		    (cl == PCI_CLASS_BRIDGE && scl == PCI_BRIDGE_HOST)) &&
		    pci_htcap_locate(cfg_hdl, 0, 0, &ptr) == DDI_SUCCESS) {

			if (pci_save_config_regs(cdip) != DDI_SUCCESS) {
				cmn_err(CE_WARN, "Failed to save HT config "
				    "regs for %s\n", ddi_node_name(cdip));
				rval = DDI_FAILURE;

			} else if (ddi_prop_update_int(DDI_DEV_T_NONE, cdip,
			    "htconfig-saved", 1) != DDI_SUCCESS) {
				cmn_err(CE_WARN, "Failed to set htconfig-saved "
				    "property for %s\n", ddi_node_name(cdip));
				rval = DDI_FAILURE;
			}
		}

		/* handle is torn down whether or not the save happened */
		pci_config_teardown(&cfg_hdl);
	}

	return (rval);
}
/*
 * to get the soft state structure of a specific instance
 *
 * If dip is non-NULL, the instance number is taken from the dip (the
 * passed-in instance is ignored) and the dip's major number is checked
 * against the driver's cached major (rmc_comm_major), caching it on
 * first use.  Returns NULL on any mismatch or when no matching soft
 * state exists; caller is named in warnings for diagnosis.
 */
struct rmc_comm_state *
rmc_comm_getstate(dev_info_t *dip, int instance, const char *caller)
{
	struct rmc_comm_state *rcs = NULL;
	dev_info_t *sdip = NULL;
	major_t dmaj = NOMAJOR;

	if (dip != NULL) {
		/*
		 * Use the instance number from the <dip>; also,
		 * check that it really corresponds to this driver
		 */
		instance = ddi_get_instance(dip);
		dmaj = ddi_driver_major(dip);
		if (rmc_comm_major == NOMAJOR && dmaj != NOMAJOR)
			rmc_comm_major = dmaj;	/* first call: cache major */
		else if (dmaj != rmc_comm_major) {
			cmn_err(CE_WARN,
			    "%s: major number mismatch (%d vs. %d) in %s(),"
			    "probably due to child misconfiguration",
			    MYNAME, rmc_comm_major, dmaj, caller);
			instance = -1;	/* poison lookup below */
		}
	}
	if (instance >= 0)
		rcs = ddi_get_soft_state(rmc_comm_statep, instance);
	if (rcs != NULL) {
		sdip = rcs->dip;
		/*
		 * Cross-check the stored dip against the caller's dip:
		 * both NULL means the state was never attached; both
		 * non-NULL but different means misconfiguration.
		 */
		if (dip == NULL && sdip == NULL)
			rcs = NULL;
		else if (dip != NULL && sdip != NULL && sdip != dip) {
			cmn_err(CE_WARN,
			    "%s: devinfo mismatch (%p vs. %p) in %s(), "
			    "probably due to child misconfiguration", MYNAME,
			    (void *)dip, (void *)sdip, caller);
			rcs = NULL;
		}
	}

	return (rcs);
}
/*
 * This makes our memory of an instance assignment permanent
 * (i.e. eligible to be written out to path_to_inst).
 */
void
e_ddi_keep_instance(dev_info_t *dip)
{
	in_node_t *node, *ancestor;
	in_drv_t *drv;

	/* Don't make nulldriver instance assignments permanent */
	if (ddi_driver_major(dip) == nulldriver_major)
		return;

	/*
	 * Allow implementation override
	 */
	if (impl_keep_instance(dip) == DDI_SUCCESS)
		return;

	/*
	 * Nothing to do for pseudo devices.
	 */
	if (is_pseudo_device(dip))
		return;

	/*
	 * Only one thread is allowed to change the state of the instance
	 * number assignments on the system at any given time.
	 */
	e_ddi_enter_instance();

	node = in_devwalk(dip, &ancestor, NULL);
	ASSERT(node);
	drv = in_drvwalk(node, (char *)ddi_driver_name(dip));
	ASSERT(drv);

	/* Flip provisional -> permanent and flag the file as dirty. */
	mutex_enter(&e_ddi_inst_state.ins_serial);
	if (drv->ind_state == IN_PROVISIONAL) {
		drv->ind_state = IN_PERMANENT;
		i_log_devfs_instance_mod();
		e_ddi_inst_state.ins_dirty = 1;
	}
	mutex_exit(&e_ddi_inst_state.ins_serial);

	e_ddi_exit_instance();
}
/*
 * check for a possible substitute node. This routine searches the
 * children of parent_dip, looking for a node that:
 *	1. is a prom node
 *	2. binds to the same major number
 *	3. there is no need to verify that the unit-address information
 *	   match since it is likely that the substitute node
 *	   will have none (e.g. disk) - this would be the reason the
 *	   framework rejected it in the first place.
 *
 * Returns the matching child held (ndi_hold_devi), or NULL.
 * assumes parent_dip is held
 */
static dev_info_t *
find_alternate_node(dev_info_t *parent_dip, major_t major)
{
	int circ;
	dev_info_t *cur;

	/* lock down parent to keep children from being removed */
	ndi_devi_enter(parent_dip, &circ);

	cur = ddi_get_child(parent_dip);
	while (cur != NULL) {
		/* look for an obp node bound to the same major */
		if (ndi_dev_is_prom_node(cur) != 0 &&
		    ddi_driver_major(cur) == major) {
			ndi_hold_devi(cur);
			break;
		}
		cur = ddi_get_next_sibling(cur);
	}

	ndi_devi_exit(parent_dip, circ);
	return (cur);
}
/*
 * Driver attach(9E) entry point.  Handles DDI_ATTACH and DDI_RESUME;
 * only instance 0 is allowed.  On attach: allocates soft state, reads
 * .conf tunables, initializes the serial device, data protocol and
 * driver interface layers, then enables interrupts.
 */
static int
rmc_comm_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct rmc_comm_state *rcs = NULL;
	sig_state_t *current_sgn_p;
	int instance;

	/*
	 * only allow one instance
	 */
	instance = ddi_get_instance(dip);
	if (instance != 0)
		return (DDI_FAILURE);

	switch (cmd) {
	default:
		return (DDI_FAILURE);

	case DDI_RESUME:
		if ((rcs = rmc_comm_getstate(dip, instance,
		    "rmc_comm_attach")) == NULL)
			return (DDI_FAILURE);	/* this "can't happen" */

		rmc_comm_hw_reset(rcs);
		rmc_comm_set_irq(rcs, B_TRUE);
		rcs->dip = dip;

		/* stop the hardware watchdog if it was running */
		mutex_enter(&tod_lock);
		if (watchdog_enable && tod_ops.tod_set_watchdog_timer != NULL &&
		    watchdog_was_active) {
			(void) tod_ops.tod_set_watchdog_timer(0);
		}
		mutex_exit(&tod_lock);

		/* restart the data protocol from a clean sequence id */
		mutex_enter(rcs->dp_state.dp_mutex);
		dp_reset(rcs, INITIAL_SEQID, 1, 1);
		mutex_exit(rcs->dp_state.dp_mutex);

		/* re-publish the CPU signature if one was recorded */
		current_sgn_p = (sig_state_t *)modgetsymvalue(
		    "current_sgn", 0);
		if ((current_sgn_p != NULL) &&
		    (current_sgn_p->state_t.sig != 0)) {
			CPU_SIGNATURE(current_sgn_p->state_t.sig,
			    current_sgn_p->state_t.state,
			    current_sgn_p->state_t.sub_state, -1);
		}
		return (DDI_SUCCESS);

	case DDI_ATTACH:
		break;
	}

	/*
	 * Allocate the soft-state structure
	 */
	if (ddi_soft_state_zalloc(rmc_comm_statep, instance) != DDI_SUCCESS)
		return (DDI_FAILURE);
	if ((rcs = rmc_comm_getstate(dip, instance,
	    "rmc_comm_attach")) == NULL) {
		/*
		 * NOTE(review): rcs is NULL here, so rmc_comm_unattach()
		 * is being handed a NULL state pointer — presumably it
		 * tolerates that and only frees the soft state; confirm.
		 */
		rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
		return (DDI_FAILURE);
	}
	ddi_set_driver_private(dip, rcs);

	/* dip stays NULL until attach fully succeeds (see below) */
	rcs->dip = NULL;

	/*
	 * Set various options from .conf properties
	 */
	rcs->baud = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "baud-rate", 0);
	rcs->debug = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "debug", 0);

	/*
	 * the baud divisor factor tells us how to scale the result of
	 * the SIO_BAUD_TO_DIVISOR macro for platforms which do not
	 * use the standard 24MHz uart clock
	 */
	rcs->baud_divisor_factor = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
	    DDI_PROP_DONTPASS, "baud-divisor-factor", SIO_BAUD_DIVISOR_MIN);

	/*
	 * try to be reasonable if the scale factor contains a silly value
	 */
	if ((rcs->baud_divisor_factor < SIO_BAUD_DIVISOR_MIN) ||
	    (rcs->baud_divisor_factor > SIO_BAUD_DIVISOR_MAX))
		rcs->baud_divisor_factor = SIO_BAUD_DIVISOR_MIN;

	/*
	 * initialize serial device
	 */
	if (rmc_comm_serdev_init(rcs, dip) != 0) {
		rmc_comm_unattach(rcs, dip, instance, 0, 0, 0);
		return (DDI_FAILURE);
	}

	/*
	 * initialize data protocol
	 */
	rmc_comm_dp_init(rcs);

	/*
	 * initialize driver interface
	 */
	if (rmc_comm_drvintf_init(rcs) != 0) {
		rmc_comm_unattach(rcs, dip, instance, 0, 1, 1);
		return (DDI_FAILURE);
	}

	/*
	 * Initialise devinfo-related fields
	 */
	rcs->majornum = ddi_driver_major(dip);
	rcs->instance = instance;
	rcs->dip = dip;

	/*
	 * enable interrupts now
	 */
	rmc_comm_set_irq(rcs, B_TRUE);

	/*
	 * All done, report success
	 */
	ddi_report_dev(dip);
	mutex_enter(&rmc_comm_attach_lock);
	rcs->is_attached = B_TRUE;
	mutex_exit(&rmc_comm_attach_lock);
	return (DDI_SUCCESS);
}
/*
 * Register a new MAC with the GLDv3 framework.  Allocates and fills in a
 * mac_impl_t from the driver-supplied mac_register_t, creates the DLPI
 * minor nodes and kstats, and publishes the MAC in i_mac_impl_hash.
 * On success returns 0 and stores the handle in *mhp; on failure the
 * partially-constructed state is torn down and an errno is returned.
 */
/* ARGSUSED */
int
mac_register(mac_register_t *mregp, mac_handle_t *mhp)
{
	mac_impl_t		*mip;
	mactype_t		*mtype;
	int			err = EINVAL;
	struct devnames		*dnp = NULL;
	uint_t			instance;
	boolean_t		style1_created = B_FALSE;
	boolean_t		style2_created = B_FALSE;
	char			*driver;
	minor_t			minor = 0;

	/* A successful call to mac_init_ops() sets the DN_GLDV3_DRIVER flag. */
	if (!GLDV3_DRV(ddi_driver_major(mregp->m_dip)))
		return (EINVAL);

	/* Find the required MAC-Type plugin. */
	if ((mtype = mactype_getplugin(mregp->m_type_ident)) == NULL)
		return (EINVAL);

	/* Create a mac_impl_t to represent this MAC. */
	mip = kmem_cache_alloc(i_mac_impl_cachep, KM_SLEEP);

	/*
	 * The mac is not ready for open yet.
	 */
	mip->mi_state_flags |= MIS_DISABLED;

	/*
	 * When a mac is registered, the m_instance field can be set to:
	 *
	 *  0:	Get the mac's instance number from m_dip.
	 *	This is usually used for physical device dips.
	 *
	 *  [1 .. MAC_MAX_MINOR-1]: Use the value as the mac's instance number.
	 *	For example, when an aggregation is created with the key option,
	 *	"key" will be used as the instance number.
	 *
	 *  -1: Assign an instance number from [MAC_MAX_MINOR .. MAXMIN-1].
	 *	This is often used when a MAC of a virtual link is registered
	 *	(e.g., aggregation when "key" is not specified, or vnic).
	 *
	 * Note that the instance number is used to derive the mi_minor field
	 * of mac_impl_t, which will then be used to derive the name of kstats
	 * and the devfs nodes.  The first 2 cases are needed to preserve
	 * backward compatibility.
	 */
	switch (mregp->m_instance) {
	case 0:
		instance = ddi_get_instance(mregp->m_dip);
		break;
	case ((uint_t)-1):
		minor = mac_minor_hold(B_TRUE);
		if (minor == 0) {
			err = ENOSPC;
			goto fail;
		}
		instance = minor - 1;
		break;
	default:
		instance = mregp->m_instance;
		if (instance >= MAC_MAX_MINOR) {
			err = EINVAL;
			goto fail;
		}
		break;
	}

	mip->mi_minor = (minor_t)(instance + 1);
	mip->mi_dip = mregp->m_dip;
	mip->mi_clients_list = NULL;
	mip->mi_nclients = 0;

	/* Set the default IEEE Port VLAN Identifier */
	mip->mi_pvid = 1;

	/* Default bridge link learning protection values */
	mip->mi_llimit = 1000;
	mip->mi_ldecay = 200;

	driver = (char *)ddi_driver_name(mip->mi_dip);

	/* Construct the MAC name as <drvname><instance> */
	(void) snprintf(mip->mi_name, sizeof (mip->mi_name), "%s%d",
	    driver, instance);

	mip->mi_driver = mregp->m_driver;

	mip->mi_type = mtype;
	mip->mi_margin = mregp->m_margin;
	mip->mi_info.mi_media = mtype->mt_type;
	mip->mi_info.mi_nativemedia = mtype->mt_nativetype;

	/* Validate SDU bounds: min < max, multicast within [min, max]. */
	if (mregp->m_max_sdu <= mregp->m_min_sdu)
		goto fail;
	if (mregp->m_multicast_sdu == 0)
		mregp->m_multicast_sdu = mregp->m_max_sdu;
	if (mregp->m_multicast_sdu < mregp->m_min_sdu ||
	    mregp->m_multicast_sdu > mregp->m_max_sdu)
		goto fail;
	mip->mi_sdu_min = mregp->m_min_sdu;
	mip->mi_sdu_max = mregp->m_max_sdu;
	mip->mi_sdu_multicast = mregp->m_multicast_sdu;
	mip->mi_info.mi_addr_length = mip->mi_type->mt_addr_length;
	/*
	 * If the media supports a broadcast address, cache a pointer to it
	 * in the mac_info_t so that upper layers can use it.
	 */
	mip->mi_info.mi_brdcst_addr = mip->mi_type->mt_brdcst_addr;

	mip->mi_v12n_level = mregp->m_v12n;

	/*
	 * Copy the unicast source address into the mac_info_t, but only if
	 * the MAC-Type defines a non-zero address length.  We need to
	 * handle MAC-Types that have an address length of 0
	 * (point-to-point protocol MACs for example).
	 */
	if (mip->mi_type->mt_addr_length > 0) {
		if (mregp->m_src_addr == NULL)
			goto fail;
		mip->mi_info.mi_unicst_addr =
		    kmem_alloc(mip->mi_type->mt_addr_length, KM_SLEEP);
		bcopy(mregp->m_src_addr, mip->mi_info.mi_unicst_addr,
		    mip->mi_type->mt_addr_length);

		/*
		 * Copy the fixed 'factory' MAC address from the immutable
		 * info.  This is taken to be the MAC address currently in
		 * use.
		 */
		bcopy(mip->mi_info.mi_unicst_addr, mip->mi_addr,
		    mip->mi_type->mt_addr_length);

		/*
		 * At this point, we should set up the classification
		 * rules etc but we delay it till mac_open() so that
		 * the resource discovery has taken place and we
		 * know someone wants to use the device. Otherwise
		 * memory gets allocated for Rx ring structures even
		 * during probe.
		 */

		/* Copy the destination address if one is provided. */
		if (mregp->m_dst_addr != NULL) {
			bcopy(mregp->m_dst_addr, mip->mi_dstaddr,
			    mip->mi_type->mt_addr_length);
			mip->mi_dstaddr_set = B_TRUE;
		}
	} else if (mregp->m_src_addr != NULL) {
		/* zero-length address type must not supply a source address */
		goto fail;
	}

	/*
	 * The format of the m_pdata is specific to the plugin.  It is
	 * passed in as an argument to all of the plugin callbacks.  The
	 * driver can update this information by calling
	 * mac_pdata_update().
	 */
	if (mip->mi_type->mt_ops.mtops_ops & MTOPS_PDATA_VERIFY) {
		/*
		 * Verify if the supplied plugin data is valid.  Note that
		 * even if the caller passed in a NULL pointer as plugin data,
		 * we still need to verify if that's valid as the plugin may
		 * require plugin data to function.
		 */
		if (!mip->mi_type->mt_ops.mtops_pdata_verify(mregp->m_pdata,
		    mregp->m_pdata_size)) {
			goto fail;
		}
		if (mregp->m_pdata != NULL) {
			mip->mi_pdata =
			    kmem_alloc(mregp->m_pdata_size, KM_SLEEP);
			bcopy(mregp->m_pdata, mip->mi_pdata,
			    mregp->m_pdata_size);
			mip->mi_pdata_size = mregp->m_pdata_size;
		}
	} else if (mregp->m_pdata != NULL) {
		/*
		 * The caller supplied non-NULL plugin data, but the plugin
		 * does not recognize plugin data.
		 */
		err = EINVAL;
		goto fail;
	}

	/*
	 * Register the private properties.
	 */
	mac_register_priv_prop(mip, mregp->m_priv_props);

	/*
	 * Stash the driver callbacks into the mac_impl_t, but first sanity
	 * check to make sure all mandatory callbacks are set.
	 */
	if (mregp->m_callbacks->mc_getstat == NULL ||
	    mregp->m_callbacks->mc_start == NULL ||
	    mregp->m_callbacks->mc_stop == NULL ||
	    mregp->m_callbacks->mc_setpromisc == NULL ||
	    mregp->m_callbacks->mc_multicst == NULL) {
		goto fail;
	}
	mip->mi_callbacks = mregp->m_callbacks;

	/* Legacy (pre-GLDv3) devices expose their own physical dev_t. */
	if (mac_capab_get((mac_handle_t)mip, MAC_CAPAB_LEGACY,
	    &mip->mi_capab_legacy)) {
		mip->mi_state_flags |= MIS_LEGACY;
		mip->mi_phy_dev = mip->mi_capab_legacy.ml_dev;
	} else {
		mip->mi_phy_dev = makedevice(ddi_driver_major(mip->mi_dip),
		    mip->mi_minor);
	}

	/*
	 * Allocate a notification thread. thread_create blocks for memory
	 * if needed, it never fails.
	 */
	mip->mi_notify_thread = thread_create(NULL, 0, i_mac_notify_thread,
	    mip, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * Initialize the capabilities
	 */
	bzero(&mip->mi_rx_rings_cap, sizeof (mac_capab_rings_t));
	bzero(&mip->mi_tx_rings_cap, sizeof (mac_capab_rings_t));

	if (i_mac_capab_get((mac_handle_t)mip, MAC_CAPAB_VNIC, NULL))
		mip->mi_state_flags |= MIS_IS_VNIC;

	if (i_mac_capab_get((mac_handle_t)mip, MAC_CAPAB_AGGR, NULL))
		mip->mi_state_flags |= MIS_IS_AGGR;

	mac_addr_factory_init(mip);

	/*
	 * Enforce the virtrualization level registered.
	 */
	if (mip->mi_v12n_level & MAC_VIRT_LEVEL1) {
		if (mac_init_rings(mip, MAC_RING_TYPE_RX) != 0 ||
		    mac_init_rings(mip, MAC_RING_TYPE_TX) != 0)
			goto fail;

		/*
		 * The driver needs to register at least rx rings for this
		 * virtualization level.
		 */
		if (mip->mi_rx_groups == NULL)
			goto fail;
	}

	/*
	 * The driver must set mc_unicst entry point to NULL when it advertises
	 * CAP_RINGS for rx groups.
	 */
	if (mip->mi_rx_groups != NULL) {
		if (mregp->m_callbacks->mc_unicst != NULL)
			goto fail;
	} else {
		if (mregp->m_callbacks->mc_unicst == NULL)
			goto fail;
	}

	/*
	 * Initialize MAC addresses. Must be called after mac_init_rings().
	 */
	mac_init_macaddr(mip);

	mip->mi_share_capab.ms_snum = 0;
	if (mip->mi_v12n_level & MAC_VIRT_HIO) {
		(void) mac_capab_get((mac_handle_t)mip, MAC_CAPAB_SHARES,
		    &mip->mi_share_capab);
	}

	/*
	 * Initialize the kstats for this device.
	 */
	mac_driver_stat_create(mip);

	/* Zero out any properties. */
	bzero(&mip->mi_resource_props, sizeof (mac_resource_props_t));

	if (mip->mi_minor <= MAC_MAX_MINOR) {
		/* Create a style-2 DLPI device */
		if (ddi_create_minor_node(mip->mi_dip, driver, S_IFCHR, 0,
		    DDI_NT_NET, CLONE_DEV) != DDI_SUCCESS)
			goto fail;
		style2_created = B_TRUE;

		/* Create a style-1 DLPI device */
		if (ddi_create_minor_node(mip->mi_dip, mip->mi_name, S_IFCHR,
		    mip->mi_minor, DDI_NT_NET, 0) != DDI_SUCCESS)
			goto fail;
		style1_created = B_TRUE;
	}

	mac_flow_l2tab_create(mip, &mip->mi_flow_tab);

	/* Publish the MAC by name; EEXIST if the name is already taken. */
	rw_enter(&i_mac_impl_lock, RW_WRITER);
	if (mod_hash_insert(i_mac_impl_hash,
	    (mod_hash_key_t)mip->mi_name, (mod_hash_val_t)mip) != 0) {
		rw_exit(&i_mac_impl_lock);
		err = EEXIST;
		goto fail;
	}

	DTRACE_PROBE2(mac__register, struct devnames *, dnp,
	    (mac_impl_t *), mip);

	/*
	 * Mark the MAC to be ready for open.
	 */
	mip->mi_state_flags &= ~MIS_DISABLED;
	rw_exit(&i_mac_impl_lock);

	atomic_inc_32(&i_mac_impl_count);

	cmn_err(CE_NOTE, "!%s registered", mip->mi_name);
	*mhp = (mac_handle_t)mip;
	return (0);

fail:
	if (style1_created)
		ddi_remove_minor_node(mip->mi_dip, mip->mi_name);

	if (style2_created)
		ddi_remove_minor_node(mip->mi_dip, driver);

	mac_addr_factory_fini(mip);

	/* Clean up registered MAC addresses */
	mac_fini_macaddr(mip);

	/* Clean up registered rings */
	mac_free_rings(mip, MAC_RING_TYPE_RX);
	mac_free_rings(mip, MAC_RING_TYPE_TX);

	/* Clean up notification thread */
	if (mip->mi_notify_thread != NULL)
		i_mac_notify_exit(mip);

	if (mip->mi_info.mi_unicst_addr != NULL) {
		kmem_free(mip->mi_info.mi_unicst_addr,
		    mip->mi_type->mt_addr_length);
		mip->mi_info.mi_unicst_addr = NULL;
	}

	mac_driver_stat_delete(mip);

	if (mip->mi_type != NULL) {
		atomic_dec_32(&mip->mi_type->mt_ref);
		mip->mi_type = NULL;
	}

	if (mip->mi_pdata != NULL) {
		kmem_free(mip->mi_pdata, mip->mi_pdata_size);
		mip->mi_pdata = NULL;
		mip->mi_pdata_size = 0;
	}

	if (minor != 0) {
		ASSERT(minor > MAC_MAX_MINOR);
		mac_minor_rele(minor);
	}

	/*
	 * NOTE(review): mi_state_flags is cleared twice in this fail path
	 * (here and again below); the first assignment appears redundant.
	 */
	mip->mi_state_flags = 0;
	mac_unregister_priv_prop(mip);

	/*
	 * Clear the state before destroying the mac_impl_t
	 */
	mip->mi_state_flags = 0;
	kmem_cache_free(i_mac_impl_cachep, mip);
	return (err);
}
/*
 * Look up an instance number for a dev_info node, and assign one if it does
 * not have one (the dev_info node has devi_name and devi_addr already set).
 *
 * Returns the (possibly newly allocated) instance number.  New
 * assignments are created in IN_PROVISIONAL state; they become
 * permanent via e_ddi_keep_instance().
 */
uint_t
e_ddi_assign_instance(dev_info_t *dip)
{
	char *name;
	in_node_t *ap, *np;
	in_drv_t *dp;
	major_t major;
	uint_t ret;
	char *bname;

	/*
	 * Allow implementation to override
	 */
	if ((ret = impl_assign_instance(dip)) != (uint_t)-1)
		return (ret);

	/*
	 * If this is a pseudo-device, use the instance number
	 * assigned by the pseudo nexus driver. The mutex is
	 * not needed since the instance tree is not used.
	 */
	if (is_pseudo_device(dip)) {
		return (ddi_get_instance(dip));
	}

	/*
	 * Only one thread is allowed to change the state of the instance
	 * number assignments on the system at any given time.
	 */
	e_ddi_enter_instance();

	/*
	 * Look for instance node, allocate one if not found
	 */
	np = in_devwalk(dip, &ap, NULL);
	if (np == NULL) {
		/*
		 * Block allocation (multi-port NIC contiguous instances)
		 * may have created the node; otherwise allocate directly.
		 */
		if (in_assign_instance_block(dip)) {
			np = in_devwalk(dip, &ap, NULL);
		} else {
			name = ddi_node_name(dip);
			np = in_alloc_node(name, ddi_get_name_addr(dip));
			ASSERT(np != NULL);
			in_enlist(ap, np);	/* insert into tree */
		}
	}
	ASSERT(np == in_devwalk(dip, &ap, NULL));

	/*
	 * Look for driver entry, allocate one if not found
	 */
	bname = (char *)ddi_driver_name(dip);
	dp = in_drvwalk(np, bname);
	if (dp == NULL) {
		dp = in_alloc_drv(bname);
		ASSERT(dp != NULL);
		major = ddi_driver_major(dip);
		ASSERT(major != DDI_MAJOR_T_NONE);
		in_endrv(np, dp);
		in_set_instance(dip, dp, major);
		dp->ind_state = IN_PROVISIONAL;
		in_hashdrv(dp);
	}

	ret = dp->ind_instance;

	e_ddi_exit_instance();
	return (ret);
}
/*
 * Return 1 if instance block was assigned for the path.
 *
 * For multi-port NIC cards, sequential instance assignment across all
 * ports on a card is highly desirable since the ppa is typically the
 * same as the instance number, and the ppa is used in the NIC's public
 * /dev name. This sequential assignment typically occurs as a result
 * of in_preassign_instance() after initial install, or by
 * i_ndi_init_hw_children() for NIC ports that share a common parent.
 *
 * Some NIC cards however use multi-function bridge chips, and to
 * support sequential instance assignment across all ports, without
 * disabling multi-threaded attach, we have a (currently) undocumented
 * hack to allocate instance numbers in contiguous blocks based on
 * driver.conf properties.
 *
 *                    ^
 *        /---------- ------------\
 *       pci@0                   pci@0,1       MULTI-FUNCTION BRIDGE CHIP
 *      /     \                 /       \
 * FJSV,e4ta@4  FJSV,e4ta@4,1  FJSV,e4ta@6  FJSV,e4ta@6,1    NIC PORTS
 *      n           n+2            n+2           n+3         INSTANCE
 *
 * For the above example, the following driver.conf properties would be
 * used to guarantee sequential instance number assignment.
 *
 * ddi-instance-blocks ="ib-FJSVe4ca", "ib-FJSVe4ta", "ib-generic";
 * ib-FJSVe4ca = "/pci@0/FJSV,e4ca@4", "/pci@0/FJSV,e4ca@4,1",
 *	"/pci@0,1/FJSV,e4ca@6", "/pci@0,1/FJSV,e4ca@6,1";
 * ib-FJSVe4ta = "/pci@0/FJSV,e4ta@4", "/pci@0/FJSV,e4ta@4,1",
 *	"/pci@0,1/FJSV,e4ta@6", "/pci@0,1/FJSV,e4ta@6,1";
 * ib-generic = "/pci@0/network@4", "/pci@0/network@4,1",
 *	"/pci@0,1/network@6", "/pci@0,1/network@6,1";
 *
 * The value of the 'ddi-instance-blocks' property references a series
 * of card specific properties, like 'ib-FJSV-e4ta', whose value
 * defines a single 'instance block'.  The 'instance block' describes
 * all the paths below a multi-function bridge, where each path is
 * called an 'instance path'.  The 'instance block' property value is a
 * series of 'instance paths'.  The number of 'instance paths' in an
 * 'instance block' defines the size of the instance block, and the
 * ordering of the 'instance paths' defines the instance number
 * assignment order for paths going through the 'instance block'.
 *
 * In the instance assignment code below, if a (path, driver) that
 * currently has no instance number has a path that goes through an
 * 'instance block', then block instance number allocation occurs.  The
 * block allocation code will find a sequential set of unused instance
 * numbers, and assign instance numbers for all the paths in the
 * 'instance block'.  Each path is assigned a persistent instance
 * number, even paths that don't exist in the device tree or fail
 * probe(9E).
 */
static int
in_assign_instance_block(dev_info_t *dip)
{
	char **ibn;		/* instance block names */
	uint_t nibn;		/* number of instance block names */
	uint_t ibni;		/* ibn index */
	char *driver;
	major_t major;
	char *path;
	char *addr;
	int plen;
	char **ibp;		/* instance block paths */
	uint_t nibp;		/* number of paths in instance block */
	uint_t ibpi;		/* ibp index */
	int ibplen;		/* length of instance block path */
	char *ipath;
	int instance_base;
	int splice;
	int i;

	/* check for fresh install case (in miniroot) */
	if (DEVI(dip)->devi_instance != -1)
		return (0);	/* already assigned */

	/*
	 * Check to see if we need to allocate a block of contiguous instance
	 * numbers by looking for the 'ddi-instance-blocks' property.
	 */
	if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "ddi-instance-blocks", &ibn, &nibn) != DDI_SUCCESS)
		return (0);	/* no instance block needed */

	/*
	 * Get information out about node we are processing.
	 *
	 * NOTE: Since the node is not yet at DS_INITIALIZED, ddi_pathname()
	 * will not return the unit-address of the final path component even
	 * though the node has an established devi_addr unit-address - so we
	 * need to add the unit-address by hand.
	 */
	driver = (char *)ddi_driver_name(dip);
	major = ddi_driver_major(dip);
	path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
	(void) ddi_pathname(dip, path);
	if ((addr = ddi_get_name_addr(dip)) != NULL) {
		(void) strcat(path, "@");
		(void) strcat(path, addr);
	}
	plen = strlen(path);

	/* loop through instance block names */
	for (ibni = 0; ibni < nibn; ibni++) {
		if (ibn[ibni] == NULL)
			continue;

		/* lookup instance block */
		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
		    DDI_PROP_DONTPASS, ibn[ibni],
		    &ibp, &nibp) != DDI_SUCCESS) {
			/* FIX: warning message previously read "devinition" */
			cmn_err(CE_WARN,
			    "no definition for instance block '%s' in %s.conf",
			    ibn[ibni], driver);
			continue;
		}

		/* Does 'path' go through this instance block? */
		for (ibpi = 0; ibpi < nibp; ibpi++) {
			if (ibp[ibpi] == NULL)
				continue;
			ibplen = strlen(ibp[ibpi]);
			if ((ibplen <= plen) && (strcmp(ibp[ibpi],
			    path + plen - ibplen) == 0))
				break;
		}
		if (ibpi >= nibp) {
			ddi_prop_free(ibp);
			continue;	/* no, try next instance block */
		}

		/* yes, allocate and assign instances for all paths in block */

		/*
		 * determine where we splice in instance paths and verify
		 * that none of the paths are too long.
		 */
		splice = plen - ibplen;
		for (i = 0; i < nibp; i++) {
			/*
			 * FIX: skip NULL entries here too - every other
			 * loop over ibp[] guards against them, and
			 * strlen(NULL) would panic.
			 */
			if (ibp[i] == NULL)
				continue;
			if ((splice + strlen(ibp[i]) + 1) >= MAXPATHLEN) {
				cmn_err(CE_WARN,
				    "path %d through instance block '%s' from "
				    "%s.conf too long", i, ibn[ibni], driver);
				break;
			}
		}
		if (i < nibp) {
			ddi_prop_free(ibp);
			continue;	/* too long */
		}

		/* allocate the instance block - no more failures */
		instance_base = in_next_instance_block(major, nibp);

		/* splice each instance path onto the common prefix */
		ipath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
		for (ibpi = 0; ibpi < nibp; ibpi++) {
			if (ibp[ibpi] == NULL)
				continue;
			(void) strcpy(ipath, path);
			(void) strcpy(ipath + splice, ibp[ibpi]);
			(void) in_pathin(ipath,
			    instance_base + ibpi, driver, NULL);
		}

		/* free allocations */
		kmem_free(ipath, MAXPATHLEN);
		ddi_prop_free(ibp);
		kmem_free(path, MAXPATHLEN);
		ddi_prop_free(ibn);

		/* notify devfsadmd to sync of path_to_inst file */
		mutex_enter(&e_ddi_inst_state.ins_serial);
		i_log_devfs_instance_mod();
		e_ddi_inst_state.ins_dirty = 1;
		mutex_exit(&e_ddi_inst_state.ins_serial);

		return (1);
	}

	/* our path did not go through any of the instance blocks */
	kmem_free(path, MAXPATHLEN);
	ddi_prop_free(ibn);
	return (0);
}
/*
 * The function is to get prom name according non-client dip node.
 * And the function will set the alternate node of dip to alt_dip
 * if it is exist which must be PROM node.
 *
 * Builds an OBP-acceptable path for dip into prom_path by walking up
 * to the closest PROM-node ancestor and then back down, substituting
 * alternate PROM child nodes (via find_alternate_node) where they
 * exist.  Returns 0 on success, EINVAL for MDI clients or when no
 * PROM ancestor is found within OBP_STACKDEPTH levels.
 */
static int
i_devi_to_promname(dev_info_t *dip, char *prom_path, dev_info_t **alt_dipp)
{
	dev_info_t *pdip, *cdip, *idip;
	char *unit_address, *nodename;
	major_t major;
	int depth, old_depth = 0;
	struct parinfo *parinfo = NULL;
	struct parinfo *info;
	int ret = 0;

	if (MDI_CLIENT(dip))
		return (EINVAL);

	/* fast path: OBP already understands this exact path */
	if (ddi_pathname_obp(dip, prom_path) != NULL) {
		return (0);
	}

	/*
	 * ddi_pathname_obp return NULL, but the obp path still could
	 * be different with the devfs path name, so need use a parents
	 * stack to compose the path name string layer by layer.
	 */

	/* find the closest ancestor which is a prom node */
	pdip = dip;
	parinfo = kmem_alloc(OBP_STACKDEPTH * sizeof (*parinfo), KM_SLEEP);
	for (depth = 0; ndi_dev_is_prom_node(pdip) == 0; depth++) {
		if (depth == OBP_STACKDEPTH) {
			ret = EINVAL;
			/* must not have been an obp node */
			goto out;
		}
		pdip = get_parent(pdip, &parinfo[depth]);
	}
	old_depth = depth;
	ASSERT(pdip);	/* at least root is prom node */
	if (pdip)
		(void) ddi_pathname(pdip, prom_path);
	ndi_hold_devi(pdip);

	/*
	 * Walk back down the saved parent stack, appending each level's
	 * node name (or its alternate PROM node's name) and unit-address.
	 *
	 * NOTE(review): cdip is only assigned inside "if (pdip)"; if pdip
	 * could ever be NULL on the first iteration, "pdip = cdip" below
	 * would read cdip uninitialized.  The ASSERT above suggests pdip
	 * is always non-NULL here - confirm.
	 */
	for (depth = old_depth; depth > 0; depth--) {
		info = &parinfo[depth - 1];
		idip = info->dip;
		nodename = ddi_node_name(idip);
		unit_address = ddi_get_name_addr(idip);

		if (pdip) {
			major = ddi_driver_major(idip);
			cdip = find_alternate_node(pdip, major);
			ndi_rele_devi(pdip);
			if (cdip) {
				nodename = ddi_node_name(cdip);
			}
		}

		/*
		 * node name + unitaddr to the prom_path
		 */
		(void) strcat(prom_path, "/");
		(void) strcat(prom_path, nodename);
		if (unit_address && (*unit_address)) {
			(void) strcat(prom_path, "@");
			(void) strcat(prom_path, unit_address);
		}
		pdip = cdip;
	}

	if (pdip) {
		ndi_rele_devi(pdip); /* hold from find_alternate_node */
	}

	/*
	 * Now pdip is the alternate node which is same hierarchy as dip
	 * if it exists.
	 */
	*alt_dipp = pdip;
out:
	if (parinfo) {
		/* release holds from get_parent() */
		for (depth = old_depth; depth > 0; depth--) {
			info = &parinfo[depth - 1];
			if (info && info->pdip)
				ndi_rele_devi(info->pdip);
		}
		kmem_free(parinfo, OBP_STACKDEPTH * sizeof (*parinfo));
	}
	return (ret);
}
/*
 * translate a devfs pathname to one that will be acceptable
 * by the prom.  In most cases, there is no translation needed.
 * For systems supporting generically named devices, the prom
 * may support nodes such as 'disk' that do not have any unit
 * address information (i.e. target,lun info).  If this is the
 * case, the ddi framework will reject the node as invalid and
 * populate the devinfo tree with nodes from the .conf file
 * (e.g. sd).  In this case, the names that show up in /devices
 * are sd - since the prom only knows about 'disk' nodes, this
 * routine detects this situation and does the conversion.
 * There are also cases such as pluto where the disk node in the
 * prom is named "SUNW,ssd" but in /devices the name is "ssd".
 *
 * If MPxIO is enabled, the translation involves following
 * pathinfo nodes to the "best" parent.
 *
 * return a 0 on success with the new device string in ret_buf.
 * Otherwise return the appropriate error code as we may be called
 * from the openprom driver.
 */
int
i_devname_to_promname(char *dev_name, char *ret_buf, size_t len)
{
	dev_info_t *dip, *pdip, *cdip, *alt_dip = NULL;
	mdi_pathinfo_t *pip = NULL;
	char *dev_path, *prom_path;
	char *unit_address, *minorname, *nodename;
	major_t major;
	char *rptr, *optr, *offline;
	size_t olen, rlen;
	int circ;
	int ret = 0;

	/* do some sanity checks */
	if ((dev_name == NULL) || (ret_buf == NULL) ||
	    (strlen(dev_name) > MAXPATHLEN)) {
		return (EINVAL);
	}

	/*
	 * Convert to a /devices name. Fail the translation if
	 * the name doesn't exist.
	 */
	dev_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	if (resolve_devfs_name(dev_name, dev_path) != 0 ||
	    strncmp(dev_path, "/devices/", 9) != 0) {
		kmem_free(dev_path, MAXPATHLEN);
		return (EINVAL);
	}
	/* strip the "/devices" prefix; keep the leading '/' */
	dev_name = dev_path + sizeof ("/devices") - 1;

	bzero(ret_buf, len);

	if (prom_finddevice(dev_name) != OBP_BADNODE) {
		/* we are done */
		(void) snprintf(ret_buf, len, "%s", dev_name);
		kmem_free(dev_path, MAXPATHLEN);
		return (0);
	}

	/*
	 * if we get here, then some portion of the device path is
	 * not understood by the prom.  We need to look for alternate
	 * names (e.g. replace ssd by disk) and mpxio enabled devices.
	 */
	dip = e_ddi_hold_devi_by_path(dev_name, 0);
	if (dip == NULL) {
		cmn_err(CE_NOTE, "cannot find dip for %s", dev_name);
		kmem_free(dev_path, MAXPATHLEN);
		return (EINVAL);
	}
	prom_path = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
	rlen = len;
	rptr = ret_buf;

	if (!MDI_CLIENT(dip)) {
		/* non-mpxio: single translation via i_devi_to_promname() */
		ret = i_devi_to_promname(dip, prom_path, &alt_dip);
		if (ret == 0) {
			minorname = strrchr(dev_name, ':');
			if (minorname && (minorname[1] != '\0')) {
				(void) strcat(prom_path, minorname);
			}
			(void) snprintf(rptr, rlen, "%s", prom_path);
		}
	} else {
		/*
		 * if get to here, means dip is a vhci client
		 */
		offline = kmem_zalloc(len, KM_SLEEP); /* offline paths */
		olen = len;
		optr = offline;
		/*
		 * The following code assumes that the phci client is at leaf
		 * level.
		 */
		ndi_devi_enter(dip, &circ);
		while ((pip = mdi_get_next_phci_path(dip, pip)) != NULL) {
			/*
			 * walk all paths associated to the client node
			 */
			bzero(prom_path, MAXPATHLEN);

			/*
			 * replace with mdi_hold_path() when mpxio goes into
			 * genunix
			 */
			MDI_PI_LOCK(pip);
			MDI_PI_HOLD(pip);
			MDI_PI_UNLOCK(pip);

			if (mdi_pi_pathname_obp(pip, prom_path) != NULL) {
				/*
				 * The path has different obp path
				 */
				goto minor_pathinfo;
			}

			pdip = mdi_pi_get_phci(pip);
			ndi_hold_devi(pdip);

			/*
			 * Get obp path name of the phci node firstly.
			 * NOTE: if the alternate node of pdip exists,
			 * the third argument of the i_devi_to_promname()
			 * would be set to the alternate node.
			 */
			(void) i_devi_to_promname(pdip, prom_path, &alt_dip);
			if (alt_dip != NULL) {
				ndi_rele_devi(pdip);
				pdip = alt_dip;
				ndi_hold_devi(pdip);
			}

			nodename = ddi_node_name(dip);
			unit_address = MDI_PI(pip)->pi_addr;

			major = ddi_driver_major(dip);
			cdip = find_alternate_node(pdip, major);

			if (cdip) {
				nodename = ddi_node_name(cdip);
			}

			/*
			 * node name + unitaddr to the prom_path
			 */
			(void) strcat(prom_path, "/");
			(void) strcat(prom_path, nodename);
			if (unit_address && (*unit_address)) {
				(void) strcat(prom_path, "@");
				(void) strcat(prom_path, unit_address);
			}
			if (cdip) {
				/* hold from find_alternate_node */
				ndi_rele_devi(cdip);
			}
			ndi_rele_devi(pdip);
minor_pathinfo:
			minorname = strrchr(dev_name, ':');
			if (minorname && (minorname[1] != '\0')) {
				(void) strcat(prom_path, minorname);
			}

			/*
			 * Online paths are packed into ret_buf as a series
			 * of NUL-separated strings; offline paths are
			 * accumulated separately and appended afterwards.
			 *
			 * NOTE(review): rlen/olen are size_t (unsigned), so
			 * "rlen <= 0" only catches exact zero and the
			 * "rlen -= strlen(rptr) + 1" above it can wrap on
			 * an exactly-full buffer - confirm intended.
			 */
			if (MDI_PI_IS_ONLINE(pip)) {
				(void) snprintf(rptr, rlen, "%s", prom_path);
				rlen -= strlen(rptr) + 1;
				rptr += strlen(rptr) + 1;
				if (rlen <= 0) /* drop paths we can't store */
					break;
			} else {	/* path is offline */
				(void) snprintf(optr, olen, "%s", prom_path);
				olen -= strlen(optr) + 1;
				if (olen > 0) /* drop paths we can't store */
					optr += strlen(optr) + 1;
			}

			MDI_PI_LOCK(pip);
			MDI_PI_RELE(pip);
			if (MDI_PI(pip)->pi_ref_cnt == 0)
				cv_broadcast(&MDI_PI(pip)->pi_ref_cv);
			MDI_PI_UNLOCK(pip);
		}
		ndi_devi_exit(dip, circ);
		ret = 0;
		if (rlen > 0) {
			/* now add as much of offline to ret_buf as possible */
			bcopy(offline, rptr, rlen);
		}
		kmem_free(offline, len);
	}
	/* release hold from e_ddi_hold_devi_by_path() */
	ndi_rele_devi(dip);

	/*
	 * Terminate the (possibly multi-string) result.
	 * NOTE(review): writing ret_buf[len - 2] assumes len >= 2 -
	 * confirm callers never pass a 1-byte buffer.
	 */
	ret_buf[len - 1] = '\0';
	ret_buf[len - 2] = '\0';

	kmem_free(dev_path, MAXPATHLEN);
	kmem_free(prom_path, MAXPATHLEN);
	return (ret);
}
/*
 * STREAMS close(9E) entry point for the 8042 PS/2 mouse driver.
 *
 * Tears down the stream in a fixed order: stop queue processing, wait out
 * any in-flight reset, cancel the reset timeout, free pending reset
 * messages, cancel the bufcall, detach the queues from the soft state,
 * and finally — for a physical (non-internal) open — relink the mouse
 * under the virtual mouse via consconfig_link().
 *
 * Returns 0 always.
 */
/*ARGSUSED*/
static int
mouse8042_close(queue_t *q, int flag, cred_t *cred_p)
{
	struct mouse_state *state;
	minor_t minor;

	state = (struct mouse_state *)q->q_ptr;

	/*
	 * Disable queue processing now, so that another reset cannot get in
	 * after we wait for the current reset (if any) to complete.
	 */
	qprocsoff(q);

	mutex_enter(&state->reset_mutex);
	while (state->reset_state != MSE_RESET_IDLE) {
		/*
		 * Waiting for the previous reset to finish is
		 * non-interruptible.  Some upper-level clients
		 * cannot deal with EINTR and will not close the
		 * STREAM properly, resulting in failure to reopen it
		 * within the same process.
		 */
		cv_wait(&state->reset_cv, &state->reset_mutex);
	}

	/* Reset is idle now; cancel any still-pending reset timeout. */
	if (state->reset_tid != 0) {
		(void) quntimeout(q, state->reset_tid);
		state->reset_tid = 0;
	}

	/* Release messages held for the reset protocol, if any. */
	if (state->reply_mp != NULL) {
		freemsg(state->reply_mp);
		state->reply_mp = NULL;
	}

	if (state->reset_ack_mp != NULL) {
		freemsg(state->reset_ack_mp);
		state->reset_ack_mp = NULL;
	}

	mutex_exit(&state->reset_mutex);

	mutex_enter(&state->ms_mutex);

	/* Cancel the pending bufcall (if one was scheduled). */
	if (state->bc_id != 0) {
		(void) qunbufcall(q, state->bc_id);
		state->bc_id = 0;
	}

	/* Detach the queues from the per-device soft state. */
	q->q_ptr = NULL;
	WR(q)->q_ptr = NULL;
	state->ms_rqp = NULL;
	state->ms_wqp = NULL;
	state->ms_opened = B_FALSE;

	/* Remember which minor was open; needed after ms_mutex is dropped. */
	minor = state->ms_minor;

	mutex_exit(&state->ms_mutex);

	if (!MOUSE8042_INTERNAL_OPEN(minor)) {
		/*
		 * Closing physical PS/2 mouse
		 *
		 * Link it back to virtual mouse, and
		 * mouse8042_open will be called as a result
		 * of the consconfig_link call.  Do NOT try
		 * this if the mouse is about to be detached!
		 *
		 * If linking back fails, this specific mouse
		 * will not be available underneath the virtual
		 * mouse, and can only be accessed via physical
		 * open.
		 */
		consconfig_link(ddi_driver_major(mouse8042_dip),
		    MOUSE8042_INTERNAL_MINOR(minor));
	}

	return (0);
}
/*
 * Open the slave (zone console) side of the zcons device.
 *
 * The slave may be opened any number of times; only the first open does
 * the real work.  That first open installs the autopush configuration so
 * the required STREAMS modules (with 'ptem' anchored, giving the console
 * terminal semantics), wires the soft state into the queue pair, turns on
 * queue processing, and pushes an M_SETOPTS upstream to establish the
 * stream head water marks and controlling-tty behavior.
 *
 * Returns 0 on success, EIO if the autopush setup fails, or ENOMEM if
 * the M_SETOPTS message cannot be allocated.
 */
/*ARGSUSED*/
static int
zc_slave_open(zc_state_t *zcs,
    queue_t *rqp,	/* read side queue of the stream being opened */
    dev_t *devp,	/* device number from the stream tail */
    int oflag,		/* open(2) flags supplied by the user */
    int sflag,		/* STREAMS open state flag */
    cred_t *credp)	/* caller credentials */
{
	mblk_t *setopts_mp;
	struct stroptions *stropts;
	major_t maj;
	minor_t min_no;
	minor_t last_min;
	uint_t anchor;

	/*
	 * Already open?  Nothing to do — the slave side supports an
	 * arbitrary number of concurrent opens.
	 */
	if ((zcs->zc_state & ZC_STATE_SOPEN) != 0) {
		ASSERT((rqp != NULL) && (WR(rqp)->q_ptr == zcs));
		return (0);
	}

	/*
	 * Arrange via sad(7D) for the necessary STREAMS modules to be
	 * autopushed, anchoring 'ptem' (see streamio(7i)) so the console
	 * always keeps terminal semantics.
	 */
	min_no = ddi_get_instance(zcs->zc_devinfo) << 1 | ZC_SLAVE_MINOR;
	maj = ddi_driver_major(zcs->zc_devinfo);
	last_min = 0;
	anchor = 1;
	if (kstr_autopush(SET_AUTOPUSH, &maj, &min_no, &last_min,
	    &anchor, zcons_mods) != 0) {
		DBG("zc_slave_open(): kstr_autopush() failed\n");
		return (EIO);
	}

	if ((setopts_mp = allocb(sizeof (struct stroptions),
	    BPRI_MED)) == NULL) {
		DBG("zc_slave_open(): mop allocation failed\n");
		return (ENOMEM);
	}

	zcs->zc_state |= ZC_STATE_SOPEN;

	/*
	 * Stash the soft state on both halves of the queue pair; q_ptr is
	 * the driver-private slot.
	 */
	WR(rqp)->q_ptr = rqp->q_ptr = zcs;

	qprocson(rqp);

	/*
	 * Record the slave read queue only after qprocson(), since the
	 * stream is not ready to process messages before then.
	 */
	zcs->zc_slave_rdq = rqp;

	/*
	 * Send M_SETOPTS upstream: hi/lo water marks for the stream head
	 * read queue, plus controlling-tty designation.
	 */
	setopts_mp->b_datap->db_type = M_SETOPTS;
	setopts_mp->b_wptr += sizeof (struct stroptions);
	stropts = (struct stroptions *)(void *)setopts_mp->b_rptr;
	stropts->so_flags = SO_HIWAT | SO_LOWAT | SO_ISTTY;
	stropts->so_hiwat = _TTY_BUFSIZ;
	stropts->so_lowat = 256;
	putnext(rqp, setopts_mp);

	return (0);
}
/*
 * Init and attach the root node.  root node is the first one to be
 * attached, so the process is somewhat "handcrafted".
 *
 * NOTE(review): the ordering here is deliberate boot sequencing (root
 * nexus before pseudo drivers, pm_init_locks() before any pseudo attach);
 * do not reorder.
 */
void
i_ddi_init_root()
{
#ifdef DDI_PROP_DEBUG
	(void) ddi_prop_debug(1);	/* Enable property debugging */
#endif	/* DDI_PROP_DEBUG */

	/*
	 * Initialize root node
	 */
	if (impl_ddi_sunbus_initchild(top_devinfo) != DDI_SUCCESS)
		panic("Could not initialize root nexus");

	/*
	 * Attach root node (no need to probe)
	 * Hold both devinfo and rootnex driver so they can't go away.
	 */
	DEVI(top_devinfo)->devi_ops = ndi_hold_driver(top_devinfo);
	ASSERT(DEV_OPS_HELD(DEVI(top_devinfo)->devi_ops));
	DEVI(top_devinfo)->devi_instance = e_ddi_assign_instance(top_devinfo);

	(void) i_ddi_load_drvconf(DEVI(top_devinfo)->devi_major);

	/* Mark the root as attaching across the devi_attach() call. */
	mutex_enter(&(DEVI(top_devinfo)->devi_lock));
	DEVI_SET_ATTACHING(top_devinfo);
	mutex_exit(&(DEVI(top_devinfo)->devi_lock));

	if (devi_attach(top_devinfo, DDI_ATTACH) != DDI_SUCCESS)
		panic("Could not attach root nexus");

	mutex_enter(&(DEVI(top_devinfo)->devi_lock));
	DEVI_CLR_ATTACHING(top_devinfo);
	mutex_exit(&(DEVI(top_devinfo)->devi_lock));

	mutex_init(&global_vhci_lock, NULL, MUTEX_DEFAULT, NULL);

	ndi_hold_devi(top_devinfo);	/* hold it forever */
	i_ddi_set_node_state(top_devinfo, DS_READY);

	/*
	 * Now, expand .conf children of root
	 */
	(void) i_ndi_make_spec_children(top_devinfo, 0);

	/*
	 * Must be set up before attaching root or pseudo drivers
	 */
	pm_init_locks();

	/*
	 * Attach options dip
	 */
	options_dip = i_ddi_attach_pseudo_node("options");

	/*
	 * Attach pseudo nexus and enumerate its children
	 */
	pseudo_dip = i_ddi_attach_pseudo_node(DEVI_PSEUDO_NEXNAME);
	(void) i_ndi_make_spec_children(pseudo_dip, 0);

	/*
	 * Attach and hold clone dip
	 */
	clone_dip = i_ddi_attach_pseudo_node("clone");
	clone_major = ddi_driver_major(clone_dip);

	/* Cache commonly-needed driver majors for later lookups. */
	mm_major = ddi_name_to_major("mm");
	nulldriver_major = ddi_name_to_major("nulldriver");

	/*
	 * Attach scsi_vhci for MPXIO, this registers scsi vhci class
	 * with the MPXIO framework.
	 */
	scsi_vhci_dip = i_ddi_attach_pseudo_node("scsi_vhci");
}
/*
 * dm2s_attach - Module's attach routine.
 *
 * Supports exactly one instance (instance 0) and only the DDI_ATTACH
 * command.  Allocates and initializes the per-instance soft state,
 * obtains the soft interrupt block cookie for the SCF event priority,
 * initializes the state lock and condition variable, and creates the
 * character minor node.  On any failure after soft state allocation,
 * dm2s_cleanup() undoes whatever the ms_clean flags record.
 *
 * Returns DDI_SUCCESS on success, DDI_FAILURE otherwise.
 */
int
dm2s_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	dm2s_t *dm2sp;
	char name[20];

	instance = ddi_get_instance(dip);

	/* Only one instance is supported. */
	if (instance != 0) {
		cmn_err(CE_WARN, "only one instance is supported");
		return (DDI_FAILURE);
	}

	if (cmd != DDI_ATTACH) {
		return (DDI_FAILURE);
	}

	if (ddi_soft_state_zalloc(dm2s_softstate, instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "softstate allocation failure");
		return (DDI_FAILURE);
	}

	dm2sp = (dm2s_t *)ddi_get_soft_state(dm2s_softstate, instance);
	if (dm2sp == NULL) {
		ddi_soft_state_free(dm2s_softstate, instance);
		cmn_err(CE_WARN, "softstate allocation failure.");
		return (DDI_FAILURE);
	}
	dm2sp->ms_dip = dip;
	dm2sp->ms_major = ddi_driver_major(dip);
	dm2sp->ms_ppa = instance;

	/*
	 * Get an interrupt block cookie corresponding to the
	 * interrupt priority of the event handler.
	 * Assert that the event priority is not re-defined to
	 * some higher priority.
	 */
	/* LINTED */
	ASSERT(SCF_EVENT_PRI == DDI_SOFTINT_LOW);
	if (ddi_get_soft_iblock_cookie(dip, SCF_EVENT_PRI,
	    &dm2sp->ms_ibcookie) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "ddi_get_soft_iblock_cookie failed.");
		goto error;
	}
	mutex_init(&dm2sp->ms_lock, NULL, MUTEX_DRIVER,
	    (void *)dm2sp->ms_ibcookie);

	dm2sp->ms_clean |= DM2S_CLEAN_LOCK;
	cv_init(&dm2sp->ms_wait, NULL, CV_DRIVER, NULL);
	dm2sp->ms_clean |= DM2S_CLEAN_CV;

	/*
	 * Use a bounded formatter: sprintf() into the fixed-size 'name'
	 * buffer was unbounded; snprintf() cannot overflow it.
	 */
	(void) snprintf(name, sizeof (name), "%s%d", DM2S_MODNAME, instance);
	if (ddi_create_minor_node(dip, name, S_IFCHR, instance,
	    DDI_PSEUDO, NULL) == DDI_FAILURE) {
		ddi_remove_minor_node(dip, NULL);
		cmn_err(CE_WARN, "Device node creation failed.");
		goto error;
	}

	dm2sp->ms_clean |= DM2S_CLEAN_NODE;
	ddi_set_driver_private(dip, (caddr_t)dm2sp);
	ddi_report_dev(dip);
	return (DDI_SUCCESS);
error:
	dm2s_cleanup(dm2sp);
	return (DDI_FAILURE);
}
/*
 * Close one side of the zcons device.
 *
 * The queue pointer identifies which side is closing.  Closing the master
 * detaches it and kicks the slave's write queue so queued messages can
 * drain while the master's read queue disappears.  Closing the slave
 * drains its write queue (forwarding to the master if still present,
 * NAKing ioctls, dropping everything else), kicks the master's write
 * queue, and clears the sad(7D) autopush configuration so a later reopen
 * can install it again.
 *
 * Returns 0 always.
 */
/*ARGSUSED1*/
static int
zc_close(queue_t *rqp, int flag, cred_t *credp)
{
	queue_t *slave_wq;
	mblk_t *mp;
	zc_state_t *zcs;
	major_t maj;
	minor_t min_no;

	zcs = (zc_state_t *)rqp->q_ptr;

	if (rqp == zcs->zc_master_rdq) {
		DBG("Closing master side");

		zcs->zc_master_rdq = NULL;
		zcs->zc_state &= ~ZC_STATE_MOPEN;

		/*
		 * qenable slave side write queue so that it can flush
		 * its messages as master's read queue is going away
		 */
		if (zcs->zc_slave_rdq != NULL) {
			qenable(WR(zcs->zc_slave_rdq));
		}

		qprocsoff(rqp);
		WR(rqp)->q_ptr = rqp->q_ptr = NULL;

	} else if (rqp == zcs->zc_slave_rdq) {
		DBG("Closing slave side");

		zcs->zc_state &= ~ZC_STATE_SOPEN;
		zcs->zc_slave_rdq = NULL;

		/*
		 * Drain the slave write queue: hand messages to the
		 * master if it is still around, otherwise NAK ioctls
		 * and free the rest.
		 */
		slave_wq = WR(rqp);
		while ((mp = getq(slave_wq)) != NULL) {
			if (zcs->zc_master_rdq != NULL)
				putnext(zcs->zc_master_rdq, mp);
			else if (mp->b_datap->db_type == M_IOCTL)
				miocnak(slave_wq, mp, 0, 0);
			else
				freemsg(mp);
		}

		/*
		 * Qenable master side write queue so that it can flush its
		 * messages as slaves's read queue is going away.
		 */
		if (zcs->zc_master_rdq != NULL)
			qenable(WR(zcs->zc_master_rdq));

		qprocsoff(rqp);
		WR(rqp)->q_ptr = rqp->q_ptr = NULL;

		/*
		 * Clear the sad configuration so that reopening doesn't
		 * fail to set up sad configuration.
		 */
		maj = ddi_driver_major(zcs->zc_devinfo);
		min_no = ddi_get_instance(zcs->zc_devinfo) << 1 |
		    ZC_SLAVE_MINOR;
		(void) kstr_autopush(CLR_AUTOPUSH, &maj, &min_no, NULL,
		    NULL, NULL);
	}

	return (0);
}
/*
 * STREAMS open(9E) entry point for the 8042 PS/2 mouse driver.
 *
 * Only one minor may be open at a time.  If a different minor is already
 * open, a physical open unlinks the device from the virtual mouse first
 * (which closes the old stream), then attaches the queues to the soft
 * state and records the new minor.  Opening from the virtual path while
 * physically open is rejected with EINVAL.
 *
 * Returns 0 on success, ENXIO if the driver is not attached, EINVAL for
 * an internal open over a physical one, or the consconfig_unlink() error.
 */
/*ARGSUSED*/
static int
mouse8042_open(
	queue_t	*q,
	dev_t	*devp,
	int	flag,
	int	sflag,
	cred_t	*cred_p)
{
	struct mouse_state *state;
	minor_t	minor = getminor(*devp);
	int rval;

	/* Fail if attach never completed. */
	if (mouse8042_dip == NULL)
		return (ENXIO);

	state = ddi_get_driver_private(mouse8042_dip);

	mutex_enter(&state->ms_mutex);

	if (state->ms_opened) {
		/*
		 * Exit if the same minor node is already open
		 */
		if (state->ms_minor == minor) {
			mutex_exit(&state->ms_mutex);
			return (0);
		}

		/*
		 * Check whether it is switch between physical and virtual
		 *
		 * Opening from virtual while the device is being physically
		 * opened by an application should not happen. So we ASSERT
		 * this in DEBUG version, and return error in the non-DEBUG
		 * case.
		 */
		ASSERT(!MOUSE8042_INTERNAL_OPEN(minor));

		if (MOUSE8042_INTERNAL_OPEN(minor)) {
			mutex_exit(&state->ms_mutex);
			return (EINVAL);
		}

		/*
		 * Opening the physical one while it is being underneath
		 * the virtual one.
		 *
		 * consconfig_unlink is called to unlink this device from
		 * the virtual one, thus the old stream serving for this
		 * device under the virtual one is closed, and then the
		 * lower driver's close routine (here is mouse8042_close)
		 * is also called to accomplish the whole stream close.
		 * Here we have to drop the lock because mouse8042_close
		 * also needs the lock.
		 *
		 * For mouse, the old stream is:
		 *	consms->["pushmod"->]"mouse_vp driver"
		 *
		 * After the consconfig_unlink returns, the old stream is
		 * closed and we grab the lock again to reopen this
		 * device as normal.
		 */
		mutex_exit(&state->ms_mutex);

		/*
		 * If unlink fails, fail the physical open.
		 */
		if ((rval = consconfig_unlink(ddi_driver_major(mouse8042_dip),
		    MOUSE8042_INTERNAL_MINOR(minor))) != 0) {
			return (rval);
		}

		mutex_enter(&state->ms_mutex);
	}

	/* Wire this stream's queue pair into the soft state. */
	q->q_ptr = (caddr_t)state;
	WR(q)->q_ptr = (caddr_t)state;
	state->ms_rqp = q;
	state->ms_wqp = WR(q);

	qprocson(q);

	state->ms_minor = minor;
	state->ms_opened = B_TRUE;

	mutex_exit(&state->ms_mutex);

	return (0);
}