/*
 * Enable or disable LSO (large send offload) for a DLD stream.
 *
 * On DLD_ENABLE, query the underlying MAC for its LSO capability; if
 * present, report the IPv4 TCP limits back to the caller through 'data'
 * (a dld_capab_lso_t) and record the LSO state on the stream.  On
 * DLD_DISABLE, clear the stream's LSO state.  Returns 0 on success and
 * ENOTSUP if LSO is unavailable or 'flags' is not recognized.
 */
static int
dld_capab_lso(dld_str_t *dsp, void *data, uint_t flags)
{
	dld_capab_lso_t *lso = data;
	mac_capab_lso_t cap;

	ASSERT(MAC_PERIM_HELD(dsp->ds_mh));

	if (flags == DLD_DISABLE) {
		dsp->ds_lso = B_FALSE;
		dsp->ds_lso_max = 0;
		return (0);
	}

	if (flags != DLD_ENABLE)
		return (ENOTSUP);

	/*
	 * Check if LSO is supported on this MAC; if not, make sure the
	 * stream state reflects that before failing the request.
	 */
	if (!mac_capab_get(dsp->ds_mh, MAC_CAPAB_LSO, &cap)) {
		dsp->ds_lso = B_FALSE;
		dsp->ds_lso_max = 0;
		return (ENOTSUP);
	}

	lso->lso_max = cap.lso_basic_tcp_ipv4.lso_max;
	lso->lso_flags = 0;
	/* translate the flag for mac clients */
	if ((cap.lso_flags & LSO_TX_BASIC_TCP_IPV4) != 0)
		lso->lso_flags |= DLD_LSO_BASIC_TCP_IPV4;
	dsp->ds_lso = B_TRUE;
	dsp->ds_lso_max = lso->lso_max;
	return (0);
}
/* ARGSUSED */ int vnic_dev_create(datalink_id_t vnic_id, datalink_id_t linkid, vnic_mac_addr_type_t *vnic_addr_type, int *mac_len, uchar_t *mac_addr, int *mac_slot, uint_t mac_prefix_len, uint16_t vid, vrid_t vrid, int af, mac_resource_props_t *mrp, uint32_t flags, vnic_ioc_diag_t *diag, cred_t *credp) { vnic_t *vnic; mac_register_t *mac; int err; boolean_t is_anchor = ((flags & VNIC_IOC_CREATE_ANCHOR) != 0); char vnic_name[MAXNAMELEN]; const mac_info_t *minfop; uint32_t req_hwgrp_flag = B_FALSE; *diag = VNIC_IOC_DIAG_NONE; rw_enter(&vnic_lock, RW_WRITER); /* does a VNIC with the same id already exist? */ err = mod_hash_find(vnic_hash, VNIC_HASH_KEY(vnic_id), (mod_hash_val_t *)&vnic); if (err == 0) { rw_exit(&vnic_lock); return (EEXIST); } vnic = kmem_cache_alloc(vnic_cache, KM_NOSLEEP); if (vnic == NULL) { rw_exit(&vnic_lock); return (ENOMEM); } bzero(vnic, sizeof (*vnic)); vnic->vn_id = vnic_id; vnic->vn_link_id = linkid; vnic->vn_vrid = vrid; vnic->vn_af = af; if (!is_anchor) { if (linkid == DATALINK_INVALID_LINKID) { err = EINVAL; goto bail; } /* * Open the lower MAC and assign its initial bandwidth and * MAC address. We do this here during VNIC creation and * do not wait until the upper MAC client open so that we * can validate the VNIC creation parameters (bandwidth, * MAC address, etc) and reserve a factory MAC address if * one was requested. */ err = mac_open_by_linkid(linkid, &vnic->vn_lower_mh); if (err != 0) goto bail; /* * VNIC(vlan) over VNICs(vlans) is not supported. 
*/ if (mac_is_vnic(vnic->vn_lower_mh)) { err = EINVAL; goto bail; } /* only ethernet support for now */ minfop = mac_info(vnic->vn_lower_mh); if (minfop->mi_nativemedia != DL_ETHER) { err = ENOTSUP; goto bail; } (void) dls_mgmt_get_linkinfo(vnic_id, vnic_name, NULL, NULL, NULL); err = mac_client_open(vnic->vn_lower_mh, &vnic->vn_mch, vnic_name, MAC_OPEN_FLAGS_IS_VNIC); if (err != 0) goto bail; /* assign a MAC address to the VNIC */ err = vnic_unicast_add(vnic, *vnic_addr_type, mac_slot, mac_prefix_len, mac_len, mac_addr, flags, diag, vid, req_hwgrp_flag); if (err != 0) { vnic->vn_muh = NULL; if (diag != NULL && req_hwgrp_flag) *diag = VNIC_IOC_DIAG_NO_HWRINGS; goto bail; } /* register to receive notification from underlying MAC */ vnic->vn_mnh = mac_notify_add(vnic->vn_lower_mh, vnic_notify_cb, vnic); *vnic_addr_type = vnic->vn_addr_type; vnic->vn_addr_len = *mac_len; vnic->vn_vid = vid; bcopy(mac_addr, vnic->vn_addr, vnic->vn_addr_len); if (vnic->vn_addr_type == VNIC_MAC_ADDR_TYPE_FACTORY) vnic->vn_slot_id = *mac_slot; /* * Set the initial VNIC capabilities. If the VNIC is created * over MACs which does not support nactive vlan, disable * VNIC's hardware checksum capability if its VID is not 0, * since the underlying MAC would get the hardware checksum * offset wrong in case of VLAN packets. */ if (vid == 0 || !mac_capab_get(vnic->vn_lower_mh, MAC_CAPAB_NO_NATIVEVLAN, NULL)) { if (!mac_capab_get(vnic->vn_lower_mh, MAC_CAPAB_HCKSUM, &vnic->vn_hcksum_txflags)) vnic->vn_hcksum_txflags = 0; } else { vnic->vn_hcksum_txflags = 0; } } /* register with the MAC module */ if ((mac = mac_alloc(MAC_VERSION)) == NULL) goto bail; mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER; mac->m_driver = vnic; mac->m_dip = vnic_get_dip(); mac->m_instance = (uint_t)-1; mac->m_src_addr = vnic->vn_addr; mac->m_callbacks = &vnic_m_callbacks; if (!is_anchor) { /* * If this is a VNIC based VLAN, then we check for the * margin unless it has been created with the force * flag. 
If we are configuring a VLAN over an etherstub, * we don't check the margin even if force is not set. */ if (vid == 0 || (flags & VNIC_IOC_CREATE_FORCE) != 0) { if (vid != VLAN_ID_NONE) vnic->vn_force = B_TRUE; /* * As the current margin size of the underlying mac is * used to determine the margin size of the VNIC * itself, request the underlying mac not to change * to a smaller margin size. */ err = mac_margin_add(vnic->vn_lower_mh, &vnic->vn_margin, B_TRUE); ASSERT(err == 0); } else { vnic->vn_margin = VLAN_TAGSZ; err = mac_margin_add(vnic->vn_lower_mh, &vnic->vn_margin, B_FALSE); if (err != 0) { mac_free(mac); if (diag != NULL) *diag = VNIC_IOC_DIAG_MACMARGIN_INVALID; goto bail; } } mac_sdu_get(vnic->vn_lower_mh, &mac->m_min_sdu, &mac->m_max_sdu); err = mac_mtu_add(vnic->vn_lower_mh, &mac->m_max_sdu, B_FALSE); if (err != 0) { VERIFY(mac_margin_remove(vnic->vn_lower_mh, vnic->vn_margin) == 0); mac_free(mac); if (diag != NULL) *diag = VNIC_IOC_DIAG_MACMTU_INVALID; goto bail; } vnic->vn_mtu = mac->m_max_sdu; } else { vnic->vn_margin = VLAN_TAGSZ; mac->m_min_sdu = 1; mac->m_max_sdu = ANCHOR_VNIC_MAX_MTU; vnic->vn_mtu = ANCHOR_VNIC_MAX_MTU; } mac->m_margin = vnic->vn_margin; err = mac_register(mac, &vnic->vn_mh); mac_free(mac); if (err != 0) { if (!is_anchor) { VERIFY(mac_mtu_remove(vnic->vn_lower_mh, vnic->vn_mtu) == 0); VERIFY(mac_margin_remove(vnic->vn_lower_mh, vnic->vn_margin) == 0); } goto bail; } /* Set the VNIC's MAC in the client */ if (!is_anchor) { mac_set_upper_mac(vnic->vn_mch, vnic->vn_mh, mrp); if (mrp != NULL) { if ((mrp->mrp_mask & MRP_RX_RINGS) != 0 || (mrp->mrp_mask & MRP_TX_RINGS) != 0) { req_hwgrp_flag = B_TRUE; } err = mac_client_set_resources(vnic->vn_mch, mrp); if (err != 0) { VERIFY(mac_mtu_remove(vnic->vn_lower_mh, vnic->vn_mtu) == 0); VERIFY(mac_margin_remove(vnic->vn_lower_mh, vnic->vn_margin) == 0); (void) mac_unregister(vnic->vn_mh); goto bail; } } } err = dls_devnet_create(vnic->vn_mh, vnic->vn_id, crgetzoneid(credp)); if (err != 0) { 
VERIFY(is_anchor || mac_margin_remove(vnic->vn_lower_mh, vnic->vn_margin) == 0); if (!is_anchor) { VERIFY(mac_mtu_remove(vnic->vn_lower_mh, vnic->vn_mtu) == 0); VERIFY(mac_margin_remove(vnic->vn_lower_mh, vnic->vn_margin) == 0); } (void) mac_unregister(vnic->vn_mh); goto bail; } /* add new VNIC to hash table */ err = mod_hash_insert(vnic_hash, VNIC_HASH_KEY(vnic_id), (mod_hash_val_t)vnic); ASSERT(err == 0); vnic_count++; /* * Now that we've enabled this VNIC, we should go through and update the * link state by setting it to our parents. */ vnic->vn_enabled = B_TRUE; if (is_anchor) { mac_link_update(vnic->vn_mh, LINK_STATE_UP); } else { mac_link_update(vnic->vn_mh, mac_client_stat_get(vnic->vn_mch, MAC_STAT_LINK_STATE)); } rw_exit(&vnic_lock); return (0); bail: rw_exit(&vnic_lock); if (!is_anchor) { if (vnic->vn_mnh != NULL) (void) mac_notify_remove(vnic->vn_mnh, B_TRUE); if (vnic->vn_muh != NULL) (void) mac_unicast_remove(vnic->vn_mch, vnic->vn_muh); if (vnic->vn_mch != NULL) mac_client_close(vnic->vn_mch, MAC_CLOSE_FLAGS_IS_VNIC); if (vnic->vn_lower_mh != NULL) mac_close(vnic->vn_lower_mh); } kmem_cache_free(vnic_cache, vnic); return (err); }
/*
 * Open and configure the physical MAC device named 'mac' for this xnb
 * (Xen netback) instance: validate that it is native Ethernet with an
 * SDU no larger than XNBMAXPKT, open a MAC client, install the receive
 * path (filtered or unfiltered), optionally enable promiscuous mode,
 * optionally set the physical address, and record the device's hardware
 * checksum capability.  Returns B_TRUE on success; on any failure the
 * MAC is closed via i_xnbo_close_mac() and B_FALSE is returned.
 */
static boolean_t
xnbo_open_mac(xnb_t *xnbp, char *mac)
{
	xnbo_t *xnbop = xnbp->xnb_flavour_data;
	int err;
	const mac_info_t *mi;
	void (*rx_fn)(void *, mac_resource_handle_t, mblk_t *, boolean_t);
	struct ether_addr ea;
	uint_t max_sdu;
	mac_diag_t diag;

	if ((err = mac_open_by_linkname(mac, &xnbop->o_mh)) != 0) {
		cmn_err(CE_WARN, "xnbo_open_mac: "
		    "cannot open mac for link %s (%d)", mac, err);
		return (B_FALSE);
	}
	ASSERT(xnbop->o_mh != NULL);

	mi = mac_info(xnbop->o_mh);
	ASSERT(mi != NULL);

	if (mi->mi_media != DL_ETHER) {
		cmn_err(CE_WARN, "xnbo_open_mac: "
		    "device is not DL_ETHER (%d)", mi->mi_media);
		i_xnbo_close_mac(xnbp, B_TRUE);
		return (B_FALSE);
	}
	if (mi->mi_media != mi->mi_nativemedia) {
		cmn_err(CE_WARN, "xnbo_open_mac: "
		    "device media and native media mismatch (%d != %d)",
		    mi->mi_media, mi->mi_nativemedia);
		i_xnbo_close_mac(xnbp, B_TRUE);
		return (B_FALSE);
	}

	mac_sdu_get(xnbop->o_mh, NULL, &max_sdu);
	if (max_sdu > XNBMAXPKT) {
		cmn_err(CE_WARN, "xnbo_open_mac: mac device SDU too big (%d)",
		    max_sdu);
		i_xnbo_close_mac(xnbp, B_TRUE);
		return (B_FALSE);
	}

	/*
	 * MAC_OPEN_FLAGS_MULTI_PRIMARY is relevant when we are migrating a
	 * guest on the localhost itself. In this case we would have the MAC
	 * client open for the guest being migrated *and* also for the
	 * migrated guest (i.e. the former will be active till the migration
	 * is complete when the latter will be activated). This flag states
	 * that it is OK for mac_unicast_add to add the primary MAC unicast
	 * address multiple times.
	 */
	/*
	 * BUGFIX: capture the return value in 'err' so that the warning
	 * below reports the actual failure code; previously 'err' still
	 * held the (successful) mac_open_by_linkname result, so the log
	 * always showed error 0.
	 */
	err = mac_client_open(xnbop->o_mh, &xnbop->o_mch, NULL,
	    MAC_OPEN_FLAGS_USE_DATALINK_NAME | MAC_OPEN_FLAGS_MULTI_PRIMARY);
	if (err != 0) {
		cmn_err(CE_WARN, "xnbo_open_mac: "
		    "error (%d) opening mac client", err);
		i_xnbo_close_mac(xnbp, B_TRUE);
		return (B_FALSE);
	}

	if (xnbop->o_need_rx_filter)
		rx_fn = xnbo_from_mac_filter;
	else
		rx_fn = xnbo_from_mac;

	err = mac_unicast_add_set_rx(xnbop->o_mch, NULL, MAC_UNICAST_PRIMARY,
	    &xnbop->o_mah, 0, &diag, xnbop->o_multicast_control ?
	    rx_fn : NULL, xnbp);
	if (err != 0) {
		cmn_err(CE_WARN, "xnbo_open_mac: failed to get the primary "
		    "MAC address of %s: %d", mac, err);
		i_xnbo_close_mac(xnbp, B_TRUE);
		return (B_FALSE);
	}
	if (!xnbop->o_multicast_control) {
		/*
		 * Without per-client multicast control we fall back to
		 * promiscuous mode so that multicast traffic still reaches
		 * the guest.
		 */
		err = mac_promisc_add(xnbop->o_mch, MAC_CLIENT_PROMISC_ALL,
		    rx_fn, xnbp, &xnbop->o_mphp, MAC_PROMISC_FLAGS_NO_TX_LOOP |
		    MAC_PROMISC_FLAGS_VLAN_TAG_STRIP);
		if (err != 0) {
			cmn_err(CE_WARN, "xnbo_open_mac: "
			    "cannot enable promiscuous mode of %s: %d",
			    mac, err);
			i_xnbo_close_mac(xnbp, B_TRUE);
			return (B_FALSE);
		}
		xnbop->o_promiscuous = B_TRUE;
	}

	if (xnbop->o_need_setphysaddr) {
		err = mac_unicast_primary_set(xnbop->o_mh, xnbp->xnb_mac_addr);
		/* Warn, but continue on. */
		if (err != 0) {
			bcopy(xnbp->xnb_mac_addr, ea.ether_addr_octet,
			    ETHERADDRL);
			cmn_err(CE_WARN, "xnbo_open_mac: "
			    "cannot set MAC address of %s to "
			    "%s: %d", mac, ether_sprintf(&ea), err);
		}
	}

	if (!mac_capab_get(xnbop->o_mh, MAC_CAPAB_HCKSUM,
	    &xnbop->o_hcksum_capab))
		xnbop->o_hcksum_capab = 0;

	xnbop->o_running = B_TRUE;

	return (B_TRUE);
}
/*
 * DL_CAPABILITY_ACK/DL_ERROR_ACK
 *
 * Build and send the DL_CAPABILITY_ACK reply for a DL_CAPABILITY_REQ.
 * The reply consists of a dl_capability_ack_t header followed by one
 * dl_capability_sub_t + payload pair for each capability the underlying
 * MAC (and the stream configuration) supports: hardware checksum,
 * zero-copy, the direct IP/DLD interface, and VRRP.  The message is
 * built in two passes: first 'subsize' is computed so that the mblk can
 * be resized once, then each capability is packed in sequence via 'ptr'.
 */
static void
proto_capability_advertise(dld_str_t *dsp, mblk_t *mp)
{
	dl_capability_ack_t *dlap;
	dl_capability_sub_t *dlsp;
	size_t subsize;
	dl_capab_dld_t dld;
	dl_capab_hcksum_t hcksum;
	dl_capab_zerocopy_t zcopy;
	dl_capab_vrrp_t vrrp;
	mac_capab_vrrp_t vrrp_capab;
	uint8_t *ptr;
	queue_t *q = dsp->ds_wq;
	mblk_t *mp1;
	boolean_t hcksum_capable = B_FALSE;
	boolean_t zcopy_capable = B_FALSE;
	boolean_t dld_capable = B_FALSE;
	boolean_t vrrp_capable = B_FALSE;

	/*
	 * Initially assume no capabilities.
	 */
	subsize = 0;

	/*
	 * Check if checksum offload is supported on this MAC.
	 */
	bzero(&hcksum, sizeof (dl_capab_hcksum_t));
	if (mac_capab_get(dsp->ds_mh, MAC_CAPAB_HCKSUM,
	    &hcksum.hcksum_txflags)) {
		if (hcksum.hcksum_txflags != 0) {
			hcksum_capable = B_TRUE;
			subsize += sizeof (dl_capability_sub_t) +
			    sizeof (dl_capab_hcksum_t);
		}
	}

	/*
	 * Check if zerocopy is supported on this interface.
	 * If advertising DL_CAPAB_ZEROCOPY has not been explicitly disabled
	 * then reserve space for that capability.
	 */
	if (!mac_capab_get(dsp->ds_mh, MAC_CAPAB_NO_ZCOPY, NULL) &&
	    !(dld_opt & DLD_OPT_NO_ZEROCOPY)) {
		zcopy_capable = B_TRUE;
		subsize += sizeof (dl_capability_sub_t) +
		    sizeof (dl_capab_zerocopy_t);
	}

	/*
	 * Direct capability negotiation interface between IP and DLD.
	 * Only offered when IP is directly above us on this stream.
	 */
	if (dsp->ds_sap == ETHERTYPE_IP && check_mod_above(dsp->ds_rq, "ip")) {
		dld_capable = B_TRUE;
		subsize += sizeof (dl_capability_sub_t) +
		    sizeof (dl_capab_dld_t);
	}

	/*
	 * Check if vrrp is supported on this interface. If so, reserve
	 * space for that capability.
	 */
	if (mac_capab_get(dsp->ds_mh, MAC_CAPAB_VRRP, &vrrp_capab)) {
		vrrp_capable = B_TRUE;
		subsize += sizeof (dl_capability_sub_t) +
		    sizeof (dl_capab_vrrp_t);
	}

	/*
	 * If there are no capabilities to advertise or if we
	 * can't allocate a response, send a DL_ERROR_ACK.
	 */
	if ((mp1 = reallocb(mp,
	    sizeof (dl_capability_ack_t) + subsize, 0)) == NULL) {
		dlerrorack(q, mp, DL_CAPABILITY_REQ, DL_NOTSUPPORTED, 0);
		return;
	}

	/* Fill in the fixed ack header; sub-capabilities follow it. */
	mp = mp1;
	DB_TYPE(mp) = M_PROTO;
	mp->b_wptr = mp->b_rptr + sizeof (dl_capability_ack_t) + subsize;
	bzero(mp->b_rptr, MBLKL(mp));
	dlap = (dl_capability_ack_t *)mp->b_rptr;
	dlap->dl_primitive = DL_CAPABILITY_ACK;
	dlap->dl_sub_offset = sizeof (dl_capability_ack_t);
	dlap->dl_sub_length = subsize;
	/* 'ptr' walks forward as each sub-capability is packed. */
	ptr = (uint8_t *)&dlap[1];

	/*
	 * TCP/IP checksum offload.
	 */
	if (hcksum_capable) {
		dlsp = (dl_capability_sub_t *)ptr;

		dlsp->dl_cap = DL_CAPAB_HCKSUM;
		dlsp->dl_length = sizeof (dl_capab_hcksum_t);
		ptr += sizeof (dl_capability_sub_t);

		hcksum.hcksum_version = HCKSUM_VERSION_1;
		dlcapabsetqid(&(hcksum.hcksum_mid), dsp->ds_rq);
		bcopy(&hcksum, ptr, sizeof (dl_capab_hcksum_t));
		ptr += sizeof (dl_capab_hcksum_t);
	}

	/*
	 * Zero copy
	 */
	if (zcopy_capable) {
		dlsp = (dl_capability_sub_t *)ptr;

		dlsp->dl_cap = DL_CAPAB_ZEROCOPY;
		dlsp->dl_length = sizeof (dl_capab_zerocopy_t);
		ptr += sizeof (dl_capability_sub_t);

		bzero(&zcopy, sizeof (dl_capab_zerocopy_t));
		zcopy.zerocopy_version = ZEROCOPY_VERSION_1;
		zcopy.zerocopy_flags = DL_CAPAB_VMSAFE_MEM;

		dlcapabsetqid(&(zcopy.zerocopy_mid), dsp->ds_rq);
		bcopy(&zcopy, ptr, sizeof (dl_capab_zerocopy_t));
		ptr += sizeof (dl_capab_zerocopy_t);
	}

	/*
	 * VRRP capability negotiation
	 */
	if (vrrp_capable) {
		dlsp = (dl_capability_sub_t *)ptr;

		dlsp->dl_cap = DL_CAPAB_VRRP;
		dlsp->dl_length = sizeof (dl_capab_vrrp_t);
		ptr += sizeof (dl_capability_sub_t);

		bzero(&vrrp, sizeof (dl_capab_vrrp_t));
		vrrp.vrrp_af = vrrp_capab.mcv_af;
		bcopy(&vrrp, ptr, sizeof (dl_capab_vrrp_t));
		ptr += sizeof (dl_capab_vrrp_t);
	}

	/*
	 * Direct capability negotiation interface between IP and DLD.
	 * Refer to dld.h for details.
	 */
	if (dld_capable) {
		dlsp = (dl_capability_sub_t *)ptr;
		dlsp->dl_cap = DL_CAPAB_DLD;
		dlsp->dl_length = sizeof (dl_capab_dld_t);
		ptr += sizeof (dl_capability_sub_t);

		bzero(&dld, sizeof (dl_capab_dld_t));
		dld.dld_version = DLD_CURRENT_VERSION;
		dld.dld_capab = (uintptr_t)dld_capab;
		dld.dld_capab_handle = (uintptr_t)dsp;

		dlcapabsetqid(&(dld.dld_mid), dsp->ds_rq);
		bcopy(&dld, ptr, sizeof (dl_capab_dld_t));
		ptr += sizeof (dl_capab_dld_t);
	}

	/* Packing must have consumed exactly the space reserved above. */
	ASSERT(ptr == mp->b_rptr + sizeof (dl_capability_ack_t) + subsize);
	qreply(q, mp);
}
/*
 * Register a new MAC with the framework on behalf of a GLDv3 driver.
 * Allocates and populates the mac_impl_t from the caller's
 * mac_register_t, validates the mandatory callbacks and SDU limits,
 * creates the devfs minor nodes and kstats, and inserts the MAC into
 * the global hash so it can be opened.  On success, the opaque handle
 * is returned through 'mhp'; on failure, everything acquired so far is
 * torn down (in reverse order) under the 'fail' label and an errno is
 * returned.
 */
/* ARGSUSED */
int
mac_register(mac_register_t *mregp, mac_handle_t *mhp)
{
	mac_impl_t *mip;
	mactype_t *mtype;
	int err = EINVAL;
	struct devnames *dnp = NULL;
	uint_t instance;
	boolean_t style1_created = B_FALSE;
	boolean_t style2_created = B_FALSE;
	char *driver;
	minor_t minor = 0;

	/* A successful call to mac_init_ops() sets the DN_GLDV3_DRIVER flag. */
	if (!GLDV3_DRV(ddi_driver_major(mregp->m_dip)))
		return (EINVAL);

	/* Find the required MAC-Type plugin. */
	if ((mtype = mactype_getplugin(mregp->m_type_ident)) == NULL)
		return (EINVAL);

	/* Create a mac_impl_t to represent this MAC. */
	mip = kmem_cache_alloc(i_mac_impl_cachep, KM_SLEEP);

	/*
	 * The mac is not ready for open yet.
	 */
	mip->mi_state_flags |= MIS_DISABLED;

	/*
	 * When a mac is registered, the m_instance field can be set to:
	 *
	 * 0:	Get the mac's instance number from m_dip.
	 *	This is usually used for physical device dips.
	 *
	 * [1 .. MAC_MAX_MINOR-1]: Use the value as the mac's instance number.
	 *	For example, when an aggregation is created with the key
	 *	option, "key" will be used as the instance number.
	 *
	 * -1:	Assign an instance number from [MAC_MAX_MINOR .. MAXMIN-1].
	 *	This is often used when a MAC of a virtual link is registered
	 *	(e.g., aggregation when "key" is not specified, or vnic).
	 *
	 * Note that the instance number is used to derive the mi_minor field
	 * of mac_impl_t, which will then be used to derive the name of kstats
	 * and the devfs nodes.  The first 2 cases are needed to preserve
	 * backward compatibility.
	 */
	switch (mregp->m_instance) {
	case 0:
		instance = ddi_get_instance(mregp->m_dip);
		break;
	case ((uint_t)-1):
		minor = mac_minor_hold(B_TRUE);
		if (minor == 0) {
			err = ENOSPC;
			goto fail;
		}
		instance = minor - 1;
		break;
	default:
		instance = mregp->m_instance;
		if (instance >= MAC_MAX_MINOR) {
			err = EINVAL;
			goto fail;
		}
		break;
	}

	mip->mi_minor = (minor_t)(instance + 1);
	mip->mi_dip = mregp->m_dip;
	mip->mi_clients_list = NULL;
	mip->mi_nclients = 0;

	/* Set the default IEEE Port VLAN Identifier */
	mip->mi_pvid = 1;

	/* Default bridge link learning protection values */
	mip->mi_llimit = 1000;
	mip->mi_ldecay = 200;

	driver = (char *)ddi_driver_name(mip->mi_dip);

	/* Construct the MAC name as <drvname><instance> */
	(void) snprintf(mip->mi_name, sizeof (mip->mi_name), "%s%d",
	    driver, instance);

	mip->mi_driver = mregp->m_driver;

	mip->mi_type = mtype;
	mip->mi_margin = mregp->m_margin;
	mip->mi_info.mi_media = mtype->mt_type;
	mip->mi_info.mi_nativemedia = mtype->mt_nativetype;
	/* SDU sanity: max must exceed min, multicast must fall in range. */
	if (mregp->m_max_sdu <= mregp->m_min_sdu)
		goto fail;
	if (mregp->m_multicast_sdu == 0)
		mregp->m_multicast_sdu = mregp->m_max_sdu;
	if (mregp->m_multicast_sdu < mregp->m_min_sdu ||
	    mregp->m_multicast_sdu > mregp->m_max_sdu)
		goto fail;
	mip->mi_sdu_min = mregp->m_min_sdu;
	mip->mi_sdu_max = mregp->m_max_sdu;
	mip->mi_sdu_multicast = mregp->m_multicast_sdu;
	mip->mi_info.mi_addr_length = mip->mi_type->mt_addr_length;
	/*
	 * If the media supports a broadcast address, cache a pointer to it
	 * in the mac_info_t so that upper layers can use it.
	 */
	mip->mi_info.mi_brdcst_addr = mip->mi_type->mt_brdcst_addr;

	mip->mi_v12n_level = mregp->m_v12n;

	/*
	 * Copy the unicast source address into the mac_info_t, but only if
	 * the MAC-Type defines a non-zero address length.  We need to
	 * handle MAC-Types that have an address length of 0
	 * (point-to-point protocol MACs for example).
	 */
	if (mip->mi_type->mt_addr_length > 0) {
		if (mregp->m_src_addr == NULL)
			goto fail;
		mip->mi_info.mi_unicst_addr =
		    kmem_alloc(mip->mi_type->mt_addr_length, KM_SLEEP);
		bcopy(mregp->m_src_addr, mip->mi_info.mi_unicst_addr,
		    mip->mi_type->mt_addr_length);

		/*
		 * Copy the fixed 'factory' MAC address from the immutable
		 * info. This is taken to be the MAC address currently in
		 * use.
		 */
		bcopy(mip->mi_info.mi_unicst_addr, mip->mi_addr,
		    mip->mi_type->mt_addr_length);

		/*
		 * At this point, we should set up the classification
		 * rules etc but we delay it till mac_open() so that
		 * the resource discovery has taken place and we
		 * know someone wants to use the device. Otherwise
		 * memory gets allocated for Rx ring structures even
		 * during probe.
		 */

		/* Copy the destination address if one is provided. */
		if (mregp->m_dst_addr != NULL) {
			bcopy(mregp->m_dst_addr, mip->mi_dstaddr,
			    mip->mi_type->mt_addr_length);
			mip->mi_dstaddr_set = B_TRUE;
		}
	} else if (mregp->m_src_addr != NULL) {
		/* An address with a zero-length address type is invalid. */
		goto fail;
	}

	/*
	 * The format of the m_pdata is specific to the plugin.  It is
	 * passed in as an argument to all of the plugin callbacks.  The
	 * driver can update this information by calling
	 * mac_pdata_update().
	 */
	if (mip->mi_type->mt_ops.mtops_ops & MTOPS_PDATA_VERIFY) {
		/*
		 * Verify if the supplied plugin data is valid.  Note that
		 * even if the caller passed in a NULL pointer as plugin data,
		 * we still need to verify if that's valid as the plugin may
		 * require plugin data to function.
		 */
		if (!mip->mi_type->mt_ops.mtops_pdata_verify(mregp->m_pdata,
		    mregp->m_pdata_size)) {
			goto fail;
		}
		if (mregp->m_pdata != NULL) {
			mip->mi_pdata =
			    kmem_alloc(mregp->m_pdata_size, KM_SLEEP);
			bcopy(mregp->m_pdata, mip->mi_pdata,
			    mregp->m_pdata_size);
			mip->mi_pdata_size = mregp->m_pdata_size;
		}
	} else if (mregp->m_pdata != NULL) {
		/*
		 * The caller supplied non-NULL plugin data, but the plugin
		 * does not recognize plugin data.
		 */
		err = EINVAL;
		goto fail;
	}

	/*
	 * Register the private properties.
	 */
	mac_register_priv_prop(mip, mregp->m_priv_props);

	/*
	 * Stash the driver callbacks into the mac_impl_t, but first sanity
	 * check to make sure all mandatory callbacks are set.
	 */
	if (mregp->m_callbacks->mc_getstat == NULL ||
	    mregp->m_callbacks->mc_start == NULL ||
	    mregp->m_callbacks->mc_stop == NULL ||
	    mregp->m_callbacks->mc_setpromisc == NULL ||
	    mregp->m_callbacks->mc_multicst == NULL) {
		goto fail;
	}
	mip->mi_callbacks = mregp->m_callbacks;

	/* Legacy (pre-GLDv3) drivers keep their own device node. */
	if (mac_capab_get((mac_handle_t)mip, MAC_CAPAB_LEGACY,
	    &mip->mi_capab_legacy)) {
		mip->mi_state_flags |= MIS_LEGACY;
		mip->mi_phy_dev = mip->mi_capab_legacy.ml_dev;
	} else {
		mip->mi_phy_dev = makedevice(ddi_driver_major(mip->mi_dip),
		    mip->mi_minor);
	}

	/*
	 * Allocate a notification thread.  thread_create blocks for memory
	 * if needed, it never fails.
	 */
	mip->mi_notify_thread = thread_create(NULL, 0, i_mac_notify_thread,
	    mip, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * Initialize the capabilities
	 */
	bzero(&mip->mi_rx_rings_cap, sizeof (mac_capab_rings_t));
	bzero(&mip->mi_tx_rings_cap, sizeof (mac_capab_rings_t));

	if (i_mac_capab_get((mac_handle_t)mip, MAC_CAPAB_VNIC, NULL))
		mip->mi_state_flags |= MIS_IS_VNIC;

	if (i_mac_capab_get((mac_handle_t)mip, MAC_CAPAB_AGGR, NULL))
		mip->mi_state_flags |= MIS_IS_AGGR;

	mac_addr_factory_init(mip);

	/*
	 * Enforce the virtualization level registered.
	 */
	if (mip->mi_v12n_level & MAC_VIRT_LEVEL1) {
		if (mac_init_rings(mip, MAC_RING_TYPE_RX) != 0 ||
		    mac_init_rings(mip, MAC_RING_TYPE_TX) != 0)
			goto fail;

		/*
		 * The driver needs to register at least rx rings for this
		 * virtualization level.
		 */
		if (mip->mi_rx_groups == NULL)
			goto fail;
	}

	/*
	 * The driver must set mc_unicst entry point to NULL when it advertises
	 * CAP_RINGS for rx groups.
	 */
	if (mip->mi_rx_groups != NULL) {
		if (mregp->m_callbacks->mc_unicst != NULL)
			goto fail;
	} else {
		if (mregp->m_callbacks->mc_unicst == NULL)
			goto fail;
	}

	/*
	 * Initialize MAC addresses. Must be called after mac_init_rings().
	 */
	mac_init_macaddr(mip);

	mip->mi_share_capab.ms_snum = 0;
	if (mip->mi_v12n_level & MAC_VIRT_HIO) {
		(void) mac_capab_get((mac_handle_t)mip, MAC_CAPAB_SHARES,
		    &mip->mi_share_capab);
	}

	/*
	 * Initialize the kstats for this device.
	 */
	mac_driver_stat_create(mip);

	/* Zero out any properties. */
	bzero(&mip->mi_resource_props, sizeof (mac_resource_props_t));

	if (mip->mi_minor <= MAC_MAX_MINOR) {
		/* Create a style-2 DLPI device */
		if (ddi_create_minor_node(mip->mi_dip, driver, S_IFCHR, 0,
		    DDI_NT_NET, CLONE_DEV) != DDI_SUCCESS)
			goto fail;
		style2_created = B_TRUE;

		/* Create a style-1 DLPI device */
		if (ddi_create_minor_node(mip->mi_dip, mip->mi_name, S_IFCHR,
		    mip->mi_minor, DDI_NT_NET, 0) != DDI_SUCCESS)
			goto fail;
		style1_created = B_TRUE;
	}

	mac_flow_l2tab_create(mip, &mip->mi_flow_tab);

	/* Publish the MAC; EEXIST if a MAC by this name is already known. */
	rw_enter(&i_mac_impl_lock, RW_WRITER);
	if (mod_hash_insert(i_mac_impl_hash,
	    (mod_hash_key_t)mip->mi_name, (mod_hash_val_t)mip) != 0) {
		rw_exit(&i_mac_impl_lock);
		err = EEXIST;
		goto fail;
	}

	DTRACE_PROBE2(mac__register, struct devnames *, dnp,
	    (mac_impl_t *), mip);

	/*
	 * Mark the MAC to be ready for open.
	 */
	mip->mi_state_flags &= ~MIS_DISABLED;
	rw_exit(&i_mac_impl_lock);

	atomic_inc_32(&i_mac_impl_count);

	cmn_err(CE_NOTE, "!%s registered", mip->mi_name);
	*mhp = (mac_handle_t)mip;
	return (0);

fail:
	/* Tear down, in reverse order, whatever was set up above. */
	if (style1_created)
		ddi_remove_minor_node(mip->mi_dip, mip->mi_name);

	if (style2_created)
		ddi_remove_minor_node(mip->mi_dip, driver);

	mac_addr_factory_fini(mip);

	/* Clean up registered MAC addresses */
	mac_fini_macaddr(mip);

	/* Clean up registered rings */
	mac_free_rings(mip, MAC_RING_TYPE_RX);
	mac_free_rings(mip, MAC_RING_TYPE_TX);

	/* Clean up notification thread */
	if (mip->mi_notify_thread != NULL)
		i_mac_notify_exit(mip);

	if (mip->mi_info.mi_unicst_addr != NULL) {
		kmem_free(mip->mi_info.mi_unicst_addr,
		    mip->mi_type->mt_addr_length);
		mip->mi_info.mi_unicst_addr = NULL;
	}

	mac_driver_stat_delete(mip);

	if (mip->mi_type != NULL) {
		atomic_dec_32(&mip->mi_type->mt_ref);
		mip->mi_type = NULL;
	}

	if (mip->mi_pdata != NULL) {
		kmem_free(mip->mi_pdata, mip->mi_pdata_size);
		mip->mi_pdata = NULL;
		mip->mi_pdata_size = 0;
	}

	if (minor != 0) {
		/* Only dynamically assigned minors are held/released. */
		ASSERT(minor > MAC_MAX_MINOR);
		mac_minor_rele(minor);
	}

	mip->mi_state_flags = 0;
	mac_unregister_priv_prop(mip);

	/*
	 * Clear the state before destroying the mac_impl_t
	 */
	mip->mi_state_flags = 0;

	kmem_cache_free(i_mac_impl_cachep, mip);
	return (err);
}