/* * Add vlan ids of the given vsw device or port into its hash table. */ void vsw_vlan_add_ids(void *arg, int type) { int rv; int i; if (type == VSW_LOCALDEV) { vsw_t *vswp = (vsw_t *)arg; rv = mod_hash_insert(vswp->vlan_hashp, (mod_hash_key_t)VLAN_ID_KEY(vswp->pvid), (mod_hash_val_t)B_TRUE); if (rv != 0) { cmn_err(CE_WARN, "vsw%d: Duplicate vlan-id(%d) for " "the interface", vswp->instance, vswp->pvid); } for (i = 0; i < vswp->nvids; i++) { rv = mod_hash_insert(vswp->vlan_hashp, (mod_hash_key_t)VLAN_ID_KEY(vswp->vids[i].vl_vid), (mod_hash_val_t)B_TRUE); if (rv != 0) { cmn_err(CE_WARN, "vsw%d: Duplicate vlan-id(%d)" " for the interface", vswp->instance, vswp->pvid); } } } else if (type == VSW_VNETPORT) { vsw_port_t *portp = (vsw_port_t *)arg; vsw_t *vswp = portp->p_vswp; rv = mod_hash_insert(portp->vlan_hashp, (mod_hash_key_t)VLAN_ID_KEY(portp->pvid), (mod_hash_val_t)B_TRUE); if (rv != 0) { cmn_err(CE_WARN, "vsw%d: Duplicate vlan-id(%d) for " "the port(%d)", vswp->instance, vswp->pvid, portp->p_instance); } for (i = 0; i < portp->nvids; i++) { rv = mod_hash_insert(portp->vlan_hashp, (mod_hash_key_t)VLAN_ID_KEY(portp->vids[i].vl_vid), (mod_hash_val_t)B_TRUE); if (rv != 0) { cmn_err(CE_WARN, "vsw%d: Duplicate vlan-id(%d)" " for the port(%d)", vswp->instance, vswp->pvid, portp->p_instance); } } } }
/*
 * Add an entry into FDB for the given vsw.
 *
 * The key is a hash of the port's MAC address; the value is a
 * kmem-allocated vsw_fdbe_t pointing back at the port.  mod_hash
 * rejects duplicate keys, in which case we log a warning and release
 * the entry we allocated.
 */
void
vsw_fdbe_add(vsw_t *vswp, void *port)
{
	vsw_port_t	*vportp = (vsw_port_t *)port;
	vsw_fdbe_t	*fdbep;
	uint64_t	key = 0;
	int		ret;

	/* Derive the hash key from the port's MAC address. */
	KEY_HASH(key, &vportp->p_macaddr);

	fdbep = kmem_zalloc(sizeof (vsw_fdbe_t), KM_SLEEP);
	fdbep->portp = port;

	/*
	 * Note: duplicate keys will be rejected by mod_hash.
	 */
	ret = mod_hash_insert(vswp->fdb_hashp, (mod_hash_key_t)key,
	    (mod_hash_val_t)fdbep);
	if (ret == 0)
		return;

	cmn_err(CE_WARN, "vsw%d: Duplicate mac-address(%s) for "
	    "the port(%d)", vswp->instance,
	    ether_sprintf(&vportp->p_macaddr), vportp->p_instance);
	kmem_free(fdbep, sizeof (*fdbep));
}
/*
 * Create a new dls_vlan_t for the VLAN named vlanname on top of the MAC
 * identified by (macname, ddi_instance), tagged with vid.
 *
 * Returns 0 on success, EINVAL for an illegal name, EEXIST if a VLAN of
 * the same name is already registered, or the error from dls_link_hold().
 */
int
dls_vlan_create(const char *vlanname, const char *macname, uint_t ddi_instance,
    uint16_t vid)
{
	dls_link_t	*linkp;
	dls_vlan_t	*vlanp;
	uint_t		namelen;
	int		rc;

	/*
	 * Check to see the name is legal.  It must be less than IFNAMSIZ
	 * characters in length and must terminate with a digit (before the
	 * NUL, of course).
	 */
	namelen = strlen(vlanname);
	if (namelen == 0 || namelen >= IFNAMSIZ ||
	    !isdigit(vlanname[namelen - 1]))
		return (EINVAL);

	/*
	 * Get a reference to a dls_link_t representing the MAC.  This call
	 * will create one if necessary.
	 */
	rc = dls_link_hold(macname, ddi_instance, &linkp);
	if (rc != 0)
		return (rc);

	/*
	 * Allocate and initialize the new dls_vlan_t.
	 */
	vlanp = kmem_cache_alloc(i_dls_vlan_cachep, KM_SLEEP);
	(void) strlcpy(vlanp->dv_name, vlanname, sizeof (vlanp->dv_name));
	vlanp->dv_id = vid;
	vlanp->dv_dlp = linkp;

	/*
	 * Insert the entry into the table; a duplicate name means a VLAN
	 * of that name already exists, so undo the allocation and hold.
	 */
	rw_enter(&i_dls_vlan_lock, RW_WRITER);
	rc = mod_hash_insert(i_dls_vlan_hash, (mod_hash_key_t)vlanp->dv_name,
	    (mod_hash_val_t)vlanp);
	if (rc == 0) {
		i_dls_vlan_count++;
	} else {
		kmem_cache_free(i_dls_vlan_cachep, vlanp);
		dls_link_rele(linkp);
		rc = EEXIST;
	}
	rw_exit(&i_dls_vlan_lock);
	return (rc);
}
/*
 * Store (key, ptr) in the global space_hash.  The key string is copied;
 * on success the hash table owns the copy (its destructor frees it when
 * the entry is removed or the table torn down).  Returns 0 on success,
 * -1 on a NULL/empty key or any insertion failure.
 */
int
space_store(char *key, uintptr_t ptr)
{
	char	*s;
	int	rval;
	size_t	l;

	/* some sanity checks first */
	if (key == NULL) {
		return (-1);
	}
	l = (size_t)strlen(key);
	if (l == 0) {
		return (-1);
	}

	/* increment for null terminator */
	l++;

	/* alloc space for the string; the hash owns it only on success */
	s = kmem_alloc(l, KM_SLEEP);
	bcopy(key, s, l);

	rval = mod_hash_insert(space_hash, (mod_hash_key_t)s,
	    (mod_hash_val_t)ptr);
	switch (rval) {
	case 0:
		break;
#ifdef DEBUG
	case MH_ERR_DUPLICATE:
		cmn_err(CE_WARN, "space_store: duplicate key %s", key);
		rval = -1;
		break;
	case MH_ERR_NOMEM:
		cmn_err(CE_WARN, "space_store: no mem for key %s", key);
		rval = -1;
		break;
	default:
		cmn_err(CE_WARN, "space_store: unspecified error for key %s",
		    key);
		rval = -1;
		break;
#else
	default:
		rval = -1;
		break;
#endif
	}

	/*
	 * On failure the key was never inserted, so ownership of the copy
	 * stays with us; free it to avoid leaking it.
	 */
	if (rval != 0)
		kmem_free(s, l);

	return (rval);
}
/*
 * Send a solicited CT (common transport) request for the given command.
 *
 * Initializes the fcoet_exchange_t embedded in cmd, allocates a frame
 * from the eport, inserts the exchange into the solicited-OXID hash
 * (keyed by xch_oxid), fills in the FC frame header fields and transmits
 * the frame.  Returns FCT_FAILURE only if frame allocation fails;
 * otherwise FCT_SUCCESS after handing the frame to eport_tx_frame.
 */
static fct_status_t
fcoet_send_sol_ct(fct_cmd_t *cmd)
{
	fcoe_frame_t		*frm;
	fcoet_exchange_t	*xch;

	/* Set up the exchange state for a fresh solicited request. */
	xch = CMD2XCH(cmd);
	xch->xch_flags = 0;
	xch->xch_ss = CMD2SS(cmd);
	xch->xch_cmd = cmd;
	xch->xch_current_seq = NULL;
	xch->xch_left_data_size = 0;
	xch->xch_sequence_no = 0;
	xch->xch_start_time = ddi_get_lbolt();
	/* RX_ID 0xFFFF means "unassigned" until the responder picks one. */
	xch->xch_rxid = 0xFFFF;
	/*
	 * Allocate the next OXID; 0xFFFF is reserved, so take another
	 * value if the counter happens to land on it.
	 */
	xch->xch_oxid = atomic_add_16_nv(&xch->xch_ss->ss_next_sol_oxid, 1);
	if (xch->xch_oxid == 0xFFFF) {
		xch->xch_oxid =
		    atomic_add_16_nv(&xch->xch_ss->ss_next_sol_oxid, 1);
	}

	frm = CMD2SS(cmd)->ss_eport->eport_alloc_frame(CMD2SS(cmd)->ss_eport,
	    CMD2ELS(cmd)->els_req_size + FCFH_SIZE, NULL);
	if (frm == NULL) {
		ASSERT(0);
		return (FCT_FAILURE);
	} else {
		fcoet_init_tfm(frm, CMD2XCH(cmd));
		bzero(frm->frm_payload, frm->frm_payload_size);
	}

	/*
	 * Track the exchange by OXID so the response can be matched.
	 * NOTE(review): the insert result is ignored; presumably an OXID
	 * collision is considered impossible here — confirm.
	 */
	(void) mod_hash_insert(FRM2SS(frm)->ss_sol_oxid_hash,
	    (mod_hash_key_t)(uintptr_t)xch->xch_oxid, (mod_hash_val_t)xch);
	xch->xch_flags |= XCH_FLAG_IN_HASH_TABLE;
	bcopy(CMD2ELS(cmd)->els_req_payload, frm->frm_payload,
	    frm->frm_payload_size);

	/* Fill the FC frame header; R_CTL 0x2 / TYPE 0x20 identify CT. */
	FFM_R_CTL(0x2, frm);
	FRM2TFM(frm)->tfm_rctl = 0x2;
	FFM_TYPE(0x20, frm);
	FFM_F_CTL(0x290000, frm);
	FFM_OXID(xch->xch_oxid, frm);
	FFM_RXID(xch->xch_rxid, frm);
	FFM_S_ID(cmd->cmd_lportid, frm);
	FFM_D_ID(cmd->cmd_rportid, frm);
	CMD2SS(cmd)->ss_eport->eport_tx_frame(frm);
	return (FCT_SUCCESS);
}
/*
 * Send a solicited FLOGI to the fabric controller (D_ID 0xFFFFFE).
 *
 * Allocates a solicited-ELS fct_cmd_t with an embedded fcoet_exchange_t,
 * records the exchange in ss_sol_flogi (for matching the response) and in
 * the solicited-OXID hash, then builds and transmits the FLOGI frame with
 * the port's common/class-3 service parameters and PWWN/NWWN.
 */
void
fcoet_send_sol_flogi(fcoet_soft_state_t *ss)
{
	fcoet_exchange_t	*xch;
	fct_cmd_t		*cmd;
	fct_els_t		*els;
	fcoe_frame_t		*frm;

	/*
	 * FCT will initialize fct_cmd_t
	 * Initialize fcoet_exchange
	 */
	cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_SOL_ELS,
	    sizeof (fcoet_exchange_t), 0);
	if (cmd == NULL) {
		/*
		 * Allocation can fail; bail out rather than dereference
		 * a NULL command (previously unchecked).
		 */
		return;
	}
	xch = CMD2XCH(cmd);
	els = CMD2ELS(cmd);

	/* OXID 0xFFFF is reserved; skip it if the counter lands there. */
	xch->xch_oxid = atomic_add_16_nv(&ss->ss_next_sol_oxid, 1);
	if (xch->xch_oxid == 0xFFFF) {
		xch->xch_oxid = atomic_add_16_nv(&ss->ss_next_sol_oxid, 1);
	}
	xch->xch_rxid = 0xFFFF;
	xch->xch_flags = 0;
	xch->xch_ss = ss;
	xch->xch_cmd = cmd;
	xch->xch_current_seq = NULL;
	xch->xch_start_time = ddi_get_lbolt();

	/*
	 * Keep it to compare with response
	 */
	ss->ss_sol_flogi = xch;

	els->els_resp_alloc_size = 116;
	els->els_resp_size = 116;
	els->els_resp_payload = (uint8_t *)
	    kmem_zalloc(els->els_resp_size, KM_SLEEP);
	(void) mod_hash_insert(xch->xch_ss->ss_sol_oxid_hash,
	    (mod_hash_key_t)(uintptr_t)xch->xch_oxid, (mod_hash_val_t)xch);
	xch->xch_flags |= XCH_FLAG_IN_HASH_TABLE;
	atomic_or_32(&ss->ss_flags, SS_FLAG_DELAY_PLOGI);

	/*
	 * FCoE will initialize fcoe_frame_t
	 */
	frm = ss->ss_eport->eport_alloc_frame(ss->ss_eport,
	    FLOGI_REQ_PAYLOAD_SIZE + FCFH_SIZE, NULL);
	if (frm == NULL) {
		ASSERT(0);
		return;
	} else {
		fcoet_init_tfm(frm, xch);
		bzero(frm->frm_payload, frm->frm_payload_size);
	}

	/* R_CTL 0x22 / TYPE 0x01: extended link services request. */
	FFM_R_CTL(0x22, frm);
	FRM2TFM(frm)->tfm_rctl = 0x22;
	FFM_TYPE(0x01, frm);
	FFM_F_CTL(0x290000, frm);
	FFM_OXID(xch->xch_oxid, frm);
	FFM_RXID(xch->xch_rxid, frm);
	FFM_D_ID(0xfffffe, frm);

	frm->frm_payload[0] = ELS_OP_FLOGI;
	/* Common Service Parameters */
	frm->frm_payload[4] = 0x20;
	frm->frm_payload[5] = 0x08;
	frm->frm_payload[6] = 0x0;
	frm->frm_payload[7] = 0x03;
	/* N_PORT */
	frm->frm_payload[8] = 0x88;
	frm->frm_payload[9] = 0x00;
	frm->frm_payload[10] = 0x08;
	frm->frm_payload[11] = 0x0;
	frm->frm_payload[12] = 0x0;
	frm->frm_payload[13] = 0xff;
	frm->frm_payload[14] = 0x0;
	frm->frm_payload[15] = 0x03;
	frm->frm_payload[16] = 0x0;
	frm->frm_payload[17] = 0x0;
	frm->frm_payload[18] = 0x07;
	frm->frm_payload[19] = 0xd0;
	/* PWWN and NWWN */
	frm->frm_payload[20] = 0x0;
	bcopy(ss->ss_eport->eport_portwwn, frm->frm_payload+20, 8);
	bcopy(ss->ss_eport->eport_nodewwn, frm->frm_payload+28, 8);

	/* Class 3 Service Parameters */
	frm->frm_payload[68] = 0x88;
	frm->frm_payload[74] = 0x08;
	frm->frm_payload[77] = 0xff;
	ss->ss_eport->eport_tx_frame(frm);
	xch->xch_flags |= XCH_FLAG_NONFCP_REQ_SENT;
}
/* ARGSUSED */ int vnic_dev_create(datalink_id_t vnic_id, datalink_id_t linkid, vnic_mac_addr_type_t *vnic_addr_type, int *mac_len, uchar_t *mac_addr, int *mac_slot, uint_t mac_prefix_len, uint16_t vid, vrid_t vrid, int af, mac_resource_props_t *mrp, uint32_t flags, vnic_ioc_diag_t *diag, cred_t *credp) { vnic_t *vnic; mac_register_t *mac; int err; boolean_t is_anchor = ((flags & VNIC_IOC_CREATE_ANCHOR) != 0); char vnic_name[MAXNAMELEN]; const mac_info_t *minfop; uint32_t req_hwgrp_flag = B_FALSE; *diag = VNIC_IOC_DIAG_NONE; rw_enter(&vnic_lock, RW_WRITER); /* does a VNIC with the same id already exist? */ err = mod_hash_find(vnic_hash, VNIC_HASH_KEY(vnic_id), (mod_hash_val_t *)&vnic); if (err == 0) { rw_exit(&vnic_lock); return (EEXIST); } vnic = kmem_cache_alloc(vnic_cache, KM_NOSLEEP); if (vnic == NULL) { rw_exit(&vnic_lock); return (ENOMEM); } bzero(vnic, sizeof (*vnic)); vnic->vn_id = vnic_id; vnic->vn_link_id = linkid; vnic->vn_vrid = vrid; vnic->vn_af = af; if (!is_anchor) { if (linkid == DATALINK_INVALID_LINKID) { err = EINVAL; goto bail; } /* * Open the lower MAC and assign its initial bandwidth and * MAC address. We do this here during VNIC creation and * do not wait until the upper MAC client open so that we * can validate the VNIC creation parameters (bandwidth, * MAC address, etc) and reserve a factory MAC address if * one was requested. */ err = mac_open_by_linkid(linkid, &vnic->vn_lower_mh); if (err != 0) goto bail; /* * VNIC(vlan) over VNICs(vlans) is not supported. 
*/ if (mac_is_vnic(vnic->vn_lower_mh)) { err = EINVAL; goto bail; } /* only ethernet support for now */ minfop = mac_info(vnic->vn_lower_mh); if (minfop->mi_nativemedia != DL_ETHER) { err = ENOTSUP; goto bail; } (void) dls_mgmt_get_linkinfo(vnic_id, vnic_name, NULL, NULL, NULL); err = mac_client_open(vnic->vn_lower_mh, &vnic->vn_mch, vnic_name, MAC_OPEN_FLAGS_IS_VNIC); if (err != 0) goto bail; /* assign a MAC address to the VNIC */ err = vnic_unicast_add(vnic, *vnic_addr_type, mac_slot, mac_prefix_len, mac_len, mac_addr, flags, diag, vid, req_hwgrp_flag); if (err != 0) { vnic->vn_muh = NULL; if (diag != NULL && req_hwgrp_flag) *diag = VNIC_IOC_DIAG_NO_HWRINGS; goto bail; } /* register to receive notification from underlying MAC */ vnic->vn_mnh = mac_notify_add(vnic->vn_lower_mh, vnic_notify_cb, vnic); *vnic_addr_type = vnic->vn_addr_type; vnic->vn_addr_len = *mac_len; vnic->vn_vid = vid; bcopy(mac_addr, vnic->vn_addr, vnic->vn_addr_len); if (vnic->vn_addr_type == VNIC_MAC_ADDR_TYPE_FACTORY) vnic->vn_slot_id = *mac_slot; /* * Set the initial VNIC capabilities. If the VNIC is created * over MACs which does not support nactive vlan, disable * VNIC's hardware checksum capability if its VID is not 0, * since the underlying MAC would get the hardware checksum * offset wrong in case of VLAN packets. */ if (vid == 0 || !mac_capab_get(vnic->vn_lower_mh, MAC_CAPAB_NO_NATIVEVLAN, NULL)) { if (!mac_capab_get(vnic->vn_lower_mh, MAC_CAPAB_HCKSUM, &vnic->vn_hcksum_txflags)) vnic->vn_hcksum_txflags = 0; } else { vnic->vn_hcksum_txflags = 0; } } /* register with the MAC module */ if ((mac = mac_alloc(MAC_VERSION)) == NULL) goto bail; mac->m_type_ident = MAC_PLUGIN_IDENT_ETHER; mac->m_driver = vnic; mac->m_dip = vnic_get_dip(); mac->m_instance = (uint_t)-1; mac->m_src_addr = vnic->vn_addr; mac->m_callbacks = &vnic_m_callbacks; if (!is_anchor) { /* * If this is a VNIC based VLAN, then we check for the * margin unless it has been created with the force * flag. 
If we are configuring a VLAN over an etherstub, * we don't check the margin even if force is not set. */ if (vid == 0 || (flags & VNIC_IOC_CREATE_FORCE) != 0) { if (vid != VLAN_ID_NONE) vnic->vn_force = B_TRUE; /* * As the current margin size of the underlying mac is * used to determine the margin size of the VNIC * itself, request the underlying mac not to change * to a smaller margin size. */ err = mac_margin_add(vnic->vn_lower_mh, &vnic->vn_margin, B_TRUE); ASSERT(err == 0); } else { vnic->vn_margin = VLAN_TAGSZ; err = mac_margin_add(vnic->vn_lower_mh, &vnic->vn_margin, B_FALSE); if (err != 0) { mac_free(mac); if (diag != NULL) *diag = VNIC_IOC_DIAG_MACMARGIN_INVALID; goto bail; } } mac_sdu_get(vnic->vn_lower_mh, &mac->m_min_sdu, &mac->m_max_sdu); err = mac_mtu_add(vnic->vn_lower_mh, &mac->m_max_sdu, B_FALSE); if (err != 0) { VERIFY(mac_margin_remove(vnic->vn_lower_mh, vnic->vn_margin) == 0); mac_free(mac); if (diag != NULL) *diag = VNIC_IOC_DIAG_MACMTU_INVALID; goto bail; } vnic->vn_mtu = mac->m_max_sdu; } else { vnic->vn_margin = VLAN_TAGSZ; mac->m_min_sdu = 1; mac->m_max_sdu = ANCHOR_VNIC_MAX_MTU; vnic->vn_mtu = ANCHOR_VNIC_MAX_MTU; } mac->m_margin = vnic->vn_margin; err = mac_register(mac, &vnic->vn_mh); mac_free(mac); if (err != 0) { if (!is_anchor) { VERIFY(mac_mtu_remove(vnic->vn_lower_mh, vnic->vn_mtu) == 0); VERIFY(mac_margin_remove(vnic->vn_lower_mh, vnic->vn_margin) == 0); } goto bail; } /* Set the VNIC's MAC in the client */ if (!is_anchor) { mac_set_upper_mac(vnic->vn_mch, vnic->vn_mh, mrp); if (mrp != NULL) { if ((mrp->mrp_mask & MRP_RX_RINGS) != 0 || (mrp->mrp_mask & MRP_TX_RINGS) != 0) { req_hwgrp_flag = B_TRUE; } err = mac_client_set_resources(vnic->vn_mch, mrp); if (err != 0) { VERIFY(mac_mtu_remove(vnic->vn_lower_mh, vnic->vn_mtu) == 0); VERIFY(mac_margin_remove(vnic->vn_lower_mh, vnic->vn_margin) == 0); (void) mac_unregister(vnic->vn_mh); goto bail; } } } err = dls_devnet_create(vnic->vn_mh, vnic->vn_id, crgetzoneid(credp)); if (err != 0) { 
VERIFY(is_anchor || mac_margin_remove(vnic->vn_lower_mh, vnic->vn_margin) == 0); if (!is_anchor) { VERIFY(mac_mtu_remove(vnic->vn_lower_mh, vnic->vn_mtu) == 0); VERIFY(mac_margin_remove(vnic->vn_lower_mh, vnic->vn_margin) == 0); } (void) mac_unregister(vnic->vn_mh); goto bail; } /* add new VNIC to hash table */ err = mod_hash_insert(vnic_hash, VNIC_HASH_KEY(vnic_id), (mod_hash_val_t)vnic); ASSERT(err == 0); vnic_count++; /* * Now that we've enabled this VNIC, we should go through and update the * link state by setting it to our parents. */ vnic->vn_enabled = B_TRUE; if (is_anchor) { mac_link_update(vnic->vn_mh, LINK_STATE_UP); } else { mac_link_update(vnic->vn_mh, mac_client_stat_get(vnic->vn_mch, MAC_STAT_LINK_STATE)); } rw_exit(&vnic_lock); return (0); bail: rw_exit(&vnic_lock); if (!is_anchor) { if (vnic->vn_mnh != NULL) (void) mac_notify_remove(vnic->vn_mnh, B_TRUE); if (vnic->vn_muh != NULL) (void) mac_unicast_remove(vnic->vn_mch, vnic->vn_muh); if (vnic->vn_mch != NULL) mac_client_close(vnic->vn_mch, MAC_CLOSE_FLAGS_IS_VNIC); if (vnic->vn_lower_mh != NULL) mac_close(vnic->vn_lower_mh); } kmem_cache_free(vnic_cache, vnic); return (err); }
/*
 * Add a new multicast entry.
 *
 * Search hash table based on address. If match found then
 * update associated val (which is chain of ports), otherwise
 * create new key/val (addr/port) pair and insert into table.
 *
 * Returns 0 on success, 1 on insertion failure or when the target
 * (port or local device) is already associated with the address.
 */
int
vsw_add_mcst(vsw_t *vswp, uint8_t devtype, uint64_t addr, void *arg)
{
	int		dup = 0;
	int		rv = 0;
	mfdb_ent_t	*ment = NULL;
	mfdb_ent_t	*tmp_ent = NULL;
	mfdb_ent_t	*new_ent = NULL;
	void		*tgt = NULL;

	if (devtype == VSW_VNETPORT) {
		/*
		 * Being invoked from a vnet.
		 */
		ASSERT(arg != NULL);
		tgt = arg;
		D2(NULL, "%s: port %d : address 0x%llx", __func__,
		    ((vsw_port_t *)arg)->p_instance, addr);
	} else {
		/*
		 * We are being invoked via the m_multicst mac entry
		 * point.
		 */
		D2(NULL, "%s: address 0x%llx", __func__, addr);
		tgt = (void *)vswp;
	}

	WRITE_ENTER(&vswp->mfdbrw);
	if (mod_hash_find(vswp->mfdb, (mod_hash_key_t)addr,
	    (mod_hash_val_t *)&ment) != 0) {

		/* address not currently in table */
		ment = kmem_alloc(sizeof (mfdb_ent_t), KM_SLEEP);
		ment->d_addr = (void *)tgt;
		ment->d_type = devtype;
		ment->nextp = NULL;

		if (mod_hash_insert(vswp->mfdb, (mod_hash_key_t)addr,
		    (mod_hash_val_t)ment) != 0) {
			DERR(vswp, "%s: hash table insertion failed", __func__);
			kmem_free(ment, sizeof (mfdb_ent_t));
			rv = 1;
		} else {
			D2(vswp, "%s: added initial entry for 0x%llx to "
			    "table", __func__, addr);
		}
	} else {
		/*
		 * Address in table. Check to see if specified port
		 * is already associated with the address. If not add
		 * it now.
		 */
		tmp_ent = ment;
		while (tmp_ent != NULL) {
			if (tmp_ent->d_addr == (void *)tgt) {
				if (devtype == VSW_VNETPORT) {
					DERR(vswp, "%s: duplicate port entry "
					    "found for portid %ld and key "
					    "0x%llx", __func__,
					    ((vsw_port_t *)arg)->p_instance,
					    addr);
				} else {
					/*
					 * Fixed: the two string literals
					 * previously concatenated without a
					 * separating space ("foundfor").
					 */
					DERR(vswp, "%s: duplicate entry found "
					    "for key 0x%llx", __func__, addr);
				}
				rv = 1;
				dup = 1;
				break;
			}
			tmp_ent = tmp_ent->nextp;
		}

		/*
		 * Port not on list so add it to end now.
		 */
		if (0 == dup) {
			D2(vswp, "%s: added entry for 0x%llx to table",
			    __func__, addr);
			new_ent = kmem_alloc(sizeof (mfdb_ent_t), KM_SLEEP);
			new_ent->d_addr = (void *)tgt;
			new_ent->d_type = devtype;
			new_ent->nextp = NULL;

			tmp_ent = ment;
			while (tmp_ent->nextp != NULL)
				tmp_ent = tmp_ent->nextp;

			tmp_ent->nextp = new_ent;
		}
	}

	RW_EXIT(&vswp->mfdbrw);
	return (rv);
}
/*
 * Register a GLDv3 MAC with the framework.
 *
 * On success *mhp receives the new mac_handle_t and 0 is returned.  On
 * any failure, control transfers to the `fail' label which tears down
 * everything built so far (minor nodes, factory addresses, rings, the
 * notify thread, kstats, plugin data, the held minor) and returns an
 * errno-style value; err defaults to EINVAL for the unlabeled failure
 * paths.
 */
/* ARGSUSED */
int
mac_register(mac_register_t *mregp, mac_handle_t *mhp)
{
	mac_impl_t		*mip;
	mactype_t		*mtype;
	int			err = EINVAL;
	struct devnames		*dnp = NULL;
	uint_t			instance;
	boolean_t		style1_created = B_FALSE;
	boolean_t		style2_created = B_FALSE;
	char			*driver;
	minor_t			minor = 0;

	/* A successful call to mac_init_ops() sets the DN_GLDV3_DRIVER flag. */
	if (!GLDV3_DRV(ddi_driver_major(mregp->m_dip)))
		return (EINVAL);

	/* Find the required MAC-Type plugin. */
	if ((mtype = mactype_getplugin(mregp->m_type_ident)) == NULL)
		return (EINVAL);

	/* Create a mac_impl_t to represent this MAC. */
	mip = kmem_cache_alloc(i_mac_impl_cachep, KM_SLEEP);

	/*
	 * The mac is not ready for open yet.
	 */
	mip->mi_state_flags |= MIS_DISABLED;

	/*
	 * When a mac is registered, the m_instance field can be set to:
	 *
	 *  0:	Get the mac's instance number from m_dip.
	 *	This is usually used for physical device dips.
	 *
	 *  [1 .. MAC_MAX_MINOR-1]: Use the value as the mac's instance number.
	 *	For example, when an aggregation is created with the key option,
	 *	"key" will be used as the instance number.
	 *
	 *  -1: Assign an instance number from [MAC_MAX_MINOR .. MAXMIN-1].
	 *	This is often used when a MAC of a virtual link is registered
	 *	(e.g., aggregation when "key" is not specified, or vnic).
	 *
	 * Note that the instance number is used to derive the mi_minor field
	 * of mac_impl_t, which will then be used to derive the name of kstats
	 * and the devfs nodes.  The first 2 cases are needed to preserve
	 * backward compatibility.
	 */
	switch (mregp->m_instance) {
	case 0:
		instance = ddi_get_instance(mregp->m_dip);
		break;
	case ((uint_t)-1):
		minor = mac_minor_hold(B_TRUE);
		if (minor == 0) {
			err = ENOSPC;
			goto fail;
		}
		instance = minor - 1;
		break;
	default:
		instance = mregp->m_instance;
		if (instance >= MAC_MAX_MINOR) {
			err = EINVAL;
			goto fail;
		}
		break;
	}

	mip->mi_minor = (minor_t)(instance + 1);
	mip->mi_dip = mregp->m_dip;
	mip->mi_clients_list = NULL;
	mip->mi_nclients = 0;

	/* Set the default IEEE Port VLAN Identifier */
	mip->mi_pvid = 1;

	/* Default bridge link learning protection values */
	mip->mi_llimit = 1000;
	mip->mi_ldecay = 200;

	driver = (char *)ddi_driver_name(mip->mi_dip);

	/* Construct the MAC name as <drvname><instance> */
	(void) snprintf(mip->mi_name, sizeof (mip->mi_name), "%s%d",
	    driver, instance);

	mip->mi_driver = mregp->m_driver;

	mip->mi_type = mtype;
	mip->mi_margin = mregp->m_margin;
	mip->mi_info.mi_media = mtype->mt_type;
	mip->mi_info.mi_nativemedia = mtype->mt_nativetype;
	/* Validate SDU bounds: min < max, multicast within [min, max]. */
	if (mregp->m_max_sdu <= mregp->m_min_sdu)
		goto fail;
	if (mregp->m_multicast_sdu == 0)
		mregp->m_multicast_sdu = mregp->m_max_sdu;
	if (mregp->m_multicast_sdu < mregp->m_min_sdu ||
	    mregp->m_multicast_sdu > mregp->m_max_sdu)
		goto fail;
	mip->mi_sdu_min = mregp->m_min_sdu;
	mip->mi_sdu_max = mregp->m_max_sdu;
	mip->mi_sdu_multicast = mregp->m_multicast_sdu;
	mip->mi_info.mi_addr_length = mip->mi_type->mt_addr_length;
	/*
	 * If the media supports a broadcast address, cache a pointer to it
	 * in the mac_info_t so that upper layers can use it.
	 */
	mip->mi_info.mi_brdcst_addr = mip->mi_type->mt_brdcst_addr;

	mip->mi_v12n_level = mregp->m_v12n;

	/*
	 * Copy the unicast source address into the mac_info_t, but only if
	 * the MAC-Type defines a non-zero address length.  We need to
	 * handle MAC-Types that have an address length of 0
	 * (point-to-point protocol MACs for example).
	 */
	if (mip->mi_type->mt_addr_length > 0) {
		if (mregp->m_src_addr == NULL)
			goto fail;
		mip->mi_info.mi_unicst_addr =
		    kmem_alloc(mip->mi_type->mt_addr_length, KM_SLEEP);
		bcopy(mregp->m_src_addr, mip->mi_info.mi_unicst_addr,
		    mip->mi_type->mt_addr_length);

		/*
		 * Copy the fixed 'factory' MAC address from the immutable
		 * info.  This is taken to be the MAC address currently in
		 * use.
		 */
		bcopy(mip->mi_info.mi_unicst_addr, mip->mi_addr,
		    mip->mi_type->mt_addr_length);

		/*
		 * At this point, we should set up the classification
		 * rules etc but we delay it till mac_open() so that
		 * the resource discovery has taken place and we
		 * know someone wants to use the device. Otherwise
		 * memory gets allocated for Rx ring structures even
		 * during probe.
		 */

		/* Copy the destination address if one is provided. */
		if (mregp->m_dst_addr != NULL) {
			bcopy(mregp->m_dst_addr, mip->mi_dstaddr,
			    mip->mi_type->mt_addr_length);
			mip->mi_dstaddr_set = B_TRUE;
		}
	} else if (mregp->m_src_addr != NULL) {
		/* zero-length address type must not supply a source addr */
		goto fail;
	}

	/*
	 * The format of the m_pdata is specific to the plugin.  It is
	 * passed in as an argument to all of the plugin callbacks.  The
	 * driver can update this information by calling
	 * mac_pdata_update().
	 */
	if (mip->mi_type->mt_ops.mtops_ops & MTOPS_PDATA_VERIFY) {
		/*
		 * Verify if the supplied plugin data is valid.  Note that
		 * even if the caller passed in a NULL pointer as plugin data,
		 * we still need to verify if that's valid as the plugin may
		 * require plugin data to function.
		 */
		if (!mip->mi_type->mt_ops.mtops_pdata_verify(mregp->m_pdata,
		    mregp->m_pdata_size)) {
			goto fail;
		}
		if (mregp->m_pdata != NULL) {
			mip->mi_pdata =
			    kmem_alloc(mregp->m_pdata_size, KM_SLEEP);
			bcopy(mregp->m_pdata, mip->mi_pdata,
			    mregp->m_pdata_size);
			mip->mi_pdata_size = mregp->m_pdata_size;
		}
	} else if (mregp->m_pdata != NULL) {
		/*
		 * The caller supplied non-NULL plugin data, but the plugin
		 * does not recognize plugin data.
		 */
		err = EINVAL;
		goto fail;
	}

	/*
	 * Register the private properties.
	 */
	mac_register_priv_prop(mip, mregp->m_priv_props);

	/*
	 * Stash the driver callbacks into the mac_impl_t, but first sanity
	 * check to make sure all mandatory callbacks are set.
	 */
	if (mregp->m_callbacks->mc_getstat == NULL ||
	    mregp->m_callbacks->mc_start == NULL ||
	    mregp->m_callbacks->mc_stop == NULL ||
	    mregp->m_callbacks->mc_setpromisc == NULL ||
	    mregp->m_callbacks->mc_multicst == NULL) {
		goto fail;
	}
	mip->mi_callbacks = mregp->m_callbacks;

	/* Legacy devices expose their own dev_t; others derive one. */
	if (mac_capab_get((mac_handle_t)mip, MAC_CAPAB_LEGACY,
	    &mip->mi_capab_legacy)) {
		mip->mi_state_flags |= MIS_LEGACY;
		mip->mi_phy_dev = mip->mi_capab_legacy.ml_dev;
	} else {
		mip->mi_phy_dev = makedevice(ddi_driver_major(mip->mi_dip),
		    mip->mi_minor);
	}

	/*
	 * Allocate a notification thread. thread_create blocks for memory
	 * if needed, it never fails.
	 */
	mip->mi_notify_thread = thread_create(NULL, 0, i_mac_notify_thread,
	    mip, 0, &p0, TS_RUN, minclsyspri);

	/*
	 * Initialize the capabilities
	 */
	bzero(&mip->mi_rx_rings_cap, sizeof (mac_capab_rings_t));
	bzero(&mip->mi_tx_rings_cap, sizeof (mac_capab_rings_t));

	if (i_mac_capab_get((mac_handle_t)mip, MAC_CAPAB_VNIC, NULL))
		mip->mi_state_flags |= MIS_IS_VNIC;

	if (i_mac_capab_get((mac_handle_t)mip, MAC_CAPAB_AGGR, NULL))
		mip->mi_state_flags |= MIS_IS_AGGR;

	mac_addr_factory_init(mip);

	/*
	 * Enforce the virtrualization level registered.
	 */
	if (mip->mi_v12n_level & MAC_VIRT_LEVEL1) {
		if (mac_init_rings(mip, MAC_RING_TYPE_RX) != 0 ||
		    mac_init_rings(mip, MAC_RING_TYPE_TX) != 0)
			goto fail;

		/*
		 * The driver needs to register at least rx rings for this
		 * virtualization level.
		 */
		if (mip->mi_rx_groups == NULL)
			goto fail;
	}

	/*
	 * The driver must set mc_unicst entry point to NULL when it advertises
	 * CAP_RINGS for rx groups.
	 */
	if (mip->mi_rx_groups != NULL) {
		if (mregp->m_callbacks->mc_unicst != NULL)
			goto fail;
	} else {
		if (mregp->m_callbacks->mc_unicst == NULL)
			goto fail;
	}

	/*
	 * Initialize MAC addresses. Must be called after mac_init_rings().
	 */
	mac_init_macaddr(mip);

	mip->mi_share_capab.ms_snum = 0;
	if (mip->mi_v12n_level & MAC_VIRT_HIO) {
		(void) mac_capab_get((mac_handle_t)mip, MAC_CAPAB_SHARES,
		    &mip->mi_share_capab);
	}

	/*
	 * Initialize the kstats for this device.
	 */
	mac_driver_stat_create(mip);

	/* Zero out any properties. */
	bzero(&mip->mi_resource_props, sizeof (mac_resource_props_t));

	if (mip->mi_minor <= MAC_MAX_MINOR) {
		/* Create a style-2 DLPI device */
		if (ddi_create_minor_node(mip->mi_dip, driver, S_IFCHR, 0,
		    DDI_NT_NET, CLONE_DEV) != DDI_SUCCESS)
			goto fail;
		style2_created = B_TRUE;

		/* Create a style-1 DLPI device */
		if (ddi_create_minor_node(mip->mi_dip, mip->mi_name, S_IFCHR,
		    mip->mi_minor, DDI_NT_NET, 0) != DDI_SUCCESS)
			goto fail;
		style1_created = B_TRUE;
	}

	mac_flow_l2tab_create(mip, &mip->mi_flow_tab);

	/* Publish the MAC by name; a duplicate name is a hard failure. */
	rw_enter(&i_mac_impl_lock, RW_WRITER);
	if (mod_hash_insert(i_mac_impl_hash,
	    (mod_hash_key_t)mip->mi_name, (mod_hash_val_t)mip) != 0) {
		rw_exit(&i_mac_impl_lock);
		err = EEXIST;
		goto fail;
	}

	/* NOTE(review): dnp is always NULL here; probe arg looks vestigial. */
	DTRACE_PROBE2(mac__register, struct devnames *, dnp,
	    (mac_impl_t *), mip);

	/*
	 * Mark the MAC to be ready for open.
	 */
	mip->mi_state_flags &= ~MIS_DISABLED;
	rw_exit(&i_mac_impl_lock);

	atomic_inc_32(&i_mac_impl_count);

	cmn_err(CE_NOTE, "!%s registered", mip->mi_name);
	*mhp = (mac_handle_t)mip;
	return (0);

fail:
	if (style1_created)
		ddi_remove_minor_node(mip->mi_dip, mip->mi_name);

	if (style2_created)
		ddi_remove_minor_node(mip->mi_dip, driver);

	mac_addr_factory_fini(mip);

	/* Clean up registered MAC addresses */
	mac_fini_macaddr(mip);

	/* Clean up registered rings */
	mac_free_rings(mip, MAC_RING_TYPE_RX);
	mac_free_rings(mip, MAC_RING_TYPE_TX);

	/* Clean up notification thread */
	if (mip->mi_notify_thread != NULL)
		i_mac_notify_exit(mip);

	if (mip->mi_info.mi_unicst_addr != NULL) {
		kmem_free(mip->mi_info.mi_unicst_addr,
		    mip->mi_type->mt_addr_length);
		mip->mi_info.mi_unicst_addr = NULL;
	}

	mac_driver_stat_delete(mip);

	if (mip->mi_type != NULL) {
		atomic_dec_32(&mip->mi_type->mt_ref);
		mip->mi_type = NULL;
	}

	if (mip->mi_pdata != NULL) {
		kmem_free(mip->mi_pdata, mip->mi_pdata_size);
		mip->mi_pdata = NULL;
		mip->mi_pdata_size = 0;
	}

	if (minor != 0) {
		ASSERT(minor > MAC_MAX_MINOR);
		mac_minor_rele(minor);
	}

	mip->mi_state_flags = 0;
	mac_unregister_priv_prop(mip);

	/*
	 * Clear the state before destroying the mac_impl_t
	 */
	mip->mi_state_flags = 0;

	kmem_cache_free(i_mac_impl_cachep, mip);
	return (err);
}
/*
 * For unsolicited exchanges, FCoET is only responsible for allocation of
 * req_payload.  FCT will allocate resp_payload after the exchange is
 * passed on.
 *
 * Creates a fcoet_exchange_t for the received frame (ELS R_CTL 0x22 or
 * FCP command R_CTL 0x06), assigns a unique RX_ID, records the exchange
 * in the unsolicited RX_ID hash, and initializes the fct_cmd_t fields
 * from the frame header.  Returns NULL on allocation failure or an
 * unsupported R_CTL.
 */
static fcoet_exchange_t *
fcoet_create_unsol_exchange(fcoe_frame_t *frm)
{
	uint8_t			r_ctl;
	int			cdb_size;
	fcoet_exchange_t	*xch, *xch_tmp;
	fct_cmd_t		*cmd;
	fcoe_fcp_cmnd_t		*ffc;
	uint32_t		task_expected_len = 0;

	r_ctl = FRM_R_CTL(frm);
	switch (r_ctl) {
	case 0x22:
		/*
		 * FCoET's unsolicited ELS
		 */
		cmd = (fct_cmd_t *)fct_alloc(FCT_STRUCT_CMD_RCVD_ELS,
		    GET_STRUCT_SIZE(fcoet_exchange_t) +
		    frm->frm_payload_size, 0);
		if (cmd == NULL) {
			FCOET_EXT_LOG(0, "can't get cmd");
			return (NULL);
		}
		break;

	case 0x06:
		/*
		 * FCoET's unsolicited SCSI cmd
		 */
		cdb_size = 16;	/* need improve later */
		cmd = fct_scsi_task_alloc(FRM2SS(frm)->ss_port,
		    FCT_HANDLE_NONE, FRM_S_ID(frm), frm->frm_payload,
		    cdb_size, STMF_TASK_EXT_NONE);
		if (cmd == NULL) {
			FCOET_EXT_LOG(0, "can't get fcp cmd");
			return (NULL);
		}

		ffc = (fcoe_fcp_cmnd_t *)frm->frm_payload;
		task_expected_len = FCOE_B2V_4(ffc->ffc_fcp_dl);
		break;

	default:
		FCOET_EXT_LOG(0, "unsupported R_CTL: %x", r_ctl);
		return (NULL);
	}

	/*
	 * xch initialization
	 */
	xch = CMD2XCH(cmd);
	xch->xch_oxid = FRM_OXID(frm);
	xch->xch_flags = 0;
	xch->xch_ss = FRM2SS(frm);
	xch->xch_cmd = cmd;
	xch->xch_current_seq = NULL;
	xch->xch_left_data_size = 0;
	if (task_expected_len) {
		/* pre-size the data buffer pointer array for the transfer */
		xch->xch_dbuf_num =
		    (task_expected_len + FCOET_MAX_DBUF_LEN - 1) /
		    FCOET_MAX_DBUF_LEN;
		xch->xch_dbufs = kmem_zalloc(
		    xch->xch_dbuf_num * sizeof (stmf_data_buf_t *), KM_SLEEP);
	}
	xch->xch_start_time = ddi_get_lbolt();

	/*
	 * Allocate an RX_ID that is not already in the unsolicited hash.
	 * 0xFFFF is reserved (unassigned), so skip it when the counter
	 * wraps.  Note the correctly typed mod_hash_val_t * out-argument.
	 */
	do {
		xch->xch_rxid = atomic_add_16_nv(
		    &xch->xch_ss->ss_next_unsol_rxid, 1);
		if (xch->xch_rxid == 0xFFFF) {
			xch->xch_rxid = atomic_add_16_nv(
			    &xch->xch_ss->ss_next_unsol_rxid, 1);
		}
	} while (mod_hash_find(FRM2SS(frm)->ss_unsol_rxid_hash,
	    (mod_hash_key_t)(intptr_t)xch->xch_rxid,
	    (mod_hash_val_t *)&xch_tmp) == 0);

	xch->xch_sequence_no = 0;
	xch->xch_ref = 0;
	(void) mod_hash_insert(xch->xch_ss->ss_unsol_rxid_hash,
	    (mod_hash_key_t)(intptr_t)xch->xch_rxid, (mod_hash_val_t)xch);
	xch->xch_flags |= XCH_FLAG_IN_HASH_TABLE;

	/*
	 * cmd initialization
	 */
	cmd->cmd_port = FRM2SS(frm)->ss_port;
	cmd->cmd_rp_handle = FCT_HANDLE_NONE;
	cmd->cmd_rportid = FRM_S_ID(frm);
	cmd->cmd_lportid = FRM_D_ID(frm);
	cmd->cmd_oxid = xch->xch_oxid;
	cmd->cmd_rxid = xch->xch_rxid;

	fcoet_init_tfm(frm, xch);
	return (xch);
}