/*
 * Remove vlan ids of the given vsw device or port from its hash table.
 */
void
vsw_vlan_remove_ids(void *arg, int type)
{
	mod_hash_val_t	vp;
	int		rv;
	int		i;

	if (type == VSW_LOCALDEV) {
		vsw_t *vswp = (vsw_t *)arg;

		rv = vsw_vlan_lookup(vswp->vlan_hashp, vswp->pvid);
		if (rv == B_TRUE) {
			rv = mod_hash_remove(vswp->vlan_hashp,
			    (mod_hash_key_t)VLAN_ID_KEY(vswp->pvid),
			    (mod_hash_val_t *)&vp);
			ASSERT(rv == 0);
		}

		for (i = 0; i < vswp->nvids; i++) {
			rv = vsw_vlan_lookup(vswp->vlan_hashp,
			    vswp->vids[i].vl_vid);
			if (rv == B_TRUE) {
				rv = mod_hash_remove(vswp->vlan_hashp,
				    (mod_hash_key_t)VLAN_ID_KEY(
				    vswp->vids[i].vl_vid),
				    (mod_hash_val_t *)&vp);
				ASSERT(rv == 0);
			}
		}
	} else if (type == VSW_VNETPORT) {
		vsw_port_t *portp = (vsw_port_t *)arg;

		rv = vsw_vlan_lookup(portp->vlan_hashp, portp->pvid);
		if (rv == B_TRUE) {
			rv = mod_hash_remove(portp->vlan_hashp,
			    (mod_hash_key_t)VLAN_ID_KEY(portp->pvid),
			    (mod_hash_val_t *)&vp);
			ASSERT(rv == 0);
		}

		for (i = 0; i < portp->nvids; i++) {
			rv = vsw_vlan_lookup(portp->vlan_hashp,
			    portp->vids[i].vl_vid);
			if (rv == B_TRUE) {
				rv = mod_hash_remove(portp->vlan_hashp,
				    (mod_hash_key_t)VLAN_ID_KEY(
				    portp->vids[i].vl_vid),
				    (mod_hash_val_t *)&vp);
				ASSERT(rv == 0);
			}
		}
	} else {
		return;
	}
}
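
For context, here is a minimal sketch of the inverse operation: populating the same id-keyed mod_hash with a device's pvid and vid list via mod_hash_insert() from <sys/modhash.h>. The helper name and the stored value are illustrative assumptions that mirror vsw_vlan_remove_ids() above; this is not the actual vsw implementation.

/*
 * Illustrative sketch only (hypothetical helper name): add the pvid and
 * vid list of a vsw device to its vlan hash. The hash is used only as a
 * membership set here, so a constant value is stored.
 */
static void
vsw_vlan_add_ids_sketch(vsw_t *vswp)
{
	int i;

	(void) mod_hash_insert(vswp->vlan_hashp,
	    (mod_hash_key_t)VLAN_ID_KEY(vswp->pvid),
	    (mod_hash_val_t)(uintptr_t)B_TRUE);

	for (i = 0; i < vswp->nvids; i++) {
		(void) mod_hash_insert(vswp->vlan_hashp,
		    (mod_hash_key_t)VLAN_ID_KEY(vswp->vids[i].vl_vid),
		    (mod_hash_val_t)(uintptr_t)B_TRUE);
	}
}
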
/*
 * Remove an entry from FDB.
 */
void
vsw_fdbe_del(vsw_t *vswp, struct ether_addr *eaddr)
{
	uint64_t	addr = 0;
	vsw_fdbe_t	*fp;
	int		rv;

	KEY_HASH(addr, eaddr);

	/*
	 * Remove the entry from the fdb hash table.
	 * This prevents further references to this fdb entry.
	 */
	rv = mod_hash_remove(vswp->fdb_hashp, (mod_hash_key_t)addr,
	    (mod_hash_val_t *)&fp);
	if (rv != 0) {
		/* invalid key? */
		return;
	}

	/*
	 * If any threads are still holding references taken before the
	 * entry was removed from the hash table, wait for the reference
	 * count to drop to zero before freeing it.
	 */
	while (fp->refcnt != 0) {
		delay(drv_usectohz(vsw_fdbe_refcnt_delay));
	}

	kmem_free(fp, sizeof (*fp));
}
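
The drain loop above only works if references are taken atomically with the lookup. A minimal sketch of that counterpart follows, using mod_hash_find_cb() from <sys/modhash.h> (whose callback runs while the hash bucket is locked) and the atomic operations from <sys/atomic.h>. The helper names are hypothetical and assume refcnt is a uint32_t; the real vsw code may differ.

/*
 * Illustrative sketch only (hypothetical helper names): take a reference
 * on an fdb entry atomically with the lookup, and release it later. Since
 * vsw_fdbe_del() removes the entry from the hash first, no new holds can
 * be taken, and its drain loop only waits for existing holders to call
 * the release routine.
 */
static void
vsw_fdbe_hold_cb(mod_hash_key_t key, mod_hash_val_t val)
{
	vsw_fdbe_t *fp = (vsw_fdbe_t *)val;

	atomic_inc_32(&fp->refcnt);	/* assumes refcnt is a uint32_t */
}

static vsw_fdbe_t *
vsw_fdbe_hold_sketch(vsw_t *vswp, uint64_t addr)
{
	mod_hash_val_t val;

	if (mod_hash_find_cb(vswp->fdb_hashp, (mod_hash_key_t)addr, &val,
	    vsw_fdbe_hold_cb) != 0)
		return (NULL);
	return ((vsw_fdbe_t *)val);
}

static void
vsw_fdbe_rele_sketch(vsw_fdbe_t *fp)
{
	atomic_dec_32(&fp->refcnt);
}
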
static int
fcoet_process_sol_abts_rjt(fcoe_frame_t *frm)
{
	fcoet_exchange_t	*xch = NULL;
	uint16_t		sol_oxid;

	sol_oxid = FRM_OXID(frm);
	if (mod_hash_remove(FRM2SS(frm)->ss_sol_oxid_hash,
	    (mod_hash_key_t)(intptr_t)sol_oxid,
	    (mod_hash_val_t *)&xch) != 0) {
		/*
		 * The ABTS for FLOGI may already have been removed from
		 * ss_sol_oxid_hash in fcoet_watch_handle_sol_flogi; this
		 * will be improved later.
		 */
		return (FCOE_SUCCESS);
	}

	xch->xch_flags &= ~XCH_FLAG_IN_HASH_TABLE;
	if (!FRM_IS_LAST_FRAME(frm)) {
		FCOET_LOG("fcoet_process_sol_abts_rjt",
		    "this kind of frame is not supported");
		return (FCOE_FAILURE);
	}

	FCOET_LOG("fcoet_process_sol_abts_rjt",
	    "ABTS_RJT received, rjt reason %x, but there is nothing to do",
	    frm->frm_payload[1]);
	return (FCOE_SUCCESS);
}
int
dls_vlan_destroy(const char *name)
{
	int		err;
	dls_vlan_t	*dvp;
	dls_link_t	*dlp;
	mod_hash_val_t	val;

	/*
	 * Find the dls_vlan_t in the global hash table.
	 */
	rw_enter(&i_dls_vlan_lock, RW_WRITER);

	err = mod_hash_find(i_dls_vlan_hash, (mod_hash_key_t)name,
	    (mod_hash_val_t *)&dvp);
	if (err != 0) {
		err = ENOENT;
		goto done;
	}

	/*
	 * Check to see if it is referenced by any dls_impl_t.
	 */
	if (dvp->dv_ref != 0) {
		err = EBUSY;
		goto done;
	}

	/*
	 * Remove and destroy the hash table entry.
	 */
	err = mod_hash_remove(i_dls_vlan_hash, (mod_hash_key_t)name,
	    (mod_hash_val_t *)&val);
	ASSERT(err == 0);
	ASSERT(dvp == (dls_vlan_t *)val);

	ASSERT(i_dls_vlan_count > 0);
	i_dls_vlan_count--;

	/*
	 * Save a reference to dv_dlp before freeing the dls_vlan_t back
	 * to the cache.
	 */
	dlp = dvp->dv_dlp;

	kmem_cache_free(i_dls_vlan_cachep, dvp);

	/*
	 * Release the dls_link_t. This will destroy the dls_link_t and
	 * release the MAC if there are no more dls_vlan_t.
	 */
	dls_link_rele(dlp);

done:
	rw_exit(&i_dls_vlan_lock);
	return (err);
}
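
A string-keyed global hash such as i_dls_vlan_hash has to be created before lookups like the one above can run. The following is a minimal lifecycle sketch using the mod_hash create/destroy interfaces from <sys/modhash.h>; the hash name, chain count, and variable names are assumptions for illustration, not the actual dls module setup.

/*
 * Illustrative sketch only: create a string-keyed hash and its guarding
 * rwlock at module init, and tear both down at fini. A null value
 * destructor is used because entries are freed explicitly by the caller,
 * as dls_vlan_destroy() does above.
 */
#define	SKETCH_VLAN_HASHSZ	67	/* assumed chain count */

static mod_hash_t	*sketch_vlan_hash;
static krwlock_t	sketch_vlan_lock;

static void
sketch_vlan_init(void)
{
	sketch_vlan_hash = mod_hash_create_strhash("sketch_vlan_hash",
	    SKETCH_VLAN_HASHSZ, mod_hash_null_valdtor);
	rw_init(&sketch_vlan_lock, NULL, RW_DEFAULT, NULL);
}

static void
sketch_vlan_fini(void)
{
	rw_destroy(&sketch_vlan_lock);
	mod_hash_destroy_strhash(sketch_vlan_hash);
}
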
void
fcoet_clear_sol_exchange(fcoet_exchange_t *xch)
{
	mod_hash_val_t val = NULL;

	if (xch->xch_flags & XCH_FLAG_IN_HASH_TABLE) {
		(void) mod_hash_remove(xch->xch_ss->ss_sol_oxid_hash,
		    (mod_hash_key_t)(intptr_t)xch->xch_oxid, &val);
		ASSERT((fcoet_exchange_t *)val == xch);
		xch->xch_flags &= ~XCH_FLAG_IN_HASH_TABLE;
	}
}
static int
fcoet_process_sol_ct_rsp(fcoe_frame_t *frm)
{
	uint32_t		actual_size;
	fct_status_t		fc_st;
	uint32_t		iof;
	fct_sol_ct_t		*ct = NULL;
	fcoet_exchange_t	*xch = NULL;
	uint16_t		sol_oxid;

	sol_oxid = FRM_OXID(frm);
	if (mod_hash_remove(FRM2SS(frm)->ss_sol_oxid_hash,
	    (mod_hash_key_t)(intptr_t)sol_oxid,
	    (mod_hash_val_t *)&xch) != 0) {
		return (FCOE_SUCCESS);
	}

	xch->xch_flags &= ~XCH_FLAG_IN_HASH_TABLE;
	fcoet_init_tfm(frm, xch);
	ASSERT(FRM_IS_LAST_FRAME(frm));

	actual_size = CMD2ELS(xch->xch_cmd)->els_resp_size;
	if (actual_size > frm->frm_payload_size) {
		actual_size = frm->frm_payload_size;
	}

	ct = CMD2CT(xch->xch_cmd);
	ct->ct_resp_size = (uint16_t)actual_size;

	bcopy(frm->frm_payload, CMD2CT(xch->xch_cmd)->ct_resp_payload,
	    actual_size);
	fc_st = FCT_SUCCESS;
	iof = FCT_IOF_FCA_DONE;
	fct_send_cmd_done(xch->xch_cmd, fc_st, iof);
	return (FCOE_SUCCESS);
}
int
fcoet_clear_unsol_exchange(fcoet_exchange_t *xch)
{
	mod_hash_val_t val = NULL;

	if (mod_hash_remove(xch->xch_ss->ss_unsol_rxid_hash,
	    (mod_hash_key_t)(intptr_t)xch->xch_rxid, &val) == 0) {
		if (xch->xch_dbuf_num) {
			kmem_free((void *)xch->xch_dbufs,
			    xch->xch_dbuf_num * sizeof (void *));
			xch->xch_dbufs = NULL;
			xch->xch_dbuf_num = 0;
		}
		ASSERT(xch->xch_flags & XCH_FLAG_IN_HASH_TABLE);
		ASSERT((fcoet_exchange_t *)val == xch);
		xch->xch_flags &= ~XCH_FLAG_IN_HASH_TABLE;
		return (FCOE_SUCCESS);
	}

	FCOET_LOG("fcoet_clear_unsol_exchange",
	    "xch %p already cleared from hash table", xch);
	return (FCOE_FAILURE);
}
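
Both clear routines above rely on XCH_FLAG_IN_HASH_TABLE accurately tracking hash membership. A minimal sketch of the insert side of that discipline follows; the helper name is hypothetical and this is not the actual fcoet code, just an illustration of the flag pairing with mod_hash_insert().

/*
 * Illustrative sketch only (hypothetical helper): hash an unsolicited
 * exchange by its rxid and set XCH_FLAG_IN_HASH_TABLE only after the
 * insert succeeds, so the clear paths can trust the flag to mean that an
 * entry for this exchange is present in the hash.
 */
static int
fcoet_hash_unsol_exchange_sketch(fcoet_exchange_t *xch)
{
	if (mod_hash_insert(xch->xch_ss->ss_unsol_rxid_hash,
	    (mod_hash_key_t)(intptr_t)xch->xch_rxid,
	    (mod_hash_val_t)xch) != 0) {
		/* an exchange with this rxid is already hashed */
		return (FCOE_FAILURE);
	}

	xch->xch_flags |= XCH_FLAG_IN_HASH_TABLE;
	return (FCOE_SUCCESS);
}
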
/* ARGSUSED */
int
vnic_dev_delete(datalink_id_t vnic_id, uint32_t flags, cred_t *credp)
{
	vnic_t		*vnic = NULL;
	mod_hash_val_t	val;
	datalink_id_t	tmpid;
	int		rc;

	rw_enter(&vnic_lock, RW_WRITER);

	if (mod_hash_find(vnic_hash, VNIC_HASH_KEY(vnic_id),
	    (mod_hash_val_t *)&vnic) != 0) {
		rw_exit(&vnic_lock);
		return (ENOENT);
	}

	if ((rc = dls_devnet_destroy(vnic->vn_mh, &tmpid, B_TRUE)) != 0) {
		rw_exit(&vnic_lock);
		return (rc);
	}

	ASSERT(vnic_id == tmpid);

	/*
	 * We cannot unregister the MAC yet. Unregistering would
	 * free up mac_impl_t which should not happen at this time.
	 * So disable mac_impl_t by calling mac_disable(). This will prevent
	 * any new claims on mac_impl_t.
	 */
	if ((rc = mac_disable(vnic->vn_mh)) != 0) {
		(void) dls_devnet_create(vnic->vn_mh, vnic_id,
		    crgetzoneid(credp));
		rw_exit(&vnic_lock);
		return (rc);
	}

	vnic_cleanup_secondary_macs(vnic, vnic->vn_nhandles);

	vnic->vn_enabled = B_FALSE;
	(void) mod_hash_remove(vnic_hash, VNIC_HASH_KEY(vnic_id), &val);
	ASSERT(vnic == (vnic_t *)val);
	vnic_count--;
	rw_exit(&vnic_lock);

	/*
	 * XXX-nicolas shouldn't have a void cast here, if it's
	 * expected that the function will never fail, then we should
	 * have an ASSERT().
	 */
	(void) mac_unregister(vnic->vn_mh);

	if (vnic->vn_lower_mh != NULL) {
		/*
		 * Check if MAC address for the vnic was obtained from the
		 * factory MAC addresses. If yes, release it.
		 */
		if (vnic->vn_addr_type == VNIC_MAC_ADDR_TYPE_FACTORY) {
			(void) mac_addr_factory_release(vnic->vn_mch,
			    vnic->vn_slot_id);
		}
		(void) mac_margin_remove(vnic->vn_lower_mh, vnic->vn_margin);
		(void) mac_mtu_remove(vnic->vn_lower_mh, vnic->vn_mtu);
		(void) mac_notify_remove(vnic->vn_mnh, B_TRUE);
		(void) mac_unicast_remove(vnic->vn_mch, vnic->vn_muh);
		mac_client_close(vnic->vn_mch, MAC_CLOSE_FLAGS_IS_VNIC);
		mac_close(vnic->vn_lower_mh);
	}

	kmem_cache_free(vnic_cache, vnic);
	return (0);
}
/*
 * Unregister from the GLDv3 framework
 */
int
mac_unregister(mac_handle_t mh)
{
	int			err;
	mac_impl_t		*mip = (mac_impl_t *)mh;
	mod_hash_val_t		val;
	mac_margin_req_t	*mmr, *nextmmr;

	/* Fail the unregister if there are any open references to this mac. */
	if ((err = mac_disable_nowait(mh)) != 0)
		return (err);

	/*
	 * Clean up notification thread and wait for it to exit.
	 */
	i_mac_notify_exit(mip);

	i_mac_perim_enter(mip);

	/*
	 * If there are still resource properties configured on this MAC,
	 * re-enable the fastpath.
	 */
	if (mip->mi_resource_props.mrp_mask != 0)
		mac_fastpath_enable((mac_handle_t)mip);

	if (mip->mi_minor < MAC_MAX_MINOR + 1) {
		ddi_remove_minor_node(mip->mi_dip, mip->mi_name);
		ddi_remove_minor_node(mip->mi_dip,
		    (char *)ddi_driver_name(mip->mi_dip));
	}

	ASSERT(mip->mi_nactiveclients == 0 &&
	    !(mip->mi_state_flags & MIS_EXCLUSIVE));

	mac_driver_stat_delete(mip);

	(void) mod_hash_remove(i_mac_impl_hash,
	    (mod_hash_key_t)mip->mi_name, &val);
	ASSERT(mip == (mac_impl_t *)val);

	ASSERT(i_mac_impl_count > 0);
	atomic_dec_32(&i_mac_impl_count);

	if (mip->mi_pdata != NULL)
		kmem_free(mip->mi_pdata, mip->mi_pdata_size);
	mip->mi_pdata = NULL;
	mip->mi_pdata_size = 0;

	/*
	 * Free the list of margin requests.
	 */
	for (mmr = mip->mi_mmrp; mmr != NULL; mmr = nextmmr) {
		nextmmr = mmr->mmr_nextp;
		kmem_free(mmr, sizeof (mac_margin_req_t));
	}
	mip->mi_mmrp = NULL;

	mip->mi_linkstate = mip->mi_lowlinkstate = LINK_STATE_UNKNOWN;
	kmem_free(mip->mi_info.mi_unicst_addr, mip->mi_type->mt_addr_length);
	mip->mi_info.mi_unicst_addr = NULL;

	atomic_dec_32(&mip->mi_type->mt_ref);
	mip->mi_type = NULL;

	/*
	 * Free the primary MAC address.
	 */
	mac_fini_macaddr(mip);

	/*
	 * free all rings
	 */
	mac_free_rings(mip, MAC_RING_TYPE_RX);
	mac_free_rings(mip, MAC_RING_TYPE_TX);

	mac_addr_factory_fini(mip);

	bzero(mip->mi_addr, MAXMACADDRLEN);
	bzero(mip->mi_dstaddr, MAXMACADDRLEN);
	mip->mi_dstaddr_set = B_FALSE;

	/* and the flows */
	mac_flow_tab_destroy(mip->mi_flow_tab);
	mip->mi_flow_tab = NULL;

	if (mip->mi_minor > MAC_MAX_MINOR)
		mac_minor_rele(mip->mi_minor);

	cmn_err(CE_NOTE, "!%s unregistered", mip->mi_name);

	/*
	 * Reset the perim related fields to default values before
	 * kmem_cache_free
	 */
	i_mac_perim_exit(mip);
	mip->mi_state_flags = 0;

	mac_unregister_priv_prop(mip);

	ASSERT(mip->mi_bridge_link == NULL);
	kmem_cache_free(i_mac_impl_cachep, mip);

	return (0);
}