/* Find a free filter TID for the given address family.
 *
 * For IPv4 a single slot suffices.  For IPv6, T6 needs a 2-slot
 * aligned region while earlier chips need a 4-slot aligned region.
 * This is a pure lookup: any region found is released again before
 * returning, so the caller must still claim it under its own locking.
 *
 * Returns the free ftid on success, -1 (or a negative errno from
 * bitmap_find_free_region()) if no suitable slot exists.
 */
int cxgb4_get_free_ftid(struct net_device *dev, int family)
{
	struct adapter *adap = netdev2adap(dev);
	struct tid_info *t = &adap->tids;
	int ftid;

	spin_lock_bh(&t->ftid_lock);
	if (family == PF_INET) {
		ftid = find_first_zero_bit(t->ftid_bmap, t->nftids);
		if (ftid >= t->nftids)
			ftid = -1;
	} else {
		/* IPv6: 2^1 = 2 slots on T6, 2^2 = 4 slots otherwise */
		int order = is_t6(adap->params.chip) ? 1 : 2;

		ftid = bitmap_find_free_region(t->ftid_bmap, t->nftids,
					       order);
		/* Lookup only -- give the region straight back so it
		 * stays unallocated.
		 */
		if (ftid >= 0)
			bitmap_release_region(t->ftid_bmap, ftid, order);
	}
	spin_unlock_bh(&t->ftid_lock);

	return ftid;
}
/* Clear a filter and release any of its resources that we own. This also * clears the filter's "pending" status. */ void clear_filter(struct adapter *adap, struct filter_entry *f) { /* If the new or old filter have loopback rewriteing rules then we'll * need to free any existing L2T, SMT, CLIP entries of filter * rule. */ if (f->l2t) cxgb4_l2t_release(f->l2t); if (f->smt) cxgb4_smt_release(f->smt); if ((f->fs.hash || is_t6(adap->params.chip)) && f->fs.type) cxgb4_clip_release(f->dev, (const u32 *)&f->fs.val.lip, 1); /* The zeroing of the filter rule below clears the filter valid, * pending, locked flags, l2t pointer, etc. so it's all we need for * this operation. */ memset(f, 0, sizeof(*f)); }
int init_hash_filter(struct adapter *adap) { /* On T6, verify the necessary register configs and warn the user in * case of improper config */ if (is_t6(adap->params.chip)) { if (TCAM_ACTV_HIT_G(t4_read_reg(adap, LE_DB_RSP_CODE_0_A)) != 4) goto err; if (HASH_ACTV_HIT_G(t4_read_reg(adap, LE_DB_RSP_CODE_1_A)) != 4) goto err; } else { dev_err(adap->pdev_dev, "Hash filter supported only on T6\n"); return -EINVAL; } adap->params.hash_filter = 1; return 0; err: dev_warn(adap->pdev_dev, "Invalid hash filter config!\n"); return -EINVAL; }
/* Check a Chelsio Filter Request for validity, convert it into our internal
 * format and send it to the hardware.  Return 0 on success, an error number
 * otherwise.  We attach any provided filter operation context to the internal
 * filter specification in order to facilitate signaling completion of the
 * operation.
 */
int __cxgb4_set_filter(struct net_device *dev, int filter_id,
		       struct ch_filter_specification *fs,
		       struct filter_ctx *ctx)
{
	struct adapter *adapter = netdev2adap(dev);
	unsigned int chip_ver = CHELSIO_CHIP_VERSION(adapter->params.chip);
	unsigned int max_fidx, fidx;
	struct filter_entry *f;
	u32 iconf;
	int iq, ret;

	/* Exact-match (hash) filters take a completely separate path */
	if (fs->hash) {
		if (is_hashfilter(adapter))
			return cxgb4_set_hash_filter(dev, fs, ctx);
		netdev_err(dev, "%s: Exact-match filters only supported with Hash Filter configuration\n",
			   __func__);
		return -EINVAL;
	}

	/* Range-check the requested TCAM filter index */
	max_fidx = adapter->tids.nftids;
	if (filter_id != (max_fidx + adapter->tids.nsftids - 1) &&
	    filter_id >= max_fidx)
		return -E2BIG;

	fill_default_mask(fs);

	ret = validate_filter(dev, fs);
	if (ret)
		return ret;

	iq = get_filter_steerq(dev, fs);
	if (iq < 0)
		return iq;

	/* IPv6 filters occupy four slots and must be aligned on
	 * four-slot boundaries.  IPv4 filters only occupy a single
	 * slot and have no alignment requirements but writing a new
	 * IPv4 filter into the middle of an existing IPv6 filter
	 * requires clearing the old IPv6 filter and hence we prevent
	 * insertion.
	 */
	if (fs->type == 0) { /* IPv4 */
		/* For T6, if our IPv4 filter isn't being written to a
		 * multiple of two filter index and there's an IPv6
		 * filter at the multiple of 2 base slot, then we need
		 * to delete that IPv6 filter ...
		 * For adapters below T6, IPv6 filter occupies 4 entries.
		 * Hence we need to delete the filter in multiple of 4 slot.
		 */
		if (chip_ver < CHELSIO_T6)
			fidx = filter_id & ~0x3;
		else
			fidx = filter_id & ~0x1;

		/* Only a live IPv6 entry at the region base blocks us */
		if (fidx != filter_id &&
		    adapter->tids.ftid_tab[fidx].fs.type) {
			f = &adapter->tids.ftid_tab[fidx];
			if (f->valid) {
				dev_err(adapter->pdev_dev,
					"Invalid location. IPv6 requires 4 slots and is occupying slots %u to %u\n",
					fidx, fidx + 3);
				return -EINVAL;
			}
		}
	} else { /* IPv6 */
		if (chip_ver < CHELSIO_T6) {
			/* Ensure that the IPv6 filter is aligned on a
			 * multiple of 4 boundary.
			 */
			if (filter_id & 0x3) {
				dev_err(adapter->pdev_dev,
					"Invalid location. IPv6 must be aligned on a 4-slot boundary\n");
				return -EINVAL;
			}

			/* Check all except the base overlapping IPv4 filter
			 * slots.
			 */
			for (fidx = filter_id + 1; fidx < filter_id + 4;
			     fidx++) {
				f = &adapter->tids.ftid_tab[fidx];
				if (f->valid) {
					dev_err(adapter->pdev_dev,
						"Invalid location. IPv6 requires 4 slots and an IPv4 filter exists at %u\n",
						fidx);
					return -EBUSY;
				}
			}
		} else {
			/* For T6, CLIP being enabled, IPv6 filter would occupy
			 * 2 entries.
			 */
			if (filter_id & 0x1)
				return -EINVAL;
			/* Check overlapping IPv4 filter slot */
			fidx = filter_id + 1;
			f = &adapter->tids.ftid_tab[fidx];
			if (f->valid) {
				pr_err("%s: IPv6 filter requires 2 indices. IPv4 filter already present at %d. Please remove IPv4 filter first.\n",
				       __func__, fidx);
				return -EBUSY;
			}
		}
	}

	/* Check to make sure that provided filter index is not
	 * already in use by someone else
	 */
	f = &adapter->tids.ftid_tab[filter_id];
	if (f->valid)
		return -EBUSY;

	/* Reserve the ftid bitmap slot(s); the hardware tid includes the
	 * filter region base offset.
	 */
	fidx = filter_id + adapter->tids.ftid_base;
	ret = cxgb4_set_ftid(&adapter->tids, filter_id,
			     fs->type ? PF_INET6 : PF_INET,
			     chip_ver);
	if (ret)
		return ret;

	/* Check to make sure the filter requested is writable ... */
	ret = writable_filter(f);
	if (ret) {
		/* Clear the bits we have set above */
		cxgb4_clear_ftid(&adapter->tids, filter_id,
				 fs->type ? PF_INET6 : PF_INET,
				 chip_ver);
		return ret;
	}

	/* T6 IPv6 filters with a non-ANY local address need a CLIP
	 * (Compressed Local IP) table reference; roll back the ftid
	 * reservation if that fails.
	 */
	if (is_t6(adapter->params.chip) && fs->type &&
	    ipv6_addr_type((const struct in6_addr *)fs->val.lip) !=
	    IPV6_ADDR_ANY) {
		ret = cxgb4_clip_get(dev, (const u32 *)&fs->val.lip, 1);
		if (ret) {
			cxgb4_clear_ftid(&adapter->tids, filter_id, PF_INET6,
					 chip_ver);
			return ret;
		}
	}

	/* Convert the filter specification into our internal format.
	 * We copy the PF/VF specification into the Outer VLAN field
	 * here so the rest of the code -- including the interface to
	 * the firmware -- doesn't have to constantly do these checks.
	 */
	f->fs = *fs;
	f->fs.iq = iq;
	f->dev = dev;

	iconf = adapter->params.tp.ingress_config;
	if (iconf & VNIC_F) {
		f->fs.val.ovlan = (fs->val.pf << 13) | fs->val.vf;
		f->fs.mask.ovlan = (fs->mask.pf << 13) | fs->mask.vf;
		f->fs.val.ovlan_vld = fs->val.pfvf_vld;
		f->fs.mask.ovlan_vld = fs->mask.pfvf_vld;
	}

	/* Attempt to set the filter.  If we don't succeed, we clear
	 * it and return the failure.
	 */
	f->ctx = ctx;
	f->tid = fidx; /* Save the actual tid */
	ret = set_filter_wr(adapter, filter_id);
	if (ret) {
		cxgb4_clear_ftid(&adapter->tids, filter_id,
				 fs->type ? PF_INET6 : PF_INET,
				 chip_ver);
		clear_filter(adapter, f);
	}

	return ret;
}
/* Return the collection buffer size, in bytes, required for the given
 * cudbg entity on this adapter.  Returns 0 for entities that are unknown
 * or not applicable to the chip revision.
 */
static u32 cxgb4_get_entity_length(struct adapter *adap, u32 entity)
{
	struct cudbg_tcam tcam_region = { 0 };
	u32 value, n = 0, len = 0;

	switch (entity) {
	case CUDBG_REG_DUMP:
		/* Register map size depends on chip generation */
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T4:
			len = T4_REGMAP_SIZE;
			break;
		case CHELSIO_T5:
		case CHELSIO_T6:
			len = T5_REGMAP_SIZE;
			break;
		default:
			break;
		}
		break;
	case CUDBG_DEV_LOG:
		len = adap->params.devlog.size;
		break;
	case CUDBG_CIM_LA:
		/* CIM LA rows are 10 u32s wide on T6, 8 u32s earlier */
		if (is_t6(adap->params.chip)) {
			len = adap->params.cim_la_size / 10 + 1;
			len *= 10 * sizeof(u32);
		} else {
			len = adap->params.cim_la_size / 8;
			len *= 8 * sizeof(u32);
		}
		len += sizeof(u32); /* for reading CIM LA configuration */
		break;
	case CUDBG_CIM_MA_LA:
		len = 2 * CIM_MALA_SIZE * 5 * sizeof(u32);
		break;
	case CUDBG_CIM_QCFG:
		len = sizeof(struct cudbg_cim_qcfg);
		break;
	/* All CIM inbound queues share one fixed size */
	case CUDBG_CIM_IBQ_TP0:
	case CUDBG_CIM_IBQ_TP1:
	case CUDBG_CIM_IBQ_ULP:
	case CUDBG_CIM_IBQ_SGE0:
	case CUDBG_CIM_IBQ_SGE1:
	case CUDBG_CIM_IBQ_NCSI:
		len = CIM_IBQ_SIZE * 4 * sizeof(u32);
		break;
	/* CIM outbound queue sizes are read per-queue from hardware */
	case CUDBG_CIM_OBQ_ULP0:
		len = cudbg_cim_obq_size(adap, 0);
		break;
	case CUDBG_CIM_OBQ_ULP1:
		len = cudbg_cim_obq_size(adap, 1);
		break;
	case CUDBG_CIM_OBQ_ULP2:
		len = cudbg_cim_obq_size(adap, 2);
		break;
	case CUDBG_CIM_OBQ_ULP3:
		len = cudbg_cim_obq_size(adap, 3);
		break;
	case CUDBG_CIM_OBQ_SGE:
		len = cudbg_cim_obq_size(adap, 4);
		break;
	case CUDBG_CIM_OBQ_NCSI:
		len = cudbg_cim_obq_size(adap, 5);
		break;
	case CUDBG_CIM_OBQ_RXQ0:
		len = cudbg_cim_obq_size(adap, 6);
		break;
	case CUDBG_CIM_OBQ_RXQ1:
		len = cudbg_cim_obq_size(adap, 7);
		break;
	/* Memory regions: size (in MB) comes from the BAR register,
	 * but only if the region is enabled; otherwise len stays 0.
	 */
	case CUDBG_EDC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM0_BAR_A);
			len = EDRAM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_EDC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EDRAM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EDRAM1_BAR_A);
			len = EDRAM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC0:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM0_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY0_BAR_A);
			len = EXT_MEM0_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_MC1:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & EXT_MEM1_ENABLE_F) {
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	case CUDBG_RSS:
		len = t4_chip_rss_size(adap) * sizeof(u16);
		break;
	case CUDBG_RSS_VF_CONF:
		len = adap->params.arch.vfcount *
		      sizeof(struct cudbg_rss_vf_conf);
		break;
	case CUDBG_PATH_MTU:
		len = NMTUS * sizeof(u16);
		break;
	case CUDBG_PM_STATS:
		len = sizeof(struct cudbg_pm_stats);
		break;
	case CUDBG_HW_SCHED:
		len = sizeof(struct cudbg_hw_sched);
		break;
	case CUDBG_TP_INDIRECT:
		/* Number of indirect-register groups depends on chip */
		switch (CHELSIO_CHIP_VERSION(adap->params.chip)) {
		case CHELSIO_T5:
			n = sizeof(t5_tp_pio_array) +
			    sizeof(t5_tp_tm_pio_array) +
			    sizeof(t5_tp_mib_index_array);
			break;
		case CHELSIO_T6:
			n = sizeof(t6_tp_pio_array) +
			    sizeof(t6_tp_tm_pio_array) +
			    sizeof(t6_tp_mib_index_array);
			break;
		default:
			break;
		}
		n = n / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_SGE_INDIRECT:
		len = sizeof(struct ireg_buf) * 2 +
		      sizeof(struct sge_qbase_reg_field);
		break;
	case CUDBG_ULPRX_LA:
		len = sizeof(struct cudbg_ulprx_la);
		break;
	case CUDBG_TP_LA:
		len = sizeof(struct cudbg_tp_la) + TPLA_SIZE * sizeof(u64);
		break;
	case CUDBG_MEMINFO:
		len = sizeof(struct cudbg_meminfo);
		break;
	case CUDBG_CIM_PIF_LA:
		len = sizeof(struct cudbg_cim_pif_la);
		len += 2 * CIM_PIFLA_SIZE * 6 * sizeof(u32);
		break;
	case CUDBG_CLK:
		len = sizeof(struct cudbg_clk_info);
		break;
	case CUDBG_PCIE_INDIRECT:
		n = sizeof(t5_pcie_pdbg_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_PM_INDIRECT:
		n = sizeof(t5_pm_rx_array) / (IREG_NUM_ELEM * sizeof(u32));
		len = sizeof(struct ireg_buf) * n * 2;
		break;
	case CUDBG_TID_INFO:
		len = sizeof(struct cudbg_tid_info_region_rev1);
		break;
	case CUDBG_PCIE_CONFIG:
		len = sizeof(u32) * CUDBG_NUM_PCIE_CONFIG_REGS;
		break;
	case CUDBG_DUMP_CONTEXT:
		len = cudbg_dump_context_size(adap);
		break;
	case CUDBG_MPS_TCAM:
		len = sizeof(struct cudbg_mps_tcam) *
		      adap->params.arch.mps_tcam_size;
		break;
	case CUDBG_VPD_DATA:
		len = sizeof(struct cudbg_vpd_data);
		break;
	case CUDBG_LE_TCAM:
		/* Size scales with the maximum TID the LE TCAM can hold */
		cudbg_fill_le_tcam_info(adap, &tcam_region);
		len = sizeof(struct cudbg_tcam) +
		      sizeof(struct cudbg_tid_data) * tcam_region.max_tid;
		break;
	case CUDBG_CCTRL:
		len = sizeof(u16) * NMTUS * NCCTRL_WIN;
		break;
	case CUDBG_MA_INDIRECT:
		/* Only present on T6 and later */
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_ma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n * 2;
		}
		break;
	case CUDBG_ULPTX_LA:
		len = sizeof(struct cudbg_ulptx_la);
		break;
	case CUDBG_UP_CIM_INDIRECT:
		n = 0;
		if (is_t5(adap->params.chip))
			n = sizeof(t5_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		else if (is_t6(adap->params.chip))
			n = sizeof(t6_up_cim_reg_array) /
			    ((IREG_NUM_ELEM + 1) * sizeof(u32));
		len = sizeof(struct ireg_buf) * n;
		break;
	case CUDBG_PBT_TABLE:
		len = sizeof(struct cudbg_pbt_tables);
		break;
	case CUDBG_MBOX_LOG:
		len = sizeof(struct cudbg_mbox_log) * adap->mbox_log->size;
		break;
	case CUDBG_HMA_INDIRECT:
		/* Only present on T6 and later */
		if (CHELSIO_CHIP_VERSION(adap->params.chip) > CHELSIO_T5) {
			n = sizeof(t6_hma_ireg_array) /
			    (IREG_NUM_ELEM * sizeof(u32));
			len = sizeof(struct ireg_buf) * n;
		}
		break;
	case CUDBG_HMA:
		value = t4_read_reg(adap, MA_TARGET_MEM_ENABLE_A);
		if (value & HMA_MUX_F) {
			/* In T6, there's no MC1. So, HMA shares MC1
			 * address space.
			 */
			value = t4_read_reg(adap, MA_EXT_MEMORY1_BAR_A);
			len = EXT_MEM1_SIZE_G(value);
		}
		len = cudbg_mbytes_to_bytes(len);
		break;
	default:
		break;
	}

	return len;
}