static int lio_cn23xx_pf_sriov_config(struct octeon_device *oct) { struct lio_cn23xx_pf *cn23xx = (struct lio_cn23xx_pf *)oct->chip; uint32_t num_pf_rings, total_rings, max_rings; cn23xx->conf = (struct lio_config *)lio_get_config_info(oct, LIO_23XX); max_rings = LIO_CN23XX_PF_MAX_RINGS; if (oct->sriov_info.num_pf_rings) { num_pf_rings = oct->sriov_info.num_pf_rings; if (num_pf_rings > max_rings) { num_pf_rings = min(mp_ncpus, max_rings); lio_dev_warn(oct, "num_queues_per_pf requested %u is more than available rings (%u). Reducing to %u\n", oct->sriov_info.num_pf_rings, max_rings, num_pf_rings); } } else { #ifdef RSS num_pf_rings = min(rss_getnumbuckets(), mp_ncpus); #else num_pf_rings = min(mp_ncpus, max_rings); #endif } total_rings = num_pf_rings; oct->sriov_info.trs = total_rings; oct->sriov_info.pf_srn = total_rings - num_pf_rings; oct->sriov_info.num_pf_rings = num_pf_rings; lio_dev_dbg(oct, "trs:%d pf_srn:%d num_pf_rings:%d\n", oct->sriov_info.trs, oct->sriov_info.pf_srn, oct->sriov_info.num_pf_rings); return (0); }
/*
 * Allocate and initialize the connection-group state hung off a
 * protocol's inpcbinfo: one hash table and lock per group, plus the
 * shared wildcard table.  Does nothing unless the protocol explicitly
 * requested connection groups and the machine has more than one CPU.
 */
void
in_pcbgroup_init(struct inpcbinfo *pcbinfo, u_int hashfields,
    int hash_nelements)
{
	struct inpcbgroup *grp;
	u_int ngroups, i;

	/*
	 * Connection groups are opt-in: a protocol must request them by
	 * supplying a hash-field selection.
	 */
	if (hashfields == IPI_HASHFIELDS_NONE)
		return;

	/*
	 * Groups exist for multi-processor load distribution, lock
	 * contention relief, and CPU affinity; on a uniprocessor they
	 * would only waste memory.
	 */
	if (mp_ncpus == 1)
		return;

#ifdef RSS
	/*
	 * Under RSS the group count is dictated by RSS: one connection
	 * group per RSS bucket.  If RSS cannot report a bucket count,
	 * disable connection groups entirely.
	 *
	 * XXXRW: Can this ever happen?
	 */
	ngroups = rss_getnumbuckets();
	if (ngroups == 0)
		return;
#else
	/*
	 * Without RSS, simply one group per CPU for now; dynamic
	 * rebalancing a la RSS would need similar logic here.
	 */
	ngroups = mp_ncpus;
#endif

	pcbinfo->ipi_hashfields = hashfields;
	pcbinfo->ipi_pcbgroups = malloc(ngroups *
	    sizeof(*pcbinfo->ipi_pcbgroups), M_PCB, M_WAITOK | M_ZERO);
	pcbinfo->ipi_npcbgroups = ngroups;
	pcbinfo->ipi_wildbase = hashinit(hash_nelements, M_PCB,
	    &pcbinfo->ipi_wildmask);
	for (i = 0; i < pcbinfo->ipi_npcbgroups; i++) {
		grp = &pcbinfo->ipi_pcbgroups[i];
		grp->ipg_hashbase = hashinit(hash_nelements, M_PCB,
		    &grp->ipg_hashmask);
		INP_GROUP_LOCK_INIT(grp, "pcbgroup");

		/*
		 * Assign the group's notional CPU affinity: under RSS,
		 * match the NIC's bucket-to-CPU mapping; otherwise just
		 * round-robin across CPUs for the time being.
		 *
		 * XXXRW: The bucket-to-CPU mapping is common to both the
		 * pcbgroup and RSS layers -- should it migrate entirely
		 * here, leaving RSS responsible only for hashing and
		 * mapping functions?
		 */
#ifdef RSS
		grp->ipg_cpu = rss_getcpu(i);
#else
		grp->ipg_cpu = (i % mp_ncpus);
#endif
	}
}