static int nicvf_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct nicvf *nic = netdev_priv(netdev); struct queue_set *qs = nic->qs; u32 rx_count, tx_count; /* Due to HW errata this is not supported on T88 pass 1.x silicon */ if (pass1_silicon(nic->pdev)) return -EINVAL; if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) return -EINVAL; tx_count = clamp_t(u32, ring->tx_pending, MIN_SND_QUEUE_LEN, MAX_SND_QUEUE_LEN); rx_count = clamp_t(u32, ring->rx_pending, MIN_CMP_QUEUE_LEN, MAX_CMP_QUEUE_LEN); if ((tx_count == qs->sq_len) && (rx_count == qs->cq_len)) return 0; /* Permitted lengths are 1K, 2K, 4K, 8K, 16K, 32K, 64K */ qs->sq_len = rounddown_pow_of_two(tx_count); qs->cq_len = rounddown_pow_of_two(rx_count); if (netif_running(netdev)) { nicvf_stop(netdev); nicvf_open(netdev); } return 0; }
/*
 * stm_generic_packet - emit a single STP packet on behalf of the generic
 * STM core.
 *
 * Validates that the device is present and enabled and that @channel is
 * within range, derives the hardware flag bits (timestamped / guaranteed)
 * for the channel, bounds @size by the fundamental write width, and sends
 * either a flag or a data packet.
 *
 * Returns the number of bytes consumed, -EACCES when the device is not
 * enabled, -EINVAL for an out-of-range channel, or -ENOTSUPP for packet
 * types this back-end does not implement.
 */
static ssize_t notrace stm_generic_packet(struct stm_data *stm_data,
					  unsigned int master,
					  unsigned int channel,
					  unsigned int packet,
					  unsigned int flags,
					  unsigned int size,
					  const unsigned char *payload)
{
	struct stm_drvdata *drvdata =
		container_of(stm_data, struct stm_drvdata, stm);
	unsigned int pkt_flags;
	void __iomem *ch;

	/* Device must exist and be enabled before we touch it. */
	if (!drvdata || !local_read(&drvdata->mode))
		return -EACCES;

	/* Reject channels the hardware does not implement. */
	if (channel >= drvdata->numsp)
		return -EINVAL;

	ch = stm_channel_addr(drvdata, channel);

	pkt_flags = (flags == STP_PACKET_TIMESTAMPED) ?
		    STM_FLAG_TIMESTAMPED : 0;
	if (test_bit(channel, drvdata->chs.guaranteed))
		pkt_flags |= STM_FLAG_GUARANTEED;

	/* Cap at the fundamental data size, otherwise round down to a
	 * power of two the hardware can accept.
	 */
	if (size > drvdata->write_bytes)
		size = drvdata->write_bytes;
	else
		size = rounddown_pow_of_two(size);

	switch (packet) {
	case STP_PACKET_FLAG:
		ch += stm_channel_off(STM_PKT_TYPE_FLAG, pkt_flags);
		/*
		 * The generic STM core sets a size of '0' on flag packets.
		 * As such send a flag packet of size '1' and tell the
		 * core we did so.
		 */
		stm_send(ch, payload, 1, drvdata->write_bytes);
		size = 1;
		break;
	case STP_PACKET_DATA:
		ch += stm_channel_off(STM_PKT_TYPE_DATA, pkt_flags);
		stm_send(ch, payload, size, drvdata->write_bytes);
		break;
	default:
		return -ENOTSUPP;
	}

	return size;
}
/*
 * qlcnic_get_channels - ethtool .get_channels hook.
 *
 * Reports the Rx maximum as the smaller of the adapter's Rx queue limit
 * and the number of online CPUs, rounded down to a power of two; Tx
 * limits and the current counts come straight from adapter state.
 */
static void qlcnic_get_channels(struct net_device *dev,
				struct ethtool_channels *channel)
{
	struct qlcnic_adapter *adapter = netdev_priv(dev);
	int rx_cap;

	rx_cap = min_t(int, adapter->ahw->max_rx_ques, num_online_cpus());

	channel->max_rx = rounddown_pow_of_two(rx_cap);
	channel->max_tx = adapter->ahw->max_tx_ques;
	channel->rx_count = adapter->max_sds_rings;
	channel->tx_count = adapter->ahw->max_tx_ques;
}
/*
 * mlx4_en_validate_params - sanitize the netq RSS module parameters.
 *
 * Only compiled for the VMKernel netqueue RSS build.  Forces
 * netq_num_rings_per_rss to 0 when netq is off; otherwise clamps it to
 * [MIN, MAX], drops it to an even value, and rounds it down to a power
 * of two, warning on every adjustment.
 */
static void mlx4_en_validate_params(struct mlx4_en_dev *mdev)
{
#if defined(__VMKLNX__) && defined(__VMKNETDDI_QUEUEOPS__)
#ifdef __VMKERNEL_RSS_NETQ_SUPPORT__
	if (!netq) {
		/* not using netq so no reason for netq RSS */
		mlx4_warn(mdev, "netq is disabled, setting netq_num_rings_per_rss to 0\n");
		netq_num_rings_per_rss = 0;
	} else {
		/* Clamp into the supported range, warning either way. */
		if (netq_num_rings_per_rss > MAX_NETQ_NUM_RINGS_PER_RSS) {
			mlx4_warn(mdev,
				  "Unable to set netq_num_rings_per_rss to = %u "
				  "since it is too high, Using %u instead\n",
				  netq_num_rings_per_rss,
				  MAX_NETQ_NUM_RINGS_PER_RSS);
			netq_num_rings_per_rss = MAX_NETQ_NUM_RINGS_PER_RSS;
		} else if (netq_num_rings_per_rss < MIN_NETQ_NUM_RINGS_PER_RSS) {
			mlx4_warn(mdev,
				  "Unable to set netq_num_rings_per_rss to = %u "
				  "since it is too low, Using %u instead\n",
				  netq_num_rings_per_rss,
				  MIN_NETQ_NUM_RINGS_PER_RSS);
			netq_num_rings_per_rss = MIN_NETQ_NUM_RINGS_PER_RSS;
		}

		/* netq_num_rings_per_rss must be even */
		if ((netq_num_rings_per_rss % 2) != 0) {
			--netq_num_rings_per_rss;
			mlx4_warn(mdev,
				  "netq_num_rings_per_rss must be of even value, "
				  "setting it to %u\n",
				  netq_num_rings_per_rss);
		}

		/* netq_num_rings_per_rss must be power of 2 */
		if (netq_num_rings_per_rss &&
		    !is_power_of_2(netq_num_rings_per_rss)) {
			mlx4_warn(mdev,
				  "netq_num_rings_per_rss must be power of 2 "
				  "rounding down to %lu\n",
				  rounddown_pow_of_two(netq_num_rings_per_rss));
			netq_num_rings_per_rss =
				rounddown_pow_of_two(netq_num_rings_per_rss);
		}
	}
#endif /* __VMKERNEL_RSS_NETQ_SUPPORT__ */
#endif /* NET QUEUE */
}
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) { struct mlx4_en_rx_ring *ring; int ring_ind; int buf_ind; int new_size; int err; for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) { for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { ring = priv->rx_ring[ring_ind]; err = mlx4_en_prepare_rx_desc(priv, ring, ring->actual_size); if (err) { if (ring->actual_size == 0) { en_err(priv, "Failed to allocate " "enough rx buffers\n"); return -ENOMEM; } else { new_size = rounddown_pow_of_two(ring->actual_size); en_warn(priv, "Only %d buffers allocated " "reducing ring size to %d\n", ring->actual_size, new_size); goto reduce_rings; } } ring->actual_size++; ring->prod++; } } return 0; reduce_rings: for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { ring = priv->rx_ring[ring_ind]; while (ring->actual_size > new_size) { ring->actual_size--; ring->prod--; mlx4_en_free_buf(ring, ring->mbuf + ring->actual_size); } } return 0; }
/*
 * mlx4_en_calc_rx_ring_num - compute the number of RX rings to use.
 *
 * Starts from the per-port share of the completion-vector pool (or the
 * raw completion-vector count when no pool exists), bounds the result to
 * [MIN_RX_RINGS, MAX_RX_RINGS] (and MAX_MSIX_P_PORT in the pool case),
 * and rounds it down to a power of two.
 */
static u32 mlx4_en_calc_rx_ring_num(struct mlx4_en_dev *mdev)
{
	struct mlx4_dev *dev = mdev->dev;
	u32 num;

	if (dev->caps.comp_pool) {
		num = dev->caps.comp_pool / dev->caps.num_ports;
		num = min_t(int, num, MAX_MSIX_P_PORT);
	} else {
		num = dev->caps.num_comp_vectors;
	}

	num = min_t(int, num, MAX_RX_RINGS);
	num = max_t(int, num, MIN_RX_RINGS);

	return rounddown_pow_of_two(num);
}
/*
 * mlx4_en_set_num_rx_rings - set the profile RX ring count for each
 * ethernet port.
 *
 * For every ethernet port, takes the per-port EQ count clamped into
 * [MIN_RX_RINGS, DEF_RX_RINGS], falls back to MIN_RX_RINGS under the
 * low-memory profile, and stores the result rounded down to a power of
 * two in mdev->profile.prof[i].rx_ring_num.
 *
 * NOTE(review): this chunk appears truncated — the function's closing
 * brace is not visible here, so the code below is left byte-identical.
 */
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev) { int i; int num_of_eqs; int num_rx_rings; struct mlx4_dev *dev = mdev->dev; mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { num_of_eqs = max_t(int, MIN_RX_RINGS, min_t(int, mlx4_get_eqs_per_port(mdev->dev, i), DEF_RX_RINGS)); num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS : num_of_eqs; mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(num_rx_rings); }
static int mlx4_en_fill_rx_buffers(struct mlx4_en_priv *priv) { struct mlx4_en_rx_ring *ring; int ring_ind; int buf_ind; int new_size; for (buf_ind = 0; buf_ind < priv->prof->rx_ring_size; buf_ind++) { for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { ring = priv->rx_ring[ring_ind]; if (mlx4_en_prepare_rx_desc(priv, ring, ring->actual_size, GFP_KERNEL | __GFP_COLD)) { if (ring->actual_size < MLX4_EN_MIN_RX_SIZE) { en_err(priv, "Failed to allocate enough rx buffers\n"); return -ENOMEM; } else { new_size = rounddown_pow_of_two(ring->actual_size); en_warn(priv, "Only %d buffers allocated reducing ring size to %d\n", ring->actual_size, new_size); goto reduce_rings; } } ring->actual_size++; ring->prod++; } } return 0; reduce_rings: for (ring_ind = 0; ring_ind < priv->rx_ring_num; ring_ind++) { ring = priv->rx_ring[ring_ind]; while (ring->actual_size > new_size) { ring->actual_size--; ring->prod--; mlx4_en_free_rx_desc(priv, ring, ring->actual_size); } } return 0; }
/*
 * mlx4_en_calc_rings_per_rss - decide how many rings each netq RSS pool
 * gets.
 *
 * Grants @requested rings per RSS queue if the EQ budget allows
 * (1 default ring + 1 regular ring + num_rss_queue * requested); when it
 * does not, it retries with progressively smaller power-of-two counts,
 * and finally disables netq RSS (returns 0) if nothing fits.  A request
 * of 0 rings, or 0 RSS queues, also yields 0.
 */
static u32 mlx4_en_calc_rings_per_rss(struct mlx4_en_dev *mdev,
				      u32 total_rx_ring, u32 num_rss_queue,
				      u32 requested)
{
	/* No request or no RSS queues: nothing to grant. */
	if (!requested || !num_rss_queue)
		return 0;

	/* 1 default ring + 1 regular ring + requested RSS rings */
	if (total_rx_ring >= (2 + num_rss_queue * requested))
		return requested;

	mlx4_warn(mdev, "not enough free EQs to open netq RSS with "
		  "%u rings per RSS\n", requested);

	/* best effort to open with as many RSS rings as possible */
	while (requested > 2) {
		requested = rounddown_pow_of_two(requested - 1);
		/* 1 default ring + 1 regular ring + requested RSS rings */
		if (total_rx_ring >= (2 + num_rss_queue * requested)) {
			mlx4_warn(mdev,
				  "Setting netq_num_rings_per_rss to %u\n",
				  requested);
			return requested;
		}
	}

	mlx4_warn(mdev, "disabling netq RSS\n");
	return 0;
}
/*
 * mlx4_en_set_num_rx_rings - set the profile RX ring count per port.
 *
 * For each ethernet port: without a completion-vector pool, uses the
 * completion-vector count clamped into [MIN_RX_RINGS, DEF_RX_RINGS];
 * with a pool, uses the per-port pool share capped at MAX_MSIX_P_PORT,
 * minus one.  The result is further limited by the default RSS queue
 * count (or forced to MIN_RX_RINGS under the low-memory profile) and
 * rounded down to a power of two.
 *
 * NOTE(review): this chunk appears truncated — the function's closing
 * brace is not visible here, so the code below is left byte-identical.
 */
void mlx4_en_set_num_rx_rings(struct mlx4_en_dev *mdev) { int i; int num_of_eqs; int num_rx_rings; struct mlx4_dev *dev = mdev->dev; mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { if (!dev->caps.comp_pool) num_of_eqs = max_t(int, MIN_RX_RINGS, min_t(int, dev->caps.num_comp_vectors, DEF_RX_RINGS)); else num_of_eqs = min_t(int, MAX_MSIX_P_PORT, dev->caps.comp_pool/ dev->caps.num_ports) - 1; num_rx_rings = mlx4_low_memory_profile() ? MIN_RX_RINGS : min_t(int, num_of_eqs, netif_get_num_default_rss_queues()); mdev->profile.prof[i].rx_ring_num = rounddown_pow_of_two(num_rx_rings); }
/*
 * ramoops_probe - platform probe for the ramoops persistent RAM backend.
 *
 * Flow: obtain platform data (parsing the device tree when probed via DT
 * with no pdata), refuse a second ramoops instance, validate that sizes
 * are non-zero, round each region size down to a power of two, carve the
 * reserved memory into dump/console/ftrace/pmsg zones, allocate the
 * pstore buffer, and register with pstore.  On success the module
 * parameter variables are updated so the active configuration is visible
 * under /sys/module/ramoops/parameters/.
 *
 * Error handling unwinds in reverse order of initialization via the
 * fail_* label chain; the logic is ordering-sensitive, so the code below
 * is left byte-identical.
 *
 * Returns 0 on success or a negative errno.
 */
static int ramoops_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ramoops_platform_data *pdata = dev->platform_data; struct ramoops_context *cxt = &oops_cxt; size_t dump_mem_sz; phys_addr_t paddr; int err = -EINVAL; if (dev_of_node(dev) && !pdata) { pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { err = -ENOMEM; goto fail_out; } err = ramoops_parse_dt(pdev, pdata); if (err < 0) goto fail_out; } /* Only a single ramoops area allowed at a time, so fail extra * probes. */ if (cxt->max_dump_cnt) goto fail_out; if (!pdata->mem_size || (!pdata->record_size && !pdata->console_size && !pdata->ftrace_size && !pdata->pmsg_size)) { pr_err("The memory size and the record/console size must be " "non-zero\n"); goto fail_out; } if (pdata->record_size && !is_power_of_2(pdata->record_size)) pdata->record_size = rounddown_pow_of_two(pdata->record_size); if (pdata->console_size && !is_power_of_2(pdata->console_size)) pdata->console_size = rounddown_pow_of_two(pdata->console_size); if (pdata->ftrace_size && !is_power_of_2(pdata->ftrace_size)) pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size); if (pdata->pmsg_size && !is_power_of_2(pdata->pmsg_size)) pdata->pmsg_size = rounddown_pow_of_two(pdata->pmsg_size); cxt->size = pdata->mem_size; cxt->phys_addr = pdata->mem_address; cxt->memtype = pdata->mem_type; cxt->record_size = pdata->record_size; cxt->console_size = pdata->console_size; cxt->ftrace_size = pdata->ftrace_size; cxt->pmsg_size = pdata->pmsg_size; cxt->dump_oops = pdata->dump_oops; cxt->ecc_info = pdata->ecc_info; paddr = cxt->phys_addr; dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size - cxt->pmsg_size; err = ramoops_init_przs(dev, cxt, &paddr, dump_mem_sz); if (err) goto fail_out; err = ramoops_init_prz(dev, cxt, &cxt->cprz, &paddr, cxt->console_size, 0); if (err) goto fail_init_cprz; err = ramoops_init_prz(dev, cxt, &cxt->fprz, &paddr, cxt->ftrace_size, LINUX_VERSION_CODE); if (err) goto
fail_init_fprz; err = ramoops_init_prz(dev, cxt, &cxt->mprz, &paddr, cxt->pmsg_size, 0); if (err) goto fail_init_mprz; cxt->pstore.data = cxt; /* * Console can handle any buffer size, so prefer LOG_LINE_MAX. If we * have to handle dumps, we must have at least record_size buffer. And * for ftrace, bufsize is irrelevant (if bufsize is 0, buf will be * ZERO_SIZE_PTR). */ if (cxt->console_size) cxt->pstore.bufsize = 1024; /* LOG_LINE_MAX */ cxt->pstore.bufsize = max(cxt->record_size, cxt->pstore.bufsize); cxt->pstore.buf = kmalloc(cxt->pstore.bufsize, GFP_KERNEL); if (!cxt->pstore.buf) { pr_err("cannot allocate pstore buffer\n"); err = -ENOMEM; goto fail_clear; } spin_lock_init(&cxt->pstore.buf_lock); cxt->pstore.flags = PSTORE_FLAGS_DMESG; if (cxt->console_size) cxt->pstore.flags |= PSTORE_FLAGS_CONSOLE; if (cxt->ftrace_size) cxt->pstore.flags |= PSTORE_FLAGS_FTRACE; if (cxt->pmsg_size) cxt->pstore.flags |= PSTORE_FLAGS_PMSG; err = pstore_register(&cxt->pstore); if (err) { pr_err("registering with pstore failed\n"); goto fail_buf; } /* * Update the module parameter variables as well so they are visible * through /sys/module/ramoops/parameters/ */ mem_size = pdata->mem_size; mem_address = pdata->mem_address; record_size = pdata->record_size; dump_oops = pdata->dump_oops; ramoops_console_size = pdata->console_size; ramoops_pmsg_size = pdata->pmsg_size; ramoops_ftrace_size = pdata->ftrace_size; pr_info("attached 0x%lx@0x%llx, ecc: %d/%d\n", cxt->size, (unsigned long long)cxt->phys_addr, cxt->ecc_info.ecc_size, cxt->ecc_info.block_size); return 0; fail_buf: kfree(cxt->pstore.buf); fail_clear: cxt->pstore.bufsize = 0; persistent_ram_free(cxt->mprz); fail_init_mprz: persistent_ram_free(cxt->fprz); fail_init_fprz: persistent_ram_free(cxt->cprz); fail_init_cprz: ramoops_free_przs(cxt); fail_out: return err; }
/*
 * NOTE(review): this chunk is a fragment cut at both edges.  It contains
 * the interior of an mlx4_en_set_num_rx_rings() variant (its signature
 * and local declarations begin before this chunk) followed by the start
 * of mlx4_en_get_frag_hdr(), whose body continues past this chunk.  Both
 * definitions are incomplete from this view, so the code is left
 * byte-identical; only this note is added.
 *
 * Visible behavior: per ethernet port, the profile's rx_ring_num is set
 * from the clamped completion-vector count (or the per-port pool share),
 * rounded down to a power of two.  The LRO helper extracts mac/ip/tcpudp
 * header pointers from the first fragment, with a kernel-version guard
 * around the page accessor.
 */
struct mlx4_dev *dev = mdev->dev; struct mlx4_en_port_profile *prof = NULL; mlx4_foreach_port(i, dev, MLX4_PORT_TYPE_ETH) { prof = &mdev->profile.prof[i]; if (!dev->caps.comp_pool) num_of_eqs = max_t(int, MIN_RX_RINGS, min_t(int, dev->caps.num_comp_vectors, DEF_RX_RINGS)); else num_of_eqs = min_t(int, MAX_MSIX_P_PORT, dev->caps.comp_pool/ dev->caps.num_ports); prof->rx_ring_num = rounddown_pow_of_two(num_of_eqs); } } #ifdef CONFIG_COMPAT_LRO_ENABLED static int mlx4_en_get_frag_hdr(struct skb_frag_struct *frags, void **mac_hdr, void **ip_hdr, void **tcpudp_hdr, u64 *hdr_flags, void *priv) { #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)) *mac_hdr = page_address(frags->page) + frags->page_offset; #else *mac_hdr = page_address(skb_frag_page(frags)) + frags->page_offset; #endif *ip_hdr = *mac_hdr + ETH_HLEN; *tcpudp_hdr = (struct tcphdr *)(*ip_hdr + sizeof(struct iphdr));
/*
 * ramoops_probe - platform probe for an older ramoops variant (no pmsg
 * zone, no device-tree parsing).
 *
 * Flow: refuse a second instance, validate sizes, round mem/record/
 * console/ftrace sizes down to powers of two, carve the region into
 * dump/console/ftrace zones, allocate the pstore buffer, register with
 * pstore, and mirror the configuration into the module parameter
 * variables.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): the fail_cnt/fail_init_fprz paths kfree() cxt->fprz and
 * cxt->cprz directly; whether plain kfree() is the correct teardown for
 * zones created by ramoops_init_prz() cannot be confirmed from this
 * chunk — verify against the allocator (later upstream code uses
 * persistent_ram_free() here).
 *
 * NOTE(review): spin_lock_init() runs before the kmalloc NULL check;
 * harmless, but inconsistent with other variants of this function.
 *
 * Logic is ordering-sensitive, so the code below is left byte-identical.
 */
static int ramoops_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ramoops_platform_data *pdata = pdev->dev.platform_data; struct ramoops_context *cxt = &oops_cxt; size_t dump_mem_sz; phys_addr_t paddr; int err = -EINVAL; if (cxt->max_dump_cnt) goto fail_out; if (!pdata->mem_size || (!pdata->record_size && !pdata->console_size && !pdata->ftrace_size)) { pr_err("The memory size and the record/console size must be " "non-zero\n"); goto fail_out; } if (!is_power_of_2(pdata->mem_size)) pdata->mem_size = rounddown_pow_of_two(pdata->mem_size); if (!is_power_of_2(pdata->record_size)) pdata->record_size = rounddown_pow_of_two(pdata->record_size); if (!is_power_of_2(pdata->console_size)) pdata->console_size = rounddown_pow_of_two(pdata->console_size); if (!is_power_of_2(pdata->ftrace_size)) pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size); cxt->dump_read_cnt = 0; cxt->size = pdata->mem_size; cxt->phys_addr = pdata->mem_address; cxt->record_size = pdata->record_size; cxt->console_size = pdata->console_size; cxt->ftrace_size = pdata->ftrace_size; cxt->dump_oops = pdata->dump_oops; cxt->ecc_info = pdata->ecc_info; paddr = cxt->phys_addr; dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size; err = ramoops_init_przs(dev, cxt, &paddr, dump_mem_sz); if (err) goto fail_out; err = ramoops_init_prz(dev, cxt, &cxt->cprz, &paddr, cxt->console_size, 0); if (err) goto fail_init_cprz; err = ramoops_init_prz(dev, cxt, &cxt->fprz, &paddr, cxt->ftrace_size, LINUX_VERSION_CODE); if (err) goto fail_init_fprz; if (!cxt->przs && !cxt->cprz && !cxt->fprz) { pr_err("memory size too small, minimum is %zu\n", cxt->console_size + cxt->record_size + cxt->ftrace_size); err = -EINVAL; goto fail_cnt; } cxt->pstore.data = cxt; if (cxt->console_size) cxt->pstore.bufsize = 1024; cxt->pstore.bufsize = max(cxt->record_size, cxt->pstore.bufsize); cxt->pstore.buf = kmalloc(cxt->pstore.bufsize, GFP_KERNEL); spin_lock_init(&cxt->pstore.buf_lock); if
(!cxt->pstore.buf) { pr_err("cannot allocate pstore buffer\n"); err = -ENOMEM; goto fail_clear; } err = pstore_register(&cxt->pstore); if (err) { pr_err("registering with pstore failed\n"); goto fail_buf; } mem_size = pdata->mem_size; mem_address = pdata->mem_address; record_size = pdata->record_size; dump_oops = pdata->dump_oops; pr_info("attached 0x%lx@0x%llx, ecc: %d/%d\n", cxt->size, (unsigned long long)cxt->phys_addr, cxt->ecc_info.ecc_size, cxt->ecc_info.block_size); #if defined(CONFIG_HTC_DEBUG_HBRAMLOG) if (cxt->console_size) bldr_log_init(); #endif return 0; fail_buf: kfree(cxt->pstore.buf); fail_clear: cxt->pstore.bufsize = 0; cxt->max_dump_cnt = 0; fail_cnt: kfree(cxt->fprz); fail_init_fprz: kfree(cxt->cprz); fail_init_cprz: ramoops_free_przs(cxt); fail_out: return err; }
/*
 * ramoops_probe - platform probe for a ramoops variant whose size
 * rounding can be compiled out via CONFIG_PSTORE_RAM_SIZE_UNCHECK.
 *
 * Flow: refuse a second instance, validate sizes, (optionally) round
 * mem/record/console/ftrace sizes down to powers of two, carve the
 * region into dump/console/ftrace zones, allocate the pstore buffer,
 * register with pstore, and mirror the configuration into the module
 * parameter variables.  Returns 0 on success or a negative errno.
 *
 * NOTE(review): the fail_cnt/fail_init_fprz paths kfree() cxt->fprz and
 * cxt->cprz directly; whether plain kfree() is the correct teardown for
 * zones created by ramoops_init_prz() cannot be confirmed from this
 * chunk — verify against the allocator (later upstream code uses
 * persistent_ram_free() here).
 *
 * Logic is ordering-sensitive, so the code below is left byte-identical.
 */
static int ramoops_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ramoops_platform_data *pdata = pdev->dev.platform_data; struct ramoops_context *cxt = &oops_cxt; size_t dump_mem_sz; phys_addr_t paddr; int err = -EINVAL; /* Only a single ramoops area allowed at a time, so fail extra * probes. */ if (cxt->max_dump_cnt) goto fail_out; if (!pdata->mem_size || (!pdata->record_size && !pdata->console_size && !pdata->ftrace_size)) { pr_err("The memory size and the record/console size must be " "non-zero\n"); goto fail_out; } #ifndef CONFIG_PSTORE_RAM_SIZE_UNCHECK if (!is_power_of_2(pdata->mem_size)) pdata->mem_size = rounddown_pow_of_two(pdata->mem_size); if (!is_power_of_2(pdata->record_size)) pdata->record_size = rounddown_pow_of_two(pdata->record_size); if (!is_power_of_2(pdata->console_size)) pdata->console_size = rounddown_pow_of_two(pdata->console_size); if (!is_power_of_2(pdata->ftrace_size)) pdata->ftrace_size = rounddown_pow_of_two(pdata->ftrace_size); #endif cxt->dump_read_cnt = 0; cxt->size = pdata->mem_size; cxt->phys_addr = pdata->mem_address; cxt->record_size = pdata->record_size; cxt->console_size = pdata->console_size; cxt->ftrace_size = pdata->ftrace_size; cxt->dump_oops = pdata->dump_oops; cxt->ecc_info = pdata->ecc_info; paddr = cxt->phys_addr; dump_mem_sz = cxt->size - cxt->console_size - cxt->ftrace_size; err = ramoops_init_przs(dev, cxt, &paddr, dump_mem_sz); if (err) goto fail_out; err = ramoops_init_prz(dev, cxt, &cxt->cprz, &paddr, cxt->console_size, 0); if (err) goto fail_init_cprz; err = ramoops_init_prz(dev, cxt, &cxt->fprz, &paddr, cxt->ftrace_size, LINUX_VERSION_CODE); if (err) goto fail_init_fprz; if (!cxt->przs && !cxt->cprz && !cxt->fprz) { pr_err("memory size too small, minimum is %zu\n", cxt->console_size + cxt->record_size + cxt->ftrace_size); err = -EINVAL; goto fail_cnt; } cxt->pstore.data = cxt; /* * Console can handle any buffer size, so prefer LOG_LINE_MAX.
If we * have to handle dumps, we must have at least record_size buffer. And * for ftrace, bufsize is irrelevant (if bufsize is 0, buf will be * ZERO_SIZE_PTR). */ if (cxt->console_size) cxt->pstore.bufsize = 1024; /* LOG_LINE_MAX */ cxt->pstore.bufsize = max(cxt->record_size, cxt->pstore.bufsize); cxt->pstore.buf = kmalloc(cxt->pstore.bufsize, GFP_KERNEL); spin_lock_init(&cxt->pstore.buf_lock); if (!cxt->pstore.buf) { pr_err("cannot allocate pstore buffer\n"); err = -ENOMEM; goto fail_clear; } err = pstore_register(&cxt->pstore); if (err) { pr_err("registering with pstore failed\n"); goto fail_buf; } /* * Update the module parameter variables as well so they are visible * through /sys/module/ramoops/parameters/ */ mem_size = pdata->mem_size; mem_address = pdata->mem_address; record_size = pdata->record_size; dump_oops = pdata->dump_oops; pr_info("attached 0x%lx@0x%llx, ecc: %d/%d\n", cxt->size, (unsigned long long)cxt->phys_addr, cxt->ecc_info.ecc_size, cxt->ecc_info.block_size); return 0; fail_buf: kfree(cxt->pstore.buf); fail_clear: cxt->pstore.bufsize = 0; cxt->max_dump_cnt = 0; fail_cnt: kfree(cxt->fprz); fail_init_fprz: kfree(cxt->cprz); fail_init_cprz: ramoops_free_przs(cxt); fail_out: return err; }