/*
 * Allocate and initialize all TX and RX queues for @dev.
 *
 * @dev:        device being configured; num_txq/num_rxq must already be set
 * @rings:      contiguous (shared) memory region holding the per-queue llrings
 *              and RX register blocks, laid out TX queues first, then RX queues
 * @rings_size: total size of @rings in bytes; used as a consistency check
 * @txq_opts:   options copied into every TX queue
 * @rxq_opts:   options copied into every RX queue
 *
 * Returns 0 on success, a negative errno on failure. All queue structs are
 * carved out of a single kzalloc'd chunk; on the layout-mismatch error path
 * that chunk is freed before returning -EFAULT.
 */
static int sn_alloc_queues(struct sn_device *dev, void *rings,
			   uint64_t rings_size,
			   struct tx_queue_opts *txq_opts,
			   struct rx_queue_opts *rxq_opts)
{
	struct sn_queue *queue;
	char *p = rings;
	void *memchunk;
	int num_queues;
	int i;
	int ret;

	ret = netif_set_real_num_tx_queues(dev->netdev, dev->num_txq);
	if (ret) {
		log_err("netif_set_real_num_tx_queues() failed\n");
		return ret;
	}

	ret = netif_set_real_num_rx_queues(dev->netdev, dev->num_rxq);
	if (ret) {
		log_err("netif_set_real_num_rx_queues() failed\n");
		return ret;
	}

	num_queues = dev->num_txq + dev->num_rxq;

	/* One allocation for all queue structs; num_txq/num_rxq are small
	 * driver-controlled counts, so the multiplication cannot overflow. */
	memchunk = kzalloc(sizeof(struct sn_queue) * num_queues, GFP_KERNEL);
	if (!memchunk)
		return -ENOMEM;

	queue = memchunk;

	for (i = 0; i < dev->num_txq; i++) {
		dev->tx_queues[i] = queue;
		queue->dev = dev;
		queue->queue_id = i;
		queue->tx.opts = *txq_opts;
		queue->tx.netdev_txq = netdev_get_tx_queue(dev->netdev, i);

		/* Each TX queue owns two llrings laid out back to back. */
		queue->drv_to_sn = (struct llring *)p;
		p += llring_bytes(queue->drv_to_sn);

		queue->sn_to_drv = (struct llring *)p;
		p += llring_bytes(queue->sn_to_drv);

		queue++;
	}

	for (i = 0; i < dev->num_rxq; i++) {
		dev->rx_queues[i] = queue;
		queue->dev = dev;
		queue->queue_id = i;
		queue->rx.opts = *rxq_opts;

		/* RX queues additionally carry a register block before
		 * their two llrings. */
		queue->rx.rx_regs = (struct sn_rxq_registers *)p;
		p += sizeof(struct sn_rxq_registers);

		queue->drv_to_sn = (struct llring *)p;
		p += llring_bytes(queue->drv_to_sn);

		queue->sn_to_drv = (struct llring *)p;
		p += llring_bytes(queue->sn_to_drv);

		queue++;
	}

	/* The cursor must land exactly at the end of the shared region;
	 * anything else means user space handed us a bad layout. */
	if ((uintptr_t)p != (uintptr_t)rings + rings_size) {
		log_err("Invalid ring space size: %llu, not %llu, at %p\n",
			rings_size,
			(uint64_t)((uintptr_t)p - (uintptr_t)rings),
			rings);
		kfree(memchunk);
		return -EFAULT;
	}

	for (i = 0; i < dev->num_rxq; i++) {
		netif_napi_add(dev->netdev, &dev->rx_queues[i]->rx.napi,
			       sn_poll, NAPI_POLL_WEIGHT);

#ifdef CONFIG_NET_RX_BUSY_POLL
		napi_hash_add(&dev->rx_queues[i]->rx.napi);
#endif
		spin_lock_init(&dev->rx_queues[i]->rx.lock);
	}

	sn_test_cache_alignment(dev);

	return 0;
}
/*
 * Allocate and initialize all TX and RX queues for @dev.
 *
 * @dev:        device being configured; num_txq/num_rxq must already be set
 * @rings:      contiguous (shared) memory region holding the per-queue llrings
 *              and RX register blocks, laid out TX queues first, then RX queues
 * @rings_size: total size of @rings in bytes; used as a consistency check
 *
 * Returns 0 on success, a negative errno on failure. All queue structs are
 * carved out of a single kzalloc'd chunk; on the layout-mismatch error path
 * that chunk is freed before returning -EFAULT.
 */
static int sn_alloc_queues(struct sn_device *dev, void *rings, u64 rings_size)
{
	struct sn_queue *queue;
	char *p = rings;
	void *memchunk;
	int num_queues;
	int i;
	int ret;

	ret = netif_set_real_num_tx_queues(dev->netdev, dev->num_txq);
	if (ret) {
		log_err("netif_set_real_num_tx_queues() failed\n");
		return ret;
	}

	ret = netif_set_real_num_rx_queues(dev->netdev, dev->num_rxq);
	if (ret) {
		log_err("netif_set_real_num_rx_queues() failed\n");
		return ret;
	}

	num_queues = dev->num_txq + dev->num_rxq;

	/* One allocation for all queue structs; num_txq/num_rxq are small
	 * driver-controlled counts, so the multiplication cannot overflow. */
	memchunk = kzalloc(sizeof(struct sn_queue) * num_queues, GFP_KERNEL);
	if (!memchunk)
		return -ENOMEM;

	queue = memchunk;

	for (i = 0; i < dev->num_txq; i++) {
		dev->tx_queues[i] = queue;
		queue->dev = dev;
		queue->queue_id = i;
		queue->is_rx = false;

		/* Each TX queue owns two llrings laid out back to back. */
		queue->drv_to_sn = (struct llring *)p;
		p += llring_bytes(queue->drv_to_sn);

		queue->sn_to_drv = (struct llring *)p;
		p += llring_bytes(queue->sn_to_drv);

		queue++;
	}

	for (i = 0; i < dev->num_rxq; i++) {
		dev->rx_queues[i] = queue;
		queue->dev = dev;
		queue->queue_id = i;
		queue->is_rx = true;

		sn_stack_init(&queue->ready_tx_meta);

		/* RX queues additionally carry a register block before
		 * their two llrings. */
		queue->rx_regs = (struct sn_rxq_registers *)p;
		p += sizeof(struct sn_rxq_registers);

		queue->drv_to_sn = (struct llring *)p;
		p += llring_bytes(queue->drv_to_sn);

		queue->sn_to_drv = (struct llring *)p;
		p += llring_bytes(queue->sn_to_drv);

		queue++;
	}

	/* The cursor must land exactly at the end of the shared region;
	 * anything else means user space handed us a bad layout.
	 * Use uintptr_t for pointer<->integer conversions (portable,
	 * unlike casting a pointer straight to uint64_t). */
	if ((uintptr_t)p != (uintptr_t)rings + rings_size) {
		log_err("Invalid ring space size: %llu, not %llu, at %p\n",
			rings_size,
			(u64)((uintptr_t)p - (uintptr_t)rings),
			rings);
		kfree(memchunk);
		return -EFAULT;
	}

	for (i = 0; i < dev->num_rxq; i++) {
		netif_napi_add(dev->netdev, &dev->rx_queues[i]->napi,
			       sn_poll, NAPI_POLL_WEIGHT);
		napi_hash_add(&dev->rx_queues[i]->napi);
		spin_lock_init(&dev->rx_queues[i]->lock);
	}

	sn_test_cache_alignment(dev);

	return 0;
}