Example #1
static void
lio_ethtool_get_channels(struct net_device *dev,
			 struct ethtool_channels *channel)
{
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;
	u32 max_rx = 0, max_tx = 0, tx_count = 0, rx_count = 0;

	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);

		max_rx = CFG_GET_OQ_MAX_Q(conf6x);
		max_tx = CFG_GET_IQ_MAX_Q(conf6x);
		rx_count = CFG_GET_NUM_RXQS_NIC_IF(conf6x, lio->ifidx);
		tx_count = CFG_GET_NUM_TXQS_NIC_IF(conf6x, lio->ifidx);
	}

	channel->max_rx = max_rx;
	channel->max_tx = max_tx;
	channel->rx_count = rx_count;
	channel->tx_count = tx_count;
}
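This callback only reports queue counts; it takes effect once it is wired into the device's ethtool_ops table, typically from probe. Below is a minimal sketch of that wiring; the ops-table and helper names are illustrative, not necessarily the driver's own.

/* Sketch: exposing the handler via struct ethtool_ops. */
#include <linux/ethtool.h>
#include <linux/netdevice.h>

static const struct ethtool_ops lio_ethtool_ops = {
	.get_channels	= lio_ethtool_get_channels,
	/* .get_ringparam, .get_drvinfo, ... would sit alongside it */
};

static void liquidio_set_ethtool_ops(struct net_device *netdev)
{
	/* assign during probe, before register_netdev() */
	netdev->ethtool_ops = &lio_ethtool_ops;
}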
Example #2
static void
lio_ethtool_get_ringparam(struct net_device *netdev,
			  struct ethtool_ringparam *ering)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u32 tx_max_pending = 0, rx_max_pending = 0, tx_pending = 0,
	    rx_pending = 0;

	if (OCTEON_CN6XXX(oct)) {
		struct octeon_config *conf6x = CHIP_FIELD(oct, cn6xxx, conf);

		tx_max_pending = CN6XXX_MAX_IQ_DESCRIPTORS;
		rx_max_pending = CN6XXX_MAX_OQ_DESCRIPTORS;
		rx_pending = CFG_GET_NUM_RX_DESCS_NIC_IF(conf6x, lio->ifidx);
		tx_pending = CFG_GET_NUM_TX_DESCS_NIC_IF(conf6x, lio->ifidx);
	}

	if (lio->mtu > OCTNET_DEFAULT_FRM_SIZE) {
		ering->rx_pending = 0;
		ering->rx_max_pending = 0;
		ering->rx_mini_pending = 0;
		ering->rx_jumbo_pending = rx_pending;
		ering->rx_mini_max_pending = 0;
		ering->rx_jumbo_max_pending = rx_max_pending;
	} else {
		ering->rx_pending = rx_pending;
		ering->rx_max_pending = rx_max_pending;
		ering->rx_mini_pending = 0;
		ering->rx_jumbo_pending = 0;
		ering->rx_mini_max_pending = 0;
		ering->rx_jumbo_max_pending = 0;
	}

	ering->tx_pending = tx_pending;
	ering->tx_max_pending = tx_max_pending;
}
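User space reaches this handler through the ETHTOOL_GRINGPARAM ioctl, which is what `ethtool -g` issues. Below is a minimal userspace sketch of that query; the interface name "eth0" is a placeholder. Note that on a jumbo-MTU liquidio interface the values would show up in the rx_jumbo fields instead, per the branch above.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ringparam ering = { .cmd = ETHTOOL_GRINGPARAM };
	struct ifreq ifr;
	int fd = socket(AF_INET, SOCK_DGRAM, 0);

	if (fd < 0) {
		perror("socket");
		return 1;
	}
	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);	/* placeholder name */
	ifr.ifr_data = (void *)&ering;

	if (ioctl(fd, SIOCETHTOOL, &ifr) < 0) {
		perror("SIOCETHTOOL");
		close(fd);
		return 1;
	}
	printf("RX: %u/%u  TX: %u/%u\n",
	       ering.rx_pending, ering.rx_max_pending,
	       ering.tx_pending, ering.tx_max_pending);
	close(fd);
	return 0;
}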
Example #3
int octeon_delete_instr_queue(struct octeon_device *oct, u32 iq_no)
{
	u64 desc_size = 0, q_size;
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	cancel_delayed_work_sync(&oct->check_db_wq[iq_no].wk.work);
	flush_workqueue(oct->check_db_wq[iq_no].wq);
	destroy_workqueue(oct->check_db_wq[iq_no].wq);

	if (OCTEON_CN6XXX(oct))
		desc_size =
		    CFG_GET_IQ_INSTR_TYPE(CHIP_FIELD(oct, cn6xxx, conf));

	vfree(iq->request_list);	/* vfree() tolerates NULL */

	if (iq->base_addr) {
		q_size = iq->max_count * desc_size;
		lio_dma_free(oct, (u32)q_size, iq->base_addr,
			     iq->base_addr_dma);
		return 0;
	}
	return 1;
}
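The teardown order here is deliberate: the delayed work rearms itself and dereferences the queue, so it must be fully quiesced before the ring memory goes away. Below is a generic sketch of the same pattern with hypothetical names (my_dev, my_dev_teardown).

#include <linux/workqueue.h>
#include <linux/slab.h>

struct my_dev {				/* hypothetical context */
	struct workqueue_struct *wq;
	struct delayed_work dwork;	/* self-rearming poller */
	void *ring;			/* memory the handler touches */
};

static void my_dev_teardown(struct my_dev *d)
{
	/* Wait for any in-flight handler and prevent it from rearming. */
	cancel_delayed_work_sync(&d->dwork);
	/* destroy_workqueue() drains anything still queued, so the
	 * driver's explicit flush_workqueue() is redundant but harmless.
	 */
	destroy_workqueue(d->wq);
	/* Only now is it safe to release memory the handler used. */
	kfree(d->ring);
}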
Example #4
/* Return 0 on success, 1 on failure */
int octeon_init_instr_queue(struct octeon_device *oct,
			    u32 iq_no, u32 num_descs)
{
	struct octeon_instr_queue *iq;
	struct octeon_iq_config *conf = NULL;
	u32 q_size;
	struct cavium_wq *db_wq;

	if (OCTEON_CN6XXX(oct))
		conf = &(CFG_GET_IQ_CFG(CHIP_FIELD(oct, cn6xxx, conf)));

	if (!conf) {
		dev_err(&oct->pci_dev->dev, "Unsupported Chip %x\n",
			oct->chip_id);
		return 1;
	}

	if (num_descs & (num_descs - 1)) {
		dev_err(&oct->pci_dev->dev,
			"Number of descriptors for instr queue %d is not a power of 2.\n",
			iq_no);
		return 1;
	}

	q_size = (u32)conf->instr_type * num_descs;

	iq = oct->instr_queue[iq_no];

	iq->base_addr = lio_dma_alloc(oct, q_size,
				      (dma_addr_t *)&iq->base_addr_dma);
	if (!iq->base_addr) {
		dev_err(&oct->pci_dev->dev, "Cannot allocate memory for instr queue %d\n",
			iq_no);
		return 1;
	}

	iq->max_count = num_descs;

	/* Initialize a list to hold requests that have been posted to Octeon
	 * but have yet to be fetched by the device.
	 */
	iq->request_list = vzalloc(sizeof(*iq->request_list) * num_descs);
	if (!iq->request_list) {
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "Alloc failed for IQ[%d] nr free list\n",
			iq_no);
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "IQ[%d]: base: %p basedma: %llx count: %d\n",
		iq_no, iq->base_addr, iq->base_addr_dma, iq->max_count);

	iq->iq_no = iq_no;
	iq->fill_threshold = (u32)conf->db_min;
	iq->fill_cnt = 0;
	iq->host_write_index = 0;
	iq->octeon_read_index = 0;
	iq->flush_index = 0;
	iq->last_db_time = 0;
	iq->do_auto_flush = 1;
	iq->db_timeout = (u32)conf->db_timeout;
	atomic_set(&iq->instr_pending, 0);

	/* Initialize the spinlock for this instruction queue */
	spin_lock_init(&iq->lock);

	oct->io_qmask.iq |= (1 << iq_no);

	/* Set the 32B/64B mode for each input queue */
	oct->io_qmask.iq64B |= ((conf->instr_type == 64) << iq_no);
	iq->iqcmd_64B = (conf->instr_type == 64);

	oct->fn_list.setup_iq_regs(oct, iq_no);

	oct->check_db_wq[iq_no].wq = create_workqueue("check_iq_db");
	if (!oct->check_db_wq[iq_no].wq) {
		vfree(iq->request_list);	/* don't leak the request list on error */
		lio_dma_free(oct, q_size, iq->base_addr, iq->base_addr_dma);
		dev_err(&oct->pci_dev->dev, "check db wq create failed for iq %d\n",
			iq_no);
		return 1;
	}

	db_wq = &oct->check_db_wq[iq_no];

	INIT_DELAYED_WORK(&db_wq->wk.work, check_db_timeout);
	db_wq->wk.ctxptr = oct;
	db_wq->wk.ctxul = iq_no;
	queue_delayed_work(db_wq->wq, &db_wq->wk.work, msecs_to_jiffies(1));

	return 0;
}
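One detail worth calling out is the descriptor-count validation: num_descs & (num_descs - 1) is zero exactly when at most one bit is set, e.g. 512 & 511 == 0 passes while 384 & 383 == 256 is rejected. The kernel's is_power_of_2() helper from <linux/log2.h> expresses the same test and, unlike the open-coded form, also rejects zero (0 & (0 - 1) == 0 would slip through above). A sketch using it; the wrapper name is hypothetical:

#include <linux/log2.h>

/* Hypothetical wrapper around the same validation. */
static int octeon_validate_num_descs(struct octeon_device *oct,
				     u32 iq_no, u32 num_descs)
{
	if (!is_power_of_2(num_descs)) {
		dev_err(&oct->pci_dev->dev,
			"Number of descriptors for instr queue %d is not a power of 2.\n",
			iq_no);
		return 1;
	}
	return 0;
}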