Example #1
/**
 * octeon_mbox_process_cmd:
 * @mbox: Pointer to the mailbox
 * @mbox_cmd: Pointer to the command received
 *
 * Process the command received in the mailbox.
 */
static int octeon_mbox_process_cmd(struct octeon_mbox *mbox,
				   struct octeon_mbox_cmd *mbox_cmd)
{
	struct octeon_device *oct = mbox->oct_dev;

	switch (mbox_cmd->msg.s.cmd) {
	case OCTEON_VF_ACTIVE:
		dev_dbg(&oct->pci_dev->dev, "got vfactive sending data back\n");
		mbox_cmd->msg.s.type = OCTEON_MBOX_RESPONSE;
		mbox_cmd->msg.s.resp_needed = 1;
		mbox_cmd->msg.s.len = 2;
		mbox_cmd->data[0] = 0; /* VF version is in mbox_cmd->data[0] */
		((struct lio_version *)&mbox_cmd->data[0])->major =
			LIQUIDIO_BASE_MAJOR_VERSION;
		((struct lio_version *)&mbox_cmd->data[0])->minor =
			LIQUIDIO_BASE_MINOR_VERSION;
		((struct lio_version *)&mbox_cmd->data[0])->micro =
			LIQUIDIO_BASE_MICRO_VERSION;
		memcpy(mbox_cmd->msg.s.params, (uint8_t *)&oct->pfvf_hsword, 6);
		/* Send core config info to the corresponding active VF. */
		octeon_mbox_write(oct, mbox_cmd);
		break;

	case OCTEON_VF_FLR_REQUEST:
		dev_info(&oct->pci_dev->dev,
			 "got a request for FLR from VF that owns DPI ring %u\n",
			 mbox->q_no);
		pcie_capability_set_word(
			oct->sriov_info.dpiring_to_vfpcidev_lut[mbox->q_no],
			PCI_EXP_DEVCTL, PCI_EXP_DEVCTL_BCR_FLR);
		break;

	case OCTEON_PF_CHANGED_VF_MACADDR:
		if (OCTEON_CN23XX_VF(oct))
			octeon_pf_changed_vf_macaddr(oct,
						     mbox_cmd->msg.s.params);
		break;

	case OCTEON_GET_VF_STATS:
		dev_dbg(&oct->pci_dev->dev, "Got VF stats request. Sending data back\n");
		mbox_cmd->msg.s.type = OCTEON_MBOX_RESPONSE;
		mbox_cmd->msg.s.resp_needed = 1;
		mbox_cmd->msg.s.len = 1 +
			sizeof(struct oct_vf_stats) / sizeof(u64);
		get_vf_stats(oct, (struct oct_vf_stats *)mbox_cmd->data);
		octeon_mbox_write(oct, mbox_cmd);
		break;
	default:
		break;
	}
	return 0;
}
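The OCTEON_VF_ACTIVE branch overlays a version structure on mbox_cmd->data[0]. The struct itself is not shown in this listing; below is a minimal sketch of the layout that cast assumes (the field widths are an assumption, not taken from the driver headers).

/* Sketch of the version layout assumed by the cast above; the real
 * struct lio_version lives in the driver headers and may differ.
 */
struct lio_version {
	u16 major;     /* LIQUIDIO_BASE_MAJOR_VERSION */
	u16 minor;     /* LIQUIDIO_BASE_MINOR_VERSION */
	u16 micro;     /* LIQUIDIO_BASE_MICRO_VERSION */
	u16 reserved;  /* pads the struct to one 64-bit mailbox word */
};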
Example #2
/**
 * \brief Callback invoked when a receive interrupt occurs and we are in NAPI mode
 * @param arg pointer to the octeon output queue (struct octeon_droq)
 */
static void liquidio_napi_drv_callback(void *arg)
{
	struct octeon_device *oct;
	struct octeon_droq *droq = arg;
	int this_cpu = smp_processor_id();

	oct = droq->oct_dev;

	if (OCTEON_CN23XX_PF(oct) || OCTEON_CN23XX_VF(oct) ||
	    droq->cpu_id == this_cpu) {
		napi_schedule_irqoff(&droq->napi);
	} else {
		call_single_data_t *csd = &droq->csd;

		csd->func = napi_schedule_wrapper;
		csd->info = &droq->napi;
		csd->flags = 0;

		smp_call_function_single_async(droq->cpu_id, csd);
	}
}
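The cross-CPU path fills a call_single_data_t with napi_schedule_wrapper(), which is not shown in the listing. A minimal sketch of what such a wrapper presumably does, assuming it only exists to adapt napi_schedule() to the void * callback signature:

/* Assumed helper: adapts napi_schedule() to the csd->func signature. */
static void napi_schedule_wrapper(void *param)
{
	struct napi_struct *napi = param;

	napi_schedule(napi);
}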
Example #3
static
int liquidio_schedule_msix_droq_pkt_handler(struct octeon_droq *droq, u64 ret)
{
	struct octeon_device *oct = droq->oct_dev;
	struct octeon_device_priv *oct_priv =
	    (struct octeon_device_priv *)oct->priv;

	if (droq->ops.poll_mode) {
		droq->ops.napi_fn(droq);
	} else {
		if (ret & MSIX_PO_INT) {
			if (OCTEON_CN23XX_VF(oct))
				dev_err(&oct->pci_dev->dev,
					"should not come here should not get rx when poll mode = 0 for vf\n");
			tasklet_schedule(&oct_priv->droq_tasklet);
			return 1;
		}
		/* this will be flushed periodically by check iq db */
		if (ret & MSIX_PI_INT)
			return 0;
	}

	return 0;
}
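For context, this helper is typically reached from the per-queue MSI-X interrupt handler that Example #5 registers. A rough sketch of how such a handler might tie the two together; the fields oct_dev and droq_index and the fn_list.msix_interrupt_handler hook are assumptions, not taken from the listing:

static irqreturn_t liquidio_msix_intr_handler_sketch(int irq, void *dev)
{
	struct octeon_ioq_vector *ioq_vector = dev;   /* see request_irq() in Example #5 */
	struct octeon_device *oct = ioq_vector->oct_dev;               /* assumed field */
	struct octeon_droq *droq = oct->droq[ioq_vector->droq_index];  /* assumed field */
	u64 ret;

	/* read/acknowledge the per-queue interrupt cause (assumed chip hook) */
	ret = oct->fn_list.msix_interrupt_handler(ioq_vector);

	if (ret & (MSIX_PO_INT | MSIX_PI_INT))
		liquidio_schedule_msix_droq_pkt_handler(droq, ret);

	return IRQ_HANDLED;
}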
Example #4
/**
 * \brief Setup input and output queues
 * @param octeon_dev octeon device
 * @param ifidx interface index
 * @param num_iqs number of input (Tx) queues to set up
 * @param num_oqs number of output (Rx) queues to set up
 *
 * Note: Queues are with respect to the octeon device. Thus
 * an input queue is for egress packets, and output queues
 * are for ingress packets.
 */
int liquidio_setup_io_queues(struct octeon_device *octeon_dev, int ifidx,
			     u32 num_iqs, u32 num_oqs)
{
	struct octeon_droq_ops droq_ops;
	struct net_device *netdev;
	struct octeon_droq *droq;
	struct napi_struct *napi;
	int cpu_id_modulus;
	int num_tx_descs;
	struct lio *lio;
	int retval = 0;
	int q, q_no;
	int cpu_id;

	netdev = octeon_dev->props[ifidx].netdev;

	lio = GET_LIO(netdev);

	memset(&droq_ops, 0, sizeof(struct octeon_droq_ops));

	droq_ops.fptr = liquidio_push_packet;
	droq_ops.farg = netdev;

	droq_ops.poll_mode = 1;
	droq_ops.napi_fn = liquidio_napi_drv_callback;
	cpu_id = 0;
	cpu_id_modulus = num_present_cpus();

	/* set up DROQs. */
	for (q = 0; q < num_oqs; q++) {
		q_no = lio->linfo.rxpciq[q].s.q_no;
		dev_dbg(&octeon_dev->pci_dev->dev,
			"%s index:%d linfo.rxpciq.s.q_no:%d\n",
			__func__, q, q_no);
		retval = octeon_setup_droq(
		    octeon_dev, q_no,
		    CFG_GET_NUM_RX_DESCS_NIC_IF(octeon_get_conf(octeon_dev),
						lio->ifidx),
		    CFG_GET_NUM_RX_BUF_SIZE_NIC_IF(octeon_get_conf(octeon_dev),
						   lio->ifidx),
		    NULL);
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				"%s : Runtime DROQ(RxQ) creation failed.\n",
				__func__);
			return 1;
		}

		droq = octeon_dev->droq[q_no];
		napi = &droq->napi;
		dev_dbg(&octeon_dev->pci_dev->dev, "netif_napi_add netdev:%llx oct:%llx\n",
			(u64)netdev, (u64)octeon_dev);
		netif_napi_add(netdev, napi, liquidio_napi_poll, 64);

		/* designate a CPU for this droq */
		droq->cpu_id = cpu_id;
		cpu_id++;
		if (cpu_id >= cpu_id_modulus)
			cpu_id = 0;

		octeon_register_droq_ops(octeon_dev, q_no, &droq_ops);
	}

	if (OCTEON_CN23XX_PF(octeon_dev) || OCTEON_CN23XX_VF(octeon_dev)) {
		/* 23XX PF/VF can send/recv control messages (via the first
		 * PF/VF-owned droq) from the firmware even if the ethX
		 * interface is down, so that's why poll_mode must be off
		 * for the first droq.
		 */
		octeon_dev->droq[0]->ops.poll_mode = 0;
	}

	/* set up IQs. */
	for (q = 0; q < num_iqs; q++) {
		num_tx_descs = CFG_GET_NUM_TX_DESCS_NIC_IF(
		    octeon_get_conf(octeon_dev), lio->ifidx);
		retval = octeon_setup_iq(octeon_dev, ifidx, q,
					 lio->linfo.txpciq[q], num_tx_descs,
					 netdev_get_tx_queue(netdev, q));
		if (retval) {
			dev_err(&octeon_dev->pci_dev->dev,
				" %s : Runtime IQ(TxQ) creation failed.\n",
				__func__);
			return 1;
		}

		/* XPS */
		if (!OCTEON_CN23XX_VF(octeon_dev) && octeon_dev->msix_on &&
		    octeon_dev->ioq_vector) {
			struct octeon_ioq_vector    *ioq_vector;

			ioq_vector = &octeon_dev->ioq_vector[q];
			netif_set_xps_queue(netdev,
					    &ioq_vector->affinity_mask,
					    ioq_vector->iq_index);
		}
	}

	return 0;
}
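A hypothetical call site, bringing up one IQ/OQ pair per configured pciq before the interface is opened; the num_txpciq/num_rxpciq field names are assumptions, not taken from the listing.

	/* Hypothetical call site; field names num_txpciq/num_rxpciq assumed. */
	if (liquidio_setup_io_queues(octeon_dev, ifidx,
				     lio->linfo.num_txpciq,
				     lio->linfo.num_rxpciq)) {
		dev_err(&octeon_dev->pci_dev->dev, "I/O queue setup failed\n");
		return -ENODEV;
	}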
Example #5
/**
 * \brief Set up interrupts for the octeon device
 * @param oct octeon device
 * @param num_ioqs number of IOQ interrupt vectors to request
 *
 * Enable interrupts in the Octeon device as given in the PCI interrupt mask.
 */
int octeon_setup_interrupt(struct octeon_device *oct, u32 num_ioqs)
{
	struct msix_entry *msix_entries;
	char *queue_irq_names = NULL;
	int i, num_interrupts = 0;
	int num_alloc_ioq_vectors;
	char *aux_irq_name = NULL;
	int num_ioq_vectors;
	int irqret, err;

	if (oct->msix_on) {
		oct->num_msix_irqs = num_ioqs;
		if (OCTEON_CN23XX_PF(oct)) {
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_PF + 1;

			/* one non-IOQ interrupt for handling
			 * sli_mac_pf_int_sum
			 */
			oct->num_msix_irqs += 1;
		} else if (OCTEON_CN23XX_VF(oct)) {
			num_interrupts = MAX_IOQ_INTERRUPTS_PER_VF;
		}

		/* allocate storage for the names assigned to each irq */
		oct->irq_name_storage =
			kcalloc(num_interrupts, INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage) {
			dev_err(&oct->pci_dev->dev, "Irq name storage alloc failed...\n");
			return -ENOMEM;
		}

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			aux_irq_name = &queue_irq_names
				[IRQ_NAME_OFF(MAX_IOQ_INTERRUPTS_PER_PF)];

		oct->msix_entries = kcalloc(oct->num_msix_irqs,
					    sizeof(struct msix_entry),
					    GFP_KERNEL);
		if (!oct->msix_entries) {
			dev_err(&oct->pci_dev->dev, "Memory Alloc failed...\n");
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return -ENOMEM;
		}

		msix_entries = (struct msix_entry *)oct->msix_entries;

		/* Assumption: the PF's MSI-X vectors run from pf_srn through
		 * trs, not from 0. If that changes, change this code.
		 */
		if (OCTEON_CN23XX_PF(oct)) {
			for (i = 0; i < oct->num_msix_irqs - 1; i++)
				msix_entries[i].entry =
					oct->sriov_info.pf_srn + i;

			msix_entries[oct->num_msix_irqs - 1].entry =
				oct->sriov_info.trs;
		} else if (OCTEON_CN23XX_VF(oct)) {
			for (i = 0; i < oct->num_msix_irqs; i++)
				msix_entries[i].entry = i;
		}
		num_alloc_ioq_vectors = pci_enable_msix_range(
						oct->pci_dev, msix_entries,
						oct->num_msix_irqs,
						oct->num_msix_irqs);
		if (num_alloc_ioq_vectors < 0) {
			dev_err(&oct->pci_dev->dev, "unable to Allocate MSI-X interrupts\n");
			kfree(oct->msix_entries);
			oct->msix_entries = NULL;
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return num_alloc_ioq_vectors;
		}

		dev_dbg(&oct->pci_dev->dev, "OCTEON: Enough MSI-X interrupts are allocated...\n");

		num_ioq_vectors = oct->num_msix_irqs;
		/** For PF, there is one non-ioq interrupt handler */
		if (OCTEON_CN23XX_PF(oct)) {
			num_ioq_vectors -= 1;

			snprintf(aux_irq_name, INTRNAMSIZ,
				 "LiquidIO%u-pf%u-aux", oct->octeon_id,
				 oct->pf_num);
			irqret = request_irq(
					msix_entries[num_ioq_vectors].vector,
					liquidio_legacy_intr_handler, 0,
					aux_irq_name, oct);
			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
		}
		for (i = 0; i < num_ioq_vectors; i++) {
			if (OCTEON_CN23XX_PF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ, "LiquidIO%u-pf%u-rxtx-%u",
					 oct->octeon_id, oct->pf_num, i);

			if (OCTEON_CN23XX_VF(oct))
				snprintf(&queue_irq_names[IRQ_NAME_OFF(i)],
					 INTRNAMSIZ, "LiquidIO%u-vf%u-rxtx-%u",
					 oct->octeon_id, oct->vf_num, i);

			irqret = request_irq(msix_entries[i].vector,
					     liquidio_msix_intr_handler, 0,
					     &queue_irq_names[IRQ_NAME_OFF(i)],
					     &oct->ioq_vector[i]);

			if (irqret) {
				dev_err(&oct->pci_dev->dev,
					"Request_irq failed for MSIX interrupt Error: %d\n",
					irqret);
				/* Free the non-IOQ irq vector; only the PF
				 * requested one above (and only the PF's
				 * msix_entries[] reserves a slot for it).
				 */
				if (OCTEON_CN23XX_PF(oct))
					free_irq(
					    msix_entries[num_ioq_vectors].vector,
					    oct);

				while (i) {
					i--;
					/** clearing affinity mask. */
					irq_set_affinity_hint(
						      msix_entries[i].vector,
						      NULL);
					free_irq(msix_entries[i].vector,
						 &oct->ioq_vector[i]);
				}
				pci_disable_msix(oct->pci_dev);
				kfree(oct->msix_entries);
				kfree(oct->irq_name_storage);
				oct->irq_name_storage = NULL;
				oct->msix_entries = NULL;
				return irqret;
			}
			oct->ioq_vector[i].vector = msix_entries[i].vector;
			/* assign the cpu mask for this msix interrupt vector */
			irq_set_affinity_hint(msix_entries[i].vector,
					      &oct->ioq_vector[i].affinity_mask
					      );
		}
		dev_dbg(&oct->pci_dev->dev, "OCTEON[%d]: MSI-X enabled\n",
			oct->octeon_id);
	} else {
		err = pci_enable_msi(oct->pci_dev);
		if (err)
			dev_warn(&oct->pci_dev->dev, "Reverting to legacy interrupts. Error: %d\n",
				 err);
		else
			oct->flags |= LIO_FLAG_MSI_ENABLED;

		/* allocate storage for the names assigned to the irq */
		oct->irq_name_storage = kcalloc(1, INTRNAMSIZ, GFP_KERNEL);
		if (!oct->irq_name_storage)
			return -ENOMEM;

		queue_irq_names = oct->irq_name_storage;

		if (OCTEON_CN23XX_PF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-pf%u-rxtx-%u",
				 oct->octeon_id, oct->pf_num, 0);

		if (OCTEON_CN23XX_VF(oct))
			snprintf(&queue_irq_names[IRQ_NAME_OFF(0)], INTRNAMSIZ,
				 "LiquidIO%u-vf%u-rxtx-%u",
				 oct->octeon_id, oct->vf_num, 0);

		irqret = request_irq(oct->pci_dev->irq,
				     liquidio_legacy_intr_handler,
				     IRQF_SHARED,
				     &queue_irq_names[IRQ_NAME_OFF(0)], oct);
		if (irqret) {
			if (oct->flags & LIO_FLAG_MSI_ENABLED)
				pci_disable_msi(oct->pci_dev);
			dev_err(&oct->pci_dev->dev, "Request IRQ failed with code: %d\n",
				irqret);
			kfree(oct->irq_name_storage);
			oct->irq_name_storage = NULL;
			return irqret;
		}
	}
	return 0;
}
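The irq_name_storage buffer is indexed with IRQ_NAME_OFF(). A plausible definition, assuming one fixed-width INTRNAMSIZ slot per vector (the value 32 is an assumption, not taken from the listing):

/* Assumed layout: one fixed-width name slot per interrupt vector. */
#define INTRNAMSIZ	32
#define IRQ_NAME_OFF(i)	((i) * INTRNAMSIZ)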
Example #6
int lio_process_ordered_list(struct octeon_device *octeon_dev,
                             u32 force_quit)
{
    struct octeon_response_list *ordered_sc_list;
    struct octeon_soft_command *sc;
    int request_complete = 0;
    int resp_to_process = MAX_ORD_REQS_TO_PROCESS;
    u32 status;
    u64 status64;
    struct octeon_instr_rdp *rdp;
    u64 rptr;

    ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];

    do {
        spin_lock_bh(&ordered_sc_list->lock);

        if (ordered_sc_list->head.next == &ordered_sc_list->head) {
            spin_unlock_bh(&ordered_sc_list->lock);
            return 1;
        }

        sc = (struct octeon_soft_command *)ordered_sc_list->head.next;
        if (OCTEON_CN23XX_PF(octeon_dev) ||
                OCTEON_CN23XX_VF(octeon_dev)) {
            rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd3.rdp;
            rptr = sc->cmd.cmd3.rptr;
        } else {
            rdp = (struct octeon_instr_rdp *)&sc->cmd.cmd2.rdp;
            rptr = sc->cmd.cmd2.rptr;
        }

        status = OCTEON_REQUEST_PENDING;

        /* check if octeon has finished DMA'ing a response
         * to where rptr is pointing to
         */
        dma_sync_single_for_cpu(&octeon_dev->pci_dev->dev,
                                rptr, rdp->rlen,
                                DMA_FROM_DEVICE);
        status64 = *sc->status_word;

        if (status64 != COMPLETION_WORD_INIT) {
            if ((status64 & 0xff) != 0xff) {
                octeon_swap_8B_data(&status64, 1);
                if (((status64 & 0xff) != 0xff)) {
                    status = (u32)(status64 &
                                   0xffffffffULL);
                }
            }
        } else if (force_quit || (sc->timeout &&
                                  time_after(jiffies, (unsigned long)sc->timeout))) {
            status = OCTEON_REQUEST_TIMEOUT;
        }

        if (status != OCTEON_REQUEST_PENDING) {
            /* we have received a response or we have timed out */
            /* remove node from linked list */
            list_del(&sc->node);
            atomic_dec(&octeon_dev->response_list
                       [OCTEON_ORDERED_SC_LIST].pending_req_count);
            spin_unlock_bh(&ordered_sc_list->lock);

            if (sc->callback)
                sc->callback(octeon_dev, status,
                             sc->callback_arg);

            request_complete++;

        } else {
            /* no response yet */
            request_complete = 0;
            spin_unlock_bh(&ordered_sc_list->lock);
        }

        /* If we hit the max ordered requests to process every loop, we quit
         * and let this function be invoked the next time the poll thread runs
         * to process the remaining requests. This function can take up the
         * entire CPU if there is no upper limit to the requests processed.
         */
        if (request_complete >= resp_to_process)
            break;
    } while (request_complete);

    return 0;
}
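The completion check above swaps the status word when its low byte is still not 0xff, to account for the firmware writing it in the other byte order. A sketch of what octeon_swap_8B_data() presumably does, an in-place byte swap of each 64-bit word (the actual helper is not shown in the listing):

/* Assumed helper: byte-swap 'blocks' 64-bit words in place. */
static inline void octeon_swap_8B_data(u64 *data, u32 blocks)
{
	while (blocks) {
		cpu_to_be64s(data);
		blocks--;
		data++;
	}
}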