Code example #1
File: if_octm.c Project: AhmadTux/freebsd
static void
octm_rx_intr(void *arg)
{
	struct octm_softc *sc = arg;
	cvmx_mixx_isr_t mixx_isr;
	int len;

	mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(sc->sc_port));
	if (!mixx_isr.s.irthresh) {
		device_printf(sc->sc_dev, "stray interrupt.\n");
		return;
	}

	/* Drain every pending frame from the management port.  */
	for (;;) {
		struct mbuf *m = m_getcl(M_DONTWAIT, MT_DATA, M_PKTHDR);
		if (m == NULL) {
			device_printf(sc->sc_dev, "no memory for receive mbuf.\n");
			return;
		}

		/*
		 * cvmx_mgmt_port_receive() copies a frame into the cluster
		 * and returns its length: positive for a received frame,
		 * zero when nothing is pending, negative on a receive error.
		 */
		len = cvmx_mgmt_port_receive(sc->sc_port, MCLBYTES, m->m_data);
		if (len > 0) {
			m->m_pkthdr.rcvif = sc->sc_ifp;
			m->m_pkthdr.len = m->m_len = len;

			sc->sc_ifp->if_ipackets++;

			(*sc->sc_ifp->if_input)(sc->sc_ifp, m);

			continue;
		}

		m_freem(m);

		if (len == 0)
			break;			/* Nothing left to receive.  */

		sc->sc_ifp->if_ierrors++;	/* len < 0: count a receive error.  */
	}

	/* Acknowledge interrupts; the read back posts the write.  */
	cvmx_write_csr(CVMX_MIXX_ISR(sc->sc_port), mixx_isr.u64);
	cvmx_read_csr(CVMX_MIXX_ISR(sc->sc_port));
}
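
For context, a receive handler like octm_rx_intr() is normally registered from the driver's attach routine with bus_setup_intr(). The sketch below assumes hypothetical softc fields sc_intr_res and sc_intr_cookie, which are not shown in the excerpt:

	/* Hand octm_rx_intr() to the interrupt subsystem as an ithread handler. */
	error = bus_setup_intr(dev, sc->sc_intr_res,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, octm_rx_intr, sc,
	    &sc->sc_intr_cookie);
	if (error != 0)
		device_printf(dev, "could not set up RX interrupt\n");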
Code example #2
File: octeon_mgmt.c Project: andi34/Dhollmen_Kernel
static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id)
{
	struct net_device *netdev = dev_id;
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_mixx_isr mixx_isr;

	mixx_isr.u64 = cvmx_read_csr(CVMX_MIXX_ISR(port));

	/* Clear any pending interrupts */
	cvmx_write_csr(CVMX_MIXX_ISR(port), mixx_isr.u64);
	cvmx_read_csr(CVMX_MIXX_ISR(port)); /* Read back to post the write. */

	if (mixx_isr.s.irthresh) {
		/* RX ring threshold reached; poll from NAPI context. */
		octeon_mgmt_disable_rx_irq(p);
		napi_schedule(&p->napi);
	}
	if (mixx_isr.s.orthresh) {
		/* TX ring threshold reached; clean up in the tasklet. */
		octeon_mgmt_disable_tx_irq(p);
		tasklet_schedule(&p->tx_clean_tasklet);
	}

	return IRQ_HANDLED;
}
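
The napi_schedule() above defers RX processing to the driver's poll callback, which re-enables the RX interrupt once the ring has been drained. A minimal sketch of that counterpart, following the shape of the driver's octeon_mgmt_napi_poll() (octeon_mgmt_receive_packets() is the driver's RX routine, abbreviated here):

static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget)
{
	struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi);
	int work_done;

	/* Process at most 'budget' received frames. */
	work_done = octeon_mgmt_receive_packets(p, budget);

	if (work_done < budget) {
		/* Ring drained: leave polling mode and re-arm the RX IRQ. */
		napi_complete(napi);
		octeon_mgmt_enable_rx_irq(p);
	}
	return work_done;
}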
Code example #3
File: octeon_mgmt.c Project: andi34/Dhollmen_Kernel
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers.  */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;

	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
		} while (mix_ctl.s.reset);
	}

	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);

	/* Disable packet I/O. */
	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prtx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/*
	 * Enable the port HW. Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
	mix_ctl.s.en = 1;           /* Enable the port */
	mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
	cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/*
		 * Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics; the counters are configured to clear on read. */
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);

	cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean.  */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 1;
	cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);

	/* Enable packet I/O. */

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.pre_align = 1;
	/*
	 * When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assigned Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/*
	 * This port is configured to send PREAMBLE+SFD to begin every
	 * frame.  GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);

	/* Enable the AGL block */
	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	/* Configure the port duplex and enables */
	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	prtx_cfg.s.en = 1;
	p->last_duplex = 1;
	prtx_cfg.s.duplex = p->last_duplex;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

	p->last_link = 0;
	netif_carrier_off(netdev);

	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY.\n");
		free_irq(p->irq, netdev);	/* IRQ was requested above. */
		goto err_noirq;
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}
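
ring_size_to_bytes() is not part of this excerpt; in the driver it simply converts a ring size in entries to a byte count for allocation and DMA mapping, along these lines (union mgmt_port_ring_entry is the driver's descriptor type):

static inline int ring_size_to_bytes(int ring_size)
{
	/* One descriptor entry per ring slot. */
	return ring_size * sizeof(union mgmt_port_ring_entry);
}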