Example #1
static void octeon_mgmt_adjust_link(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	unsigned long flags;
	int link_changed = 0;

	spin_lock_irqsave(&p->lock, flags);
	if (p->phydev->link) {
		if (!p->last_link)
			link_changed = 1;
		if (p->last_duplex != p->phydev->duplex) {
			p->last_duplex = p->phydev->duplex;
			prtx_cfg.u64 =
				cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
			prtx_cfg.s.duplex = p->phydev->duplex;
			cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port),
				       prtx_cfg.u64);
		}
	} else {
		if (p->last_link)
			link_changed = -1;
	}
	p->last_link = p->phydev->link;
	spin_unlock_irqrestore(&p->lock, flags);

	if (link_changed != 0) {
		if (link_changed > 0) {
			netif_carrier_on(netdev);
			pr_info("%s: Link is up - %d/%s\n", netdev->name,
				p->phydev->speed,
				DUPLEX_FULL == p->phydev->duplex ?
				"Full" : "Half");
		} else {
			netif_carrier_off(netdev);
			pr_info("%s: Link is down\n", netdev->name);
		}
	}
}
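
For context, octeon_mgmt_adjust_link() only runs once it has been handed to the PHY layer as the link-change callback. The sketch below shows a minimal registration path; the four-argument phy_connect() signature (kernels >= 3.9) and the bus-id string are assumptions, and the real driver may use of_phy_connect() or an older variant instead.

/* Hypothetical registration sketch (assumed, not from the excerpt). */
static int octeon_mgmt_init_phy_sketch(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	const char *bus_id = "mdio-octeon-0:01";	/* placeholder id */

	p->phydev = phy_connect(netdev, bus_id, octeon_mgmt_adjust_link,
				PHY_INTERFACE_MODE_MII);
	if (IS_ERR(p->phydev))
		return PTR_ERR(p->phydev);

	phy_start_aneg(p->phydev);	/* kick off autonegotiation */
	return 0;
}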
Example #2
/*
 * Set MII/RGMII link based on mode.
 *
 * @param port   interface port to set the link.
 * @param link_info  Link status
 * @param mode   0 = MII, 1 = RGMII mode
 *
 * @return       0 on success and -1 on failure
 */
int cvmx_agl_link_set(int port, cvmx_helper_link_info_t link_info, int mode)
{
	cvmx_agl_gmx_prtx_cfg_t agl_gmx_prtx;

	/* Disable GMX before we make any changes. */
	agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	agl_gmx_prtx.s.en = 0;
	agl_gmx_prtx.s.tx_en = 0;
	agl_gmx_prtx.s.rx_en = 0;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_OCTEON3()) {
		uint64_t one_second = cvmx_clock_get_rate(CVMX_CLOCK_CORE);
		/* Wait for GMX to be idle */
		if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
					  cvmx_agl_gmx_prtx_cfg_t, rx_idle,
					  ==, 1, one_second)
		    || CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
					     cvmx_agl_gmx_prtx_cfg_t, tx_idle,
					     ==, 1, one_second)) {
			cvmx_dprintf("AGL%d: Timeout waiting for GMX to be idle\n", port);
			return -1;
		}
	}
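
The excerpt ends before the speed and duplex programming (Example #4 continues it). A minimal caller sketch follows; the forced link values are purely illustrative and the helper name is hypothetical.

/* Hypothetical caller (assumed): force a 1000/full link in RGMII
 * mode on the given AGL port. */
static int agl_force_gbe_sketch(int port)
{
	cvmx_helper_link_info_t li;

	li.u64 = 0;
	li.s.link_up = 1;
	li.s.full_duplex = 1;
	li.s.speed = 1000;

	return cvmx_agl_link_set(port, li, 1);	/* mode 1 = RGMII */
}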
Example #3
/**
 * @INTERNAL
 * Probe a RGMII interface and determine the number of ports
 * connected to it. The RGMII interface should still be down
 * after this call.
 *
 * @param interface Interface to probe
 *
 * @return Number of ports on the interface. Zero to disable.
 */
int __cvmx_helper_agl_probe(int interface)
{
	int port = cvmx_helper_agl_get_port(interface);
	union cvmx_agl_gmx_bist gmx_bist;
	union cvmx_agl_gmx_prtx_cfg gmx_prtx_cfg;
	union cvmx_agl_prtx_ctl agl_prtx_ctl;
	uint64_t clock_scale;
	int result;

	result = __cvmx_helper_agl_enumerate(interface);
	if (result == 0)
		return 0;

	/* Check BIST status */
	gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST);
	if (gmx_bist.u64)
		cvmx_warn("Managment port AGL failed BIST (0x%016llx) on AGL%d\n",
			  CAST64(gmx_bist.u64), port);

	/* Disable the external input/output */
	gmx_prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	gmx_prtx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), gmx_prtx_cfg.u64);

	/* Set the rgx_ref_clk MUX with AGL_PRTx_CTL[REFCLK_SEL]. The default
	 * value is 0 (RGMII REFCLK). Using RGMII RXC (1) or sclk/4 (2) is
	 * recommended to save cost.
	 */

	/* MII clock counts are based on the 125 MHz reference, so our
	 * delays need to be scaled to match the core clock rate. The
	 * "+1" is to make sure rounding always waits a little too
	 * long. FIXME.
	 */
	clock_scale = cvmx_clock_get_rate(CVMX_CLOCK_CORE) / 125000000 + 1;
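	/* Worked example (illustrative): at a 1.2 GHz core clock,
	 * 1200000000 / 125000000 + 1 = 10, so the DLL-lock wait below
	 * becomes cvmx_wait(256 * 10) = 2560 core cycles (~2.1 us). */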

	agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
	agl_prtx_ctl.s.clkrst = 0;
	agl_prtx_ctl.s.dllrst = 0;
	agl_prtx_ctl.s.clktx_byp = 0;
	if (OCTEON_IS_MODEL(OCTEON_CN70XX)) {
		agl_prtx_ctl.s.refclk_sel = 0;
		agl_prtx_ctl.s.clkrx_set =
			cvmx_helper_get_agl_rx_clock_skew(interface, port);
		agl_prtx_ctl.s.clkrx_byp =
			cvmx_helper_get_agl_rx_clock_delay_bypass(interface,
								  port);
	}
	cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
	/* Force write out before wait */
	cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
	/*
	 * Wait for the DLL to lock. External 125 MHz reference clock must be
	 * stable at this point.
	 */
	cvmx_wait(256 * clock_scale);

	/* Enable the compensation controller */
	agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
	agl_prtx_ctl.s.drv_byp = 0;
	cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
	/* Force write out before wait */
	cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));

	if (!OCTEON_IS_OCTEON3()) {
		/* Enable the interface */
		agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
		agl_prtx_ctl.s.enable = 1;
		cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
		/* Read the value back to force the previous write */
		agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
	}

	/* Enable the compensation controller */
	agl_prtx_ctl.u64 = cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));
	agl_prtx_ctl.s.comp = 1;
	cvmx_write_csr(CVMX_AGL_PRTX_CTL(port), agl_prtx_ctl.u64);
	/* Force write out before wait */
	cvmx_read_csr(CVMX_AGL_PRTX_CTL(port));

	/* Wait for the compensation state to lock. */
	cvmx_wait(1024 * clock_scale);

	return result;
}
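
A hypothetical bring-up sketch for the probe above; the caller and its error handling are assumptions, not part of the excerpt.

/* Assumed caller: skip the interface when zero ports are reported. */
static int agl_bringup_sketch(int interface)
{
	int nports = __cvmx_helper_agl_probe(interface);

	if (nports <= 0)
		return 0;	/* disabled or enumeration failed */

	/* Per-port link programming would follow, e.g. via
	 * cvmx_agl_link_set() as in Example #2. */
	return nports;
}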
Example #4
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_OCTEON3()) {
		uint64_t one_second = cvmx_clock_get_rate(CVMX_CLOCK_CORE);
		/* Wait for GMX to be idle */
		if (CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
					  cvmx_agl_gmx_prtx_cfg_t, rx_idle,
					  ==, 1, one_second)
		    || CVMX_WAIT_FOR_FIELD64(CVMX_AGL_GMX_PRTX_CFG(port),
					     cvmx_agl_gmx_prtx_cfg_t, tx_idle,
					     ==, 1, one_second)) {
			cvmx_dprintf("AGL%d: Timeout waiting for GMX to be idle\n", port);
			return -1;
		}
	}

	agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));

	/* Set duplex mode */
	if (!link_info.s.link_up)
		agl_gmx_prtx.s.duplex = 1;	/* Force full duplex on down links */
	else
		agl_gmx_prtx.s.duplex = link_info.s.full_duplex;

	switch (link_info.s.speed) {
	case 10:
		agl_gmx_prtx.s.speed = 0;
		agl_gmx_prtx.s.slottime = 0;
		if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_OCTEON3()) {
			agl_gmx_prtx.s.speed_msb = 1;
			agl_gmx_prtx.s.burst = 1;
		}
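		/* The excerpt ends here, mid-switch. The remaining speed
		 * cases presumably follow the same pattern (sketch, assumed
		 * rather than taken from this excerpt):
		 *
		 * case 100:  speed = 0, slottime = 0;
		 *            on CN6XXX/OCTEON3: speed_msb = 0, burst = 1
		 * case 1000: speed = 1, slottime = 1;
		 *            on CN6XXX/OCTEON3: speed_msb = 0,
		 *            burst = link_info.s.full_duplex
		 */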
Example #5
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_prtx_cfg prtx_cfg;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers.  */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;

	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(CVMX_MIXX_CTL(port));
		} while (mix_ctl.s.reset);
	}

	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(CVMX_MIXX_ORING1(port), oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(CVMX_MIXX_IRING1(port), iring1.u64);

	/* Disable packet I/O. */
	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prtx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/*
	 * Enable the port HW. Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
	mix_ctl.s.en = 1;           /* Enable the port */
	mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
	cvmx_write_csr(CVMX_MIXX_CTL(port), mix_ctl.u64);

	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
	    || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/*
		 * Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics. */
	/* Clear on read. */
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_CTL(port), 1);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_DRP(port), 0);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_STATS_PKTS_BAD(port), 0);

	cvmx_write_csr(CVMX_AGL_GMX_TXX_STATS_CTL(port), 1);
	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT0(port), 0);
	cvmx_write_csr(CVMX_AGL_GMX_TXX_STAT1(port), 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(CVMX_MIXX_ISR(port), cvmx_read_csr(CVMX_MIXX_ISR(port)));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(CVMX_MIXX_IRHWM(port), mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean.  */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 1;
	cvmx_write_csr(CVMX_MIXX_ORHWM(port), mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(CVMX_MIXX_INTENA(port), mix_intena.u64);

	/* Enable packet I/O. */

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.pre_align = 1;
	/*
	 * When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assigned Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/*
	 * This port is configured to send PREAMBLE+SFD to begin every
	 * frame.  GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(CVMX_AGL_GMX_RXX_FRM_CTL(port), rxx_frm_ctl.u64);

	/* Enable the AGL block */
	agl_gmx_inf_mode.u64 = 0;
	agl_gmx_inf_mode.s.en = 1;
	cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);

	/* Configure the port duplex and enable bits */
	prtx_cfg.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prtx_cfg.s.tx_en = 1;
	prtx_cfg.s.rx_en = 1;
	prtx_cfg.s.en = 1;
	p->last_duplex = 1;
	prtx_cfg.s.duplex = p->last_duplex;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), prtx_cfg.u64);

	p->last_link = 0;
	netif_carrier_off(netdev);

	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY.\n");
		goto err_noirq;
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}
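
octeon_mgmt_open() is reached through the standard .ndo_open hook. A minimal wiring sketch follows; only the open handler is shown, and the remaining handlers are assumed to exist elsewhere in the driver.

/* Sketch (assumed): hook the open handler into the netdev ops table. */
static const struct net_device_ops octeon_mgmt_ops_sketch = {
	.ndo_open	= octeon_mgmt_open,
	/* .ndo_stop, .ndo_start_xmit, ... assumed elsewhere */
};

/* At probe time: netdev->netdev_ops = &octeon_mgmt_ops_sketch; */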
Example #6
static void octeon_mgmt_set_rx_filtering(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	int port = p->port;
	union cvmx_agl_gmx_rxx_adr_ctl adr_ctl;
	union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx;
	unsigned long flags;
	unsigned int prev_packet_enable;
	unsigned int cam_mode = 1; /* 1 - Accept on CAM match */
	unsigned int multicast_mode = 1; /* 1 - Reject all multicast.  */
	struct octeon_mgmt_cam_state cam_state;
	struct netdev_hw_addr *ha;
	int available_cam_entries;

	memset(&cam_state, 0, sizeof(cam_state));

	if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) {
		cam_mode = 0;
		available_cam_entries = 8;
	} else {
		/*
		 * One CAM entry for the primary address, leaves seven
		 * for the secondary addresses.
		 */
		available_cam_entries = 7 - netdev->uc.count;
	}
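	/* Worked example (illustrative): with three secondary unicast
	 * addresses configured, 7 - 3 = 4 CAM entries remain free for
	 * the multicast matching below. */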

	if (netdev->flags & IFF_MULTICAST) {
		if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) ||
		    netdev_mc_count(netdev) > available_cam_entries)
			multicast_mode = 2; /* 2 - Accept all multicast.  */
		else
			multicast_mode = 0; /* 0 - Use CAM.  */
	}

	if (cam_mode == 1) {
		/* Add primary address. */
		octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr);
		netdev_for_each_uc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}
	if (multicast_mode == 0) {
		netdev_for_each_mc_addr(ha, netdev)
			octeon_mgmt_cam_state_add(&cam_state, ha->addr);
	}

	spin_lock_irqsave(&p->lock, flags);

	/* Disable packet I/O. */
	agl_gmx_prtx.u64 = cvmx_read_csr(CVMX_AGL_GMX_PRTX_CFG(port));
	prev_packet_enable = agl_gmx_prtx.s.en;
	agl_gmx_prtx.s.en = 0;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

	adr_ctl.u64 = 0;
	adr_ctl.s.cam_mode = cam_mode;
	adr_ctl.s.mcst = multicast_mode;
	adr_ctl.s.bcst = 1;     /* Allow broadcast */

	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CTL(port), adr_ctl.u64);

	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM0(port), cam_state.cam[0]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM1(port), cam_state.cam[1]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM2(port), cam_state.cam[2]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM3(port), cam_state.cam[3]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM4(port), cam_state.cam[4]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM5(port), cam_state.cam[5]);
	cvmx_write_csr(CVMX_AGL_GMX_RXX_ADR_CAM_EN(port), cam_state.cam_mask);

	/* Restore packet I/O. */
	agl_gmx_prtx.s.en = prev_packet_enable;
	cvmx_write_csr(CVMX_AGL_GMX_PRTX_CFG(port), agl_gmx_prtx.u64);

	spin_unlock_irqrestore(&p->lock, flags);
}
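
The CAM packing above relies on octeon_mgmt_cam_state_add(), which the excerpt does not show. Below is a plausible sketch; the layout (CAM0..CAM5 each holding one byte column, with entry n in bits [8n+7:8n]) is an assumption consistent with the six per-byte CAM registers written above.

/* Plausible sketch of the CAM state helper (assumed; the driver's
 * real implementation may differ). */
struct octeon_mgmt_cam_state_sketch {
	uint64_t cam[6];	/* one register per MAC byte position */
	uint64_t cam_mask;	/* bits for AGL_GMX_RXX_ADR_CAM_EN */
	int cam_index;		/* next free entry, 0..7 */
};

static void cam_state_add_sketch(struct octeon_mgmt_cam_state_sketch *cs,
				 const unsigned char *addr)
{
	int i;

	for (i = 0; i < 6; i++)
		cs->cam[i] |= (uint64_t)addr[i] << (8 * cs->cam_index);
	cs->cam_mask |= 1ULL << cs->cam_index;
	cs->cam_index++;
}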