Example no. 1
0
/**
 * igb_ptp_systim_to_hwtstamp - convert system time value to hw timestamp
 * @adapter: board private structure
 * @hwtstamps: timestamp structure to update
 * @systim: unsigned 64bit system time value.
 *
 * We need to convert the system time value stored in the RX/TXSTMP registers
 * into a hwtstamp which can be used by the upper level timestamping functions.
 *
 * The 'tmreg_lock' spinlock is used to protect the consistency of the
 * system time value. This is needed because reading the 64 bit time
 * value involves reading two (or three) 32 bit registers. The first
 * read latches the value. Ditto for writing.
 *
 * In addition, the system time has been extended here with a software
 * overflow counter.
 **/
static void igb_ptp_systim_to_hwtstamp(struct igb_adapter *adapter,
				       struct skb_shared_hwtstamps *hwtstamps,
				       u64 systim)
{
	unsigned long flags;
	u64 ns;

	switch (adapter->hw.mac.type) {
	case e1000_82576:
	case e1000_82580:
	case e1000_i350:
	case e1000_i354:
		spin_lock_irqsave(&adapter->tmreg_lock, flags);

		ns = timecounter_cyc2time(&adapter->tc, systim);

		spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

		memset(hwtstamps, 0, sizeof(*hwtstamps));
		hwtstamps->hwtstamp = ns_to_ktime(ns);
		break;
	case e1000_i210:
	case e1000_i211:
		memset(hwtstamps, 0, sizeof(*hwtstamps));
		/* Upper 32 bits contain s, lower 32 bits contain ns. */
		hwtstamps->hwtstamp = ktime_set(systim >> 32,
						systim & 0xFFFFFFFF);
		break;
	default:
		break;
	}
}
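The conversion above relies on the kernel timecounter API: the driver wraps its raw SYSTIM read in a struct cyclecounter and lets timecounter_cyc2time() extend it with the software overflow tracking mentioned in the comment. Below is a minimal sketch of that setup, assuming a hypothetical my_read_systim() register helper and a counter that ticks once per nanosecond; this is not the driver's actual init code.

#include <linux/timecounter.h>
#include <linux/ktime.h>

/* hypothetical helper standing in for the real SYSTIM register read */
static u64 my_read_systim(const struct cyclecounter *cc)
{
	return 0;	/* a real driver would read and latch the hardware counter here */
}

static struct cyclecounter my_cc = {
	.read	= my_read_systim,
	.mask	= CYCLECOUNTER_MASK(64),
	.mult	= 1,	/* assumption: one counter tick equals one nanosecond */
	.shift	= 0,
};

static struct timecounter my_tc;

static void my_ptp_start(void)
{
	/* seed the timecounter with the current wall-clock time in ns;
	 * timecounter_cyc2time(&my_tc, systim) then yields absolute ns
	 */
	timecounter_init(&my_tc, &my_cc, ktime_to_ns(ktime_get_real()));
}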
Example no. 2
0
/**
 * ixgbe_ptp_setup_sdp
 * @adapter: private adapter structure
 *
 * This function enables or disables the clock out feature on SDP0 for
 * the X540 device. It will create a 1 second periodic output that can
 * be used as the PPS (via an interrupt).
 *
 * It calculates when the systime will be on an exact second, and then
 * aligns the start of the PPS signal to that value. The cyclecounter shift
 * must be taken into account because it can change based on the link speed.
 */
static void ixgbe_ptp_setup_sdp(struct ixgbe_adapter *adapter)
{
	struct ixgbe_hw *hw = &adapter->hw;
	int shift = adapter->hw_cc.shift;
	u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh, rem;
	u64 ns = 0, clock_edge = 0;

	if ((adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED) &&
	    (hw->mac.type == ixgbe_mac_X540)) {

		/* disable the pin first */
		IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0);
		IXGBE_WRITE_FLUSH(hw);

		esdp = IXGBE_READ_REG(hw, IXGBE_ESDP);

		/*
		 * enable the SDP0 pin as output, and connected to the
		 * native function for Timesync (ClockOut)
		 */
		esdp |= IXGBE_ESDP_SDP0_DIR |
			IXGBE_ESDP_SDP0_NATIVE;

		/*
		 * enable the Clock Out feature on SDP0, and allow
		 * interrupts to occur when the pin changes
		 */
		tsauxc = IXGBE_TSAUXC_EN_CLK |
			 IXGBE_TSAUXC_SYNCLK |
			 IXGBE_TSAUXC_SDP0_INT;

		/* clock period (or pulse length) */
		clktiml = (u32)(NSEC_PER_SEC << shift);
		clktimh = (u32)((NSEC_PER_SEC << shift) >> 32);

		/*
		 * Account for the cyclecounter wrap-around value by
		 * using the converted ns value of the current time to
		 * check for when the next aligned second would occur.
		 */
		clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML);
		clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32;
		ns = timecounter_cyc2time(&adapter->hw_tc, clock_edge);

		div_u64_rem(ns, NSEC_PER_SEC, &rem);
		clock_edge += ((NSEC_PER_SEC - (u64)rem) << shift);

		/* specify the initial clock start time */
		trgttiml = (u32)clock_edge;
		trgttimh = (u32)(clock_edge >> 32);

		IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml);
		IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh);
		IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml);
		IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh);

		IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp);
		IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc);
	} else {
		/* PPS disabled or unsupported MAC: keep the clock out feature off */
		IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0);
	}

	IXGBE_WRITE_FLUSH(hw);
}
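The alignment step above can be restated as a small helper: take the converted ns value of the current time, find how many nanoseconds remain until the next whole second, and advance the raw clock edge by that amount scaled by the cyclecounter shift. A hedged restatement follows (hypothetical helper, not part of the driver):

#include <linux/math64.h>
#include <linux/ktime.h>	/* NSEC_PER_SEC */

static u64 next_whole_second_edge(u64 clock_edge, u64 ns, int shift)
{
	u32 rem;

	/* rem = ns modulo one second */
	div_u64_rem(ns, NSEC_PER_SEC, &rem);

	/* advance by the remaining fraction of a second, in shifted cycles */
	return clock_edge + ((u64)(NSEC_PER_SEC - rem) << shift);
}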
Example no. 3
0
static void mv88e6xxx_get_rxts(struct mv88e6xxx_chip *chip,
			       struct mv88e6xxx_port_hwtstamp *ps,
			       struct sk_buff *skb, u16 reg,
			       struct sk_buff_head *rxq)
{
	u16 buf[4] = { 0 }, status, seq_id;
	struct skb_shared_hwtstamps *shwt;
	struct sk_buff_head received;
	u64 ns, timelo, timehi;
	unsigned long flags;
	int err;

	/* The latched timestamp belongs to one of the received frames. */
	__skb_queue_head_init(&received);
	spin_lock_irqsave(&rxq->lock, flags);
	skb_queue_splice_tail_init(rxq, &received);
	spin_unlock_irqrestore(&rxq->lock, flags);

	mutex_lock(&chip->reg_lock);
	err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
				      reg, buf, ARRAY_SIZE(buf));
	mutex_unlock(&chip->reg_lock);
	if (err)
		pr_err("failed to get the receive time stamp\n");

	status = buf[0];
	timelo = buf[1];
	timehi = buf[2];
	seq_id = buf[3];

	if (status & MV88E6XXX_PTP_TS_VALID) {
		mutex_lock(&chip->reg_lock);
		err = mv88e6xxx_port_ptp_write(chip, ps->port_id, reg, 0);
		mutex_unlock(&chip->reg_lock);
		if (err)
			pr_err("failed to clear the receive status\n");
	}
	/* Since the device can only handle one time stamp at a time,
	 * we purge any extra frames from the queue.
	 */
	for ( ; skb; skb = __skb_dequeue(&received)) {
		if (mv88e6xxx_ts_valid(status) && seq_match(skb, seq_id)) {
			ns = timehi << 16 | timelo;

			mutex_lock(&chip->reg_lock);
			ns = timecounter_cyc2time(&chip->tstamp_tc, ns);
			mutex_unlock(&chip->reg_lock);
			shwt = skb_hwtstamps(skb);
			memset(shwt, 0, sizeof(*shwt));
			shwt->hwtstamp = ns_to_ktime(ns);
			status &= ~MV88E6XXX_PTP_TS_VALID;
		}
		netif_rx_ni(skb);
	}
}
Example no. 4
0
void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp, u64 timestamp,
			struct skb_shared_hwtstamps *hwts)
{
	u64 nsec;

	read_lock(&tstamp->lock);
	nsec = timecounter_cyc2time(&tstamp->clock, timestamp);
	read_unlock(&tstamp->lock);

	hwts->hwtstamp = ns_to_ktime(nsec);
}
Example no. 5
0
void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
                            struct skb_shared_hwtstamps *hwts,
                            u64 timestamp)
{
    u64 nsec;

    nsec = timecounter_cyc2time(&mdev->clock, timestamp);

    memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
    hwts->hwtstamp = ns_to_ktime(nsec);
}
Example no. 6
0
void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
			    struct skb_shared_hwtstamps *hwts,
			    u64 timestamp)
{
	unsigned long flags;
	u64 nsec;

	read_lock_irqsave(&mdev->clock_lock, flags);
	nsec = timecounter_cyc2time(&mdev->clock, timestamp);
	read_unlock_irqrestore(&mdev->clock_lock, flags);

	memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
	hwts->hwtstamp = ns_to_ktime(nsec);
}
Example no. 7
0
void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
			    struct skb_shared_hwtstamps *hwts,
			    uint64_t timestamp)
{
	panic("Disabled");
#if 0 // AKAROS_PORT
	unsigned long flags;
	uint64_t nsec;

	read_lock_irqsave(&mdev->clock_lock, flags);
	nsec = timecounter_cyc2time(&mdev->clock, timestamp);
	read_unlock_irqrestore(&mdev->clock_lock, flags);

	memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
	hwts->hwtstamp = ns_to_ktime(nsec);
#endif
}
Example no. 8
0
static void bfin_tx_hwtstamp(struct net_device *netdev, struct sk_buff *skb)
{
	struct bfin_mac_local *lp = netdev_priv(netdev);
	union skb_shared_tx *shtx = skb_tx(skb);

	if (shtx->hardware) {
		int timeout_cnt = MAX_TIMEOUT_CNT;

		/* When doing time stamping, keep the connection to the socket
		 * a while longer
		 */
		shtx->in_progress = 1;

		/*
		 * The timestamping is done at the EMAC module's MII/RMII interface
		 * when the module sees the Start of Frame of an event message packet. This
		 * interface is the closest possible place to the physical Ethernet transmission
		 * medium, providing the best timing accuracy.
		 */
		while ((!(bfin_read_EMAC_PTP_ISTAT() & TXTL)) && (--timeout_cnt))
			udelay(1);
		if (timeout_cnt == 0)
			printk(KERN_ERR DRV_NAME
					": fails to timestamp the TX packet\n");
		else {
			struct skb_shared_hwtstamps shhwtstamps;
			u64 ns;
			u64 regval;

			regval = bfin_read_EMAC_PTP_TXSNAPLO();
			regval |= (u64)bfin_read_EMAC_PTP_TXSNAPHI() << 32;
			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			ns = timecounter_cyc2time(&lp->clock,
					regval);
			timecompare_update(&lp->compare, ns);
			shhwtstamps.hwtstamp = ns_to_ktime(ns);
			shhwtstamps.syststamp =
				timecompare_transform(&lp->compare, ns);
			skb_tstamp_tx(skb, &shhwtstamps);

			bfin_dump_hwtamp("TX", &shhwtstamps.hwtstamp, &shhwtstamps.syststamp, &lp->compare);
		}
	}
}
Example no. 9
0
/**
 * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp
 * @q_vector: structure containing interrupt and ring information
 * @skb: particular skb to send timestamp with
 *
 * if the timestamp is valid, we convert it into the timecounter ns
 * value, then store that result into the shhwtstamps structure which
 * is passed up the network stack
 */
void ixgbe_ptp_rx_hwtstamp(struct ixgbe_q_vector *q_vector,
			   struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct skb_shared_hwtstamps *shhwtstamps;
	u64 regval = 0, ns;
	u32 tsyncrxctl;
	unsigned long flags;

	/* we cannot process timestamps on a ring without a q_vector */
	if (!q_vector || !q_vector->adapter)
		return;

	adapter = q_vector->adapter;
	hw = &adapter->hw;

	tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL);
	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL);
	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32;

	/*
	 * If this bit is set, then the RX registers contain the time stamp. No
	 * other packet will be time stamped until we read these registers, so
	 * read the registers to make them available again. Because only one
	 * packet can be time stamped at a time, we know that the register
	 * values must belong to this one here and therefore we don't need to
	 * compare any of the additional attributes stored for it.
	 *
	 * If nothing went wrong, then it should have a skb_shared_tx that we
	 * can turn into a skb_shared_hwtstamps.
	 */
	if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID))
		return;

	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	ns = timecounter_cyc2time(&adapter->tc, regval);
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	shhwtstamps = skb_hwtstamps(skb);
	shhwtstamps->hwtstamp = ns_to_ktime(ns);
}
Example no. 10
0
void mlx4_en_fill_hwtstamps(struct mlx4_en_dev *mdev,
			    struct skb_shared_hwtstamps *hwts,
			    u64 timestamp)
{
	u64 nsec;

	nsec = timecounter_cyc2time(&mdev->clock, timestamp);

	/*
	 * force a timecompare_update here (even if less than a second
	 * has passed) in order to prevent the case when ptpd or other
	 * software jumps the clock offset. Otherwise there is a small
	 * window when the timestamp would be based on previous skew
	 * and invalid results would be pushed to the network stack.
	 */
	timecompare_update(&mdev->compare, 0);
	memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
	hwts->hwtstamp = ns_to_ktime(nsec);
	hwts->syststamp = timecompare_transform(&mdev->compare, nsec);
}
Example no. 11
0
void mlx5e_fill_hwstamp(struct mlx5e_tstamp *tstamp,
			struct skb_shared_hwtstamps *hwts,
			u64 timestamp)
{
#if defined (HAVE_PTP_CLOCK_INFO) && (defined (CONFIG_PTP_1588_CLOCK) || defined(CONFIG_PTP_1588_CLOCK_MODULE))
	unsigned long flags;
	u64 nsec;

	memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
	if (!tstamp->ptp)
		return;

	read_lock_irqsave(&tstamp->lock, flags);
	nsec = timecounter_cyc2time(&tstamp->clock, timestamp);
	read_unlock_irqrestore(&tstamp->lock, flags);

	hwts->hwtstamp = ns_to_ktime(nsec);
#else
	memset(hwts, 0, sizeof(struct skb_shared_hwtstamps));
#endif
}
Example no. 12
0
/**
 * e1000e_phc_get_syncdevicetime - Callback given to the timekeeping code that reads system/device registers
 * @device: current device time
 * @system: system counter value read synchronously with device time
 * @ctx: context provided by timekeeping code
 *
 * Read device and system (ART) clock simultaneously and return the corrected
 * clock values in ns.
 **/
static int e1000e_phc_get_syncdevicetime(ktime_t *device,
					 struct system_counterval_t *system,
					 void *ctx)
{
	struct e1000_adapter *adapter = (struct e1000_adapter *)ctx;
	struct e1000_hw *hw = &adapter->hw;
	unsigned long flags;
	int i;
	u32 tsync_ctrl;
	u64 dev_cycles;
	u64 sys_cycles;

	tsync_ctrl = er32(TSYNCTXCTL);
	tsync_ctrl |= E1000_TSYNCTXCTL_START_SYNC |
		E1000_TSYNCTXCTL_MAX_ALLOWED_DLY_MASK;
	ew32(TSYNCTXCTL, tsync_ctrl);
	for (i = 0; i < MAX_HW_WAIT_COUNT; ++i) {
		udelay(1);
		tsync_ctrl = er32(TSYNCTXCTL);
		if (tsync_ctrl & E1000_TSYNCTXCTL_SYNC_COMP)
			break;
	}

	if (i == MAX_HW_WAIT_COUNT)
		return -ETIMEDOUT;

	dev_cycles = er32(SYSSTMPH);
	dev_cycles <<= 32;
	dev_cycles |= er32(SYSSTMPL);
	spin_lock_irqsave(&adapter->systim_lock, flags);
	*device = ns_to_ktime(timecounter_cyc2time(&adapter->tc, dev_cycles));
	spin_unlock_irqrestore(&adapter->systim_lock, flags);

	sys_cycles = er32(PLTSTMPH);
	sys_cycles <<= 32;
	sys_cycles |= er32(PLTSTMPL);
	*system = convert_art_to_tsc(sys_cycles);

	return 0;
}
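This callback is intended to be passed to get_device_system_crosststamp(), which invokes it to capture the device clock and the ART-based system counter together and then converts the pair into a cross-timestamp. A hedged sketch of how a PTP .getcrosststamp handler might wire it up (the handler name is an assumption; the field and API names follow the e1000e driver and the timekeeping core):

static int my_phc_getcrosststamp(struct ptp_clock_info *ptp,
				 struct system_device_crosststamp *xtstamp)
{
	struct e1000_adapter *adapter = container_of(ptp, struct e1000_adapter,
						     ptp_clock_info);

	/* the timekeeping core pairs the device time returned by
	 * e1000e_phc_get_syncdevicetime() with the matching system time
	 */
	return get_device_system_crosststamp(e1000e_phc_get_syncdevicetime,
					     adapter, NULL, xtstamp);
}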
Example no. 13
0
/**
 * ixgbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp
 * @adapter: private adapter structure
 * @hwtstamp: stack timestamp structure
 * @systim: unsigned 64bit system time value
 *
 * We need to convert the adapter's RX/TXSTMP registers into a hwtstamp value
 * which can be used by the stack's ptp functions.
 *
 * The lock is used to protect consistency of the cyclecounter and the SYSTIME
 * registers. However, it does not need to protect against the Rx or Tx
 * timestamp registers, as there can't be a new timestamp until the old one is
 * unlatched by reading.
 *
 * In addition to the timestamp in hardware, some controllers need a software
 * overflow cyclecounter, and this function takes this into account as well.
 **/
static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter,
				     struct skb_shared_hwtstamps *hwtstamp,
				     u64 timestamp)
{
	unsigned long flags;
	u64 ns;

	memset(hwtstamp, 0, sizeof(*hwtstamp));

	switch (adapter->hw.mac.type) {
	case ixgbe_mac_82599EB:
	case ixgbe_mac_X540:
		spin_lock_irqsave(&adapter->tmreg_lock, flags);
		ns = timecounter_cyc2time(&adapter->hw_tc, timestamp);
		spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

		hwtstamp->hwtstamp = ns_to_ktime(ns);
		break;
	default:
		break;
	}
}
Example no. 14
0
/**
 * ixgbe_ptp_tx_hwtstamp - utility function which checks for TX time stamp
 * @q_vector: structure containing interrupt and ring information
 * @skb: particular skb to send timestamp with
 *
 * if the timestamp is valid, we convert it into the timecounter ns
 * value, then store that result into the shhwtstamps structure which
 * is passed up the network stack
 */
void ixgbe_ptp_tx_hwtstamp(struct ixgbe_q_vector *q_vector,
			   struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct skb_shared_hwtstamps shhwtstamps;
	u64 regval = 0, ns;
	u32 tsynctxctl;
	unsigned long flags;

	/* we cannot process timestamps on a ring without a q_vector */
	if (!q_vector || !q_vector->adapter)
		return;

	adapter = q_vector->adapter;
	hw = &adapter->hw;

	tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL);
	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL);
	regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32;

	/*
	 * if TX timestamp is not valid, exit after clearing the
	 * timestamp registers
	 */
	if (!(tsynctxctl & IXGBE_TSYNCTXCTL_VALID))
		return;

	spin_lock_irqsave(&adapter->tmreg_lock, flags);
	ns = timecounter_cyc2time(&adapter->tc, regval);
	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	shhwtstamps.hwtstamp = ns_to_ktime(ns);
	skb_tstamp_tx(skb, &shhwtstamps);
}
Example no. 15
0
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx(struct net_device *ndev, int budget)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;
	ushort	pkt_len;
	__u8 *data;
	int	pkt_received = 0;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		if (pkt_received >= budget)
			break;
		pkt_received++;

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			ndev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				ndev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			ndev->stats.rx_errors++;
			ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		ndev->stats.rx_bytes += pkt_len;
		data = (__u8*)__va(bdp->cbd_bufaddr);

		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);

		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, pkt_len);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			printk("%s: Memory squeeze, dropping packet.\n",
					ndev->name);
			ndev->stats.rx_dropped++;
		} else {
			skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len - 4);	/* Make room */
			skb_copy_to_linear_data(skb, data, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, ndev);

			/* Get receive timestamp from the skb */
			if (fep->hwts_rx_en && fep->bufdesc_ex) {
				struct skb_shared_hwtstamps *shhwtstamps =
							    skb_hwtstamps(skb);
				unsigned long flags;
				struct bufdesc_ex *ebdp =
					(struct bufdesc_ex *)bdp;

				memset(shhwtstamps, 0, sizeof(*shhwtstamps));

				spin_lock_irqsave(&fep->tmreg_lock, flags);
				shhwtstamps->hwtstamp = ns_to_ktime(
				    timecounter_cyc2time(&fep->tc, ebdp->ts));
				spin_unlock_irqrestore(&fep->tmreg_lock, flags);
			}

			if (!skb_defer_rx_timestamp(skb))
				napi_gro_receive(&fep->napi, skb);
		}

		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = BD_ENET_RX_INT;
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}

		/* Update BD pointer to next entry */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
	}
	fep->cur_rx = bdp;

	return pkt_received;
}
Example no. 16
0
static void
fec_enet_tx(struct net_device *ndev)
{
	struct	fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;

	fep = netdev_priv(ndev);
	spin_lock(&fep->hw_lock);
	bdp = fep->dirty_tx;

	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
		if (bdp == fep->cur_tx && fep->tx_full == 0)
			break;

		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;
		}

		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) &&
			fep->bufdesc_ex) {
			struct skb_shared_hwtstamps shhwtstamps;
			unsigned long flags;
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			spin_lock_irqsave(&fep->tmreg_lock, flags);
			shhwtstamps.hwtstamp = ns_to_ktime(
				timecounter_cyc2time(&fep->tc, ebdp->ts));
			spin_unlock_irqrestore(&fep->tmreg_lock, flags);
			skb_tstamp_tx(skb, &shhwtstamps);
		}

		if (status & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(ndev))
				netif_wake_queue(ndev);
		}
	}
	fep->dirty_tx = bdp;
	spin_unlock(&fep->hw_lock);
}
Example no. 17
0
/**
 * fec_ptp_enable_pps
 * @fep: the fec_enet_private structure handle
 * @enable: enable the channel pps output
 *
 * This function enables the PPS output on the timer channel.
 */
static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
{
	unsigned long flags;
	u32 val, tempval;
	int inc;
	struct timespec ts;
	u64 ns;
	u32 remainder;
	val = 0;

	if (!(fep->hwts_tx_en || fep->hwts_rx_en)) {
		dev_err(&fep->pdev->dev, "No ptp stack is running\n");
		return -EINVAL;
	}

	if (fep->pps_enable == enable)
		return 0;

	fep->pps_channel = DEFAULT_PPS_CHANNEL;
	fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
	inc = fep->ptp_inc;

	spin_lock_irqsave(&fep->tmreg_lock, flags);

	if (enable) {
		/* clear any pending capture or output compare interrupt status */
		writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* It is recommended to double check the TMODE field in the
		 * TCSR register to be cleared before the first compare counter
		 * is written into TCCR register. Just add a double check.
		 */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		do {
			val &= ~(FEC_T_TMODE_MASK);
			writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
			val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		} while (val & FEC_T_TMODE_MASK);

		/* Dummy read counter to update the counter */
		timecounter_read(&fep->tc);
		/* We want to find the first compare event in the next
		 * second point. So we need to know what the ptp time
		 * is now and how many nanoseconds remain until the next
		 * second. The remaining nanoseconds before the next second
		 * are NSEC_PER_SEC - ts.tv_nsec; adding them to the current
		 * timer value gives the next second point.
		 */
		tempval = readl(fep->hwp + FEC_ATIME_CTRL);
		tempval |= FEC_T_CTRL_CAPTURE;
		writel(tempval, fep->hwp + FEC_ATIME_CTRL);

		tempval = readl(fep->hwp + FEC_ATIME);
		/* Convert the ptp local counter to 1588 timestamp */
		ns = timecounter_cyc2time(&fep->tc, tempval);
		ts.tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder);
		ts.tv_nsec = remainder;

		/* The tempval is less than 3 seconds, so val is less than
		 * 4 seconds. No overflow in the 32-bit calculation.
		 */
		val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval;

		/* Need to consider the situation that the current time is
		 * very close to the second point, which means NSEC_PER_SEC
		 * - ts.tv_nsec is close to zero (for example 20 ns); since
		 * the timer is still running when we calculate the first
		 * compare event, it is possible that the remaining
		 * nanoseconds run out before the compare counter is
		 * calculated and written into the TCCR register. To avoid
		 * this possibility, we set the compare event to the second
		 * after next. The current setting is a 31-bit timer that
		 * wraps around over 2 seconds, so it is okay to target the
		 * second after next.
		 */
		val += NSEC_PER_SEC;

		/* We add (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to the current
		 * ptp counter, which may cause a 32-bit wrap. Since
		 * (NSEC_PER_SEC - (u32)ts.tv_nsec) is less than 2 seconds,
		 * we can ensure the wrap will not cause an issue. An offset
		 * bigger than fep->cc.mask would be an error.
		 */
		val &= fep->cc.mask;
		writel(val, fep->hwp + FEC_TCCR(fep->pps_channel));

		/* Calculate the second the compare event timestamp */
		fep->next_counter = (val + fep->reload_period) & fep->cc.mask;

		/* Enable compare event when overflow */
		val = readl(fep->hwp + FEC_ATIME_CTRL);
		val |= FEC_T_CTRL_PINPER;
		writel(val, fep->hwp + FEC_ATIME_CTRL);

		/* Compare channel setting. */
		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
		val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
		val &= ~(1 << FEC_T_TDRE_OFFSET);
		val &= ~(FEC_T_TMODE_MASK);
		val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET);
		writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));

		/* Write the second compare event timestamp and calculate
		 * the third timestamp. Refer the TCCR register detail in the spec.
		 */
		writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
		fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
	} else {
		writel(0, fep->hwp + FEC_TCSR(fep->pps_channel));
	}

	fep->pps_enable = enable;
	spin_unlock_irqrestore(&fep->tmreg_lock, flags);

	return 0;
}
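The compare-value computation in the enable path reduces to: convert the captured counter to a 1588 timestamp, add the nanoseconds left until the second after next, and mask to the counter width. A hedged restatement as a hypothetical helper (it assumes the caller holds tmreg_lock, as the driver does):

static u32 fec_next_pps_compare(struct fec_enet_private *fep, u32 counter_now)
{
	u64 ns;
	u32 remainder, val;

	/* convert the captured counter value to a 1588 timestamp */
	ns = timecounter_cyc2time(&fep->tc, counter_now);
	div_u64_rem(ns, NSEC_PER_SEC, &remainder);

	/* target the second after next so the write cannot race the timer */
	val = counter_now + (NSEC_PER_SEC - remainder) + NSEC_PER_SEC;

	return val & fep->cc.mask;
}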
Example no. 18
0
static int mv88e6xxx_txtstamp_work(struct mv88e6xxx_chip *chip,
				   struct mv88e6xxx_port_hwtstamp *ps)
{
	const struct mv88e6xxx_ptp_ops *ptp_ops = chip->info->ops->ptp_ops;
	struct skb_shared_hwtstamps shhwtstamps;
	u16 departure_block[4], status;
	struct sk_buff *tmp_skb;
	u32 time_raw;
	int err;
	u64 ns;

	if (!ps->tx_skb)
		return 0;

	mutex_lock(&chip->reg_lock);
	err = mv88e6xxx_port_ptp_read(chip, ps->port_id,
				      ptp_ops->dep_sts_reg,
				      departure_block,
				      ARRAY_SIZE(departure_block));
	mutex_unlock(&chip->reg_lock);

	if (err)
		goto free_and_clear_skb;

	if (!(departure_block[0] & MV88E6XXX_PTP_TS_VALID)) {
		if (time_is_before_jiffies(ps->tx_tstamp_start +
					   TX_TSTAMP_TIMEOUT)) {
			dev_warn(chip->dev, "p%d: clearing tx timestamp hang\n",
				 ps->port_id);
			goto free_and_clear_skb;
		}
		/* The timestamp should be available quickly, while getting it
		 * is high priority and time bounded to only 10ms. A poll is
		 * warranted so restart the work.
		 */
		return 1;
	}

	/* We have the timestamp; go ahead and clear valid now */
	mutex_lock(&chip->reg_lock);
	mv88e6xxx_port_ptp_write(chip, ps->port_id, ptp_ops->dep_sts_reg, 0);
	mutex_unlock(&chip->reg_lock);

	status = departure_block[0] & MV88E6XXX_PTP_TS_STATUS_MASK;
	if (status != MV88E6XXX_PTP_TS_STATUS_NORMAL) {
		dev_warn(chip->dev, "p%d: tx timestamp overrun\n", ps->port_id);
		goto free_and_clear_skb;
	}

	if (departure_block[3] != ps->tx_seq_id) {
		dev_warn(chip->dev, "p%d: unexpected seq. id\n", ps->port_id);
		goto free_and_clear_skb;
	}

	memset(&shhwtstamps, 0, sizeof(shhwtstamps));
	time_raw = ((u32)departure_block[2] << 16) | departure_block[1];
	mutex_lock(&chip->reg_lock);
	ns = timecounter_cyc2time(&chip->tstamp_tc, time_raw);
	mutex_unlock(&chip->reg_lock);
	shhwtstamps.hwtstamp = ns_to_ktime(ns);

	dev_dbg(chip->dev,
		"p%d: txtstamp %llx status 0x%04x skb ID 0x%04x hw ID 0x%04x\n",
		ps->port_id, ktime_to_ns(shhwtstamps.hwtstamp),
		departure_block[0], ps->tx_seq_id, departure_block[3]);

	/* skb_complete_tx_timestamp() will free up the client to make
	 * another timestamp-able transmit. We have to be ready for it
	 * -- by clearing the ps->tx_skb "flag" -- beforehand.
	 */

	tmp_skb = ps->tx_skb;
	ps->tx_skb = NULL;
	clear_bit_unlock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS, &ps->state);
	skb_complete_tx_timestamp(tmp_skb, &shhwtstamps);

	return 0;

free_and_clear_skb:
	dev_kfree_skb_any(ps->tx_skb);
	ps->tx_skb = NULL;
	clear_bit_unlock(MV88E6XXX_HWTSTAMP_TX_IN_PROGRESS, &ps->state);

	return 0;
}