Example #1
/**
 * ixgbe_ipsec_tx - setup Tx flags for ipsec offload
 * @tx_ring: outgoing context
 * @first: current data packet
 * @itd: ipsec Tx data for later use in building context descriptor
 **/
int ixgbe_ipsec_tx(struct ixgbe_ring *tx_ring,
		   struct ixgbe_tx_buffer *first,
		   struct ixgbe_ipsec_tx_data *itd)
{
	struct ixgbe_adapter *adapter = netdev_priv(tx_ring->netdev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_state *xs;
	struct tx_sa *tsa;

	if (unlikely(!first->skb->sp->len)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm state len = %d\n",
			   __func__, first->skb->sp->len);
		return 0;
	}

	xs = xfrm_input_state(first->skb);
	if (unlikely(!xs)) {
		netdev_err(tx_ring->netdev, "%s: no xfrm_input_state() xs = %p\n",
			   __func__, xs);
		return 0;
	}

	itd->sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;
	if (unlikely(itd->sa_idx >= IXGBE_IPSEC_MAX_SA_COUNT)) {
		netdev_err(tx_ring->netdev, "%s: bad sa_idx=%d handle=%lu\n",
			   __func__, itd->sa_idx, xs->xso.offload_handle);
		return 0;
	}

	tsa = &ipsec->tx_tbl[itd->sa_idx];
	if (unlikely(!tsa->used)) {
		netdev_err(tx_ring->netdev, "%s: unused sa_idx=%d\n",
			   __func__, itd->sa_idx);
		return 0;
	}

	first->tx_flags |= IXGBE_TX_FLAGS_IPSEC | IXGBE_TX_FLAGS_CC;

	if (xs->id.proto == IPPROTO_ESP) {

		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP |
			      IXGBE_ADVTXD_TUCMD_L4T_TCP;
		if (first->protocol == htons(ETH_P_IP))
			itd->flags |= IXGBE_ADVTXD_TUCMD_IPV4;

		/* The actual trailer length is authlen (16 bytes) plus
		 * 2 bytes for the proto and the padlen values, plus
		 * padlen bytes of padding.  This ends up not the same
		 * as the static value found in xs->props.trailer_len (21).
		 *
		 * ... but if we're doing GSO, don't bother as the stack
		 * doesn't add a trailer for those.
		 */
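		/* Worked example (illustrative padlen): with the 16-byte ICV
		 * noted above and 2 bytes of ESP padding, trailer_len is
		 * 16 + 2 + 2 = 20, not the static 21 in xs->props.trailer_len.
		 */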
		if (!skb_is_gso(first->skb)) {
			/* The "correct" way to get the auth length would be
			 * to use
			 *    authlen = crypto_aead_authsize(xs->data);
			 * but since we know we only have one size to worry
			 * about, we can let the compiler use the constant
			 * and save us a few CPU cycles.
			 */
			const int authlen = IXGBE_IPSEC_AUTH_BITS / 8;
			struct sk_buff *skb = first->skb;
			u8 padlen;
			int ret;

			ret = skb_copy_bits(skb, skb->len - (authlen + 2),
					    &padlen, 1);
			if (unlikely(ret))
				return 0;
			itd->trailer_len = authlen + 2 + padlen;
		}
	}
	if (tsa->encrypt)
		itd->flags |= IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN;

	return 1;
}
Example #2
static int ixgbe_set_ringparam(struct net_device *netdev,
                               struct ethtool_ringparam *ring)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_ring *temp_tx_ring = NULL, *temp_rx_ring = NULL;
	int i, err;
	u32 new_rx_count, new_tx_count;
	bool need_update = false;

	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
		return -EINVAL;

	new_rx_count = max(ring->rx_pending, (u32)IXGBE_MIN_RXD);
	new_rx_count = min(new_rx_count, (u32)IXGBE_MAX_RXD);
	new_rx_count = ALIGN(new_rx_count, IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE);

	new_tx_count = max(ring->tx_pending, (u32)IXGBE_MIN_TXD);
	new_tx_count = min(new_tx_count, (u32)IXGBE_MAX_TXD);
	new_tx_count = ALIGN(new_tx_count, IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE);
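	/* e.g. a request of 500 descriptors with an alignment multiple of 8
	 * is rounded up to 504 (illustrative values) */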

	if ((new_tx_count == adapter->tx_ring->count) &&
	    (new_rx_count == adapter->rx_ring->count)) {
		/* nothing to do */
		return 0;
	}

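	/* take the __IXGBE_RESETTING bit so we don't race another
	 * reconfiguration of the device */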
	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
		msleep(1);

	temp_tx_ring = kcalloc(adapter->num_tx_queues,
	                       sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!temp_tx_ring) {
		err = -ENOMEM;
		goto err_setup;
	}

	if (new_tx_count != adapter->tx_ring_count) {
		memcpy(temp_tx_ring, adapter->tx_ring,
		       adapter->num_tx_queues * sizeof(struct ixgbe_ring));
		for (i = 0; i < adapter->num_tx_queues; i++) {
			temp_tx_ring[i].count = new_tx_count;
			err = ixgbe_setup_tx_resources(adapter,
			                               &temp_tx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_tx_resources(adapter,
					                        &temp_tx_ring[i]);
				}
				goto err_setup;
			}
			temp_tx_ring[i].v_idx = adapter->tx_ring[i].v_idx;
		}
		need_update = true;
	}

	temp_rx_ring = kcalloc(adapter->num_rx_queues,
	                       sizeof(struct ixgbe_ring), GFP_KERNEL);
	if (!temp_rx_ring) {
		/* unwind any Tx resources we have already set up */
		if (need_update)
			for (i = 0; i < adapter->num_tx_queues; i++)
				ixgbe_free_tx_resources(adapter,
				                        &temp_tx_ring[i]);
		err = -ENOMEM;
		goto err_setup;
	}

	if (new_rx_count != adapter->rx_ring_count) {
		memcpy(temp_rx_ring, adapter->rx_ring,
		       adapter->num_rx_queues * sizeof(struct ixgbe_ring));
		for (i = 0; i < adapter->num_rx_queues; i++) {
			temp_rx_ring[i].count = new_rx_count;
			err = ixgbe_setup_rx_resources(adapter,
			                               &temp_rx_ring[i]);
			if (err) {
				while (i) {
					i--;
					ixgbe_free_rx_resources(adapter,
					                      &temp_rx_ring[i]);
				}
				goto err_setup;
			}
			temp_rx_ring[i].v_idx = adapter->rx_ring[i].v_idx;
		}
		need_update = true;
	}

	/* if rings need to be updated, here's the place to do it in one shot */
	if (need_update) {
		if (netif_running(netdev))
			ixgbe_down(adapter);

		/* tx */
		if (new_tx_count != adapter->tx_ring_count) {
			kfree(adapter->tx_ring);
			adapter->tx_ring = temp_tx_ring;
			temp_tx_ring = NULL;
			adapter->tx_ring_count = new_tx_count;
		}

		/* rx */
		if (new_rx_count != adapter->rx_ring_count) {
			kfree(adapter->rx_ring);
			adapter->rx_ring = temp_rx_ring;
			temp_rx_ring = NULL;
			adapter->rx_ring_count = new_rx_count;
		}
	}

	/* success! */
	err = 0;
	if (need_update && netif_running(netdev))
		ixgbe_up(adapter);

err_setup:
	/* any temp ring array not swapped into the adapter is freed here;
	 * swapped or never-allocated pointers are NULL, so kfree() is a no-op */
	kfree(temp_tx_ring);
	kfree(temp_rx_ring);
	clear_bit(__IXGBE_RESETTING, &adapter->state);
	return err;
}
Example #3
static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ip_tunnel *tunnel = netdev_priv(dev);
	struct net_device_stats *stats = &tunnel->dev->stats;
	struct iphdr  *old_iph = ip_hdr(skb);
	struct iphdr  *tiph;
	u8     tos;
	__be16 df;
	struct rtable *rt;     			/* Route to the other host */
	struct net_device *tdev;			/* Device to other host */
	struct iphdr  *iph;			/* Our new IP header */
	unsigned int max_headroom;		/* The extra header space needed */
	int    gre_hlen;
	__be32 dst;
	int    mtu;

	if (dev->type == ARPHRD_ETHER)
		IPCB(skb)->flags = 0;

	if (dev->header_ops && dev->type == ARPHRD_IPGRE) {
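		/* dev_hard_header() already built the outer IP header, so it
		 * sits at skb->data and no extra headroom is reserved */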
		gre_hlen = 0;
		tiph = (struct iphdr *)skb->data;
	} else {
		gre_hlen = tunnel->hlen;
		tiph = &tunnel->parms.iph;
	}

	if ((dst = tiph->daddr) == 0) {
		/* NBMA tunnel */

		if (skb_dst(skb) == NULL) {
			stats->tx_fifo_errors++;
			goto tx_error;
		}

		if (skb->protocol == htons(ETH_P_IP)) {
			rt = skb_rtable(skb);
			if ((dst = rt->rt_gateway) == 0)
				goto tx_error_icmp;
		}
#ifdef CONFIG_IPV6
		else if (skb->protocol == htons(ETH_P_IPV6)) {
			struct in6_addr *addr6;
			int addr_type;
			struct neighbour *neigh = skb_dst(skb)->neighbour;

			if (neigh == NULL)
				goto tx_error;

			addr6 = (struct in6_addr *)&neigh->primary_key;
			addr_type = ipv6_addr_type(addr6);

			if (addr_type == IPV6_ADDR_ANY) {
				addr6 = &ipv6_hdr(skb)->daddr;
				addr_type = ipv6_addr_type(addr6);
			}

			if ((addr_type & IPV6_ADDR_COMPATv4) == 0)
				goto tx_error_icmp;

			dst = addr6->s6_addr32[3];
		}
#endif
		else
			goto tx_error;
	}

	tos = tiph->tos;
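	/* a tunnel TOS of 1 means "inherit": use the inner IPv4 TOS */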
	if (tos == 1) {
		tos = 0;
		if (skb->protocol == htons(ETH_P_IP))
			tos = old_iph->tos;
	}

	{
		struct flowi fl = { .oif = tunnel->parms.link,
				    .nl_u = { .ip4_u =
					      { .daddr = dst,
						.saddr = tiph->saddr,
						.tos = RT_TOS(tos) } },
				    .proto = IPPROTO_GRE };
		if (ip_route_output_key(dev_net(dev), &rt, &fl)) {
			stats->tx_carrier_errors++;
			goto tx_error;
		}
	}
Example #4
static u32 ixgbe_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->msg_enable;
}
Example #5
static void ixgbe_get_regs(struct net_device *netdev,
                           struct ethtool_regs *regs, void *p)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 *regs_buff = p;
	u8 i;

	memset(p, 0, IXGBE_REGS_LEN * sizeof(u32));

	regs->version = (1 << 24) | hw->revision_id << 16 | hw->device_id;
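	/* version: constant 1 in the top byte, then revision ID, device ID */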

	/* General Registers */
	regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL);
	regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS);
	regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT);
	regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP);
	regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP);
	regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL);
	regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER);
	regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER);

	/* NVM Register */
	regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC);
	regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD);
	regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA);
	regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL);
	regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA);
	regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL);
	regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA);
	regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT);
	regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP);
	regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC);

	/* Interrupt */
	/* don't read EICR because it can clear interrupt causes, instead
	 * read EICS which is a shadow but doesn't clear EICR */
	regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS);
	regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS);
	regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC);
	regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC);
	regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM);
	regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0));
	regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0));
	regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT);
	regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA);
	regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0));
	regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE);

	/* Flow Control */
	regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP);
	regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0));
	regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1));
	regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2));
	regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3));
	for (i = 0; i < 8; i++)
		regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i));
	for (i = 0; i < 8; i++)
		regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i));
	regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV);
	regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS);

	/* Receive DMA */
	for (i = 0; i < 64; i++)
		regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i));
	for (i = 0; i < 64; i++)
		regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i));
	for (i = 0; i < 64; i++)
		regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i));
	for (i = 0; i < 64; i++)
		regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i));
	for (i = 0; i < 64; i++)
		regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i));
	for (i = 0; i < 64; i++)
		regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i));
	for (i = 0; i < 16; i++)
		regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i));
	regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL);
	for (i = 0; i < 8; i++)
		regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i));
	regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL);
	regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN);

	/* Receive */
	regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM);
	regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL);
	for (i = 0; i < 16; i++)
		regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i));
	for (i = 0; i < 16; i++)
		regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i));
	regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0));
	regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL);
	regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL);
	regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL);
	regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC);
	regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL);
	for (i = 0; i < 8; i++)
		regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i));
	for (i = 0; i < 8; i++)
		regs_buff[528 + i] = IXGBE_READ_REG(hw, IXGBE_IMIREXT(i));
	regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP);

	/* Transmit */
	for (i = 0; i < 32; i++)
		regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i));
	for (i = 0; i < 32; i++)
		regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i));
	for (i = 0; i < 32; i++)
		regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i));
	for (i = 0; i < 32; i++)
		regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i));
	for (i = 0; i < 32; i++)
		regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i));
	for (i = 0; i < 32; i++)
		regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i));
	for (i = 0; i < 32; i++)
		regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i));
	regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL);
	for (i = 0; i < 16; i++)
		regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i));
	regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG);
	for (i = 0; i < 8; i++)
		regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i));
	regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP);

	/* Wake Up */
	regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC);
	regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC);
	regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS);
	regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV);
	regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT);
	regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT);
	regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL);
	regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM);
	regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0));

	regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS);
	regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS);
	regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS);
	regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR);
	for (i = 0; i < 8; i++)
		regs_buff[833 + i] = IXGBE_READ_REG(hw, IXGBE_RT2CR(i));
	for (i = 0; i < 8; i++)
		regs_buff[841 + i] = IXGBE_READ_REG(hw, IXGBE_RT2SR(i));
	for (i = 0; i < 8; i++)
		regs_buff[849 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i));
	for (i = 0; i < 8; i++)
		regs_buff[857 + i] = IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i));
	for (i = 0; i < 8; i++)
		regs_buff[865 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i));
	for (i = 0; i < 8; i++)
		regs_buff[873 + i] = IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i));

	/* Statistics */
	regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs);
	regs_buff[882] = IXGBE_GET_STAT(adapter, illerrc);
	regs_buff[883] = IXGBE_GET_STAT(adapter, errbc);
	regs_buff[884] = IXGBE_GET_STAT(adapter, mspdc);
	for (i = 0; i < 8; i++)
		regs_buff[885 + i] = IXGBE_GET_STAT(adapter, mpc[i]);
	regs_buff[893] = IXGBE_GET_STAT(adapter, mlfc);
	regs_buff[894] = IXGBE_GET_STAT(adapter, mrfc);
	regs_buff[895] = IXGBE_GET_STAT(adapter, rlec);
	regs_buff[896] = IXGBE_GET_STAT(adapter, lxontxc);
	regs_buff[897] = IXGBE_GET_STAT(adapter, lxonrxc);
	regs_buff[898] = IXGBE_GET_STAT(adapter, lxofftxc);
	regs_buff[899] = IXGBE_GET_STAT(adapter, lxoffrxc);
	for (i = 0; i < 8; i++)
		regs_buff[900 + i] = IXGBE_GET_STAT(adapter, pxontxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[908 + i] = IXGBE_GET_STAT(adapter, pxonrxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[916 + i] = IXGBE_GET_STAT(adapter, pxofftxc[i]);
	for (i = 0; i < 8; i++)
		regs_buff[924 + i] = IXGBE_GET_STAT(adapter, pxoffrxc[i]);
	regs_buff[932] = IXGBE_GET_STAT(adapter, prc64);
	regs_buff[933] = IXGBE_GET_STAT(adapter, prc127);
	regs_buff[934] = IXGBE_GET_STAT(adapter, prc255);
	regs_buff[935] = IXGBE_GET_STAT(adapter, prc511);
	regs_buff[936] = IXGBE_GET_STAT(adapter, prc1023);
	regs_buff[937] = IXGBE_GET_STAT(adapter, prc1522);
	regs_buff[938] = IXGBE_GET_STAT(adapter, gprc);
	regs_buff[939] = IXGBE_GET_STAT(adapter, bprc);
	regs_buff[940] = IXGBE_GET_STAT(adapter, mprc);
	regs_buff[941] = IXGBE_GET_STAT(adapter, gptc);
	regs_buff[942] = IXGBE_GET_STAT(adapter, gorc);
	regs_buff[944] = IXGBE_GET_STAT(adapter, gotc);
	for (i = 0; i < 8; i++)
		regs_buff[946 + i] = IXGBE_GET_STAT(adapter, rnbc[i]);
	regs_buff[954] = IXGBE_GET_STAT(adapter, ruc);
	regs_buff[955] = IXGBE_GET_STAT(adapter, rfc);
	regs_buff[956] = IXGBE_GET_STAT(adapter, roc);
	regs_buff[957] = IXGBE_GET_STAT(adapter, rjc);
	regs_buff[958] = IXGBE_GET_STAT(adapter, mngprc);
	regs_buff[959] = IXGBE_GET_STAT(adapter, mngpdc);
	regs_buff[960] = IXGBE_GET_STAT(adapter, mngptc);
	regs_buff[961] = IXGBE_GET_STAT(adapter, tor);
	regs_buff[963] = IXGBE_GET_STAT(adapter, tpr);
	regs_buff[964] = IXGBE_GET_STAT(adapter, tpt);
	regs_buff[965] = IXGBE_GET_STAT(adapter, ptc64);
	regs_buff[966] = IXGBE_GET_STAT(adapter, ptc127);
	regs_buff[967] = IXGBE_GET_STAT(adapter, ptc255);
	regs_buff[968] = IXGBE_GET_STAT(adapter, ptc511);
	regs_buff[969] = IXGBE_GET_STAT(adapter, ptc1023);
	regs_buff[970] = IXGBE_GET_STAT(adapter, ptc1522);
	regs_buff[971] = IXGBE_GET_STAT(adapter, mptc);
	regs_buff[972] = IXGBE_GET_STAT(adapter, bptc);
	regs_buff[973] = IXGBE_GET_STAT(adapter, xec);
	for (i = 0; i < 16; i++)
		regs_buff[974 + i] = IXGBE_GET_STAT(adapter, qprc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[990 + i] = IXGBE_GET_STAT(adapter, qptc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1006 + i] = IXGBE_GET_STAT(adapter, qbrc[i]);
	for (i = 0; i < 16; i++)
		regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]);

	/* MAC */
	regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG);
	regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL);
	regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA);
	regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0);
	regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1);
	regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA);
	regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP);
	regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP);
	regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP);
	regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0);
	regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1);
	regs_buff[1049] = IXGBE_READ_REG(hw, IXGBE_PAP);
	regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA);
	regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE);
	regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD);
	regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS);
	regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA);
	regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD);
	regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD);
	regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD);
	regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG);
	regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1);
	regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2);
	regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS);
	regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC);
	regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS);
	regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC);
	regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS);
	regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2);
	regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3);
	regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1);
	regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2);
	regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL);

	/* Diagnostic */
	regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i));
	regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i));
	regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE);
	regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL);
	for (i = 0; i < 8; i++)
		regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i));
	regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN);
	for (i = 0; i < 4; i++)
		regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i));
	regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE);
	regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL);
	regs_buff[1102] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0);
	regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1);
	regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2);
	regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3);
	regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL);
	regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0);
	regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1);
	regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2);
	regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3);
	for (i = 0; i < 8; i++)
		regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i));
	regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL);
	regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1);
	regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2);
	regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1);
	regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2);
	regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS);
	regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL);
	regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC);
	regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC);
}
Example #6
int ipoib_init_qp(struct net_device *dev)
{
	struct ipoib_dev_priv *priv = netdev_priv(dev);
	int ret;
	u16 pkey_index;
	struct ib_qp_attr qp_attr;
	int attr_mask;

	/*
	 * Search through the port P_Key table for the requested pkey value.
	 * The port has to be assigned to the respective IB partition in
	 * advance.
	 */
	ret = ib_find_cached_pkey(priv->ca, priv->port, priv->pkey, &pkey_index);
	if (ret) {
		clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);
		return ret;
	}
	set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags);

	qp_attr.qp_state = IB_QPS_INIT;
	qp_attr.qkey = 0;
	qp_attr.port_num = priv->port;
	qp_attr.pkey_index = pkey_index;
	attr_mask =
	    IB_QP_QKEY |
	    IB_QP_PORT |
	    IB_QP_PKEY_INDEX |
	    IB_QP_STATE;
	ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to init, ret = %d\n", ret);
		goto out_fail;
	}

	qp_attr.qp_state = IB_QPS_RTR;
	/* Can't set this in a INIT->RTR transition */
	attr_mask &= ~IB_QP_PORT;
	ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTR, ret = %d\n", ret);
		goto out_fail;
	}

	qp_attr.qp_state = IB_QPS_RTS;
	qp_attr.sq_psn = 0;
	attr_mask |= IB_QP_SQ_PSN;
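	/* the P_Key index was set in INIT and may not be modified in the
	 * RTR->RTS transition, so drop it from the mask */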
	attr_mask &= ~IB_QP_PKEY_INDEX;
	ret = ib_modify_qp(priv->qp, &qp_attr, attr_mask);
	if (ret) {
		ipoib_warn(priv, "failed to modify QP to RTS, ret = %d\n", ret);
		goto out_fail;
	}

	return 0;

out_fail:
	qp_attr.qp_state = IB_QPS_RESET;
	if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE))
		ipoib_warn(priv, "Failed to modify QP to RESET state\n");

	return ret;
}
Example #7
static int ixgbe_get_settings(struct net_device *netdev,
                              struct ethtool_cmd *ecmd)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_hw *hw = &adapter->hw;
	u32 link_speed = 0;
	bool link_up;

	ecmd->supported = SUPPORTED_10000baseT_Full;
	ecmd->autoneg = AUTONEG_ENABLE;
	ecmd->transceiver = XCVR_EXTERNAL;
	if (hw->phy.media_type == ixgbe_media_type_copper) {
		ecmd->supported |= (SUPPORTED_1000baseT_Full |
		                    SUPPORTED_TP | SUPPORTED_Autoneg);

		ecmd->advertising = (ADVERTISED_TP | ADVERTISED_Autoneg);
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL)
			ecmd->advertising |= ADVERTISED_10000baseT_Full;
		if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL)
			ecmd->advertising |= ADVERTISED_1000baseT_Full;
		/*
		 * It's possible that phy.autoneg_advertised may not be
		 * set yet.  If so display what the default would be -
		 * both 1G and 10G supported.
		 */
		if (!(ecmd->advertising & (ADVERTISED_1000baseT_Full |
					   ADVERTISED_10000baseT_Full)))
			ecmd->advertising |= (ADVERTISED_10000baseT_Full |
					      ADVERTISED_1000baseT_Full);

		ecmd->port = PORT_TP;
	} else if (hw->phy.media_type == ixgbe_media_type_backplane) {
		/* Set as FIBRE until SERDES defined in kernel */
		switch (hw->device_id) {
		case IXGBE_DEV_ID_82598:
			ecmd->supported |= (SUPPORTED_1000baseT_Full |
				SUPPORTED_FIBRE);
			ecmd->advertising = (ADVERTISED_10000baseT_Full |
				ADVERTISED_1000baseT_Full |
				ADVERTISED_FIBRE);
			ecmd->port = PORT_FIBRE;
			break;
		case IXGBE_DEV_ID_82598_BX:
			ecmd->supported = (SUPPORTED_1000baseT_Full |
					   SUPPORTED_FIBRE);
			ecmd->advertising = (ADVERTISED_1000baseT_Full |
					     ADVERTISED_FIBRE);
			ecmd->port = PORT_FIBRE;
			ecmd->autoneg = AUTONEG_DISABLE;
			break;
		}
	} else {
		ecmd->supported |= SUPPORTED_FIBRE;
		ecmd->advertising = (ADVERTISED_10000baseT_Full |
		                     ADVERTISED_FIBRE);
		ecmd->port = PORT_FIBRE;
		ecmd->autoneg = AUTONEG_DISABLE;
	}

	hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
	if (link_up) {
		ecmd->speed = (link_speed == IXGBE_LINK_SPEED_10GB_FULL) ?
		               SPEED_10000 : SPEED_1000;
		ecmd->duplex = DUPLEX_FULL;
	} else {
		ecmd->speed = -1;
		ecmd->duplex = -1;
	}

	return 0;
}
Example #8
static void vxge_ethtool_get_strings(struct net_device *dev,
			      u32 stringset, u8 *data)
{
	int stat_size = 0;
	int i, j;
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
	switch (stringset) {
	case ETH_SS_STATS:
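		/* vxge_add_string() formats each name into the next
		 * ETH_GSTRING_LEN slot of 'data' and advances stat_size */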
		vxge_add_string("VPATH STATISTICS%s\t\t\t",
			&stat_size, data, "");
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_add_string("tx_ttl_eth_frms_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("tx_ttl_eth_octects_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("tx_data_octects_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("tx_mcast_frms_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("tx_bcast_frms_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("tx_ucast_frms_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("tx_tagged_frms_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("tx_vld_ip_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("tx_vld_ip_octects_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("tx_icmp_%d\t\t\t\t",
					&stat_size, data, i);
			vxge_add_string("tx_tcp_%d\t\t\t\t",
					&stat_size, data, i);
			vxge_add_string("tx_rst_tcp_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("tx_udp_%d\t\t\t\t",
					&stat_size, data, i);
			vxge_add_string("tx_unknown_proto_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("tx_lost_ip_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("tx_parse_error_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("tx_tcp_offload_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("tx_retx_tcp_offload_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("tx_lost_ip_offload_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_ttl_eth_frms_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_vld_frms_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_offload_frms_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_ttl_eth_octects_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_data_octects_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_offload_octects_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_vld_mcast_frms_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_vld_bcast_frms_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_accepted_ucast_frms_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_accepted_nucast_frms_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_tagged_frms_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_long_frms_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_usized_frms_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_osized_frms_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_frag_frms_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_jabber_frms_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_ttl_64_frms_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_ttl_65_127_frms_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_ttl_128_255_frms_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_ttl_256_511_frms_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_ttl_512_1023_frms_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_ttl_8192_max_frms_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_ttl_gt_max_frms_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_ip%d\t\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_accepted_ip_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_ip_octects_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_err_ip_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_icmp_%d\t\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_tcp_%d\t\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_udp_%d\t\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_err_tcp_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_lost_frms_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_lost_ip_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_lost_ip_offload_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_various_discard_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_sleep_discard_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_red_discard_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_queue_full_discard_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_mpa_ok_frms_%d\t\t\t",
					&stat_size, data, i);
		}

		vxge_add_string("\nAGGR STATISTICS%s\t\t\t\t",
			&stat_size, data, "");
		for (i = 0; i < vdev->max_config_port; i++) {
			vxge_add_string("tx_frms_%d\t\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_data_octects_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_mcast_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_bcast_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_discarded_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_errored_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_frms_%d\t\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_data_octects_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_mcast_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_bcast_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_discarded_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_errored_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_unknown_slow_proto_frms_%d\t",
				&stat_size, data, i);
		}

		vxge_add_string("\nPORT STATISTICS%s\t\t\t\t",
			&stat_size, data, "");
		for (i = 0; i < vdev->max_config_port; i++) {
			vxge_add_string("tx_ttl_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_ttl_octects_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_data_octects_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_mcast_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_bcast_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_ucast_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_tagged_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_vld_ip_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_vld_ip_octects_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_icmp_%d\t\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_tcp_%d\t\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_rst_tcp_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_udp_%d\t\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_parse_error_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_unknown_protocol_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_pause_ctrl_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_marker_pdu_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_lacpdu_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_drop_ip_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_marker_resp_pdu_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_xgmii_char2_match_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_xgmii_char1_match_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_xgmii_column2_match_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_xgmii_column1_match_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_any_err_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("tx_drop_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_ttl_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_vld_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_offload_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_ttl_octects_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_data_octects_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_offload_octects_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_vld_mcast_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_vld_bcast_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_accepted_ucast_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_accepted_nucast_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_tagged_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_long_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_usized_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_osized_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_frag_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_jabber_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_ttl_64_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_ttl_65_127_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_ttl_128_255_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_ttl_256_511_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_ttl_512_1023_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_ttl_1024_1518_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_ttl_1519_4095_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_ttl_4096_8191_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_ttl_8192_max_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_ttl_gt_max_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_ip_%d\t\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_accepted_ip_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_ip_octets_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_err_ip_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_icmp_%d\t\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_tcp_%d\t\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_udp_%d\t\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_err_tcp_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_pause_count_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_pause_ctrl_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_unsup_ctrl_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_fcs_err_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_in_rng_len_err_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_out_rng_len_err_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_drop_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_discard_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_drop_ip_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_drop_udp_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_marker_pdu_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_lacpdu_frms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_unknown_pdu_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_marker_resp_pdu_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_fcs_discard_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_illegal_pdu_frms_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_switch_discard_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_len_discard_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_rpa_discard_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_l2_mgmt_discard_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_rts_discard_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_trash_discard_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_buff_full_discard_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_red_discard_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_xgmii_ctrl_err_cnt_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_xgmii_data_err_cnt_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_xgmii_char1_match_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_xgmii_err_sym_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_xgmii_column1_match_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_xgmii_char2_match_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_local_fault_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_xgmii_column2_match_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_jettison_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("rx_remote_fault_%d\t\t\t",
				&stat_size, data, i);
		}

		vxge_add_string("\n SOFTWARE STATISTICS%s\t\t\t",
			&stat_size, data, "");
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_add_string("soft_reset_cnt_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("unknown_alarms_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("network_sustained_fault_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("network_sustained_ok_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("kdfcctl_fifo0_overwrite_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("kdfcctl_fifo0_poison_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("kdfcctl_fifo0_dma_error_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("dblgen_fifo0_overflow_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("statsb_pif_chain_error_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("statsb_drop_timeout_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("target_illegal_access_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("ini_serr_det_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("prc_ring_bumps_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("prc_rxdcm_sc_err_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("prc_rxdcm_sc_abort_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("prc_quanta_size_err_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("ring_full_cnt_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("ring_usage_cnt_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("ring_usage_max_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("ring_reserve_free_swaps_cnt_%d\t",
				&stat_size, data, i);
			vxge_add_string("ring_total_compl_cnt_%d\t\t",
				&stat_size, data, i);
			for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
				vxge_add_string("rxd_t_code_err_cnt%d_%d\t\t",
					&stat_size, data, j, i);
			vxge_add_string("fifo_full_cnt_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("fifo_usage_cnt_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("fifo_usage_max_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("fifo_reserve_free_swaps_cnt_%d\t",
				&stat_size, data, i);
			vxge_add_string("fifo_total_compl_cnt_%d\t\t",
				&stat_size, data, i);
			vxge_add_string("fifo_total_posts_%d\t\t\t",
				&stat_size, data, i);
			vxge_add_string("fifo_total_buffers_%d\t\t",
				&stat_size, data, i);
			for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
				vxge_add_string("txd_t_code_err_cnt%d_%d\t\t",
					&stat_size, data, j, i);
		}

		vxge_add_string("\n HARDWARE STATISTICS%s\t\t\t",
				&stat_size, data, "");
		for (i = 0; i < vdev->no_of_vpath; i++) {
			vxge_add_string("ini_num_mwr_sent_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("ini_num_mrd_sent_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("ini_num_cpl_rcvd_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("ini_num_mwr_byte_sent_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("ini_num_cpl_byte_rcvd_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("wrcrdtarb_xoff_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rdcrdtarb_xoff_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("vpath_genstats_count0_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("vpath_genstats_count1_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("vpath_genstats_count2_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("vpath_genstats_count3_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("vpath_genstats_count4_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("vpath_genstats_count5_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("prog_event_vnum0_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("prog_event_vnum1_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("prog_event_vnum2_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("prog_event_vnum3_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_multi_cast_frame_discard_%d\t",
					&stat_size, data, i);
			vxge_add_string("rx_frm_transferred_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rxd_returned_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_mpa_len_fail_frms_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_mpa_mrk_fail_frms_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_mpa_crc_fail_frms_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_permitted_frms_%d\t\t",
					&stat_size, data, i);
			vxge_add_string("rx_vp_reset_discarded_frms_%d\t",
					&stat_size, data, i);
			vxge_add_string("rx_wol_frms_%d\t\t\t",
					&stat_size, data, i);
			vxge_add_string("tx_vp_reset_discarded_frms_%d\t",
					&stat_size, data, i);
		}

		memcpy(data + stat_size, &ethtool_driver_stats_keys,
			sizeof(ethtool_driver_stats_keys));
	}
}
Example #9
static int rmnet_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct usbnet	*unet = netdev_priv(dev);
	u32		old_opmode;
	int		prev_mtu = dev->mtu;
	int		rc = 0;

	old_opmode = unet->data[0]; /*data[0] saves operation mode*/
	/* Process IOCTL command */
	switch (cmd) {
	case RMNET_IOCTL_SET_LLP_ETHERNET:	/*Set Ethernet protocol*/
		/* Perform Ethernet config only if in IP mode currently*/
		if (test_bit(RMNET_MODE_LLP_IP, &unet->data[0])) {
			ether_setup(dev);
			random_ether_addr(dev->dev_addr);
			dev->mtu = prev_mtu;
			dev->netdev_ops = &rmnet_usb_ops_ether;
			clear_bit(RMNET_MODE_LLP_IP, &unet->data[0]);
			set_bit(RMNET_MODE_LLP_ETH, &unet->data[0]);
			DBG0("[%s] rmnet_ioctl(): set Ethernet protocol mode\n",
					dev->name);
		}
		break;

	case RMNET_IOCTL_SET_LLP_IP:		/* Set RAWIP protocol*/
		/* Perform IP config only if in Ethernet mode currently*/
		if (test_bit(RMNET_MODE_LLP_ETH, &unet->data[0])) {

			/* Undo config done in ether_setup() */
			dev->header_ops = 0;  /* No header */
			dev->type = ARPHRD_RAWIP;
			dev->hard_header_len = 0;
			dev->mtu = prev_mtu;
			dev->addr_len = 0;
			dev->flags &= ~(IFF_BROADCAST | IFF_MULTICAST);
			dev->needed_headroom = HEADROOM_FOR_QOS;
			dev->netdev_ops = &rmnet_usb_ops_ip;
			clear_bit(RMNET_MODE_LLP_ETH, &unet->data[0]);
			set_bit(RMNET_MODE_LLP_IP, &unet->data[0]);
			DBG0("[%s] rmnet_ioctl(): set IP protocol mode\n",
					dev->name);
		}
		break;

	case RMNET_IOCTL_GET_LLP:	/* Get link protocol state */
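		/* the mode flags are handed back to userspace by stuffing
		 * them into the ifru_data pointer field */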
		ifr->ifr_ifru.ifru_data = (void *)(unet->data[0]
						& (RMNET_MODE_LLP_ETH
						| RMNET_MODE_LLP_IP));
		break;

	case RMNET_IOCTL_SET_QOS_ENABLE:	/* Set QoS header enabled*/
		set_bit(RMNET_MODE_QOS, &unet->data[0]);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header enable\n",
				dev->name);
		break;

	case RMNET_IOCTL_SET_QOS_DISABLE:	/* Set QoS header disabled */
		clear_bit(RMNET_MODE_QOS, &unet->data[0]);
		DBG0("[%s] rmnet_ioctl(): set QMI QOS header disable\n",
				dev->name);
		break;

	case RMNET_IOCTL_GET_QOS:		/* Get QoS header state */
		ifr->ifr_ifru.ifru_data = (void *)(unet->data[0]
						& RMNET_MODE_QOS);
		break;

	case RMNET_IOCTL_GET_OPMODE:		/* Get operation mode*/
		ifr->ifr_ifru.ifru_data = (void *)unet->data[0];
		break;

	case RMNET_IOCTL_OPEN:			/* Open transport port */
		rc = usbnet_open(dev);
		DBG0("[%s] rmnet_ioctl(): open transport port\n", dev->name);
		break;

	case RMNET_IOCTL_CLOSE:			/* Close transport port*/
		rc = usbnet_stop(dev);
		DBG0("[%s] rmnet_ioctl(): close transport port\n", dev->name);
		break;

	default:
		dev_err(&unet->intf->dev, "[%s] error: "
			"rmnet_ioctl called for unsupported cmd[%d]",
			dev->name, cmd);
		return -EINVAL;
	}

	DBG2("[%s] %s: cmd=0x%x opmode old=0x%08x new=0x%08lx\n",
		dev->name, __func__, cmd, old_opmode, unet->data[0]);

	return rc;
}
Example #10
static u32 vxge_get_rx_csum(struct net_device *dev)
{
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	return vdev->rx_csum;
}
Example #11
static void vxge_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *estats, u64 *tmp_stats)
{
	int j, k;
	enum vxge_hw_status status;
	enum vxge_hw_status swstatus;
	struct vxge_vpath *vpath = NULL;

	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);
	struct __vxge_hw_device  *hldev = vdev->devh;
	struct vxge_hw_xmac_stats *xmac_stats;
	struct vxge_hw_device_stats_sw_info *sw_stats;
	struct vxge_hw_device_stats_hw_info *hw_stats;

	u64 *ptr = tmp_stats;

	memset(tmp_stats, 0,
		vxge_ethtool_get_sset_count(dev, ETH_SS_STATS) * sizeof(u64));

	xmac_stats = kzalloc(sizeof(struct vxge_hw_xmac_stats), GFP_KERNEL);
	if (xmac_stats == NULL) {
		vxge_debug_init(VXGE_ERR,
			"%s : %d Memory Allocation failed for xmac_stats",
				 __func__, __LINE__);
		return;
	}

	sw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_sw_info),
				GFP_KERNEL);
	if (sw_stats == NULL) {
		kfree(xmac_stats);
		vxge_debug_init(VXGE_ERR,
			"%s : %d Memory Allocation failed for sw_stats",
			__func__, __LINE__);
		return;
	}

	hw_stats = kzalloc(sizeof(struct vxge_hw_device_stats_hw_info),
				GFP_KERNEL);
	if (hw_stats == NULL) {
		kfree(xmac_stats);
		kfree(sw_stats);
		vxge_debug_init(VXGE_ERR,
			"%s : %d Memory Allocation failed for hw_stats",
			__func__, __LINE__);
		return;
	}

	*ptr++ = 0;
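	/* each banner label emitted by vxge_ethtool_get_strings() occupies
	 * a stat slot, so write a 0 to keep names and values aligned */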
	status = vxge_hw_device_xmac_stats_get(hldev, xmac_stats);
	if (status != VXGE_HW_OK) {
		if (status != VXGE_HW_ERR_PRIVILAGED_OPEARATION) {
			vxge_debug_init(VXGE_ERR,
				"%s : %d Failure in getting xmac stats",
				__func__, __LINE__);
		}
	}
	swstatus = vxge_hw_driver_stats_get(hldev, sw_stats);
	if (swstatus != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s : %d Failure in getting sw stats",
			__func__, __LINE__);
	}

	status = vxge_hw_device_stats_get(hldev, hw_stats);
	if (status != VXGE_HW_OK) {
		vxge_debug_init(VXGE_ERR,
			"%s : %d hw_stats_get error", __func__, __LINE__);
	}

	for (k = 0; k < vdev->no_of_vpath; k++) {
		struct vxge_hw_vpath_stats_hw_info *vpath_info;

		vpath = &vdev->vpaths[k];
		j = vpath->device_id;
		vpath_info = hw_stats->vpath_info[j];
		if (!vpath_info) {
			memset(ptr, 0, (VXGE_HW_VPATH_TX_STATS_LEN +
				VXGE_HW_VPATH_RX_STATS_LEN) * sizeof(u64));
			ptr += (VXGE_HW_VPATH_TX_STATS_LEN +
				VXGE_HW_VPATH_RX_STATS_LEN);
			continue;
		}

		*ptr++ = vpath_info->tx_stats.tx_ttl_eth_frms;
		*ptr++ = vpath_info->tx_stats.tx_ttl_eth_octets;
		*ptr++ = vpath_info->tx_stats.tx_data_octets;
		*ptr++ = vpath_info->tx_stats.tx_mcast_frms;
		*ptr++ = vpath_info->tx_stats.tx_bcast_frms;
		*ptr++ = vpath_info->tx_stats.tx_ucast_frms;
		*ptr++ = vpath_info->tx_stats.tx_tagged_frms;
		*ptr++ = vpath_info->tx_stats.tx_vld_ip;
		*ptr++ = vpath_info->tx_stats.tx_vld_ip_octets;
		*ptr++ = vpath_info->tx_stats.tx_icmp;
		*ptr++ = vpath_info->tx_stats.tx_tcp;
		*ptr++ = vpath_info->tx_stats.tx_rst_tcp;
		*ptr++ = vpath_info->tx_stats.tx_udp;
		*ptr++ = vpath_info->tx_stats.tx_unknown_protocol;
		*ptr++ = vpath_info->tx_stats.tx_lost_ip;
		*ptr++ = vpath_info->tx_stats.tx_parse_error;
		*ptr++ = vpath_info->tx_stats.tx_tcp_offload;
		*ptr++ = vpath_info->tx_stats.tx_retx_tcp_offload;
		*ptr++ = vpath_info->tx_stats.tx_lost_ip_offload;
		*ptr++ = vpath_info->rx_stats.rx_ttl_eth_frms;
		*ptr++ = vpath_info->rx_stats.rx_vld_frms;
		*ptr++ = vpath_info->rx_stats.rx_offload_frms;
		*ptr++ = vpath_info->rx_stats.rx_ttl_eth_octets;
		*ptr++ = vpath_info->rx_stats.rx_data_octets;
		*ptr++ = vpath_info->rx_stats.rx_offload_octets;
		*ptr++ = vpath_info->rx_stats.rx_vld_mcast_frms;
		*ptr++ = vpath_info->rx_stats.rx_vld_bcast_frms;
		*ptr++ = vpath_info->rx_stats.rx_accepted_ucast_frms;
		*ptr++ = vpath_info->rx_stats.rx_accepted_nucast_frms;
		*ptr++ = vpath_info->rx_stats.rx_tagged_frms;
		*ptr++ = vpath_info->rx_stats.rx_long_frms;
		*ptr++ = vpath_info->rx_stats.rx_usized_frms;
		*ptr++ = vpath_info->rx_stats.rx_osized_frms;
		*ptr++ = vpath_info->rx_stats.rx_frag_frms;
		*ptr++ = vpath_info->rx_stats.rx_jabber_frms;
		*ptr++ = vpath_info->rx_stats.rx_ttl_64_frms;
		*ptr++ = vpath_info->rx_stats.rx_ttl_65_127_frms;
		*ptr++ = vpath_info->rx_stats.rx_ttl_128_255_frms;
		*ptr++ = vpath_info->rx_stats.rx_ttl_256_511_frms;
		*ptr++ = vpath_info->rx_stats.rx_ttl_512_1023_frms;
		*ptr++ = vpath_info->rx_stats.rx_ttl_1024_1518_frms;
		*ptr++ = vpath_info->rx_stats.rx_ttl_1519_4095_frms;
		*ptr++ = vpath_info->rx_stats.rx_ttl_4096_8191_frms;
		*ptr++ = vpath_info->rx_stats.rx_ttl_8192_max_frms;
		*ptr++ = vpath_info->rx_stats.rx_ttl_gt_max_frms;
		*ptr++ = vpath_info->rx_stats.rx_ip;
		*ptr++ = vpath_info->rx_stats.rx_accepted_ip;
		*ptr++ = vpath_info->rx_stats.rx_ip_octets;
		*ptr++ = vpath_info->rx_stats.rx_err_ip;
		*ptr++ = vpath_info->rx_stats.rx_icmp;
		*ptr++ = vpath_info->rx_stats.rx_tcp;
		*ptr++ = vpath_info->rx_stats.rx_udp;
		*ptr++ = vpath_info->rx_stats.rx_err_tcp;
		*ptr++ = vpath_info->rx_stats.rx_lost_frms;
		*ptr++ = vpath_info->rx_stats.rx_lost_ip;
		*ptr++ = vpath_info->rx_stats.rx_lost_ip_offload;
		*ptr++ = vpath_info->rx_stats.rx_various_discard;
		*ptr++ = vpath_info->rx_stats.rx_sleep_discard;
		*ptr++ = vpath_info->rx_stats.rx_red_discard;
		*ptr++ = vpath_info->rx_stats.rx_queue_full_discard;
		*ptr++ = vpath_info->rx_stats.rx_mpa_ok_frms;
	}
	*ptr++ = 0;
	for (k = 0; k < vdev->max_config_port; k++) {
		*ptr++ = xmac_stats->aggr_stats[k].tx_frms;
		*ptr++ = xmac_stats->aggr_stats[k].tx_data_octets;
		*ptr++ = xmac_stats->aggr_stats[k].tx_mcast_frms;
		*ptr++ = xmac_stats->aggr_stats[k].tx_bcast_frms;
		*ptr++ = xmac_stats->aggr_stats[k].tx_discarded_frms;
		*ptr++ = xmac_stats->aggr_stats[k].tx_errored_frms;
		*ptr++ = xmac_stats->aggr_stats[k].rx_frms;
		*ptr++ = xmac_stats->aggr_stats[k].rx_data_octets;
		*ptr++ = xmac_stats->aggr_stats[k].rx_mcast_frms;
		*ptr++ = xmac_stats->aggr_stats[k].rx_bcast_frms;
		*ptr++ = xmac_stats->aggr_stats[k].rx_discarded_frms;
		*ptr++ = xmac_stats->aggr_stats[k].rx_errored_frms;
		*ptr++ = xmac_stats->aggr_stats[k].rx_unknown_slow_proto_frms;
	}
	*ptr++ = 0;
	for (k = 0; k < vdev->max_config_port; k++) {
		*ptr++ = xmac_stats->port_stats[k].tx_ttl_frms;
		*ptr++ = xmac_stats->port_stats[k].tx_ttl_octets;
		*ptr++ = xmac_stats->port_stats[k].tx_data_octets;
		*ptr++ = xmac_stats->port_stats[k].tx_mcast_frms;
		*ptr++ = xmac_stats->port_stats[k].tx_bcast_frms;
		*ptr++ = xmac_stats->port_stats[k].tx_ucast_frms;
		*ptr++ = xmac_stats->port_stats[k].tx_tagged_frms;
		*ptr++ = xmac_stats->port_stats[k].tx_vld_ip;
		*ptr++ = xmac_stats->port_stats[k].tx_vld_ip_octets;
		*ptr++ = xmac_stats->port_stats[k].tx_icmp;
		*ptr++ = xmac_stats->port_stats[k].tx_tcp;
		*ptr++ = xmac_stats->port_stats[k].tx_rst_tcp;
		*ptr++ = xmac_stats->port_stats[k].tx_udp;
		*ptr++ = xmac_stats->port_stats[k].tx_parse_error;
		*ptr++ = xmac_stats->port_stats[k].tx_unknown_protocol;
		*ptr++ = xmac_stats->port_stats[k].tx_pause_ctrl_frms;
		*ptr++ = xmac_stats->port_stats[k].tx_marker_pdu_frms;
		*ptr++ = xmac_stats->port_stats[k].tx_lacpdu_frms;
		*ptr++ = xmac_stats->port_stats[k].tx_drop_ip;
		*ptr++ = xmac_stats->port_stats[k].tx_marker_resp_pdu_frms;
		*ptr++ = xmac_stats->port_stats[k].tx_xgmii_char2_match;
		*ptr++ = xmac_stats->port_stats[k].tx_xgmii_char1_match;
		*ptr++ = xmac_stats->port_stats[k].tx_xgmii_column2_match;
		*ptr++ = xmac_stats->port_stats[k].tx_xgmii_column1_match;
		*ptr++ = xmac_stats->port_stats[k].tx_any_err_frms;
		*ptr++ = xmac_stats->port_stats[k].tx_drop_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_ttl_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_vld_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_offload_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_ttl_octets;
		*ptr++ = xmac_stats->port_stats[k].rx_data_octets;
		*ptr++ = xmac_stats->port_stats[k].rx_offload_octets;
		*ptr++ = xmac_stats->port_stats[k].rx_vld_mcast_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_vld_bcast_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_accepted_ucast_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_accepted_nucast_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_tagged_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_long_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_usized_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_osized_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_frag_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_jabber_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_ttl_64_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_ttl_65_127_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_ttl_128_255_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_ttl_256_511_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_ttl_512_1023_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_ttl_1024_1518_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_ttl_1519_4095_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_ttl_4096_8191_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_ttl_8192_max_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_ttl_gt_max_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_ip;
		*ptr++ = xmac_stats->port_stats[k].rx_accepted_ip;
		*ptr++ = xmac_stats->port_stats[k].rx_ip_octets;
		*ptr++ = xmac_stats->port_stats[k].rx_err_ip;
		*ptr++ = xmac_stats->port_stats[k].rx_icmp;
		*ptr++ = xmac_stats->port_stats[k].rx_tcp;
		*ptr++ = xmac_stats->port_stats[k].rx_udp;
		*ptr++ = xmac_stats->port_stats[k].rx_err_tcp;
		*ptr++ = xmac_stats->port_stats[k].rx_pause_count;
		*ptr++ = xmac_stats->port_stats[k].rx_pause_ctrl_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_unsup_ctrl_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_fcs_err_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_in_rng_len_err_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_out_rng_len_err_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_drop_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_discarded_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_drop_ip;
		*ptr++ = xmac_stats->port_stats[k].rx_drop_udp;
		*ptr++ = xmac_stats->port_stats[k].rx_marker_pdu_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_lacpdu_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_unknown_pdu_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_marker_resp_pdu_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_fcs_discard;
		*ptr++ = xmac_stats->port_stats[k].rx_illegal_pdu_frms;
		*ptr++ = xmac_stats->port_stats[k].rx_switch_discard;
		*ptr++ = xmac_stats->port_stats[k].rx_len_discard;
		*ptr++ = xmac_stats->port_stats[k].rx_rpa_discard;
		*ptr++ = xmac_stats->port_stats[k].rx_l2_mgmt_discard;
		*ptr++ = xmac_stats->port_stats[k].rx_rts_discard;
		*ptr++ = xmac_stats->port_stats[k].rx_trash_discard;
		*ptr++ = xmac_stats->port_stats[k].rx_buff_full_discard;
		*ptr++ = xmac_stats->port_stats[k].rx_red_discard;
		*ptr++ = xmac_stats->port_stats[k].rx_xgmii_ctrl_err_cnt;
		*ptr++ = xmac_stats->port_stats[k].rx_xgmii_data_err_cnt;
		*ptr++ = xmac_stats->port_stats[k].rx_xgmii_char1_match;
		*ptr++ = xmac_stats->port_stats[k].rx_xgmii_err_sym;
		*ptr++ = xmac_stats->port_stats[k].rx_xgmii_column1_match;
		*ptr++ = xmac_stats->port_stats[k].rx_xgmii_char2_match;
		*ptr++ = xmac_stats->port_stats[k].rx_local_fault;
		*ptr++ = xmac_stats->port_stats[k].rx_xgmii_column2_match;
		*ptr++ = xmac_stats->port_stats[k].rx_jettison;
		*ptr++ = xmac_stats->port_stats[k].rx_remote_fault;
	}

	*ptr++ = 0;
	for (k = 0; k < vdev->no_of_vpath; k++) {
		struct vxge_hw_vpath_stats_sw_info *vpath_info;

		vpath = &vdev->vpaths[k];
		j = vpath->device_id;
		vpath_info = (struct vxge_hw_vpath_stats_sw_info *)
				&sw_stats->vpath_info[j];
		*ptr++ = vpath_info->soft_reset_cnt;
		*ptr++ = vpath_info->error_stats.unknown_alarms;
		*ptr++ = vpath_info->error_stats.network_sustained_fault;
		*ptr++ = vpath_info->error_stats.network_sustained_ok;
		*ptr++ = vpath_info->error_stats.kdfcctl_fifo0_overwrite;
		*ptr++ = vpath_info->error_stats.kdfcctl_fifo0_poison;
		*ptr++ = vpath_info->error_stats.kdfcctl_fifo0_dma_error;
		*ptr++ = vpath_info->error_stats.dblgen_fifo0_overflow;
		*ptr++ = vpath_info->error_stats.statsb_pif_chain_error;
		*ptr++ = vpath_info->error_stats.statsb_drop_timeout;
		*ptr++ = vpath_info->error_stats.target_illegal_access;
		*ptr++ = vpath_info->error_stats.ini_serr_det;
		*ptr++ = vpath_info->error_stats.prc_ring_bumps;
		*ptr++ = vpath_info->error_stats.prc_rxdcm_sc_err;
		*ptr++ = vpath_info->error_stats.prc_rxdcm_sc_abort;
		*ptr++ = vpath_info->error_stats.prc_quanta_size_err;
		*ptr++ = vpath_info->ring_stats.common_stats.full_cnt;
		*ptr++ = vpath_info->ring_stats.common_stats.usage_cnt;
		*ptr++ = vpath_info->ring_stats.common_stats.usage_max;
		*ptr++ = vpath_info->ring_stats.common_stats.reserve_free_swaps_cnt;
		*ptr++ = vpath_info->ring_stats.common_stats.total_compl_cnt;
		for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
			*ptr++ = vpath_info->ring_stats.rxd_t_code_err_cnt[j];
		*ptr++ = vpath_info->fifo_stats.common_stats.full_cnt;
		*ptr++ = vpath_info->fifo_stats.common_stats.usage_cnt;
		*ptr++ = vpath_info->fifo_stats.common_stats.usage_max;
		*ptr++ = vpath_info->fifo_stats.common_stats.reserve_free_swaps_cnt;
		*ptr++ = vpath_info->fifo_stats.common_stats.total_compl_cnt;
		*ptr++ = vpath_info->fifo_stats.total_posts;
		*ptr++ = vpath_info->fifo_stats.total_buffers;
		for (j = 0; j < VXGE_HW_DTR_MAX_T_CODE; j++)
			*ptr++ = vpath_info->fifo_stats.txd_t_code_err_cnt[j];
	}

	*ptr++ = 0;
	for (k = 0; k < vdev->no_of_vpath; k++) {
		struct vxge_hw_vpath_stats_hw_info *vpath_info;

		vpath = &vdev->vpaths[k];
		j = vpath->device_id;
		vpath_info = hw_stats->vpath_info[j];
		if (!vpath_info) {
			memset(ptr, 0, VXGE_HW_VPATH_STATS_LEN * sizeof(u64));
			ptr += VXGE_HW_VPATH_STATS_LEN;
			continue;
		}
		*ptr++ = vpath_info->ini_num_mwr_sent;
		*ptr++ = vpath_info->ini_num_mrd_sent;
		*ptr++ = vpath_info->ini_num_cpl_rcvd;
		*ptr++ = vpath_info->ini_num_mwr_byte_sent;
		*ptr++ = vpath_info->ini_num_cpl_byte_rcvd;
		*ptr++ = vpath_info->wrcrdtarb_xoff;
		*ptr++ = vpath_info->rdcrdtarb_xoff;
		*ptr++ = vpath_info->vpath_genstats_count0;
		*ptr++ = vpath_info->vpath_genstats_count1;
		*ptr++ = vpath_info->vpath_genstats_count2;
		*ptr++ = vpath_info->vpath_genstats_count3;
		*ptr++ = vpath_info->vpath_genstats_count4;
		*ptr++ = vpath_info->vpath_genstats_count5;
		*ptr++ = vpath_info->prog_event_vnum0;
		*ptr++ = vpath_info->prog_event_vnum1;
		*ptr++ = vpath_info->prog_event_vnum2;
		*ptr++ = vpath_info->prog_event_vnum3;
		*ptr++ = vpath_info->rx_multi_cast_frame_discard;
		*ptr++ = vpath_info->rx_frm_transferred;
		*ptr++ = vpath_info->rxd_returned;
		*ptr++ = vpath_info->rx_mpa_len_fail_frms;
		*ptr++ = vpath_info->rx_mpa_mrk_fail_frms;
		*ptr++ = vpath_info->rx_mpa_crc_fail_frms;
		*ptr++ = vpath_info->rx_permitted_frms;
		*ptr++ = vpath_info->rx_vp_reset_discarded_frms;
		*ptr++ = vpath_info->rx_wol_frms;
		*ptr++ = vpath_info->tx_vp_reset_discarded_frms;
	}

	*ptr++ = 0;
	*ptr++ = vdev->stats.vpaths_open;
	*ptr++ = vdev->stats.vpath_open_fail;
	*ptr++ = vdev->stats.link_up;
	*ptr++ = vdev->stats.link_down;

	for (k = 0; k < vdev->no_of_vpath; k++) {
		*ptr += vdev->vpaths[k].fifo.stats.tx_frms;
		*(ptr + 1) += vdev->vpaths[k].fifo.stats.tx_errors;
		*(ptr + 2) += vdev->vpaths[k].fifo.stats.tx_bytes;
		*(ptr + 3) += vdev->vpaths[k].fifo.stats.txd_not_free;
		*(ptr + 4) += vdev->vpaths[k].fifo.stats.txd_out_of_desc;
		*(ptr + 5) += vdev->vpaths[k].ring.stats.rx_frms;
		*(ptr + 6) += vdev->vpaths[k].ring.stats.rx_errors;
		*(ptr + 7) += vdev->vpaths[k].ring.stats.rx_bytes;
		*(ptr + 8) += vdev->vpaths[k].ring.stats.rx_mcast;
		*(ptr + 9) += vdev->vpaths[k].fifo.stats.pci_map_fail +
				vdev->vpaths[k].ring.stats.pci_map_fail;
		*(ptr + 10) += vdev->vpaths[k].ring.stats.skb_alloc_fail;
	}

	ptr += 12;

	kfree(xmac_stats);
	kfree(sw_stats);
	kfree(hw_stats);
}
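
A note on the pattern above: the long *ptr++ runs are only half of the ethtool contract. The core sizes the buffer from .get_sset_count and labels each slot from .get_strings, so the count and ordering must match exactly. Below is a minimal sketch of that three-way pairing, assuming invented demo_* names rather than the real vxge tables.

#include <linux/ethtool.h>
#include <linux/kernel.h>
#include <linux/netdevice.h>
#include <linux/string.h>

/* Minimal sketch of the ethtool stats contract; demo_* names are
 * hypothetical stand-ins for a driver's real tables.
 */
struct demo_priv {
	u64 rx_frames;
	u64 tx_frames;
};

static const char demo_stats_keys[][ETH_GSTRING_LEN] = {
	"rx_frames",
	"tx_frames",
};

static int demo_get_sset_count(struct net_device *dev, int sset)
{
	/* tells the core how many u64 slots to allocate */
	if (sset != ETH_SS_STATS)
		return -EOPNOTSUPP;
	return ARRAY_SIZE(demo_stats_keys);
}

static void demo_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
	if (sset == ETH_SS_STATS)
		memcpy(data, demo_stats_keys, sizeof(demo_stats_keys));
}

static void demo_get_ethtool_stats(struct net_device *dev,
				   struct ethtool_stats *stats, u64 *data)
{
	struct demo_priv *p = netdev_priv(dev);

	/* must fill exactly as many slots as get_sset_count reported,
	 * in the same order as the string table above
	 */
	*data++ = p->rx_frames;
	*data++ = p->tx_frames;
}
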
Example #12
0
static int vxge_ethtool_get_regs_len(struct net_device *dev)
{
	struct vxgedev *vdev = (struct vxgedev *)netdev_priv(dev);

	return sizeof(struct vxge_hw_vpath_reg) * vdev->no_of_vpath;
}
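
.get_regs_len only tells ethtool how large a buffer to allocate; the paired .get_regs callback fills it. A hedged sketch of what that counterpart could look like here; demo_get_regs and the demo_read_vpath_regs() helper are illustrative, not the actual vxge implementation.

/* hypothetical helper: snapshot one vpath's register block */
static void demo_read_vpath_regs(struct vxgedev *vdev, int vp,
				 struct vxge_hw_vpath_reg *out);

static void demo_get_regs(struct net_device *dev,
			  struct ethtool_regs *regs, void *space)
{
	struct vxgedev *vdev = netdev_priv(dev);
	struct vxge_hw_vpath_reg *out = space;
	int i;

	regs->version = 0;
	/* one register image per open vpath, matching the length
	 * reported by vxge_ethtool_get_regs_len() above
	 */
	for (i = 0; i < vdev->no_of_vpath; i++)
		demo_read_vpath_regs(vdev, i, &out[i]);
}
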
Example #13
0
static int netdev_event(struct notifier_block *nb, unsigned long notification, void *ptr)
{
    struct in_device *in_dev;    /* ALPS00409409406 */
    UINT_8  ip[4] = { 0 };
    UINT_32 u4NumIPv4 = 0;
//#ifdef  CONFIG_IPV6
#if 0
    UINT_8  ip6[16] = { 0 };     // FIXME: avoid allocating large buffers on the stack
    UINT_32 u4NumIPv6 = 0;
#endif
    struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
    struct net_device *prDev = ifa->ifa_dev->dev;
    UINT_32 i;
    P_PARAM_NETWORK_ADDRESS_IP prParamIpAddr;
    P_GLUE_INFO_T prGlueInfo = NULL;

    if (prDev == NULL) {
        DBGLOG(REQ, INFO, ("netdev_event: device is empty.\n"));
        return NOTIFY_DONE;
    }

    if ((strncmp(prDev->name, "p2p", 3) != 0) && (strncmp(prDev->name, "wlan", 4) != 0)) {
        DBGLOG(REQ, INFO, ("netdev_event: xxx\n"));
        return NOTIFY_DONE;
    }

    prGlueInfo = *((P_GLUE_INFO_T *) netdev_priv(prDev));

    if (prGlueInfo == NULL) {
        DBGLOG(REQ, INFO, ("netdev_event: prGlueInfo is empty.\n"));
        return NOTIFY_DONE;
    }
    ASSERT(prGlueInfo);

#ifdef FIX_ALPS00409409406
    // <3> get the IPv4 address
    in_dev = in_dev_get(prDev);
    if (!in_dev)
        return NOTIFY_DONE;

    //rtnl_lock();
    if (!in_dev->ifa_list || !in_dev->ifa_list->ifa_local) {
        //rtnl_unlock();
        in_dev_put(in_dev);
        DBGLOG(REQ, INFO, ("ip is not available.\n"));
        return NOTIFY_DONE;
    }
    // <4> copy the IPv4 address
    kalMemCopy(ip, &(in_dev->ifa_list->ifa_local), sizeof(ip));
    //rtnl_unlock();
    in_dev_put(in_dev);

    DBGLOG(REQ, INFO, ("ip is %d.%d.%d.%d\n",
            ip[0], ip[1], ip[2], ip[3]));
#else
    // <3> get the IPv4 address
    if (!prDev || !(prDev->ip_ptr) ||
            !((struct in_device *)(prDev->ip_ptr))->ifa_list ||
            !(&(((struct in_device *)(prDev->ip_ptr))->ifa_list->ifa_local))) {
        DBGLOG(REQ, INFO, ("ip is not available.\n"));
        return NOTIFY_DONE;
    }

    kalMemCopy(ip, &(((struct in_device *)(prDev->ip_ptr))->ifa_list->ifa_local), sizeof(ip));
    DBGLOG(REQ, INFO, ("ip is %d.%d.%d.%d\n",
            ip[0], ip[1], ip[2], ip[3]));
#endif

    // TODO: walk the ifa_list to collect the whole set of IPv4 addresses
    if (!((ip[0] == 0) &&
         (ip[1] == 0) &&
         (ip[2] == 0) &&
         (ip[3] == 0))) {
        u4NumIPv4++;
    }

#if defined(MTK_WLAN_ARP_OFFLOAD)
	if(NETDEV_UP == notification && PARAM_MEDIA_STATE_CONNECTED == prGlueInfo->eParamMediaStateIndicated){
		PARAM_CUSTOM_SW_CTRL_STRUC_T SwCtrlInfo;
		UINT_32 u4SetInfoLen;
		WLAN_STATUS rStatus = WLAN_STATUS_FAILURE;
		
		SwCtrlInfo.u4Id = 0x90110000;
		SwCtrlInfo.u4Data = 1;
		
		rStatus = kalIoctl(prGlueInfo,
				   wlanoidSetSwCtrlWrite,
				   (PVOID)&SwCtrlInfo,
				   sizeof(SwCtrlInfo),
				   FALSE,
				   FALSE,
				   TRUE,
				   FALSE,
				   &u4SetInfoLen);

		if (rStatus != WLAN_STATUS_SUCCESS) {
			DBGLOG(REQ, INFO, ("ARP OFFLOAD fail 0x%lx\n", rStatus));
		} else {
			DBGLOG(REQ, INFO, ("ARP OFFLOAD success\n"));
		}
	}
#endif


#ifdef FIX_ALPS00409409406
    if (atomic_read(&fgIsUnderEarlierSuspend) == 0) {
#else
    if (fgIsUnderEarlierSuspend == false) {
#endif
        DBGLOG(REQ, INFO, ("netdev_event: PARAM_MEDIA_STATE_DISCONNECTED. (%d)\n",
                prGlueInfo->eParamMediaStateIndicated));
        return NOTIFY_DONE;
    }

//#ifdef  CONFIG_IPV6
#if 0
    if (!prDev || !(prDev->ip6_ptr) ||
            !((struct in_device *)(prDev->ip6_ptr))->ifa_list ||
            !(&(((struct in_device *)(prDev->ip6_ptr))->ifa_list->ifa_local))) {
        printk(KERN_INFO "ipv6 is not available.\n");
        return NOTIFY_DONE;
    }

    kalMemCopy(ip6, &(((struct in_device *)(prDev->ip6_ptr))->ifa_list->ifa_local), sizeof(ip6));
    printk(KERN_INFO "ipv6 is %d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d.%d\n",
            ip6[0], ip6[1], ip6[2], ip6[3],
            ip6[4], ip6[5], ip6[6], ip6[7],
            ip6[8], ip6[9], ip6[10], ip6[11],
            ip6[12], ip6[13], ip6[14], ip6[15]);

    // TODO: walk the list to collect the whole set of IPv6 addresses
    if (!((ip6[0] == 0) &&
         (ip6[1] == 0) &&
         (ip6[2] == 0) &&
         (ip6[3] == 0) &&
         (ip6[4] == 0) &&
         (ip6[5] == 0))) {
        //u4NumIPv6++;
    }
#endif

    // here we can compare the dev with the other networks' netdevs to
    // set the proper ARP filter
    //
    // IMPORTANT: make sure this context can sleep; if it cannot,
    // a kernel thread should be scheduled to do this work for us

    // <7> set up the ARP filter
    {
        WLAN_STATUS rStatus = WLAN_STATUS_FAILURE;
        UINT_32 u4SetInfoLen = 0;
        UINT_8 aucBuf[32] = {0};
        UINT_32 u4Len = OFFSET_OF(PARAM_NETWORK_ADDRESS_LIST, arAddress);
        P_PARAM_NETWORK_ADDRESS_LIST prParamNetAddrList = (P_PARAM_NETWORK_ADDRESS_LIST)aucBuf;
        P_PARAM_NETWORK_ADDRESS prParamNetAddr = prParamNetAddrList->arAddress;

//#ifdef  CONFIG_IPV6
#if 0
        prParamNetAddrList->u4AddressCount = u4NumIPv4 + u4NumIPv6;
#else
        prParamNetAddrList->u4AddressCount = u4NumIPv4;
#endif
        prParamNetAddrList->u2AddressType = PARAM_PROTOCOL_ID_TCP_IP;
        for (i = 0; i < u4NumIPv4; i++) {
            prParamNetAddr->u2AddressLength = sizeof(PARAM_NETWORK_ADDRESS_IP);
            prParamNetAddr->u2AddressType = PARAM_PROTOCOL_ID_TCP_IP;
#if 0
            kalMemCopy(prParamNetAddr->aucAddress, ip, sizeof(ip));
            prParamNetAddr = (P_PARAM_NETWORK_ADDRESS)((UINT_32)prParamNetAddr + sizeof(ip));
            u4Len += OFFSET_OF(PARAM_NETWORK_ADDRESS, aucAddress) + sizeof(ip);
#else
            prParamIpAddr = (P_PARAM_NETWORK_ADDRESS_IP)prParamNetAddr->aucAddress;
            kalMemCopy(&prParamIpAddr->in_addr, ip, sizeof(ip));
            prParamNetAddr = (P_PARAM_NETWORK_ADDRESS)((UINT_32)prParamNetAddr + sizeof(PARAM_NETWORK_ADDRESS));
            u4Len += OFFSET_OF(PARAM_NETWORK_ADDRESS, aucAddress) + sizeof(PARAM_NETWORK_ADDRESS);
#endif
        }
//#ifdef  CONFIG_IPV6
#if 0
        for (i = 0; i < u4NumIPv6; i++) {
            prParamNetAddr->u2AddressLength = 6;
            prParamNetAddr->u2AddressType = PARAM_PROTOCOL_ID_TCP_IP;
            kalMemCopy(prParamNetAddr->aucAddress, ip6, sizeof(ip6));
            prParamNetAddr = (P_PARAM_NETWORK_ADDRESS)((UINT_32)prParamNetAddr + sizeof(ip6));
            u4Len += OFFSET_OF(PARAM_NETWORK_ADDRESS, aucAddress) + sizeof(ip6);
       }
#endif
        ASSERT(u4Len <= sizeof(aucBuf));

    DBGLOG(REQ, INFO, ("kalIoctl (0x%x, 0x%x)\n", prGlueInfo, prParamNetAddrList));

        rStatus = kalIoctl(prGlueInfo,
                           wlanoidSetNetworkAddress,
                           (PVOID)prParamNetAddrList,
                           u4Len,
                           FALSE,
                           FALSE,
                           TRUE,
                           FALSE,
                           &u4SetInfoLen);

        if (rStatus != WLAN_STATUS_SUCCESS) {
            DBGLOG(REQ, INFO, ("set HW pattern filter fail 0x%lx\n", rStatus));
        }
    }

    return NOTIFY_DONE;
}

static struct notifier_block inetaddr_notifier = {
    .notifier_call      =   netdev_event,
};

void wlanRegisterNotifier(void)
{
    register_inetaddr_notifier(&inetaddr_notifier);
}
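
wlanRegisterNotifier() needs a matching unregister call on driver unload, or the freed notifier block would be left on the chain. A minimal sketch, assuming only the standard inetaddr notifier API; the demo_* skeleton below is illustrative and not part of this driver.

#include <linux/inetdevice.h>
#include <linux/notifier.h>

void wlanUnregisterNotifier(void)
{
    unregister_inetaddr_notifier(&inetaddr_notifier);
}

/* Stand-alone inetaddr notifier skeleton: the callback receives a
 * struct in_ifaddr and must return NOTIFY_DONE / NOTIFY_OK.
 */
static int demo_inetaddr_event(struct notifier_block *nb,
                               unsigned long event, void *ptr)
{
    struct in_ifaddr *ifa = ptr;

    if (event == NETDEV_UP)
        pr_info("demo: %s got address %pI4\n",
                ifa->ifa_dev->dev->name, &ifa->ifa_local);
    return NOTIFY_DONE;
}
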
Example #14
0
/**
 * ixgbe_ipsec_rx - decode ipsec bits from Rx descriptor
 * @rx_ring: receiving ring
 * @rx_desc: receive data descriptor
 * @skb: current data packet
 *
 * Determine if there was an ipsec encapsulation noticed, and if so set up
 * the resulting status for later in the receive stack.
 **/
void ixgbe_ipsec_rx(struct ixgbe_ring *rx_ring,
		    union ixgbe_adv_rx_desc *rx_desc,
		    struct sk_buff *skb)
{
	struct ixgbe_adapter *adapter = netdev_priv(rx_ring->netdev);
	__le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
	__le16 ipsec_pkt_types = cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH |
					     IXGBE_RXDADV_PKTTYPE_IPSEC_ESP);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct xfrm_offload *xo = NULL;
	struct xfrm_state *xs = NULL;
	struct ipv6hdr *ip6 = NULL;
	struct iphdr *ip4 = NULL;
	void *daddr;
	__be32 spi;
	u8 *c_hdr;
	u8 proto;

	/* Find the IP and crypto headers in the data.
	 * We can assume there is no VLAN header in the way, because the
	 * hardware won't recognize the IPsec packet anyway, and VLAN
	 * devices don't currently support xfrm offload.
	 */
	if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV4)) {
		ip4 = (struct iphdr *)(skb->data + ETH_HLEN);
		daddr = &ip4->daddr;
		c_hdr = (u8 *)ip4 + ip4->ihl * 4;
	} else if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPV6)) {
		ip6 = (struct ipv6hdr *)(skb->data + ETH_HLEN);
		daddr = &ip6->daddr;
		c_hdr = (u8 *)ip6 + sizeof(struct ipv6hdr);
	} else {
		return;
	}

	switch (pkt_info & ipsec_pkt_types) {
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_AH):
		spi = ((struct ip_auth_hdr *)c_hdr)->spi;
		proto = IPPROTO_AH;
		break;
	case cpu_to_le16(IXGBE_RXDADV_PKTTYPE_IPSEC_ESP):
		spi = ((struct ip_esp_hdr *)c_hdr)->spi;
		proto = IPPROTO_ESP;
		break;
	default:
		return;
	}

	xs = ixgbe_ipsec_find_rx_state(ipsec, daddr, proto, spi, !!ip4);
	if (unlikely(!xs))
		return;

	skb->sp = secpath_dup(skb->sp);
	if (unlikely(!skb->sp))
		return;

	skb->sp->xvec[skb->sp->len++] = xs;
	skb->sp->olen++;
	xo = xfrm_offload(skb);
	xo->flags = CRYPTO_DONE;
	xo->status = CRYPTO_SUCCESS;

	adapter->rx_ipsec++;
}
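
The two assignments to xo above are what later stages key on. As a hedged sketch (not ixgbe code), a consumer on the receive path could test those markers like this before skipping software decryption; demo_rx_already_decrypted is an invented name.

#include <net/xfrm.h>

static bool demo_rx_already_decrypted(struct sk_buff *skb)
{
	struct xfrm_offload *xo = xfrm_offload(skb);

	/* secpath present and the hardware reported a good decrypt */
	return skb->sp && xo &&
	       (xo->flags & CRYPTO_DONE) &&
	       xo->status == CRYPTO_SUCCESS;
}
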
Example #15
0
static int
receive_packet (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	int entry = np->cur_rx % RX_RING_SIZE;
	int cnt = 30;

	/* If RFDDone, FrameStart and FrameEnd set, there is a new packet in. */
	while (1) {
		struct netdev_desc *desc = &np->rx_ring[entry];
		int pkt_len;
		u64 frame_status;

		if (!(desc->status & cpu_to_le64(RFDDone)) ||
		    !(desc->status & cpu_to_le64(FrameStart)) ||
		    !(desc->status & cpu_to_le64(FrameEnd)))
			break;

		/* Chip omits the CRC. */
		frame_status = le64_to_cpu(desc->status);
		pkt_len = frame_status & 0xffff;
		if (--cnt < 0)
			break;
		/* Update rx error statistics, drop packet. */
		if (frame_status & RFS_Errors) {
			np->stats.rx_errors++;
			if (frame_status & (RxRuntFrame | RxLengthError))
				np->stats.rx_length_errors++;
			if (frame_status & RxFCSError)
				np->stats.rx_crc_errors++;
			if (frame_status & RxAlignmentError && np->speed != 1000)
				np->stats.rx_frame_errors++;
			if (frame_status & RxFIFOOverrun)
				np->stats.rx_fifo_errors++;
		} else {
			struct sk_buff *skb = NULL;

			/* Small skbuffs for short packets */
			if (pkt_len > copy_thresh) {
				pci_unmap_single (np->pdev,
						  desc_to_dma(desc),
						  np->rx_buf_sz,
						  PCI_DMA_FROMDEVICE);
				skb_put (skb = np->rx_skbuff[entry], pkt_len);
				np->rx_skbuff[entry] = NULL;
			} else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
				pci_dma_sync_single_for_cpu(np->pdev,
							    desc_to_dma(desc),
							    np->rx_buf_sz,
							    PCI_DMA_FROMDEVICE);
				skb_copy_to_linear_data (skb,
						  np->rx_skbuff[entry]->data,
						  pkt_len);
				skb_put (skb, pkt_len);
				pci_dma_sync_single_for_device(np->pdev,
							       desc_to_dma(desc),
							       np->rx_buf_sz,
							       PCI_DMA_FROMDEVICE);
			}
			/* If the small-packet allocation above failed, skb is
			 * still NULL: drop the frame instead of dereferencing
			 * a NULL pointer.
			 */
			if (!skb) {
				np->stats.rx_dropped++;
			} else {
				skb->protocol = eth_type_trans (skb, dev);
#if 0
				/* Checksum done by hw, but csum value unavailable. */
				if (np->pdev->pci_rev_id >= 0x0c &&
				    !(frame_status & (TCPError | UDPError | IPError))) {
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				}
#endif
				netif_rx (skb);
			}
		}
		entry = (entry + 1) % RX_RING_SIZE;
	}
	spin_lock(&np->rx_lock);
	np->cur_rx = entry;
	/* Re-allocate skbuffs to fill the descriptor ring */
	entry = np->old_rx;
	while (entry != np->cur_rx) {
		struct sk_buff *skb;
		/* Dropped packets don't need to re-allocate */
		if (np->rx_skbuff[entry] == NULL) {
			skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
			if (skb == NULL) {
				np->rx_ring[entry].fraginfo = 0;
				printk (KERN_INFO
					"%s: receive_packet: "
					"Unable to re-allocate Rx skbuff.#%d\n",
					dev->name, entry);
				break;
			}
			np->rx_skbuff[entry] = skb;
			np->rx_ring[entry].fraginfo =
			    cpu_to_le64 (pci_map_single
					 (np->pdev, skb->data, np->rx_buf_sz,
					  PCI_DMA_FROMDEVICE));
		}
		np->rx_ring[entry].fraginfo |=
		    cpu_to_le64((u64)np->rx_buf_sz << 48);
		np->rx_ring[entry].status = 0;
		entry = (entry + 1) % RX_RING_SIZE;
	}
	np->old_rx = entry;
	spin_unlock(&np->rx_lock);
	return 0;
}
Example #16
0
static int vxcan_newlink(struct net *net, struct net_device *dev,
			 struct nlattr *tb[], struct nlattr *data[],
			 struct netlink_ext_ack *extack)
{
	struct vxcan_priv *priv;
	struct net_device *peer;
	struct net *peer_net;

	struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb;
	char ifname[IFNAMSIZ];
	unsigned char name_assign_type;
	struct ifinfomsg *ifmp = NULL;
	int err;

	/* register peer device */
	if (data && data[VXCAN_INFO_PEER]) {
		struct nlattr *nla_peer;

		nla_peer = data[VXCAN_INFO_PEER];
		ifmp = nla_data(nla_peer);
		err = rtnl_nla_parse_ifla(peer_tb,
					  nla_data(nla_peer) +
					  sizeof(struct ifinfomsg),
					  nla_len(nla_peer) -
					  sizeof(struct ifinfomsg),
					  NULL);
		if (err < 0)
			return err;

		tbp = peer_tb;
	}

	if (ifmp && tbp[IFLA_IFNAME]) {
		nla_strlcpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ);
		name_assign_type = NET_NAME_USER;
	} else {
		snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
		name_assign_type = NET_NAME_ENUM;
	}

	peer_net = rtnl_link_get_net(net, tbp);
	if (IS_ERR(peer_net))
		return PTR_ERR(peer_net);

	peer = rtnl_create_link(peer_net, ifname, name_assign_type,
				&vxcan_link_ops, tbp);
	if (IS_ERR(peer)) {
		put_net(peer_net);
		return PTR_ERR(peer);
	}

	if (ifmp && dev->ifindex)
		peer->ifindex = ifmp->ifi_index;

	err = register_netdevice(peer);
	put_net(peer_net);
	peer_net = NULL;
	if (err < 0) {
		free_netdev(peer);
		return err;
	}

	netif_carrier_off(peer);

	err = rtnl_configure_link(peer, ifmp);
	if (err < 0)
		goto unregister_network_device;

	/* register first device */
	if (tb[IFLA_IFNAME])
		nla_strlcpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ);
	else
		snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d");

	err = register_netdevice(dev);
	if (err < 0)
		goto unregister_network_device;

	netif_carrier_off(dev);

	/* cross link the device pair */
	priv = netdev_priv(dev);
	rcu_assign_pointer(priv->peer, peer);

	priv = netdev_priv(peer);
	rcu_assign_pointer(priv->peer, dev);

	return 0;

unregister_network_device:
	unregister_netdevice(peer);
	return err;
}
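
vxcan_newlink() is only reachable once it is wired into the driver's rtnl_link_ops, which is what "ip link add ... type vxcan" dispatches through. A sketch of that hookup, assuming the usual vxcan_setup, vxcan_dellink, vxcan_policy and VXCAN_INFO_MAX definitions from the mainline driver.

static struct rtnl_link_ops vxcan_link_ops = {
	.kind		= DRV_NAME,
	.priv_size	= sizeof(struct vxcan_priv),
	.setup		= vxcan_setup,
	.newlink	= vxcan_newlink,
	.dellink	= vxcan_dellink,
	.policy		= vxcan_policy,
	.maxtype	= VXCAN_INFO_MAX,
};
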
Example #17
0
struct net_device *
islpci_setup(struct pci_dev *pdev)
{
	islpci_private *priv;
	struct net_device *ndev = alloc_etherdev(sizeof (islpci_private));

	if (!ndev)
		return ndev;

	pci_set_drvdata(pdev, ndev);
#if defined(SET_NETDEV_DEV)
	SET_NETDEV_DEV(ndev, &pdev->dev);
#endif

	/* setup the structure members */
	ndev->base_addr = pci_resource_start(pdev, 0);
	ndev->irq = pdev->irq;

	/* initialize the function pointers */
	ndev->netdev_ops = &islpci_netdev_ops;
	ndev->wireless_handlers = &prism54_handler_def;
	ndev->ethtool_ops = &islpci_ethtool_ops;

	/* ndev->set_multicast_list = &islpci_set_multicast_list; */
	ndev->addr_len = ETH_ALEN;
	/* Get a non-zero dummy MAC address for nameif. Jean II */
	memcpy(ndev->dev_addr, dummy_mac, ETH_ALEN);

	ndev->watchdog_timeo = ISLPCI_TX_TIMEOUT;

	/* allocate a private device structure to the network device  */
	priv = netdev_priv(ndev);
	priv->ndev = ndev;
	priv->pdev = pdev;
	priv->monitor_type = ARPHRD_IEEE80211;
	priv->ndev->type = (priv->iw_mode == IW_MODE_MONITOR) ?
		priv->monitor_type : ARPHRD_ETHER;

	/* Add pointers to enable iwspy support. */
	priv->wireless_data.spy_data = &priv->spy_data;
	ndev->wireless_data = &priv->wireless_data;

	/* save the start and end address of the PCI memory area */
	ndev->mem_start = (unsigned long) priv->device_base;
	ndev->mem_end = ndev->mem_start + ISL38XX_PCI_MEM_SIZE;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING, "PCI Memory remapped to 0x%p\n", priv->device_base);
#endif

	init_waitqueue_head(&priv->reset_done);

	/* init the queue read locks, process wait counter */
	mutex_init(&priv->mgmt_lock);
	priv->mgmt_received = NULL;
	init_waitqueue_head(&priv->mgmt_wqueue);
	mutex_init(&priv->stats_lock);
	spin_lock_init(&priv->slock);

	/* init state machine with off#1 state */
	priv->state = PRV_STATE_OFF;
	priv->state_off = 1;

	/* initialize the work structures */
	INIT_WORK(&priv->stats_work, prism54_update_stats);
	priv->stats_timestamp = 0;

	INIT_WORK(&priv->reset_task, islpci_do_reset_and_wake);
	priv->reset_task_pending = 0;

	/* allocate various memory areas */
	if (islpci_alloc_memory(priv))
		goto do_free_netdev;

	/* select the firmware file depending on the device id */
	switch (pdev->device) {
	case 0x3877:
		strcpy(priv->firmware, ISL3877_IMAGE_FILE);
		break;

	case 0x3886:
		strcpy(priv->firmware, ISL3886_IMAGE_FILE);
		break;

	default:
		strcpy(priv->firmware, ISL3890_IMAGE_FILE);
		break;
	}

	if (register_netdev(ndev)) {
		DEBUG(SHOW_ERROR_MESSAGES,
		      "ERROR: register_netdev() failed\n");
		goto do_islpci_free_memory;
	}

	return ndev;

do_islpci_free_memory:
	islpci_free_memory(priv);
do_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(ndev);
	priv = NULL;
	return NULL;
}
Example #18
0
int yatse_dma_init(struct net_device *ndev){
	struct yatse_private *priv = netdev_priv(ndev);
	struct yatse_dma *dma = &priv->dma;
	int ret = 0;
	int i;

	assert_spin_locked(&dma->rx_lock);
	assert_spin_locked(&dma->tx_lock);

	dma->rx_skbs = NULL;
	dma->rx_skb_phys = NULL;
	dma->tx_skbs = NULL;
	dma->tx_skb_phys = NULL;

	ret = yatse_dma_reset(dma);
	if(ret) goto out;

	dma->rx_buf_len = priv->config->rx_align + NET_IP_ALIGN + ETH_HLEN + priv->config->max_mtu;
	dma->rx_align = priv->config->rx_align;
	BUG_ON(dma->rx_ring_length & dma->rx_ring_mask);

	BUG_ON(dma->tx_ring_length & dma->tx_ring_mask);
	dma->tx_insert = 0;
	dma->tx_remove = 0;

	dma->rx_skbs = kzalloc(sizeof(*dma->rx_skbs) * dma->rx_ring_length, GFP_KERNEL);
	if(!dma->rx_skbs){
		ret = -ENOMEM;
		goto out;
	}
	dma->rx_skb_phys = kzalloc(sizeof(*dma->rx_skb_phys) * dma->rx_ring_length, GFP_KERNEL);
	if(!dma->rx_skb_phys){
		ret = -ENOMEM;
		goto out;
	}
	for(i = 0;i < dma->rx_ring_length;i++){
		dma->rx_pos = i;
		dma->rx_skbs[i] = yatse_rx_alloc_skb(ndev);
		if(!dma->rx_skbs[i]){
			ret = -ENOMEM;
			goto out;
		}

		dma->rx_skb_phys[i] = dma_map_single(&ndev->dev, dma->rx_skbs[i]->data - NET_IP_ALIGN, NET_IP_ALIGN + ETH_HLEN + priv->mtu, DMA_FROM_DEVICE);
		BUG_ON(dma_mapping_error(&ndev->dev, dma->rx_skb_phys[i]));

		yatse_rx_insert_skb(ndev, dma, dma->rx_skb_phys[i]);
	}
	dma->rx_pos = 0;

	dma->tx_skbs = kzalloc(sizeof(*dma->tx_skbs) * dma->tx_ring_length, GFP_KERNEL);
	if(!dma->tx_skbs){
		ret = -ENOMEM;
		goto out;
	}
	dma->tx_skb_phys = kzalloc(sizeof(*dma->tx_skb_phys) * dma->tx_ring_length, GFP_KERNEL);
	if(!dma->tx_skb_phys){
		ret = -ENOMEM;
		goto out;
	}
	for(i = 0;i < dma->tx_ring_length;i++){
		writeb(0, &dma->tx[i].status);
	}

	writel(YATSE_CSR_RCONTROL_RXENA | YATSE_CSR_RCONTROL_RXIE, &dma->csr->rcontrol);
	writel(YATSE_CSR_TCONTROL_TXENA | YATSE_CSR_TCONTROL_TXIE, &dma->csr->tcontrol);

	printk(KERN_INFO "yatse: DMA up\n");

	return 0;

out:
	yatse_dma_free(ndev);
	return ret;
}
Example #19
0
static void Eth_rx_ints(struct net_device *dev, int enable) {
    struct eth_priv *priv = netdev_priv(dev);
    priv->rx_int_enabled = enable;
}
Example #20
0
static struct net_device_stats *
get_stats (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
#ifdef MEM_MAPPING
	int i;
#endif
	unsigned int stat_reg;

	/* All statistics registers need to be acknowledged,
	   or a statistics overflow could cause problems */

	np->stats.rx_packets += dr32(FramesRcvOk);
	np->stats.tx_packets += dr32(FramesXmtOk);
	np->stats.rx_bytes += dr32(OctetRcvOk);
	np->stats.tx_bytes += dr32(OctetXmtOk);

	np->stats.multicast = dr32(McstFramesRcvdOk);
	np->stats.collisions += dr32(SingleColFrames)
			     +  dr32(MultiColFrames);

	/* detailed tx errors */
	stat_reg = dr16(FramesAbortXSColls);
	np->stats.tx_aborted_errors += stat_reg;
	np->stats.tx_errors += stat_reg;

	stat_reg = dr16(CarrierSenseErrors);
	np->stats.tx_carrier_errors += stat_reg;
	np->stats.tx_errors += stat_reg;

	/* Clear all other statistics registers. */
	dr32(McstOctetXmtOk);
	dr16(BcstFramesXmtdOk);
	dr32(McstFramesXmtdOk);
	dr16(BcstFramesRcvdOk);
	dr16(MacControlFramesRcvd);
	dr16(FrameTooLongErrors);
	dr16(InRangeLengthErrors);
	dr16(FramesCheckSeqErrors);
	dr16(FramesLostRxErrors);
	dr32(McstOctetXmtOk);
	dr32(BcstOctetXmtOk);
	dr32(McstFramesXmtdOk);
	dr32(FramesWDeferredXmt);
	dr32(LateCollisions);
	dr16(BcstFramesXmtdOk);
	dr16(MacControlFramesXmtd);
	dr16(FramesWEXDeferal);

#ifdef MEM_MAPPING
	for (i = 0x100; i <= 0x150; i += 4)
		dr32(i);
#endif
	dr16(TxJumboFrames);
	dr16(RxJumboFrames);
	dr16(TCPCheckSumErrors);
	dr16(UDPCheckSumErrors);
	dr16(IPCheckSumErrors);
	return &np->stats;
}
Example #21
0
static u32 ixgbe_get_rx_csum(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return (adapter->flags & IXGBE_FLAG_RX_CSUM_ENABLED);
}
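
In the pre-fixed-features ethtool API this getter paired with a .set_rx_csum handler in the same ethtool_ops. A hedged sketch of that setter; the real ixgbe version also reinitialized the hardware after flipping the flag, which is omitted here.

static int demo_set_rx_csum(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	if (data)
		adapter->flags |= IXGBE_FLAG_RX_CSUM_ENABLED;
	else
		adapter->flags &= ~IXGBE_FLAG_RX_CSUM_ENABLED;

	return 0;
}
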
Example #22
0
static int
rio_probe1 (struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct net_device *dev;
	struct netdev_private *np;
	static int card_idx;
	int chip_idx = ent->driver_data;
	int err, irq;
	void __iomem *ioaddr;
	static int version_printed;
	void *ring_space;
	dma_addr_t ring_dma;

	if (!version_printed++)
		printk ("%s", version);

	err = pci_enable_device (pdev);
	if (err)
		return err;

	irq = pdev->irq;
	err = pci_request_regions (pdev, "dl2k");
	if (err)
		goto err_out_disable;

	pci_set_master (pdev);

	err = -ENOMEM;

	dev = alloc_etherdev (sizeof (*np));
	if (!dev)
		goto err_out_res;
	SET_NETDEV_DEV(dev, &pdev->dev);

	np = netdev_priv(dev);

	/* IO registers range. */
	ioaddr = pci_iomap(pdev, 0, 0);
	if (!ioaddr)
		goto err_out_dev;
	np->eeprom_addr = ioaddr;

#ifdef MEM_MAPPING
	/* MM registers range. */
	ioaddr = pci_iomap(pdev, 1, 0);
	if (!ioaddr)
		goto err_out_iounmap;
#endif
	np->ioaddr = ioaddr;
	np->chip_id = chip_idx;
	np->pdev = pdev;
	spin_lock_init (&np->tx_lock);
	spin_lock_init (&np->rx_lock);

	/* Parse manual configuration */
	np->an_enable = 1;
	np->tx_coalesce = 1;
	if (card_idx < MAX_UNITS) {
		if (media[card_idx] != NULL) {
			np->an_enable = 0;
			if (strcmp (media[card_idx], "auto") == 0 ||
			    strcmp (media[card_idx], "autosense") == 0 ||
			    strcmp (media[card_idx], "0") == 0 ) {
				np->an_enable = 2;
			} else if (strcmp (media[card_idx], "100mbps_fd") == 0 ||
			    strcmp (media[card_idx], "4") == 0) {
				np->speed = 100;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "100mbps_hd") == 0 ||
				   strcmp (media[card_idx], "3") == 0) {
				np->speed = 100;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "10mbps_fd") == 0 ||
				   strcmp (media[card_idx], "2") == 0) {
				np->speed = 10;
				np->full_duplex = 1;
			} else if (strcmp (media[card_idx], "10mbps_hd") == 0 ||
				   strcmp (media[card_idx], "1") == 0) {
				np->speed = 10;
				np->full_duplex = 0;
			} else if (strcmp (media[card_idx], "1000mbps_fd") == 0 ||
				 strcmp (media[card_idx], "6") == 0) {
				np->speed=1000;
				np->full_duplex=1;
			} else if (strcmp (media[card_idx], "1000mbps_hd") == 0 ||
				 strcmp (media[card_idx], "5") == 0) {
				np->speed = 1000;
				np->full_duplex = 0;
			} else {
				np->an_enable = 1;
			}
		}
		if (jumbo[card_idx] != 0) {
			np->jumbo = 1;
			dev->mtu = MAX_JUMBO;
		} else {
			np->jumbo = 0;
			if (mtu[card_idx] > 0 && mtu[card_idx] < PACKET_SIZE)
				dev->mtu = mtu[card_idx];
		}
		np->vlan = (vlan[card_idx] > 0 && vlan[card_idx] < 4096) ?
		    vlan[card_idx] : 0;
		if (rx_coalesce > 0 && rx_timeout > 0) {
			np->rx_coalesce = rx_coalesce;
			np->rx_timeout = rx_timeout;
			np->coalesce = 1;
		}
		np->tx_flow = (tx_flow == 0) ? 0 : 1;
		np->rx_flow = (rx_flow == 0) ? 0 : 1;

		if (tx_coalesce < 1)
			tx_coalesce = 1;
		else if (tx_coalesce > TX_RING_SIZE-1)
			tx_coalesce = TX_RING_SIZE - 1;
	}
	dev->netdev_ops = &netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &ethtool_ops;
#if 0
	dev->features = NETIF_F_IP_CSUM;
#endif
	/* MTU range: 68 - 1536 or 8000 */
	dev->min_mtu = ETH_MIN_MTU;
	dev->max_mtu = np->jumbo ? MAX_JUMBO : PACKET_SIZE;

	pci_set_drvdata (pdev, dev);

	ring_space = pci_alloc_consistent (pdev, TX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_iounmap;
	np->tx_ring = ring_space;
	np->tx_ring_dma = ring_dma;

	ring_space = pci_alloc_consistent (pdev, RX_TOTAL_SIZE, &ring_dma);
	if (!ring_space)
		goto err_out_unmap_tx;
	np->rx_ring = ring_space;
	np->rx_ring_dma = ring_dma;

	/* Parse eeprom data */
	parse_eeprom (dev);

	/* Find PHY address */
	err = find_miiphy (dev);
	if (err)
		goto err_out_unmap_rx;

	/* Fiber device? */
	np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
	np->link_status = 0;
	/* Set media and reset PHY */
	if (np->phy_media) {
		/* default Auto-Negotiation for fiber devices */
		if (np->an_enable == 2) {
			np->an_enable = 1;
		}
	} else {
		/* Auto-Negotiation is mandatory for 1000BASE-T,
		   IEEE 802.3ab Annex 28D page 14 */
		if (np->speed == 1000)
			np->an_enable = 1;
	}

	err = register_netdev (dev);
	if (err)
		goto err_out_unmap_rx;

	card_idx++;

	printk (KERN_INFO "%s: %s, %pM, IRQ %d\n",
		dev->name, np->name, dev->dev_addr, irq);
	if (tx_coalesce > 1)
		printk(KERN_INFO "tx_coalesce:\t%d packets\n",
				tx_coalesce);
	if (np->coalesce)
		printk(KERN_INFO
		       "rx_coalesce:\t%d packets\n"
		       "rx_timeout: \t%d ns\n",
				np->rx_coalesce, np->rx_timeout*640);
	if (np->vlan)
		printk(KERN_INFO "vlan(id):\t%d\n", np->vlan);
	return 0;

err_out_unmap_rx:
	pci_free_consistent (pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma);
err_out_unmap_tx:
	pci_free_consistent (pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma);
err_out_iounmap:
#ifdef MEM_MAPPING
	pci_iounmap(pdev, np->ioaddr);
#endif
	pci_iounmap(pdev, np->eeprom_addr);
err_out_dev:
	free_netdev (dev);
err_out_res:
	pci_release_regions (pdev);
err_out_disable:
	pci_disable_device (pdev);
	return err;
}
Example #23
0
static void ixgbe_set_msglevel(struct net_device *netdev, u32 data)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	adapter->msg_enable = data;
}
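
The setter above is half of a symmetric pair; the matching getter just reads the field back. A sketch mirroring the obvious counterpart (demo_get_msglevel is an illustrative name).

static u32 demo_get_msglevel(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);

	return adapter->msg_enable;
}
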
Example #24
0
static int
parse_eeprom (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	int i, j;
	u8 sromdata[256];
	u8 *psib;
	u32 crc;
	PSROM_t psrom = (PSROM_t) sromdata;

	int cid, next;

	for (i = 0; i < 128; i++)
		((__le16 *) sromdata)[i] = cpu_to_le16(read_eeprom(np, i));

	if (np->pdev->vendor == PCI_VENDOR_ID_DLINK) {	/* D-Link Only */
		/* Check CRC */
		crc = ~ether_crc_le (256 - 4, sromdata);
		if (psrom->crc != cpu_to_le32(crc)) {
			printk (KERN_ERR "%s: EEPROM data CRC error.\n",
					dev->name);
			return -1;
		}
	}

	/* Set MAC address */
	for (i = 0; i < 6; i++)
		dev->dev_addr[i] = psrom->mac_addr[i];

	if (np->chip_id == CHIP_IP1000A) {
		np->led_mode = psrom->led_mode;
		return 0;
	}

	if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
		return 0;
	}

	/* Parse Software Information Block */
	i = 0x30;
	psib = (u8 *) sromdata;
	do {
		cid = psib[i++];
		next = psib[i++];
		if ((cid == 0 && next == 0) || (cid == 0xff && next == 0xff)) {
			printk (KERN_ERR "Cell data error\n");
			return -1;
		}
		switch (cid) {
		case 0:	/* Format version */
			break;
		case 1:	/* End of cell */
			return 0;
		case 2:	/* Duplex Polarity */
			np->duplex_polarity = psib[i];
			dw8(PhyCtrl, dr8(PhyCtrl) | psib[i]);
			break;
		case 3:	/* Wake Polarity */
			np->wake_polarity = psib[i];
			break;
		case 9:	/* Adapter description */
			j = (next - i > 255) ? 255 : next - i;
			memcpy (np->name, &(psib[i]), j);
			break;
		case 4:
		case 5:
		case 6:
		case 7:
		case 8:	/* Reserved */
			break;
		default:	/* Unknown cell */
			return -1;
		}
		i = next;
	} while (1);

	return 0;
}
Example #25
0
static int ixgbe_get_eeprom_len(struct net_device *netdev)
{
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	return adapter->hw.eeprom.word_size * 2;
}
Example #26
0
static void rio_hw_init(struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	int i;
	u16 macctrl;

	/* Reset all logic functions */
	dw16(ASICCtrl + 2,
	     GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
	mdelay(10);

	rio_set_led_mode(dev);

	/* DebugCtrl bits 4, 5 and 9 must be set */
	dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);

	if (np->chip_id == CHIP_IP1000A &&
	    (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) {
		/* PHY magic taken from ipg driver, undocumented registers */
		mii_write(dev, np->phy_addr, 31, 0x0001);
		mii_write(dev, np->phy_addr, 27, 0x01e0);
		mii_write(dev, np->phy_addr, 31, 0x0002);
		mii_write(dev, np->phy_addr, 27, 0xeb8e);
		mii_write(dev, np->phy_addr, 31, 0x0000);
		mii_write(dev, np->phy_addr, 30, 0x005e);
		/* advertise 1000BASE-T half & full duplex, prefer MASTER */
		mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700);
	}

	if (np->phy_media)
		mii_set_media_pcs(dev);
	else
		mii_set_media(dev);

	/* Jumbo frame */
	if (np->jumbo != 0)
		dw16(MaxFrameSize, MAX_JUMBO+14);

	/* Set RFDListPtr */
	dw32(RFDListPtr0, np->rx_ring_dma);
	dw32(RFDListPtr1, 0);

	/* Set station address */
	/* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works
	 * too. However, it doesn't work on IP1000A so we use 16-bit access.
	 */
	for (i = 0; i < 3; i++)
		dw16(StationAddr0 + 2 * i,
		     cpu_to_le16(((u16 *)dev->dev_addr)[i]));

	set_multicast (dev);
	if (np->coalesce) {
		dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
	}
	/* Set RIO to poll every N*320nsec. */
	dw8(RxDMAPollPeriod, 0x20);
	dw8(TxDMAPollPeriod, 0xff);
	dw8(RxDMABurstThresh, 0x30);
	dw8(RxDMAUrgentThresh, 0x30);
	dw32(RmonStatMask, 0x0007ffff);
	/* clear statistics */
	clear_stats (dev);

	/* VLAN supported */
	if (np->vlan) {
		/* priority field in RxDMAIntCtrl  */
		dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
		/* VLANId */
		dw16(VLANId, np->vlan);
		/* Length/Type should be 0x8100 */
		dw32(VLANTag, 0x8100 << 16 | np->vlan);
		/* Enable AutoVLANuntagging, but disable AutoVLANtagging.
		   VLAN information tagged by TFC' VID, CFI fields. */
		dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
	}

	/* Start Tx/Rx */
	dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);

	macctrl = 0;
	macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
	macctrl |= (np->full_duplex) ? DuplexSelect : 0;
	macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
	macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
	dw16(MACCtrl, macctrl);
}
Example #27
0
static struct ip_tunnel * ipgre_tunnel_lookup(struct net_device *dev,
					      __be32 remote, __be32 local,
					      __be32 key, __be16 gre_proto)
{
	struct net *net = dev_net(dev);
	int link = dev->ifindex;
	unsigned h0 = HASH(remote);
	unsigned h1 = HASH(key);
	struct ip_tunnel *t, *cand = NULL;
	struct ipgre_net *ign = net_generic(net, ipgre_net_id);
	int dev_type = (gre_proto == htons(ETH_P_TEB)) ?
		       ARPHRD_ETHER : ARPHRD_IPGRE;
	int score, cand_score = 4;

	for (t = ign->tunnels_r_l[h0^h1]; t; t = t->next) {
		if (local != t->parms.iph.saddr ||
		    remote != t->parms.iph.daddr ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for (t = ign->tunnels_r[h0^h1]; t; t = t->next) {
		if (remote != t->parms.iph.daddr ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for (t = ign->tunnels_l[h1]; t; t = t->next) {
		if ((local != t->parms.iph.saddr &&
		     (local != t->parms.iph.daddr ||
		      !ipv4_is_multicast(local))) ||
		    key != t->parms.i_key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	for (t = ign->tunnels_wc[h1]; t; t = t->next) {
		if (t->parms.i_key != key ||
		    !(t->dev->flags & IFF_UP))
			continue;

		if (t->dev->type != ARPHRD_IPGRE &&
		    t->dev->type != dev_type)
			continue;

		score = 0;
		if (t->parms.link != link)
			score |= 1;
		if (t->dev->type != dev_type)
			score |= 2;
		if (score == 0)
			return t;

		if (score < cand_score) {
			cand = t;
			cand_score = score;
		}
	}

	if (cand != NULL)
		return cand;

	if (ign->fb_tunnel_dev->flags & IFF_UP)
		return netdev_priv(ign->fb_tunnel_dev);

	return NULL;
}
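
All four lookup loops above rank near-misses with the same two-bit score: bit 0 flags a link mismatch, bit 1 a device-type mismatch, so 0 is an exact match (returned immediately) and otherwise the lowest-scoring candidate wins. A condensed sketch of just that scoring rule; demo_score is an invented name.

static int demo_score(const struct ip_tunnel *t, int link, int dev_type)
{
	int score = 0;

	if (t->parms.link != link)
		score |= 1;	/* wrong underlying device */
	if (t->dev->type != dev_type)
		score |= 2;	/* wrong ARPHRD type */
	return score;		/* 0 == exact match */
}
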
Example #28
0
static netdev_tx_t
start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	struct netdev_desc *txdesc;
	unsigned entry;
	u64 tfc_vlan_tag = 0;

	if (np->link_status == 0) {	/* Link Down */
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	entry = np->cur_tx % TX_RING_SIZE;
	np->tx_skbuff[entry] = skb;
	txdesc = &np->tx_ring[entry];

#if 0
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		txdesc->status |=
		    cpu_to_le64 (TCPChecksumEnable | UDPChecksumEnable |
				 IPChecksumEnable);
	}
#endif
	if (np->vlan) {
		tfc_vlan_tag = VLANTagInsert |
		    ((u64)np->vlan << 32) |
		    ((u64)skb->priority << 45);
	}
	txdesc->fraginfo = cpu_to_le64 (pci_map_single (np->pdev, skb->data,
							skb->len,
							PCI_DMA_TODEVICE));
	txdesc->fraginfo |= cpu_to_le64((u64)skb->len << 48);

	/* DL2K bug: DMA fails to get next descriptor ptr in 10Mbps mode
	 * Work around: Always use 1 descriptor in 10Mbps mode */
	if (entry % np->tx_coalesce == 0 || np->speed == 10)
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      TxDMAIndicate |
					      (1 << FragCountShift));
	else
		txdesc->status = cpu_to_le64 (entry | tfc_vlan_tag |
					      WordAlignDisable |
					      (1 << FragCountShift));

	/* TxDMAPollNow */
	dw32(DMACtrl, dr32(DMACtrl) | 0x00001000);
	/* Schedule ISR */
	dw32(CountDown, 10000);
	np->cur_tx = (np->cur_tx + 1) % TX_RING_SIZE;
	if ((np->cur_tx - np->old_tx + TX_RING_SIZE) % TX_RING_SIZE
			< TX_QUEUE_LEN - 1 && np->speed != 10) {
		/* do nothing */
	} else if (!netif_queue_stopped(dev)) {
		netif_stop_queue (dev);
	}

	/* The first TFDListPtr */
	if (!dr32(TFDListPtr0)) {
		dw32(TFDListPtr0, np->tx_ring_dma +
		     entry * sizeof (struct netdev_desc));
		dw32(TFDListPtr1, 0);
	}

	return NETDEV_TX_OK;
}
Example #29
0
/**
 *  @brief ioctl function for wireless IOCTLs
 *
 *  @param dev   A pointer to net_device structure
 *  @param req   A pointer to ifreq structure
 *  @param cmd   Command
 *
 *  @return      0 -- success, otherwise fail
 */
int
woal_uap_do_priv_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
{
    moal_private *priv = (moal_private *) netdev_priv(dev);
    struct iwreq *wrq = (struct iwreq *) req;
    int ret = 0;

    ENTER();

    switch (cmd) {
    case WOAL_UAP_SETNONE_GETNONE:
        switch (wrq->u.data.flags) {
        case WOAL_UAP_START:
            break;
        case WOAL_UAP_STOP:
            ret = woal_uap_bss_ctrl(priv, MOAL_IOCTL_WAIT, UAP_BSS_STOP);
            break;
        case WOAL_AP_BSS_START:
            ret = woal_uap_bss_ctrl(priv, MOAL_IOCTL_WAIT, UAP_BSS_START);
            break;
        case WOAL_AP_BSS_STOP:
            ret = woal_uap_bss_ctrl(priv, MOAL_IOCTL_WAIT, UAP_BSS_STOP);
            break;
        default:
            ret = -EINVAL;
            break;
        }
        break;
    case WOAL_UAP_SETONEINT_GETWORDCHAR:
        switch (wrq->u.data.flags) {
        case WOAL_UAP_VERSION:
            ret = woal_get_driver_version(priv, req);
            break;
        case WOAL_UAP_VEREXT:
            ret = woal_get_driver_verext(priv, req);
            break;
        default:
            ret = -EOPNOTSUPP;
            break;
        }
        break;
    case WOAL_UAP_SET_GET_256_CHAR:
        switch (wrq->u.data.flags) {
        case WOAL_WL_FW_RELOAD:
            break;
        case WOAL_AP_SET_CFG:
            ret = woal_uap_set_ap_cfg(priv, wrq);
            break;
        default:
            ret = -EINVAL;
            break;
        }
        break;
#if defined(WFD_SUPPORT)
#if defined(STA_SUPPORT) && defined(UAP_SUPPORT)
    case WOAL_UAP_SETONEINT_GETONEINT:
        switch (wrq->u.data.flags) {
        case WOAL_UAP_SET_GET_BSS_ROLE:
            ret = woal_set_get_bss_role(priv, wrq);
            break;
        default:
            ret = -EINVAL;
            break;
        }
        break;
#endif
#endif
    case WOAL_UAP_HOST_CMD:
        ret = woal_host_command(priv, wrq);
        break;
    case WOAL_UAP_FROYO_START:
        break;
    case WOAL_UAP_FROYO_STOP:
        ret = woal_uap_bss_ctrl(priv, MOAL_IOCTL_WAIT, UAP_BSS_STOP);
        break;
    case WOAL_UAP_FROYO_AP_BSS_START:
        ret = woal_uap_bss_ctrl(priv, MOAL_IOCTL_WAIT, UAP_BSS_START);
        break;
    case WOAL_UAP_FROYO_AP_BSS_STOP:
        ret = woal_uap_bss_ctrl(priv, MOAL_IOCTL_WAIT, UAP_BSS_STOP);
        break;
    case WOAL_UAP_FROYO_WL_FW_RELOAD:
        break;
    case WOAL_UAP_FROYO_AP_SET_CFG:
        ret = woal_uap_set_ap_cfg(priv, wrq);
        break;
    default:
        ret = -EINVAL;
        break;
    }

    LEAVE();
    return ret;
}
Example #30
0
/**
 * ixgbe_ipsec_del_sa - clear out this specific SA
 * @xs: pointer to transformer state struct
 **/
static void ixgbe_ipsec_del_sa(struct xfrm_state *xs)
{
	struct net_device *dev = xs->xso.dev;
	struct ixgbe_adapter *adapter = netdev_priv(dev);
	struct ixgbe_ipsec *ipsec = adapter->ipsec;
	struct ixgbe_hw *hw = &adapter->hw;
	u32 zerobuf[4] = {0, 0, 0, 0};
	u16 sa_idx;

	if (xs->xso.flags & XFRM_OFFLOAD_INBOUND) {
		struct rx_sa *rsa;
		u8 ipi;

		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_RX_INDEX;
		rsa = &ipsec->rx_tbl[sa_idx];

		if (!rsa->used) {
			netdev_err(dev, "Invalid Rx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_rx_sa(hw, sa_idx, 0, zerobuf, 0, 0, 0);
		hash_del_rcu(&rsa->hlist);

		/* if the IP table entry is referenced by only this SA,
		 * i.e. ref_cnt is only 1, clear the IP table entry as well
		 */
		ipi = rsa->iptbl_ind;
		if (ipsec->ip_tbl[ipi].ref_cnt > 0) {
			ipsec->ip_tbl[ipi].ref_cnt--;

			if (!ipsec->ip_tbl[ipi].ref_cnt) {
				memset(&ipsec->ip_tbl[ipi], 0,
				       sizeof(struct rx_ip_sa));
				ixgbe_ipsec_set_rx_ip(hw, ipi, zerobuf);
			}
		}

		memset(rsa, 0, sizeof(struct rx_sa));
		ipsec->num_rx_sa--;
	} else {
		sa_idx = xs->xso.offload_handle - IXGBE_IPSEC_BASE_TX_INDEX;

		if (!ipsec->tx_tbl[sa_idx].used) {
			netdev_err(dev, "Invalid Tx SA selected sa_idx=%d offload_handle=%lu\n",
				   sa_idx, xs->xso.offload_handle);
			return;
		}

		ixgbe_ipsec_set_tx_sa(hw, sa_idx, zerobuf, 0);
		memset(&ipsec->tx_tbl[sa_idx], 0, sizeof(struct tx_sa));
		ipsec->num_tx_sa--;
	}

	/* if there are no SAs left, stop the engine to save energy */
	if (ipsec->num_rx_sa == 0 && ipsec->num_tx_sa == 0) {
		adapter->flags2 &= ~IXGBE_FLAG2_IPSEC_ENABLED;
		ixgbe_ipsec_stop_engine(adapter);
	}
}
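
The Rx branch above unwinds a reference count on the shared IP table: each Rx SA holds one reference on its ip_tbl slot, and the slot is cleared only when the last user goes away. A hedged sketch of the symmetric helpers this implies; the demo_* names are invented, and ixgbe itself open-codes the logic inline.

static void demo_rx_ip_get(struct ixgbe_ipsec *ipsec, u8 ipi)
{
	ipsec->ip_tbl[ipi].ref_cnt++;	/* taken in the add-SA path */
}

static bool demo_rx_ip_put(struct ixgbe_ipsec *ipsec, u8 ipi)
{
	/* true when the last user is gone and the caller may clear the slot */
	return ipsec->ip_tbl[ipi].ref_cnt > 0 &&
	       --ipsec->ip_tbl[ipi].ref_cnt == 0;
}
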