Example No. 1
int usnic_fwd_disable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx)
{
	int status;
	u64 a0, a1;
	struct net_device *pf_netdev;

	pf_netdev = ufdev->netdev;
	a0 = qp_idx;
	a1 = CMD_QP_RQWQ;

	status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_DISABLE,
			&a0, &a1);
	if (status) {
		usnic_err("PF %s VNIC Index %u RQ Index: %u DISABLE Failed with status %d",
				netdev_name(pf_netdev),
				vnic_idx,
				qp_idx,
				status);
	} else {
		usnic_dbg("PF %s VNIC Index %u RQ Index: %u DISABLED",
				netdev_name(pf_netdev),
				vnic_idx,
				qp_idx);
	}

	return status;
}
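The disable path above has a symmetric enable path built on the same usnic_fwd_devcmd() helper. A minimal sketch, assuming CMD_QP_ENABLE is the matching devcmd opcode (the error reporting mirrors the disable case):

int usnic_fwd_enable_qp(struct usnic_fwd_dev *ufdev, int vnic_idx, int qp_idx)
{
	int status;
	u64 a0, a1;

	a0 = qp_idx;
	a1 = CMD_QP_RQWQ;

	/* Same devcmd plumbing as the disable path, opcode swapped */
	status = usnic_fwd_devcmd(ufdev, vnic_idx, CMD_QP_ENABLE,
			&a0, &a1);
	if (status)
		usnic_err("PF %s VNIC Index %u RQ Index: %u ENABLE Failed with status %d",
				netdev_name(ufdev->netdev), vnic_idx,
				qp_idx, status);

	return status;
}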
Example No. 2
int __netdev_printk(const char *level, const struct net_device *dev,
			   struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent)
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35))
		r = dev_printk(level, dev->dev.parent, "%s: %pV",
			       netdev_name(dev), vaf);
#else
		/* XXX: this could likely be done better but I'm lazy */
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
#endif
	else if (dev)
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	else
		r = printk("%s(NULL net_device): %pV", level, vaf);

	return r;
}
Example No. 3
/* Create debugfs counters for the device */
static inline void debugfs_init(struct cfv_info *cfv)
{
	cfv->debugfs =
		debugfs_create_dir(netdev_name(cfv->ndev), NULL);

	if (IS_ERR(cfv->debugfs))
		return;

	debugfs_create_u32("rx-napi-complete", S_IRUSR, cfv->debugfs,
			   &cfv->stats.rx_napi_complete);
	debugfs_create_u32("rx-napi-resched", S_IRUSR, cfv->debugfs,
			   &cfv->stats.rx_napi_resched);
	debugfs_create_u32("rx-nomem", S_IRUSR, cfv->debugfs,
			   &cfv->stats.rx_nomem);
	debugfs_create_u32("rx-kicks", S_IRUSR, cfv->debugfs,
			   &cfv->stats.rx_kicks);
	debugfs_create_u32("tx-full-ring", S_IRUSR, cfv->debugfs,
			   &cfv->stats.tx_full_ring);
	debugfs_create_u32("tx-no-mem", S_IRUSR, cfv->debugfs,
			   &cfv->stats.tx_no_mem);
	debugfs_create_u32("tx-kicks", S_IRUSR, cfv->debugfs,
			   &cfv->stats.tx_kicks);
	debugfs_create_u32("tx-flow-on", S_IRUSR, cfv->debugfs,
			   &cfv->stats.tx_flow_on);
}
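The matching teardown is a single recursive remove. A minimal sketch, relying on the fact that debugfs_remove_recursive() tolerates NULL and error pointers:

/* Sketch of the cleanup for debugfs_init() above; safe to call even
 * if debugfs_create_dir() returned an error pointer. */
static inline void debugfs_deinit(struct cfv_info *cfv)
{
	debugfs_remove_recursive(cfv->debugfs);
}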
Example No. 4
int gether_get_ifname(struct net_device *net, char *name, int len)
{
	rtnl_lock();
	strlcpy(name, netdev_name(net), len);
	rtnl_unlock();
	return strlen(name);
}
Example No. 5
static int nsim_init(struct net_device *dev)
{
	char sdev_ddir_name[10], sdev_link_name[32];
	struct netdevsim *ns = netdev_priv(dev);
	int err;

	ns->netdev = dev;
	ns->ddir = debugfs_create_dir(netdev_name(dev), nsim_ddir);
	if (IS_ERR_OR_NULL(ns->ddir))
		return -ENOMEM;

	if (!ns->sdev) {
		ns->sdev = kzalloc(sizeof(*ns->sdev), GFP_KERNEL);
		if (!ns->sdev) {
			err = -ENOMEM;
			goto err_debugfs_destroy;
		}
		ns->sdev->refcnt = 1;
		ns->sdev->switch_id = nsim_dev_id;
		sprintf(sdev_ddir_name, "%u", ns->sdev->switch_id);
		ns->sdev->ddir = debugfs_create_dir(sdev_ddir_name,
						    nsim_sdev_ddir);
		if (IS_ERR_OR_NULL(ns->sdev->ddir)) {
			err = PTR_ERR_OR_ZERO(ns->sdev->ddir) ?: -EINVAL;
			goto err_sdev_free;
		}
Example No. 6
static int rxe_enable_driver(struct ib_device *ib_dev)
{
	struct rxe_dev *rxe = container_of(ib_dev, struct rxe_dev, ib_dev);

	rxe_set_port_state(rxe);
	dev_info(&rxe->ib_dev.dev, "added %s\n", netdev_name(rxe->ndev));
	return 0;
}
Example No. 7
int gether_get_ifname(struct net_device *net, char *name, int len)
{
	int ret;

	rtnl_lock();
	ret = snprintf(name, len, "%s\n", netdev_name(net));
	rtnl_unlock();
	return ret < len ? ret : len;
}
Example No. 8
static ssize_t
usnic_ib_show_iface(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct usnic_ib_dev *us_ibdev;

	us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);

	return scnprintf(buf, PAGE_SIZE, "%s\n",
			netdev_name(us_ibdev->netdev));
}
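A read-only show callback like this is typically exposed through a device attribute. A hedged sketch of the wiring (the attribute name and registration details in the usnic driver are assumptions here):

/* Illustrative binding: publishes the callback above as a read-only
 * "iface" sysfs attribute. */
static DEVICE_ATTR(iface, S_IRUGO, usnic_ib_show_iface, NULL);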
Example No. 9
struct usnic_fwd_dev *usnic_fwd_dev_alloc(struct pci_dev *pdev)
{
	struct usnic_fwd_dev *ufdev;

	ufdev = kzalloc(sizeof(*ufdev), GFP_KERNEL);
	if (!ufdev)
		return NULL;

	ufdev->pdev = pdev;
	ufdev->netdev = pci_get_drvdata(pdev);
	spin_lock_init(&ufdev->lock);
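	/* ufdev came from kzalloc(), so copying at most sizeof(name) - 1
	 * bytes leaves the final byte zero and keeps the name
	 * NUL-terminated even when strncpy() does not terminate it. */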
	strncpy(ufdev->name, netdev_name(ufdev->netdev),
			sizeof(ufdev->name) - 1);

	return ufdev;
}
Example No. 10
void xgbe_ptp_register(struct xgbe_prv_data *pdata)
{
	struct ptp_clock_info *info = &pdata->ptp_clock_info;
	struct ptp_clock *clock;
	struct cyclecounter *cc = &pdata->tstamp_cc;
	u64 dividend;

	snprintf(info->name, sizeof(info->name), "%s",
		 netdev_name(pdata->netdev));
	info->owner = THIS_MODULE;
	info->max_adj = clk_get_rate(pdata->ptpclk);
	info->adjfreq = xgbe_adjfreq;
	info->adjtime = xgbe_adjtime;
	info->gettime = xgbe_gettime;
	info->settime = xgbe_settime;
	info->enable = xgbe_enable;

	clock = ptp_clock_register(info, pdata->dev);
	if (IS_ERR(clock)) {
		dev_err(pdata->dev, "ptp_clock_register failed\n");
		return;
	}

	pdata->ptp_clock = clock;

	/* Calculate the addend:
	 *   addend = 2^32 / (PTP ref clock / 50MHz)
	 *          = (2^32 * 50MHz) / PTP ref clock
	 */
	dividend = 50000000;
	dividend <<= 32;
	pdata->tstamp_addend = div_u64(dividend, clk_get_rate(pdata->ptpclk));

	/* Setup the timecounter */
	cc->read = xgbe_cc_read;
	cc->mask = CLOCKSOURCE_MASK(64);
	cc->mult = 1;
	cc->shift = 0;

	timecounter_init(&pdata->tstamp_tc, &pdata->tstamp_cc,
			 ktime_to_ns(ktime_get_real()));

	/* Disable all timestamping to start */
	XGMAC_IOWRITE(pdata, MAC_TCR, 0);
	pdata->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
	pdata->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE;
}
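The matching unregister path is short. A sketch mirroring the register function above:

/* Sketch of the teardown for xgbe_ptp_register() above. */
void xgbe_ptp_unregister(struct xgbe_prv_data *pdata)
{
	if (pdata->ptp_clock)
		ptp_clock_unregister(pdata->ptp_clock);
}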
Example No. 11
static int xlgmac_request_irqs(struct xlgmac_pdata *pdata)
{
	struct net_device *netdev = pdata->netdev;
	struct xlgmac_channel *channel;
	unsigned int i;
	int ret;

	ret = devm_request_irq(pdata->dev, pdata->dev_irq, xlgmac_isr,
			       IRQF_SHARED, netdev->name, pdata);
	if (ret) {
		netdev_alert(netdev, "error requesting irq %d\n",
			     pdata->dev_irq);
		return ret;
	}

	if (!pdata->per_channel_irq)
		return 0;

	channel = pdata->channel_head;
	for (i = 0; i < pdata->channel_count; i++, channel++) {
		snprintf(channel->dma_irq_name,
			 sizeof(channel->dma_irq_name) - 1,
			 "%s-TxRx-%u", netdev_name(netdev),
			 channel->queue_index);

		ret = devm_request_irq(pdata->dev, channel->dma_irq,
				       xlgmac_dma_isr, 0,
				       channel->dma_irq_name, channel);
		if (ret) {
			netdev_alert(netdev, "error requesting irq %d\n",
				     channel->dma_irq);
			goto err_irq;
		}
	}

	return 0;

err_irq:
	/* Using an unsigned int, 'i' will go to UINT_MAX and exit */
	for (i--, channel--; i < pdata->channel_count; i--, channel--)
		devm_free_irq(pdata->dev, channel->dma_irq, channel);

	devm_free_irq(pdata->dev, pdata->dev_irq, pdata);

	return ret;
}
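The unwind loop in the error path relies on unsigned wraparound: decrementing i past zero yields UINT_MAX, which fails the i < channel_count test and ends the loop. A standalone illustration of the idiom (unwind_all() and undo() are hypothetical names):

/* Visits i = n-1 down to 0; after the i == 0 pass, i wraps to
 * UINT_MAX and the condition i < n terminates the loop. Also
 * correct for n == 0, where the body never runs. */
static void unwind_all(unsigned int n)
{
	unsigned int i;

	for (i = n - 1; i < n; i--)
		undo(i);	/* hypothetical per-step cleanup */
}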
Example No. 12
/**
 * nfp_net_aux_irq_request() - Request an auxiliary interrupt (LSC or EXN)
 * @nn:		NFP Network structure
 * @ctrl_offset: Control BAR offset where IRQ configuration should be written
 * @format:	printf-style format to construct the interrupt name
 * @name:	Pointer to allocated space for interrupt name
 * @name_sz:	Size of space for interrupt name
 * @vector_idx:	Index of MSI-X vector used for this interrupt
 * @handler:	IRQ handler to register for this interrupt
 */
static int
nfp_net_aux_irq_request(struct nfp_net *nn, u32 ctrl_offset,
			const char *format, char *name, size_t name_sz,
			unsigned int vector_idx, irq_handler_t handler)
{
	struct msix_entry *entry;
	int err;

	entry = &nn->irq_entries[vector_idx];

	snprintf(name, name_sz, format, netdev_name(nn->netdev));
	err = request_irq(entry->vector, handler, 0, name, nn);
	if (err) {
		nn_err(nn, "Failed to request IRQ %d (err=%d).\n",
		       entry->vector, err);
		return err;
	}
	nn_writeb(nn, ctrl_offset, vector_idx);

	return 0;
}
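A hedged caller sketch showing how the printf-style format argument is used; the constants and field names follow the nfp driver's pattern but are assumptions here, not taken from the code above:

/* Illustrative call (names assumed): formats "<netdev name>-lsc" as
 * the IRQ name and registers a link-state-change handler. */
err = nfp_net_aux_irq_request(nn, NFP_NET_CFG_LSC, "%s-lsc",
			      nn->lsc_name, sizeof(nn->lsc_name),
			      NFP_NET_IRQ_LSC_IDX, nfp_net_irq_lsc);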
Example No. 13
File: core.c Project: mdamt/linux
/* Netdev handler for transmission timeout.
 */
static void qtnf_netdev_tx_timeout(struct net_device *ndev)
{
	struct qtnf_vif *vif = qtnf_netdev_get_priv(ndev);
	struct qtnf_wmac *mac;
	struct qtnf_bus *bus;

	if (unlikely(!vif || !vif->mac || !vif->mac->bus))
		return;

	mac = vif->mac;
	bus = mac->bus;

	pr_warn("VIF%u.%u: Tx timeout- %lu\n", mac->macid, vif->vifid, jiffies);

	qtnf_bus_data_tx_timeout(bus, ndev);
	ndev->stats.tx_errors++;

	if (++vif->cons_tx_timeout_cnt > QTNF_TX_TIMEOUT_TRSHLD) {
		pr_err("Tx timeout threshold exceeded !\n");
		pr_err("schedule interface %s reset !\n", netdev_name(ndev));
		queue_work(bus->workqueue, &vif->reset_work);
	}
}
Example No. 14
/* some work can't be done in tasklets, so we use keventd
 *
 * NOTE:  annoying asymmetry:  if it's active, schedule_work() fails,
 * but tasklet_schedule() doesn't.  hope the failure is rare.
 */
void usbnet_defer_kevent (struct usbnet *dev, int work)
{
	set_bit (work, &dev->flags);
	if (!schedule_work (&dev->kevent)) {
		/* BEGIN OUYA
		 * Rate limit these error messages.  The allocation failure
		 * that triggers this message is already rate limited, but
		 * under heavy cpu load, the number of times that usbnet
		 * attempts to defer the kevent becomes too excessive to
		 * print each time.  (ex: >5000 lines in the kernel log for
		 * a single "batch" of failures - this causes massive system
		 * slowdown).
		 *
		 * Note: I'm keeping the general format that netdev_err
		 * translates into, but I can't seem to access the bus_id
		 * field, and so this message does not have it.  Everything
		 * else is the same as netdev_err.
		 */
		printk_ratelimited(KERN_ERR "%s: %s: kevent %d may have been dropped\n",
				   dev_driver_string((dev->net)->dev.parent),
				   netdev_name(dev->net),
				   work);
		/* netdev_err(dev->net, "kevent %d may have been dropped\n", work); */
		/* END OUYA */
	}
	else
		netdev_dbg(dev->net, "kevent %d scheduled\n", work);
}
Example No. 15
/*
 * Create a SYN packet and send it to the real server (RS).
 * ATTENTION: we also store the SYN skb in cp if SYN retransmission
 * is turned on.
 */
static int
syn_proxy_send_rs_syn(int af, const struct tcphdr *th,
		      struct ip_vs_conn *cp, struct sk_buff *skb,
		      struct ip_vs_protocol *pp, struct ip_vs_synproxy_opt *opt)
{
	struct sk_buff *syn_skb;
	int tcp_hdr_size;
	__u8 tcp_flags = TCPCB_FLAG_SYN;
	unsigned int tcphoff;
	struct tcphdr *new_th;

	if (!cp->packet_xmit) {
		IP_VS_ERR_RL("warning: packet_xmit is null");
		return 0;
	}

	syn_skb = alloc_skb(MAX_TCP_HEADER + 15, GFP_ATOMIC);
	if (unlikely(syn_skb == NULL)) {
		IP_VS_ERR_RL("alloc skb failed when send rs syn packet\n");
		return 0;
	}

	/* Reserve space for headers */
	skb_reserve(syn_skb, MAX_TCP_HEADER);
	tcp_hdr_size = (sizeof(struct tcphdr) + TCPOLEN_MSS +
			(opt->tstamp_ok ? TCPOLEN_TSTAMP_ALIGNED : 0) +
			(opt->wscale_ok ? TCPOLEN_WSCALE_ALIGNED : 0) +
			/* SACK_PERM is in the place of NOP NOP of TS */
			((opt->sack_ok
			  && !opt->tstamp_ok) ? TCPOLEN_SACKPERM_ALIGNED : 0));

	new_th = (struct tcphdr *)skb_push(syn_skb, tcp_hdr_size);
	/* Compose tcp header */
	skb_reset_transport_header(syn_skb);
	syn_skb->csum = 0;

	/* Set tcp hdr */
	new_th->source = th->source;
	new_th->dest = th->dest;
	new_th->seq = htonl(ntohl(th->seq) - 1);
	new_th->ack_seq = 0;
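	/* 16-bit word 6 of the TCP header packs the 4-bit data offset
	 * (header length in 32-bit words) into the top nibble alongside
	 * the flag bits, so both are written with a single store. */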
	*(((__u16 *) new_th) + 6) =
	    htons(((tcp_hdr_size >> 2) << 12) | tcp_flags);
	/* FIX_ME: what window should we use */
	new_th->window = htons(5000);
	new_th->check = 0;
	new_th->urg_ptr = 0;
	new_th->urg = 0;
	new_th->ece = 0;
	new_th->cwr = 0;

	syn_proxy_syn_build_options((__be32 *) (new_th + 1), opt);

	/*
	 * Set ip hdr
	 * Attention: set source and dest addr to ack skb's.
	 * we rely on packet_xmit func to do NATs thing.
	 */
#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6) {
		struct ipv6hdr *ack_iph = ipv6_hdr(skb);
		struct ipv6hdr *iph =
		    (struct ipv6hdr *)skb_push(syn_skb, sizeof(struct ipv6hdr));

		tcphoff = sizeof(struct ipv6hdr);
		skb_reset_network_header(syn_skb);
		memcpy(&iph->saddr, &ack_iph->saddr, sizeof(struct in6_addr));
		memcpy(&iph->daddr, &ack_iph->daddr, sizeof(struct in6_addr));

		iph->version = 6;
		iph->nexthdr = NEXTHDR_TCP;
		iph->payload_len = htons(tcp_hdr_size);
		iph->hop_limit = IPV6_DEFAULT_HOPLIMIT;

		new_th->check = 0;
		syn_skb->csum =
		    skb_checksum(syn_skb, tcphoff, syn_skb->len - tcphoff, 0);
		new_th->check =
		    csum_ipv6_magic(&iph->saddr, &iph->daddr,
				    syn_skb->len - tcphoff, IPPROTO_TCP,
				    syn_skb->csum);
	} else
#endif
	{
		struct iphdr *ack_iph = ip_hdr(skb);
		u32 rtos = RT_TOS(ack_iph->tos);
		struct iphdr *iph =
		    (struct iphdr *)skb_push(syn_skb, sizeof(struct iphdr));

		tcphoff = sizeof(struct iphdr);
		skb_reset_network_header(syn_skb);
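		/* First 16-bit word of the IPv4 header: version = 4 (top
		 * nibble), IHL = 5 words (next nibble), then the TOS byte. */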
		*((__u16 *) iph) = htons((4 << 12) | (5 << 8) | (rtos & 0xff));
		iph->tot_len = htons(syn_skb->len);
		iph->frag_off = htons(IP_DF);
		/* FIX_ME: what ttl should we use */
		iph->ttl = IPDEFTTL;
		iph->protocol = IPPROTO_TCP;
		iph->saddr = ack_iph->saddr;
		iph->daddr = ack_iph->daddr;

		ip_send_check(iph);

		new_th->check = 0;
		syn_skb->csum =
		    skb_checksum(syn_skb, tcphoff, syn_skb->len - tcphoff, 0);
		new_th->check =
		    csum_tcpudp_magic(iph->saddr, iph->daddr,
				      syn_skb->len - tcphoff, IPPROTO_TCP,
				      syn_skb->csum);
	}

	/* Save syn_skb if syn retransmission is on  */
	if (sysctl_ip_vs_synproxy_syn_retry > 0) {
		cp->syn_skb = skb_copy(syn_skb, GFP_ATOMIC);
		atomic_set(&cp->syn_retry_max, sysctl_ip_vs_synproxy_syn_retry);
	}

	/* Save info for fast_response_xmit */
	if(sysctl_ip_vs_fast_xmit && skb->dev &&
				likely(skb->dev->type == ARPHRD_ETHER) &&
				skb_mac_header_was_set(skb)) {
		struct ethhdr *eth = (struct ethhdr *)skb_mac_header(skb);

		if(likely(cp->indev == NULL)) {
			cp->indev = skb->dev;
			dev_hold(cp->indev);
		}

		if (unlikely(cp->indev != skb->dev)) {
			dev_put(cp->indev);
			cp->indev = skb->dev;
			dev_hold(cp->indev);
		}

		memcpy(cp->src_hwaddr, eth->h_source, ETH_ALEN);
		memcpy(cp->dst_hwaddr, eth->h_dest, ETH_ALEN);
		IP_VS_INC_ESTATS(ip_vs_esmib, FAST_XMIT_SYNPROXY_SAVE);
		IP_VS_DBG_RL("syn_proxy_send_rs_syn netdevice:%s\n",
						netdev_name(skb->dev));
	}

	/* count in the syn packet */
	ip_vs_in_stats(cp, skb);

	/* If xmit failed, syn_skb will be freed correctly. */
	cp->packet_xmit(syn_skb, cp, pp);

	return 1;
}
Example No. 16
static int amd_xgbe_phy_config_init(struct phy_device *phydev)
{
	struct amd_xgbe_phy_priv *priv = phydev->priv;
	struct net_device *netdev = phydev->attached_dev;
	int ret;

	if (!priv->an_irq_allocated) {
		/* Allocate the auto-negotiation workqueue and interrupt */
		snprintf(priv->an_irq_name, sizeof(priv->an_irq_name) - 1,
			 "%s-pcs", netdev_name(netdev));

		priv->an_workqueue =
			create_singlethread_workqueue(priv->an_irq_name);
		if (!priv->an_workqueue) {
			netdev_err(netdev, "phy workqueue creation failed\n");
			return -ENOMEM;
		}

		ret = devm_request_irq(priv->dev, priv->an_irq,
				       amd_xgbe_an_isr, 0, priv->an_irq_name,
				       priv);
		if (ret) {
			netdev_err(netdev, "phy irq request failed\n");
			destroy_workqueue(priv->an_workqueue);
			return ret;
		}

		priv->an_irq_allocated = 1;
	}

	ret = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_PMA_10GBR_FEC_ABILITY);
	if (ret < 0)
		return ret;
	priv->fec_ability = ret & XGBE_PHY_FEC_MASK;

	/* Initialize supported features */
	phydev->supported = SUPPORTED_Autoneg;
	phydev->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
	phydev->supported |= SUPPORTED_Backplane;
	phydev->supported |= SUPPORTED_10000baseKR_Full;
	switch (priv->speed_set) {
	case AMD_XGBE_PHY_SPEEDSET_1000_10000:
		phydev->supported |= SUPPORTED_1000baseKX_Full;
		break;
	case AMD_XGBE_PHY_SPEEDSET_2500_10000:
		phydev->supported |= SUPPORTED_2500baseX_Full;
		break;
	}

	if (priv->fec_ability & XGBE_PHY_FEC_ENABLE)
		phydev->supported |= SUPPORTED_10000baseR_FEC;

	phydev->advertising = phydev->supported;

	/* Set initial mode - call the mode setting routines
	 * directly to insure we are properly configured
	 */
	if (phydev->supported & SUPPORTED_10000baseKR_Full)
		ret = amd_xgbe_phy_xgmii_mode(phydev);
	else if (phydev->supported & SUPPORTED_1000baseKX_Full)
		ret = amd_xgbe_phy_gmii_mode(phydev);
	else if (phydev->supported & SUPPORTED_2500baseX_Full)
		ret = amd_xgbe_phy_gmii_2500_mode(phydev);
	else
		ret = -EINVAL;
	if (ret < 0)
		return ret;

	/* Set up advertisement registers based on current settings */
	ret = amd_xgbe_an_init(phydev);
	if (ret)
		return ret;

	/* Enable auto-negotiation interrupts */
	phy_write_mmd(phydev, MDIO_MMD_AN, MDIO_AN_INTMASK, 0x07);

	return 0;
}
Example No. 17
/*
 * Report the configuration for this PF
 */
static ssize_t
usnic_ib_show_config(struct device *device, struct device_attribute *attr,
			char *buf)
{
	struct usnic_ib_dev *us_ibdev;
	char *ptr;
	unsigned left;
	unsigned n;
	enum usnic_vnic_res_type res_type;

	us_ibdev = container_of(device, struct usnic_ib_dev, ib_dev.dev);

	/* Buffer space limit is 1 page */
	ptr = buf;
	left = PAGE_SIZE;

	mutex_lock(&us_ibdev->usdev_lock);
	if (atomic_read(&us_ibdev->vf_cnt.refcount) > 0) {
		char *busname;

		/*
		 * bus name seems to come with annoying prefix.
		 * Remove it if it is predictable
		 */
		busname = us_ibdev->pdev->bus->name;
		if (strncmp(busname, "PCI Bus ", 8) == 0)
			busname += 8;

		n = scnprintf(ptr, left,
			"%s: %s:%d.%d, %s, %pM, %u VFs\n Per VF:",
			us_ibdev->ib_dev.name,
			busname,
			PCI_SLOT(us_ibdev->pdev->devfn),
			PCI_FUNC(us_ibdev->pdev->devfn),
			netdev_name(us_ibdev->netdev),
			us_ibdev->ufdev->mac,
			atomic_read(&us_ibdev->vf_cnt.refcount));
		UPDATE_PTR_LEFT(n, ptr, left);

		for (res_type = USNIC_VNIC_RES_TYPE_EOL;
				res_type < USNIC_VNIC_RES_TYPE_MAX;
				res_type++) {
			if (us_ibdev->vf_res_cnt[res_type] == 0)
				continue;
			n = scnprintf(ptr, left, " %d %s%s",
				us_ibdev->vf_res_cnt[res_type],
				usnic_vnic_res_type_to_str(res_type),
				(res_type < (USNIC_VNIC_RES_TYPE_MAX - 1)) ?
				 "," : "");
			UPDATE_PTR_LEFT(n, ptr, left);
		}
		n = scnprintf(ptr, left, "\n");
		UPDATE_PTR_LEFT(n, ptr, left);
	} else {
		n = scnprintf(ptr, left, "%s: no VFs\n",
				us_ibdev->ib_dev.name);
		UPDATE_PTR_LEFT(n, ptr, left);
	}
	mutex_unlock(&us_ibdev->usdev_lock);

	return ptr - buf;
}
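UPDATE_PTR_LEFT is used above but not shown; in the usnic driver it is a small bookkeeping macro along these lines (a sketch, not the verified definition):

/* Advance the output pointer and shrink the remaining space after
 * each scnprintf(), as used in usnic_ib_show_config() above. */
#define UPDATE_PTR_LEFT(N, P, L)	\
do {					\
	(P) += (N);			\
	(L) -= (N);			\
} while (0)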
Example No. 18
static int xgbe_probe(struct platform_device *pdev)
{
	struct xgbe_prv_data *pdata;
	struct net_device *netdev;
	struct device *dev = &pdev->dev, *phy_dev;
	struct platform_device *phy_pdev;
	struct resource *res;
	const char *phy_mode;
	unsigned int i, phy_memnum, phy_irqnum;
	enum dev_dma_attr attr;
	int ret;

	DBGPR("--> xgbe_probe\n");

	netdev = alloc_etherdev_mq(sizeof(struct xgbe_prv_data),
				   XGBE_MAX_DMA_CHANNELS);
	if (!netdev) {
		dev_err(dev, "alloc_etherdev failed\n");
		ret = -ENOMEM;
		goto err_alloc;
	}
	SET_NETDEV_DEV(netdev, dev);
	pdata = netdev_priv(netdev);
	pdata->netdev = netdev;
	pdata->pdev = pdev;
	pdata->adev = ACPI_COMPANION(dev);
	pdata->dev = dev;
	platform_set_drvdata(pdev, netdev);

	spin_lock_init(&pdata->lock);
	spin_lock_init(&pdata->xpcs_lock);
	mutex_init(&pdata->rss_mutex);
	spin_lock_init(&pdata->tstamp_lock);

	pdata->msg_enable = netif_msg_init(debug, default_msg_level);

	set_bit(XGBE_DOWN, &pdata->dev_state);

	/* Check if we should use ACPI or DT */
	pdata->use_acpi = dev->of_node ? 0 : 1;

	phy_pdev = xgbe_get_phy_pdev(pdata);
	if (!phy_pdev) {
		dev_err(dev, "unable to obtain phy device\n");
		ret = -EINVAL;
		goto err_phydev;
	}
	phy_dev = &phy_pdev->dev;

	if (pdev == phy_pdev) {
		/* New style device tree or ACPI:
		 *   The XGBE and PHY resources are grouped together with
		 *   the PHY resources listed last
		 */
		phy_memnum = xgbe_resource_count(pdev, IORESOURCE_MEM) - 3;
		phy_irqnum = xgbe_resource_count(pdev, IORESOURCE_IRQ) - 1;
	} else {
		/* Old style device tree:
		 *   The XGBE and PHY resources are separate
		 */
		phy_memnum = 0;
		phy_irqnum = 0;
	}

	/* Set and validate the number of descriptors for a ring */
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_TX_DESC_CNT);
	pdata->tx_desc_count = XGBE_TX_DESC_CNT;
	if (pdata->tx_desc_count & (pdata->tx_desc_count - 1)) {
		dev_err(dev, "tx descriptor count (%d) is not valid\n",
			pdata->tx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}
	BUILD_BUG_ON_NOT_POWER_OF_2(XGBE_RX_DESC_CNT);
	pdata->rx_desc_count = XGBE_RX_DESC_CNT;
	if (pdata->rx_desc_count & (pdata->rx_desc_count - 1)) {
		dev_err(dev, "rx descriptor count (%d) is not valid\n",
			pdata->rx_desc_count);
		ret = -EINVAL;
		goto err_io;
	}

	/* Obtain the mmio areas for the device */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	pdata->xgmac_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xgmac_regs)) {
		dev_err(dev, "xgmac ioremap failed\n");
		ret = PTR_ERR(pdata->xgmac_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	pdata->xpcs_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->xpcs_regs)) {
		dev_err(dev, "xpcs ioremap failed\n");
		ret = PTR_ERR(pdata->xpcs_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xpcs_regs  = %p\n", pdata->xpcs_regs);

	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
	pdata->rxtx_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->rxtx_regs)) {
		dev_err(dev, "rxtx ioremap failed\n");
		ret = PTR_ERR(pdata->rxtx_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "rxtx_regs  = %p\n", pdata->rxtx_regs);

	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
	pdata->sir0_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->sir0_regs)) {
		dev_err(dev, "sir0 ioremap failed\n");
		ret = PTR_ERR(pdata->sir0_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "sir0_regs  = %p\n", pdata->sir0_regs);

	res = platform_get_resource(phy_pdev, IORESOURCE_MEM, phy_memnum++);
	pdata->sir1_regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(pdata->sir1_regs)) {
		dev_err(dev, "sir1 ioremap failed\n");
		ret = PTR_ERR(pdata->sir1_regs);
		goto err_io;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "sir1_regs  = %p\n", pdata->sir1_regs);

	/* Retrieve the MAC address */
	ret = device_property_read_u8_array(dev, XGBE_MAC_ADDR_PROPERTY,
					    pdata->mac_addr,
					    sizeof(pdata->mac_addr));
	if (ret || !is_valid_ether_addr(pdata->mac_addr)) {
		dev_err(dev, "invalid %s property\n", XGBE_MAC_ADDR_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}

	/* Retrieve the PHY mode - it must be "xgmii" */
	ret = device_property_read_string(dev, XGBE_PHY_MODE_PROPERTY,
					  &phy_mode);
	if (ret || strcmp(phy_mode, phy_modes(PHY_INTERFACE_MODE_XGMII))) {
		dev_err(dev, "invalid %s property\n", XGBE_PHY_MODE_PROPERTY);
		if (!ret)
			ret = -EINVAL;
		goto err_io;
	}
	pdata->phy_mode = PHY_INTERFACE_MODE_XGMII;

	/* Check for per channel interrupt support */
	if (device_property_present(dev, XGBE_DMA_IRQS_PROPERTY))
		pdata->per_channel_irq = 1;

	/* Retrieve the PHY speedset */
	ret = device_property_read_u32(phy_dev, XGBE_SPEEDSET_PROPERTY,
				       &pdata->speed_set);
	if (ret) {
		dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
		goto err_io;
	}

	switch (pdata->speed_set) {
	case XGBE_SPEEDSET_1000_10000:
	case XGBE_SPEEDSET_2500_10000:
		break;
	default:
		dev_err(dev, "invalid %s property\n", XGBE_SPEEDSET_PROPERTY);
		ret = -EINVAL;
		goto err_io;
	}

	/* Retrieve the PHY configuration properties */
	if (device_property_present(phy_dev, XGBE_BLWC_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_BLWC_PROPERTY,
						     pdata->serdes_blwc,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_BLWC_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_blwc, xgbe_serdes_blwc,
		       sizeof(pdata->serdes_blwc));
	}

	if (device_property_present(phy_dev, XGBE_CDR_RATE_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_CDR_RATE_PROPERTY,
						     pdata->serdes_cdr_rate,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_CDR_RATE_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_cdr_rate, xgbe_serdes_cdr_rate,
		       sizeof(pdata->serdes_cdr_rate));
	}

	if (device_property_present(phy_dev, XGBE_PQ_SKEW_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_PQ_SKEW_PROPERTY,
						     pdata->serdes_pq_skew,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_PQ_SKEW_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_pq_skew, xgbe_serdes_pq_skew,
		       sizeof(pdata->serdes_pq_skew));
	}

	if (device_property_present(phy_dev, XGBE_TX_AMP_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_TX_AMP_PROPERTY,
						     pdata->serdes_tx_amp,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_TX_AMP_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_tx_amp, xgbe_serdes_tx_amp,
		       sizeof(pdata->serdes_tx_amp));
	}

	if (device_property_present(phy_dev, XGBE_DFE_CFG_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_DFE_CFG_PROPERTY,
						     pdata->serdes_dfe_tap_cfg,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_DFE_CFG_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_dfe_tap_cfg, xgbe_serdes_dfe_tap_cfg,
		       sizeof(pdata->serdes_dfe_tap_cfg));
	}

	if (device_property_present(phy_dev, XGBE_DFE_ENA_PROPERTY)) {
		ret = device_property_read_u32_array(phy_dev,
						     XGBE_DFE_ENA_PROPERTY,
						     pdata->serdes_dfe_tap_ena,
						     XGBE_SPEEDS);
		if (ret) {
			dev_err(dev, "invalid %s property\n",
				XGBE_DFE_ENA_PROPERTY);
			goto err_io;
		}
	} else {
		memcpy(pdata->serdes_dfe_tap_ena, xgbe_serdes_dfe_tap_ena,
		       sizeof(pdata->serdes_dfe_tap_ena));
	}

	/* Obtain device settings unique to ACPI/OF */
	if (pdata->use_acpi)
		ret = xgbe_acpi_support(pdata);
	else
		ret = xgbe_of_support(pdata);
	if (ret)
		goto err_io;

	/* Set the DMA coherency values */
	attr = device_get_dma_attr(dev);
	if (attr == DEV_DMA_NOT_SUPPORTED) {
		dev_err(dev, "DMA is not supported");
		ret = -ENODEV;
		goto err_io;
	}
	pdata->coherent = (attr == DEV_DMA_COHERENT);
	if (pdata->coherent) {
		pdata->axdomain = XGBE_DMA_OS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_OS_ARCACHE;
		pdata->awcache = XGBE_DMA_OS_AWCACHE;
	} else {
		pdata->axdomain = XGBE_DMA_SYS_AXDOMAIN;
		pdata->arcache = XGBE_DMA_SYS_ARCACHE;
		pdata->awcache = XGBE_DMA_SYS_AWCACHE;
	}

	/* Get the device interrupt */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq 0 failed\n");
		goto err_io;
	}
	pdata->dev_irq = ret;

	/* Get the auto-negotiation interrupt */
	ret = platform_get_irq(phy_pdev, phy_irqnum++);
	if (ret < 0) {
		dev_err(dev, "platform_get_irq phy 0 failed\n");
		goto err_io;
	}
	pdata->an_irq = ret;

	netdev->irq = pdata->dev_irq;
	netdev->base_addr = (unsigned long)pdata->xgmac_regs;
	memcpy(netdev->dev_addr, pdata->mac_addr, netdev->addr_len);

	/* Set all the function pointers */
	xgbe_init_all_fptrs(pdata);

	/* Issue software reset to device */
	pdata->hw_if.exit(pdata);

	/* Populate the hardware features */
	xgbe_get_all_hw_features(pdata);

	/* Set default configuration data */
	xgbe_default_config(pdata);

	/* Set the DMA mask */
	ret = dma_set_mask_and_coherent(dev,
					DMA_BIT_MASK(pdata->hw_feat.dma_width));
	if (ret) {
		dev_err(dev, "dma_set_mask_and_coherent failed\n");
		goto err_io;
	}

	/* Calculate the number of Tx and Rx rings to be created
	 *  -Tx (DMA) Channels map 1-to-1 to Tx Queues so set
	 *   the number of Tx queues to the number of Tx channels
	 *   enabled
	 *  -Rx (DMA) Channels do not map 1-to-1 so use the actual
	 *   number of Rx queues
	 */
	pdata->tx_ring_count = min_t(unsigned int, num_online_cpus(),
				     pdata->hw_feat.tx_ch_cnt);
	pdata->tx_q_count = pdata->tx_ring_count;
	ret = netif_set_real_num_tx_queues(netdev, pdata->tx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real tx queue count\n");
		goto err_io;
	}

	pdata->rx_ring_count = min_t(unsigned int,
				     netif_get_num_default_rss_queues(),
				     pdata->hw_feat.rx_ch_cnt);
	pdata->rx_q_count = pdata->hw_feat.rx_q_cnt;
	ret = netif_set_real_num_rx_queues(netdev, pdata->rx_ring_count);
	if (ret) {
		dev_err(dev, "error setting real rx queue count\n");
		goto err_io;
	}

	/* Initialize RSS hash key and lookup table */
	netdev_rss_key_fill(pdata->rss_key, sizeof(pdata->rss_key));

	for (i = 0; i < XGBE_RSS_MAX_TABLE_SIZE; i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH,
			       i % pdata->rx_ring_count);

	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, IP2TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, TCP4TE, 1);
	XGMAC_SET_BITS(pdata->rss_options, MAC_RSSCR, UDP4TE, 1);

	/* Call MDIO/PHY initialization routine */
	pdata->phy_if.phy_init(pdata);

	/* Set device operations */
	netdev->netdev_ops = xgbe_get_netdev_ops();
	netdev->ethtool_ops = xgbe_get_ethtool_ops();
#ifdef CONFIG_AMD_XGBE_DCB
	netdev->dcbnl_ops = xgbe_get_dcbnl_ops();
#endif

	/* Set device features */
	netdev->hw_features = NETIF_F_SG |
			      NETIF_F_IP_CSUM |
			      NETIF_F_IPV6_CSUM |
			      NETIF_F_RXCSUM |
			      NETIF_F_TSO |
			      NETIF_F_TSO6 |
			      NETIF_F_GRO |
			      NETIF_F_HW_VLAN_CTAG_RX |
			      NETIF_F_HW_VLAN_CTAG_TX |
			      NETIF_F_HW_VLAN_CTAG_FILTER;

	if (pdata->hw_feat.rss)
		netdev->hw_features |= NETIF_F_RXHASH;

	netdev->vlan_features |= NETIF_F_SG |
				 NETIF_F_IP_CSUM |
				 NETIF_F_IPV6_CSUM |
				 NETIF_F_TSO |
				 NETIF_F_TSO6;

	netdev->features |= netdev->hw_features;
	pdata->netdev_features = netdev->features;

	netdev->priv_flags |= IFF_UNICAST_FLT;

	/* Use default watchdog timeout */
	netdev->watchdog_timeo = 0;

	xgbe_init_rx_coalesce(pdata);
	xgbe_init_tx_coalesce(pdata);

	netif_carrier_off(netdev);
	ret = register_netdev(netdev);
	if (ret) {
		dev_err(dev, "net device registration failed\n");
		goto err_io;
	}

	/* Create the PHY/ANEG name based on netdev name */
	snprintf(pdata->an_name, sizeof(pdata->an_name) - 1, "%s-pcs",
		 netdev_name(netdev));

	/* Create workqueues */
	pdata->dev_workqueue =
		create_singlethread_workqueue(netdev_name(netdev));
	if (!pdata->dev_workqueue) {
		netdev_err(netdev, "device workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_netdev;
	}

	pdata->an_workqueue =
		create_singlethread_workqueue(pdata->an_name);
	if (!pdata->an_workqueue) {
		netdev_err(netdev, "phy workqueue creation failed\n");
		ret = -ENOMEM;
		goto err_wq;
	}

	xgbe_ptp_register(pdata);

	xgbe_debugfs_init(pdata);

	platform_device_put(phy_pdev);

	netdev_notice(netdev, "net device enabled\n");

	DBGPR("<-- xgbe_probe\n");

	return 0;

err_wq:
	destroy_workqueue(pdata->dev_workqueue);

err_netdev:
	unregister_netdev(netdev);

err_io:
	platform_device_put(phy_pdev);

err_phydev:
	free_netdev(netdev);

err_alloc:
	dev_notice(dev, "net device not enabled\n");

	return ret;
}
Example No. 19
int __netdev_printk(const char *level, const struct net_device *dev,
			   struct va_format *vaf)
{
	int r;

	if (dev && dev->dev.parent)
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,35))
		r = dev_printk(level, dev->dev.parent, "%s: %pV",
			       netdev_name(dev), vaf);
#else
		/* XXX: this could likely be done better but I'm lazy */
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
#endif
	else if (dev)
		r = printk("%s%s: %pV", level, netdev_name(dev), vaf);
	else
		r = printk("%s(NULL net_device): %pV", level, vaf);

	return r;
}
EXPORT_SYMBOL_GPL(__netdev_printk);
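Callers reach __netdev_printk() through a varargs wrapper that packs the format string and va_list into a struct va_format. A sketch following the kernel's netdev_printk() pattern (reconstructed, not taken from the code above):

int netdev_printk(const char *level, const struct net_device *dev,
		  const char *format, ...)
{
	struct va_format vaf;
	va_list args;
	int r;

	va_start(args, format);

	/* %pV consumers dereference vaf.fmt/vaf.va when printing */
	vaf.fmt = format;
	vaf.va = &args;

	r = __netdev_printk(level, dev, &vaf);

	va_end(args);

	return r;
}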

int __ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	ASSERT_RTNL();

	if (!dev->ethtool_ops || !dev->ethtool_ops->get_settings)
		return -EOPNOTSUPP;

	memset(cmd, 0, sizeof(struct ethtool_cmd));