Example #1
static int xennet_open(struct net_device *dev)
{
	struct netfront_info *np = netdev_priv(dev);

	napi_enable(&np->napi);

	spin_lock_bh(&np->rx_lock);
	if (netif_carrier_ok(dev)) {
		xennet_alloc_rx_buffers(dev);
		np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
		if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx))
			napi_schedule(&np->napi);
	}
	spin_unlock_bh(&np->rx_lock);

	netif_start_queue(dev);

	return 0;
}
Example #2
/* Schematic open(): start the hardware and mask interrupts, request the
 * IRQ, and only enable NAPI and bring the interface up if that succeeds. */
int drv_open(void)
{
    int rc;
    int ret;

    (*hw_start)();

    IntrMask = 0;

    rc = request_irq();

    if (rc < 0)
        ret = rc;
    else {
        napi_enable();
        dev_up();
        ret = 0;
    }
    return ret;
}
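The schematic above requests the IRQ before enabling NAPI, so its error path has nothing to undo. Several of the examples that follow (ixpdev_open, fs_enet_open) enable NAPI first and therefore must call napi_disable() when request_irq() fails. A minimal sketch of that variant; the hw_open(), my_isr(), and struct hw_open_priv names are hypothetical and not taken from any of the drivers shown here:

#include <linux/interrupt.h>
#include <linux/netdevice.h>

/* Hypothetical per-device private data; only what the sketch needs. */
struct hw_open_priv {
	struct napi_struct napi;
	int irq;
};

static irqreturn_t my_isr(int irq, void *dev_id);	/* assumed to exist */

static int hw_open(struct net_device *dev)
{
	struct hw_open_priv *priv = netdev_priv(dev);
	int err;

	/* NAPI is enabled before the IRQ is live ... */
	napi_enable(&priv->napi);

	err = request_irq(priv->irq, my_isr, 0, dev->name, dev);
	if (err) {
		/* ... so a failed request_irq() has to undo the enable. */
		napi_disable(&priv->napi);
		return err;
	}

	netif_start_queue(dev);
	return 0;
}

The same ordering constraint shows up in the larger drivers below: whatever was enabled before a failing step is torn down in reverse order on the error labels.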
Example #3
static int c_can_open(struct net_device *dev)
{
	int err;
	struct c_can_priv *priv = netdev_priv(dev);

	c_can_pm_runtime_get_sync(priv);
	c_can_reset_ram(priv, true);

	/* open the can device */
	err = open_candev(dev);
	if (err) {
		netdev_err(dev, "failed to open can device\n");
		goto exit_open_fail;
	}

	/* register interrupt handler */
	err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name,
				dev);
	if (err < 0) {
		netdev_err(dev, "failed to request interrupt\n");
		goto exit_irq_fail;
	}

	napi_enable(&priv->napi);

	can_led_event(dev, CAN_LED_EVENT_OPEN);

	/* start the c_can controller */
	c_can_start(dev);

	netif_start_queue(dev);

	return 0;

exit_irq_fail:
	close_candev(dev);
exit_open_fail:
	c_can_reset_ram(priv, false);
	c_can_pm_runtime_put_sync(priv);
	return err;
}
Example #4
static int yatse_open(struct net_device *ndev){
	struct yatse_private *priv = netdev_priv(ndev);
	int ret;
	unsigned long flags;

	printk(KERN_INFO "yatse: opening\n");

	priv->link = 0;

#if 0
	ret = dma_set_mask(&ndev->dev, 0xffffffffULL); /* 32-bit DMA addresses */
	if(ret) goto out;
#endif

	spin_lock_irqsave(&priv->dma.rx_lock, flags);
	spin_lock(&priv->dma.tx_lock);
	ret = yatse_dma_init(ndev);
	spin_unlock(&priv->dma.tx_lock);
	spin_unlock_irqrestore(&priv->dma.rx_lock, flags);
	if(ret) goto out;

	tasklet_init(&priv->tx_tasklet, yatse_tx_complete, (unsigned long)ndev);

	ret = yatse_init_phy(ndev);
	if(ret) goto out;

	ret = yatse_init_mac(ndev);
	if(ret) goto out;


	napi_enable(&priv->napi);

	phy_start(priv->phydev);
	if(priv->phy_irq != PHY_POLL) phy_start_interrupts(priv->phydev);
	netif_start_queue(ndev);

	printk(KERN_INFO "yatse: open done, interface up\n");

out:
	return ret;
}
Example #5
/******************************************************************************
 * struct net_device_ops functions
 *****************************************************************************/
static int ftmac100_open(struct net_device *netdev)
{
	struct ftmac100 *priv = netdev_priv(netdev);
	int err;

	err = ftmac100_alloc_buffers(priv);
	if (err) {
		netdev_err(netdev, "failed to allocate buffers\n");
		goto err_alloc;
	}

	err = request_irq(priv->irq, ftmac100_interrupt, 0, netdev->name, netdev);
	if (err) {
		netdev_err(netdev, "failed to request irq %d\n", priv->irq);
		goto err_irq;
	}

	priv->rx_pointer = 0;
	priv->tx_clean_pointer = 0;
	priv->tx_pointer = 0;
	priv->tx_pending = 0;

	err = ftmac100_start_hw(priv);
	if (err)
		goto err_hw;

	napi_enable(&priv->napi);
	netif_start_queue(netdev);

	ftmac100_enable_all_int(priv);

	return 0;

err_hw:
	free_irq(priv->irq, netdev);
err_irq:
	ftmac100_free_buffers(priv);
err_alloc:
	return err;
}
Example #6
/* fjes_open - Called when a network interface is made active */
static int fjes_open(struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;
	int result;

	if (adapter->open_guard)
		return -ENXIO;

	result = fjes_setup_resources(adapter);
	if (result)
		goto err_setup_res;

	hw->txrx_stop_req_bit = 0;
	hw->epstop_req_bit = 0;

	napi_enable(&adapter->napi);

	fjes_hw_capture_interrupt_status(hw);

	result = fjes_request_irq(adapter);
	if (result)
		goto err_req_irq;

	fjes_hw_set_irqmask(hw, REG_ICTL_MASK_ALL, false);

	netif_tx_start_all_queues(netdev);
	netif_carrier_on(netdev);

	return 0;

err_req_irq:
	fjes_free_irq(adapter);
	napi_disable(&adapter->napi);

err_setup_res:
	fjes_free_resources(adapter);
	return result;
}
Example #7
static int stmmac_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct stmmac_priv *priv = netdev_priv(ndev);

	if (!netif_running(ndev))
		return 0;

	spin_lock(&priv->lock);

	/* Power Down bit, into the PM register, is cleared
	 * automatically as soon as a magic packet or a Wake-up frame
	 * is received. Anyway, it's better to manually clear
	 * this bit because it can generate problems while resuming
	 * from another devices (e.g. serial console). */
	if (device_may_wakeup(priv->device))
		priv->hw->mac->pmt(priv->ioaddr, 0);

	netif_device_attach(ndev);

	/* Enable the MAC and DMA */
	stmmac_enable_mac(priv->ioaddr);
	priv->hw->dma->start_tx(priv->ioaddr);
	priv->hw->dma->start_rx(priv->ioaddr);

#ifdef CONFIG_STMMAC_TIMER
	if (likely(priv->tm->enable))
		priv->tm->timer_start(tmrate);
#endif
	napi_enable(&priv->napi);

	if (priv->phydev)
		phy_start(priv->phydev);

	netif_start_queue(ndev);

	spin_unlock(&priv->lock);
	return 0;
}
Example #8
static int hisi_femac_net_open(struct net_device *dev)
{
	struct hisi_femac_priv *priv = netdev_priv(dev);

	hisi_femac_port_reset(priv);
	hisi_femac_set_hw_mac_addr(priv, dev->dev_addr);
	hisi_femac_rx_refill(priv);

	netif_carrier_off(dev);
	netdev_reset_queue(dev);
	netif_start_queue(dev);
	napi_enable(&priv->napi);

	priv->link_status = 0;
	if (dev->phydev)
		phy_start(dev->phydev);

	writel(IRQ_ENA_PORT0_MASK, priv->glb_base + GLB_IRQ_RAW);
	hisi_femac_irq_enable(priv, IRQ_ENA_ALL | IRQ_ENA_PORT0 | DEF_INT_MASK);

	return 0;
}
Example #9
static int sprdwl_resume(struct device *dev)
{
	struct net_device *ndev = dev_get_drvdata(dev);
	struct sprdwl_priv *priv = netdev_priv(ndev);
	int ret = 0;

	dev_info(dev, "%s\n", __func__);

	if (priv->pm_status == true) {
		mutex_unlock(&priv->wlan_sipc->pm_lock);
		priv->pm_status = false;
	}
#ifdef CONFIG_SPRDWL_PM_POWERSAVE
	ret = sprdwl_pm_exit_ps_cmd(priv->wlan_sipc);
	if (ret)
		dev_err(dev, "Failed to resume (%d)!\n", ret);

	napi_enable(&priv->napi);
	netif_device_attach(ndev);
#endif
	return ret;
}
Example #10
static int ixpdev_open(struct net_device *dev)
{
	struct ixpdev_priv *ip = netdev_priv(dev);
	int err;

	napi_enable(&ip->napi);
	if (!nds_open++) {
		err = request_irq(IRQ_IXP2000_THDA0, ixpdev_interrupt,
					IRQF_SHARED, "ixp2000_eth", nds);
		if (err) {
			nds_open--;
			napi_disable(&ip->napi);
			return err;
		}

		ixp2000_reg_write(IXP2000_IRQ_THD_ENABLE_SET_A_0, 0xffff);
	}

	set_port_admin_status(ip->channel, 1);
	netif_start_queue(dev);

	return 0;
}
Example #11
File: main.c Project: Lyude/linux
static int xge_open(struct net_device *ndev)
{
	struct xge_pdata *pdata = netdev_priv(ndev);
	int ret;

	ret = xge_create_desc_rings(ndev);
	if (ret)
		return ret;

	napi_enable(&pdata->napi);
	ret = xge_request_irq(ndev);
	if (ret)
		return ret;

	xge_intr_enable(pdata);
	xge_wr_csr(pdata, DMARXCTRL, 1);

	phy_start(ndev->phydev);
	xge_mac_enable(pdata);
	netif_start_queue(ndev);

	return 0;
}
Example #12
static int fs_enet_open(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	int r;
	int err;

	/* to initialize the fep->cur_rx,... */
	/* not doing this, will cause a crash in fs_enet_rx_napi */
	fs_init_bds(fep->ndev);

	if (fep->fpi->use_napi)
		napi_enable(&fep->napi);

	/* Install our interrupt handler. */
	r = request_irq(fep->interrupt, fs_enet_interrupt, IRQF_SHARED,
			"fs_enet-mac", dev);
	if (r != 0) {
		printk(KERN_ERR DRV_MODULE_NAME
		       ": %s Could not allocate FS_ENET IRQ!", dev->name);
		if (fep->fpi->use_napi)
			napi_disable(&fep->napi);
		return -EINVAL;
	}

	err = fs_init_phy(dev);
	if (err) {
		free_irq(fep->interrupt, dev);
		if (fep->fpi->use_napi)
			napi_disable(&fep->napi);
		return err;
	}
	phy_start(fep->phydev);

	netif_start_queue(dev);

	return 0;
}
Example #13
File: bgmac.c Project: 7799/linux
static int bgmac_open(struct net_device *net_dev)
{
	struct bgmac *bgmac = netdev_priv(net_dev);
	int err = 0;

	bgmac_chip_reset(bgmac);
	/* Specs say about reclaiming rings here, but we do that in DMA init */
	bgmac_chip_init(bgmac, true);

	err = request_irq(bgmac->core->irq, bgmac_interrupt, IRQF_SHARED,
			  KBUILD_MODNAME, net_dev);
	if (err < 0) {
		bgmac_err(bgmac, "IRQ request error: %d!\n", err);
		goto err_out;
	}
	napi_enable(&bgmac->napi);

	phy_start(bgmac->phy_dev);

	netif_carrier_on(net_dev);

err_out:
	return err;
}
Example #14
/**
 * nps_enet_open - Open the network device.
 * @ndev:       Pointer to the network device.
 *
 * returns: 0, on success or non-zero error value on failure.
 *
 * This function sets the MAC address, requests and enables an IRQ
 * for the ENET device and starts the Tx queue.
 */
static s32 nps_enet_open(struct net_device *ndev)
{
	struct nps_enet_priv *priv = netdev_priv(ndev);
	s32 err;

	/* Reset private variables */
	priv->tx_skb = NULL;
	priv->ge_mac_cfg_2_value = 0;
	priv->ge_mac_cfg_3_value = 0;

	/* ge_mac_cfg_3 default values */
	priv->ge_mac_cfg_3_value |=
		 NPS_ENET_GE_MAC_CFG_3_RX_IFG_TH << CFG_3_RX_IFG_TH_SHIFT;

	priv->ge_mac_cfg_3_value |=
		 NPS_ENET_GE_MAC_CFG_3_MAX_LEN << CFG_3_MAX_LEN_SHIFT;

	/* Disable HW device */
	nps_enet_hw_disable_control(ndev);

	/* irq Rx allocation */
	err = request_irq(priv->irq, nps_enet_irq_handler,
			  0, "enet-rx-tx", ndev);
	if (err)
		return err;

	napi_enable(&priv->napi);

	/* Enable HW device */
	nps_enet_hw_reset(ndev);
	nps_enet_hw_enable_control(ndev);

	netif_start_queue(ndev);

	return 0;
}
Example #15
static int hss_hdlc_open(struct net_device *dev)
{
	struct port *port = dev_to_port(dev);
	unsigned long flags;
	int i, err = 0;

	if ((err = hdlc_open(dev)))
		return err;

	if ((err = hss_load_firmware(port)))
		goto err_hdlc_close;

	if ((err = request_hdlc_queues(port)))
		goto err_hdlc_close;

	if ((err = init_hdlc_queues(port)))
		goto err_destroy_queues;

	spin_lock_irqsave(&npe_lock, flags);
	if (port->plat->open)
		if ((err = port->plat->open(port->id, dev,
					    hss_hdlc_set_carrier)))
			goto err_unlock;
	spin_unlock_irqrestore(&npe_lock, flags);

	
	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(queue_ids[port->id].rxfree,
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	napi_enable(&port->napi);
	netif_start_queue(dev);

	qmgr_set_irq(queue_ids[port->id].rx, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_rx_irq, dev);

	qmgr_set_irq(queue_ids[port->id].txdone, QUEUE_IRQ_SRC_NOT_EMPTY,
		     hss_hdlc_txdone_irq, dev);
	qmgr_enable_irq(queue_ids[port->id].txdone);

	ports_open++;

	hss_set_hdlc_cfg(port);
	hss_config(port);

	hss_start_hdlc(port);

	
	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);
	return 0;

err_unlock:
	spin_unlock_irqrestore(&npe_lock, flags);
err_destroy_queues:
	destroy_hdlc_queues(port);
	release_hdlc_queues(port);
err_hdlc_close:
	hdlc_close(dev);
	return err;
}
Example #16
File: en_cq.c Project: 020gzh/linux
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			int cq_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	int timestamp_en = 0;
	bool assigned_eq = false;

	cq->dev = mdev->pndev[priv->port];
	cq->mcq.set_ci_db  = cq->wqres.db.db;
	cq->mcq.arm_db     = cq->wqres.db.db + 1;
	*cq->mcq.set_ci_db = 0;
	*cq->mcq.arm_db    = 0;
	memset(cq->buf, 0, cq->buf_size);

	if (cq->is_tx == RX) {
		if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
					     cq->vector)) {
			cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);

			err = mlx4_assign_eq(mdev->dev, priv->port,
					     &cq->vector);
			if (err) {
				mlx4_err(mdev, "Failed assigning an EQ to CQ vector %d\n",
					 cq->vector);
				goto free_eq;
			}

			assigned_eq = true;
		}

		cq->irq_desc =
			irq_to_desc(mlx4_eq_get_irq(mdev->dev,
						    cq->vector));
	} else {
		/* For TX we use the same irq per
		ring we assigned for the RX    */
		struct mlx4_en_cq *rx_cq;

		cq_idx = cq_idx % priv->rx_ring_num;
		rx_cq = priv->rx_cq[cq_idx];
		cq->vector = rx_cq->vector;
	}

	if (!cq->is_tx)
		cq->size = priv->rx_ring[cq->ring]->actual_size;

	if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
	    (!cq->is_tx && priv->hwtstamp_config.rx_filter))
		timestamp_en = 1;

	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
			    &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
			    cq->vector, 0, timestamp_en);
	if (err)
		goto free_eq;

	cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
	cq->mcq.event = mlx4_en_cq_event;

	if (cq->is_tx)
		netif_tx_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
				  NAPI_POLL_WEIGHT);
	else
		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);

	napi_enable(&cq->napi);

	return 0;

free_eq:
	if (assigned_eq)
		mlx4_release_eq(mdev->dev, cq->vector);
	cq->vector = mdev->dev->caps.num_comp_vectors;
	return err;
}
Example #17
/*********************************************************** 
 * mv_eth_start --                                          *
 *   start a network device. connect and enable interrupts *
 *   set hw defaults. fill rx buffers. restart phy link    *
 *   auto neg. set device link flags. report status.       *
 ***********************************************************/
int mv_eth_start(struct net_device *dev)
{
    mv_eth_priv *priv = MV_ETH_PRIV(dev);
    int             err;

    ETH_DBG( ETH_DBG_LOAD, ("%s: starting... ", dev->name ) );

    /* in default link is down */
    netif_carrier_off(dev);

    /* Stop the TX queue - it will be enabled upon PHY status change after link-up interrupt/timer */
    netif_stop_queue(dev);

    /* enable polling on the port, must be used after netif_poll_disable */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)
    netif_poll_enable(dev);
#else
    napi_enable(&priv->napi);
#endif

    /* fill rx buffers, start rx/tx activity, set coalescing */
    if (mv_eth_start_internals( priv, dev->mtu) != 0) {
        printk(KERN_ERR "%s: start internals failed\n", dev->name);
        goto error;
    }

    if (priv->flags & MV_ETH_F_FORCED_LINK) {
        netif_carrier_on(dev);
        netif_wake_queue(dev);
    }
    else {
#ifdef CONFIG_MV_ETH_TOOL
        if ((err = mv_eth_tool_restore_settings(dev)) != 0) {
            printk(KERN_ERR "%s: mv_eth_tool_restore_settings failed %d\n", dev->name, err);
            goto error;
        }
        if (priv->autoneg_cfg == AUTONEG_DISABLE) {
            if (priv->flags & MV_ETH_F_LINK_UP) {
	        netif_carrier_on(dev);
	        netif_wake_queue(dev);
            }
        }
#else
        mv_eth_restart_autoneg(priv->port);
#endif /* CONFIG_MV_ETH_TOOL */
    }

    if (!(priv->flags & MV_ETH_F_TIMER))
    {
        priv->timer.expires = jiffies + ((HZ*CONFIG_MV_ETH_TIMER_PERIOD) / 1000); /* ms */
        add_timer(&priv->timer);
        priv->flags |= MV_ETH_F_TIMER;
    }

    /* connect to port interrupt line */
    if (request_irq(dev->irq, mv_eth_interrupt_handler,
        (IRQF_DISABLED | IRQF_SAMPLE_RANDOM), "mv_ethernet", priv)) {
        printk( KERN_ERR "cannot assign irq%d to %s port%d\n", dev->irq, dev->name, priv->port );
        dev->irq = 0;
    	goto error;
    }

    mv_eth_unmask_interrupts(priv);

    ETH_DBG( ETH_DBG_LOAD, ("%s: start ok\n", dev->name) );

    printk(KERN_NOTICE "%s: started\n", dev->name);

    return 0;

 error:

    printk( KERN_ERR "%s: start failed\n", dev->name );
    return -1;
}
Example #18
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		printk(KERN_ERR PFX "%s: Unable to request irq.\n",
			netdev->name);
		return err;
	}

	err = enic_notify_set(enic);
	if (err) {
		printk(KERN_ERR PFX
			"%s: Failed to alloc notify buffer, aborting.\n",
			netdev->name);
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_fill(&enic->rq[i], enic->rq_alloc_buf);
		if (err) {
			printk(KERN_ERR PFX
				"%s: Unable to alloc receive buffers.\n",
				netdev->name);
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	spin_lock(&enic->devcmd_lock);
	enic_add_station_addr(enic);
	spin_unlock(&enic->devcmd_lock);
	enic_set_multicast_list(netdev);

	netif_wake_queue(netdev);
	napi_enable(&enic->napi);
	spin_lock(&enic->devcmd_lock);
	vnic_dev_enable(enic->vdev);
	spin_unlock(&enic->devcmd_lock);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);

	return 0;

err_out_notify_unset:
	spin_lock(&enic->devcmd_lock);
	vnic_dev_notify_unset(enic->vdev);
	spin_unlock(&enic->devcmd_lock);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}
Example #19
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			int cq_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	char name[25];
	int timestamp_en = 0;
	struct cpu_rmap *rmap =
#ifdef CONFIG_RFS_ACCEL
		priv->dev->rx_cpu_rmap;
#else
		NULL;
#endif

	cq->dev = mdev->pndev[priv->port];
	cq->mcq.set_ci_db  = cq->wqres.db.db;
	cq->mcq.arm_db     = cq->wqres.db.db + 1;
	*cq->mcq.set_ci_db = 0;
	*cq->mcq.arm_db    = 0;
	memset(cq->buf, 0, cq->buf_size);

	if (cq->is_tx == RX) {
		if (mdev->dev->caps.comp_pool) {
			if (!cq->vector) {
				sprintf(name, "%s-%d", priv->dev->name,
					cq->ring);
				/* Set IRQ for specific name (per ring) */
				if (mlx4_assign_eq(mdev->dev, name, rmap,
						   &cq->vector)) {
					cq->vector = (cq->ring + 1 + priv->port)
					    % mdev->dev->caps.num_comp_vectors;
					mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
						  name);
				}

				cq->irq_desc =
					irq_to_desc(mlx4_eq_get_irq(mdev->dev,
								    cq->vector));
			}
		} else {
			cq->vector = (cq->ring + 1 + priv->port) %
				mdev->dev->caps.num_comp_vectors;
		}
	} else {
		/* For TX we use the same irq per
		ring we assigned for the RX    */
		struct mlx4_en_cq *rx_cq;

		cq_idx = cq_idx % priv->rx_ring_num;
		rx_cq = priv->rx_cq[cq_idx];
		cq->vector = rx_cq->vector;
	}

	if (!cq->is_tx)
		cq->size = priv->rx_ring[cq->ring]->actual_size;

	if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
	    (!cq->is_tx && priv->hwtstamp_config.rx_filter))
		timestamp_en = 1;

	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
			    &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
			    cq->vector, 0, timestamp_en);
	if (err)
		return err;

	cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
	cq->mcq.event = mlx4_en_cq_event;

	if (cq->is_tx) {
		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
			       NAPI_POLL_WEIGHT);
	} else {
		struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];

		err = irq_set_affinity_hint(cq->mcq.irq,
					    ring->affinity_mask);
		if (err)
			mlx4_warn(mdev, "Failed setting affinity hint\n");

		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
		napi_hash_add(&cq->napi);
	}

	napi_enable(&cq->napi);

	return 0;
}
Example #20
/**
 * arc_emac_open - Open the network device.
 * @ndev:	Pointer to the network device.
 *
 * returns: 0, on success or non-zero error value on failure.
 *
 * This function sets the MAC address, requests and enables an IRQ
 * for the EMAC device and starts the Tx queue.
 * It also connects to the phy device.
 */
static int arc_emac_open(struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	struct phy_device *phy_dev = ndev->phydev;
	int i;

	phy_dev->autoneg = AUTONEG_ENABLE;
	phy_dev->speed = 0;
	phy_dev->duplex = 0;
	linkmode_and(phy_dev->advertising, phy_dev->advertising,
		     phy_dev->supported);

	priv->last_rx_bd = 0;

	/* Allocate and set buffers for Rx BD's */
	for (i = 0; i < RX_BD_NUM; i++) {
		dma_addr_t addr;
		unsigned int *last_rx_bd = &priv->last_rx_bd;
		struct arc_emac_bd *rxbd = &priv->rxbd[*last_rx_bd];
		struct buffer_state *rx_buff = &priv->rx_buff[*last_rx_bd];

		rx_buff->skb = netdev_alloc_skb_ip_align(ndev,
							 EMAC_BUFFER_SIZE);
		if (unlikely(!rx_buff->skb))
			return -ENOMEM;

		addr = dma_map_single(&ndev->dev, (void *)rx_buff->skb->data,
				      EMAC_BUFFER_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(&ndev->dev, addr)) {
			netdev_err(ndev, "cannot dma map\n");
			dev_kfree_skb(rx_buff->skb);
			return -ENOMEM;
		}
		dma_unmap_addr_set(rx_buff, addr, addr);
		dma_unmap_len_set(rx_buff, len, EMAC_BUFFER_SIZE);

		rxbd->data = cpu_to_le32(addr);

		/* Make sure pointer to data buffer is set */
		wmb();

		/* Return ownership to EMAC */
		rxbd->info = cpu_to_le32(FOR_EMAC | EMAC_BUFFER_SIZE);

		*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
	}

	priv->txbd_curr = 0;
	priv->txbd_dirty = 0;

	/* Clean Tx BD's */
	memset(priv->txbd, 0, TX_RING_SZ);

	/* Initialize logical address filter */
	arc_reg_set(priv, R_LAFL, 0);
	arc_reg_set(priv, R_LAFH, 0);

	/* Set BD ring pointers for device side */
	arc_reg_set(priv, R_RX_RING, (unsigned int)priv->rxbd_dma);
	arc_reg_set(priv, R_TX_RING, (unsigned int)priv->txbd_dma);

	/* Enable interrupts */
	arc_reg_set(priv, R_ENABLE, RXINT_MASK | TXINT_MASK | ERR_MASK);

	/* Set CONTROL */
	arc_reg_set(priv, R_CTRL,
		    (RX_BD_NUM << 24) |	/* RX BD table length */
		    (TX_BD_NUM << 16) |	/* TX BD table length */
		    TXRN_MASK | RXRN_MASK);

	napi_enable(&priv->napi);

	/* Enable EMAC */
	arc_reg_or(priv, R_CTRL, EN_MASK);

	phy_start(ndev->phydev);

	netif_start_queue(ndev);

	return 0;
}
Example #21
static int eth_open(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct npe *npe = port->npe;
	struct msg msg;
	int i, err;

	if (!npe_running(npe)) {
		err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
		if (err)
			return err;

		if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
			printk(KERN_ERR "%s: %s not responding\n", dev->name,
			       npe_name(npe));
			return -EIO;
		}
		port->firmware[0] = msg.byte4;
		port->firmware[1] = msg.byte5;
		port->firmware[2] = msg.byte6;
		port->firmware[3] = msg.byte7;
	}

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
	msg.eth_id = port->id;
	msg.byte5 = port->plat->rxq | 0x80;
	msg.byte7 = port->plat->rxq << 4;
	for (i = 0; i < 8; i++) {
		msg.byte3 = i;
		if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
			return -EIO;
	}

	msg.cmd = NPE_EDB_SETPORTADDRESS;
	msg.eth_id = PHYSICAL_ID(port->id);
	msg.byte2 = dev->dev_addr[0];
	msg.byte3 = dev->dev_addr[1];
	msg.byte4 = dev->dev_addr[2];
	msg.byte5 = dev->dev_addr[3];
	msg.byte6 = dev->dev_addr[4];
	msg.byte7 = dev->dev_addr[5];
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
		return -EIO;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_FW_SETFIREWALLMODE;
	msg.eth_id = port->id;
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
		return -EIO;

	if ((err = request_queues(port)) != 0)
		return err;

	if ((err = init_queues(port)) != 0) {
		destroy_queues(port);
		release_queues(port);
		return err;
	}

	port->speed = 0;	/* force "link up" message */
	phy_start(port->phydev);

	for (i = 0; i < ETH_ALEN; i++)
		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
	__raw_writel(0x08, &port->regs->random_seed);
	__raw_writel(0x12, &port->regs->partial_empty_threshold);
	__raw_writel(0x30, &port->regs->partial_full_threshold);
	__raw_writel(0x08, &port->regs->tx_start_bytes);
	__raw_writel(0x15, &port->regs->tx_deferral);
	__raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
	__raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
	__raw_writel(0x80, &port->regs->slot_time);
	__raw_writel(0x01, &port->regs->int_clock_threshold);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(RXFREE_QUEUE(port->id),
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	__raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
	__raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
	__raw_writel(0, &port->regs->rx_control[1]);
	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);

	napi_enable(&port->napi);
	eth_set_mcast_list(dev);
	netif_start_queue(dev);

	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
		     eth_rx_irq, dev);
	if (!ports_open) {
		qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
			     eth_txdone_irq, NULL);
		qmgr_enable_irq(TXDONE_QUEUE);
	}
	ports_open++;
	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);
	return 0;
}
Example #22
static int rmnet_mhi_enable_iface(struct rmnet_mhi_private *rmnet_mhi_ptr)
{
	int ret = 0;
	struct rmnet_mhi_private **rmnet_mhi_ctxt = NULL;
	enum MHI_STATUS r = MHI_STATUS_SUCCESS;

	memset(tx_interrupts_count, 0, sizeof(tx_interrupts_count));
	memset(rx_interrupts_count, 0, sizeof(rx_interrupts_count));
	memset(rx_interrupts_in_masked_irq, 0,
	       sizeof(rx_interrupts_in_masked_irq));
	memset(rx_napi_skb_burst_min, 0, sizeof(rx_napi_skb_burst_min));
	memset(rx_napi_skb_burst_max, 0, sizeof(rx_napi_skb_burst_max));
	memset(tx_cb_skb_free_burst_min, 0, sizeof(tx_cb_skb_free_burst_min));
	memset(tx_cb_skb_free_burst_max, 0, sizeof(tx_cb_skb_free_burst_max));
	memset(tx_ring_full_count, 0, sizeof(tx_ring_full_count));
	memset(tx_queued_packets_count, 0, sizeof(tx_queued_packets_count));
	memset(rx_napi_budget_overflow, 0, sizeof(rx_napi_budget_overflow));

	rmnet_log(MSG_INFO, "Entered.\n");

	if (rmnet_mhi_ptr == NULL) {
		rmnet_log(MSG_CRITICAL, "Bad input args.\n");
		return -EINVAL;
	}

	rx_napi_skb_burst_min[rmnet_mhi_ptr->dev_index] = UINT_MAX;
	tx_cb_skb_free_burst_min[rmnet_mhi_ptr->dev_index] = UINT_MAX;

	skb_queue_head_init(&(rmnet_mhi_ptr->tx_buffers));
	skb_queue_head_init(&(rmnet_mhi_ptr->rx_buffers));

	if (rmnet_mhi_ptr->tx_client_handle != NULL) {
		rmnet_log(MSG_INFO,
			"Opening TX channel\n");
		r = mhi_open_channel(rmnet_mhi_ptr->tx_client_handle);
		if (r != MHI_STATUS_SUCCESS) {
			rmnet_log(MSG_CRITICAL,
				"Failed to start TX chan ret %d\n", r);
			goto mhi_tx_chan_start_fail;
		} else {
			rmnet_mhi_ptr->tx_enabled = 1;
		}
	}
	if (rmnet_mhi_ptr->rx_client_handle != NULL) {
		rmnet_log(MSG_INFO,
			"Opening RX channel\n");
		r = mhi_open_channel(rmnet_mhi_ptr->rx_client_handle);
		if (r != MHI_STATUS_SUCCESS) {
			rmnet_log(MSG_CRITICAL,
				"Failed to start RX chan ret %d\n", r);
			goto mhi_rx_chan_start_fail;
		} else {
			rmnet_mhi_ptr->rx_enabled = 1;
		}
	}
	rmnet_mhi_ptr->dev =
		alloc_netdev(sizeof(struct rmnet_mhi_private *),
			     RMNET_MHI_DEV_NAME,
			     NET_NAME_PREDICTABLE, rmnet_mhi_setup);
	if (!rmnet_mhi_ptr->dev) {
		rmnet_log(MSG_CRITICAL, "Network device allocation failed\n");
		ret = -ENOMEM;
		goto net_dev_alloc_fail;
	}

	rmnet_mhi_ctxt = netdev_priv(rmnet_mhi_ptr->dev);
	*rmnet_mhi_ctxt = rmnet_mhi_ptr;

	ret = dma_set_mask(&(rmnet_mhi_ptr->dev->dev),
						MHI_DMA_MASK);
	if (ret)
		rmnet_mhi_ptr->allocation_flags = GFP_KERNEL;
	else
		rmnet_mhi_ptr->allocation_flags = GFP_DMA;

	r = rmnet_mhi_init_inbound(rmnet_mhi_ptr);
	if (r) {
		rmnet_log(MSG_CRITICAL,
			"Failed to init inbound ret %d\n", r);
	}

	netif_napi_add(rmnet_mhi_ptr->dev, &(rmnet_mhi_ptr->napi),
		       rmnet_mhi_poll, MHI_NAPI_WEIGHT_VALUE);

	rmnet_mhi_ptr->mhi_enabled = 1;
	ret = register_netdev(rmnet_mhi_ptr->dev);
	if (ret) {
		rmnet_log(MSG_CRITICAL,
			  "Network device registration failed\n");
		goto net_dev_reg_fail;
	}
	napi_enable(&(rmnet_mhi_ptr->napi));

	rmnet_log(MSG_INFO, "Exited.\n");

	return 0;

net_dev_reg_fail:
	netif_napi_del(&(rmnet_mhi_ptr->napi));
	free_netdev(rmnet_mhi_ptr->dev);
net_dev_alloc_fail:
	mhi_close_channel(rmnet_mhi_ptr->rx_client_handle);
	rmnet_mhi_ptr->dev = NULL;
mhi_rx_chan_start_fail:
	mhi_close_channel(rmnet_mhi_ptr->tx_client_handle);
mhi_tx_chan_start_fail:
	rmnet_log(MSG_INFO, "Exited ret %d.\n", ret);
	return ret;
}
Example #23
static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool running = netif_running(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int ret = -EINVAL;
	int idx, epidx;

	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
		if (new_mtu <= fjes_support_mtu[idx]) {
			new_mtu = fjes_support_mtu[idx];
			if (new_mtu == netdev->mtu)
				return 0;

			ret = 0;
			break;
		}
	}

	if (ret)
		return ret;

	if (running) {
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
				~FJES_RX_MTU_CHANGING_DONE;
		}
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
		cancel_work_sync(&adapter->tx_stall_task);
		napi_disable(&adapter->napi);

		msleep(1000);

		netif_tx_stop_all_queues(netdev);
	}

	netdev->mtu = new_mtu;

	if (running) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr,
					    netdev->mtu);

			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
				FJES_RX_MTU_CHANGING_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		}

		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		napi_enable(&adapter->napi);
		napi_schedule(&adapter->napi);
	}

	return ret;
}
Example #24
File: en_main.c Project: Dyoed/ath
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
			      struct mlx5e_channel_param *cparam,
			      struct mlx5e_channel **cp)
{
	struct net_device *netdev = priv->netdev;
	int cpu = mlx5e_get_cpu(priv, ix);
	struct mlx5e_channel *c;
	int err;

	c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
	if (!c)
		return -ENOMEM;

	c->priv     = priv;
	c->ix       = ix;
	c->cpu      = cpu;
	c->pdev     = &priv->mdev->pdev->dev;
	c->netdev   = priv->netdev;
	c->mkey_be  = cpu_to_be32(priv->mr.key);
	c->num_tc   = priv->num_tc;

	netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

	err = mlx5e_open_tx_cqs(c, cparam);
	if (err)
		goto err_napi_del;

	err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
			    priv->params.rx_cq_moderation_usec,
			    priv->params.rx_cq_moderation_pkts);
	if (err)
		goto err_close_tx_cqs;
	c->rq.cq.sqrq = &c->rq;

	napi_enable(&c->napi);

	err = mlx5e_open_sqs(c, cparam);
	if (err)
		goto err_disable_napi;

	err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
	if (err)
		goto err_close_sqs;

	netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
	*cp = c;

	return 0;

err_close_sqs:
	mlx5e_close_sqs(c);

err_disable_napi:
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
	mlx5e_close_tx_cqs(c);

err_napi_del:
	netif_napi_del(&c->napi);
	kfree(c);

	return err;
}
Example #25
int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
			int cq_idx)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	int err = 0;
	char name[25];
	int timestamp_en = 0;
	bool assigned_eq = false;

	cq->dev = mdev->pndev[priv->port];
	cq->mcq.set_ci_db  = cq->wqres.db.db;
	cq->mcq.arm_db     = cq->wqres.db.db + 1;
	*cq->mcq.set_ci_db = 0;
	*cq->mcq.arm_db    = 0;
	memset(cq->buf, 0, cq->buf_size);

	if (cq->is_tx == RX) {
		if (!mlx4_is_eq_vector_valid(mdev->dev, priv->port,
					     cq->vector)) {
			cq->vector = cpumask_first(priv->rx_ring[cq->ring]->affinity_mask);

			err = mlx4_assign_eq(mdev->dev, priv->port,
					     MLX4_EQ_ID_TO_UUID(MLX4_EQ_ID_EN,
								priv->port,
								cq_idx),
					     mlx4_en_cq_eq_cb,
					     &priv->rx_cq[cq_idx],
					     &cq->vector);
			if (err) {
				mlx4_err(mdev, "Failed assigning an EQ to %s\n",
					 name);
				goto free_eq;
			}

			assigned_eq = true;
		}

		/* Set IRQ for specific name (per ring) */
		err = mlx4_rename_eq(mdev->dev, priv->port, cq->vector,
				     MLX4_EN_EQ_NAME_PRIORITY, "%s-%d",
				     priv->dev->name, cq->ring);

		if (err) {
			mlx4_warn(mdev, "Failed to rename EQ, continuing with default name\n");
			err = 0;
		}

#if defined(HAVE_IRQ_DESC_GET_IRQ_DATA) && defined(HAVE_IRQ_TO_DESC_EXPORTED)
		cq->irq_desc =
			irq_to_desc(mlx4_eq_get_irq(mdev->dev,
						    cq->vector));
#endif
	} else {
		/* For TX we use the same irq per
		ring we assigned for the RX    */
		struct mlx4_en_cq *rx_cq;

		cq_idx = cq_idx % priv->rx_ring_num;
		rx_cq = priv->rx_cq[cq_idx];
		cq->vector = rx_cq->vector;
	}

	if (!cq->is_tx)
		cq->size = priv->rx_ring[cq->ring]->actual_size;

	if ((cq->is_tx && priv->hwtstamp_config.tx_type) ||
	    (!cq->is_tx && priv->hwtstamp_config.rx_filter))
		timestamp_en = 1;

	err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt,
			    &mdev->priv_uar, cq->wqres.db.dma, &cq->mcq,
			    cq->vector, 0, timestamp_en, &cq->wqres.buf, false);
	if (err)
		goto free_eq;

	cq->mcq.comp  = cq->is_tx ? mlx4_en_tx_irq : mlx4_en_rx_irq;
	cq->mcq.event = mlx4_en_cq_event;

	if (cq->is_tx) {
		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
			       NAPI_POLL_WEIGHT);
	} else {
		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
#ifdef HAVE_NAPI_HASH_ADD
		napi_hash_add(&cq->napi);
#endif
	}

	napi_enable(&cq->napi);

	return 0;

free_eq:
	if (assigned_eq)
		mlx4_release_eq(mdev->dev, MLX4_EQ_ID_TO_UUID(
					MLX4_EQ_ID_EN, priv->port, cq_idx),
				cq->vector);
	cq->vector = mdev->dev->caps.num_comp_vectors;
	return err;
}
Example #26
/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
static int bcm_enet_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct sockaddr addr;
	struct device *kdev;
	struct phy_device *phydev;
	int i, ret;
	unsigned int size;
	char phy_id[MII_BUS_ID_SIZE + 3];
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	if (priv->has_phy) {
		/* connect to PHY */
		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 priv->mac_id ? "1" : "0", priv->phy_id);

		phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0,
				     PHY_INTERFACE_MODE_MII);

		if (IS_ERR(phydev)) {
			dev_err(kdev, "could not attach to PHY\n");
			return PTR_ERR(phydev);
		}

		/* mask with MAC supported features */
		phydev->supported &= (SUPPORTED_10baseT_Half |
				      SUPPORTED_10baseT_Full |
				      SUPPORTED_100baseT_Half |
				      SUPPORTED_100baseT_Full |
				      SUPPORTED_Autoneg |
				      SUPPORTED_Pause |
				      SUPPORTED_MII);
		phydev->advertising = phydev->supported;

		if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
			phydev->advertising |= SUPPORTED_Pause;
		else
			phydev->advertising &= ~SUPPORTED_Pause;

		dev_info(kdev, "attached PHY at address %d [%s]\n",
			 phydev->addr, phydev->drv->name);

		priv->old_link = 0;
		priv->old_duplex = -1;
		priv->old_pause = -1;
		priv->phydev = phydev;
	}

	/* mask all interrupts and request them */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
	if (ret)
		goto out_phy_disconnect;

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq;

	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
			  IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq_rx;

	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i));
		enet_writel(priv, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
	bcm_enet_set_mac_address(dev, &addr);

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	memset(p, 0, size);
	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	memset(p, 0, size);
	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate tx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dma_writel(priv, priv->rx_desc_dma,
			ENETDMA_RSTART_REG(priv->rx_chan));
	enet_dma_writel(priv, priv->tx_desc_dma,
			ENETDMA_RSTART_REG(priv->tx_chan));

	/* clear remaining state ram for rx & tx channel */
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));

	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->rx_chan));
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->tx_chan));

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG);
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->rx_chan));

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->tx_chan));

	/* make sure we enable napi before rx interrupt  */
	napi_enable(&priv->napi);

	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->tx_chan));

	if (priv->has_phy)
		phy_start(priv->phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	free_irq(dev->irq, dev);

out_phy_disconnect:
	phy_disconnect(priv->phydev);

	return ret;
}
Example #27
static int cpsw_ndo_open(struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int i, ret;
	u32 reg;

	cpsw_intr_disable(priv);
	netif_carrier_off(ndev);

	ret = clk_enable(priv->clk);
	if (ret < 0) {
		dev_err(priv->dev, "unable to turn on device clock\n");
		return ret;
	}

	reg = __raw_readl(&priv->regs->id_ver);

	dev_info(priv->dev, "initializing cpsw version %d.%d (%d)\n",
		 CPSW_MAJOR_VERSION(reg), CPSW_MINOR_VERSION(reg),
		 CPSW_RTL_VERSION(reg));

	/* initialize host and slave ports */
	cpsw_init_host_port(priv);
	for_each_slave(priv, cpsw_slave_open, priv);

	/* set up tx dma: fixed priority, zero rx buffer offset */
	cpdma_control_set(priv->dma, CPDMA_TX_PRIO_FIXED, 1);
	cpdma_control_set(priv->dma, CPDMA_RX_BUFFER_OFFSET, 0);

	/* disable priority elevation */
	__raw_writel(0, &priv->regs->ptype);

	/* enable statistics collection on all ports */
	__raw_writel(0x7, &priv->regs->stat_port_en);

	if (WARN_ON(!priv->data.rx_descs))
		priv->data.rx_descs = 128;

	for (i = 0; i < priv->data.rx_descs; i++) {
		struct sk_buff *skb;

		ret = -ENOMEM;
		skb = netdev_alloc_skb_ip_align(priv->ndev,
						priv->rx_packet_max);
		if (!skb)
			break;
		ret = cpdma_chan_submit(priv->rxch, skb, skb->data,
					skb_tailroom(skb), GFP_KERNEL);
		if (WARN_ON(ret < 0))
			break;
	}
	/* continue even if we didn't manage to submit all rx descriptors */
	cpsw_info(priv, ifup, "submitted %d rx descriptors\n", i);

	cpdma_ctlr_start(priv->dma);
	cpsw_intr_enable(priv);
	napi_enable(&priv->napi);
	cpdma_ctlr_eoi(priv->dma);

	return 0;
}
Example #28
static int octeon_mgmt_open(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union cvmx_mixx_ctl mix_ctl;
	union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode;
	union cvmx_mixx_oring1 oring1;
	union cvmx_mixx_iring1 iring1;
	union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl;
	union cvmx_mixx_irhwm mix_irhwm;
	union cvmx_mixx_orhwm mix_orhwm;
	union cvmx_mixx_intena mix_intena;
	struct sockaddr sa;

	/* Allocate ring buffers.  */
	p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->tx_ring)
		return -ENOMEM;
	p->tx_ring_handle =
		dma_map_single(p->dev, p->tx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			       DMA_BIDIRECTIONAL);
	p->tx_next = 0;
	p->tx_next_clean = 0;
	p->tx_current_fill = 0;


	p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			     GFP_KERNEL);
	if (!p->rx_ring)
		goto err_nomem;
	p->rx_ring_handle =
		dma_map_single(p->dev, p->rx_ring,
			       ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			       DMA_BIDIRECTIONAL);

	p->rx_next = 0;
	p->rx_next_fill = 0;
	p->rx_current_fill = 0;

	octeon_mgmt_reset_hw(p);

	mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);

	/* Bring it out of reset if needed. */
	if (mix_ctl.s.reset) {
		mix_ctl.s.reset = 0;
		cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);
		do {
			mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL);
		} while (mix_ctl.s.reset);
	}

	if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) {
		agl_gmx_inf_mode.u64 = 0;
		agl_gmx_inf_mode.s.en = 1;
		cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64);
	}
	if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X)
		|| OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) {
		/* Force compensation values, as they are not
		 * determined properly by HW
		 */
		union cvmx_agl_gmx_drv_ctl drv_ctl;

		drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL);
		if (p->port) {
			drv_ctl.s.byp_en1 = 1;
			drv_ctl.s.nctl1 = 6;
			drv_ctl.s.pctl1 = 6;
		} else {
			drv_ctl.s.byp_en = 1;
			drv_ctl.s.nctl = 6;
			drv_ctl.s.pctl = 6;
		}
		cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64);
	}

	oring1.u64 = 0;
	oring1.s.obase = p->tx_ring_handle >> 3;
	oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64);

	iring1.u64 = 0;
	iring1.s.ibase = p->rx_ring_handle >> 3;
	iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE;
	cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64);

	memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN);
	octeon_mgmt_set_mac_address(netdev, &sa);

	octeon_mgmt_change_mtu(netdev, netdev->mtu);

	/* Enable the port HW. Packets are not allowed until
	 * cvmx_mgmt_port_enable() is called.
	 */
	mix_ctl.u64 = 0;
	mix_ctl.s.crc_strip = 1;    /* Strip the ending CRC */
	mix_ctl.s.en = 1;           /* Enable the port */
	mix_ctl.s.nbtarb = 0;       /* Arbitration mode */
	/* MII CB-request FIFO programmable high watermark */
	mix_ctl.s.mrq_hwm = 1;
#ifdef __LITTLE_ENDIAN
	mix_ctl.s.lendian = 1;
#endif
	cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64);

	/* Read the PHY to find the mode of the interface. */
	if (octeon_mgmt_init_phy(netdev)) {
		dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port);
		goto err_noirq;
	}

	/* Set the mode of the interface, RGMII/MII. */
	if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) {
		union cvmx_agl_prtx_ctl agl_prtx_ctl;
		int rgmii_mode = (p->phydev->supported &
				  (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0;

		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1;
		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);

		/* MII clocks counts are based on the 125Mhz
		 * reference, which has an 8nS period. So our delays
		 * need to be multiplied by this factor.
		 */
#define NS_PER_PHY_CLK 8

		/* Take the DLL and clock tree out of reset */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.clkrst = 0;
		if (rgmii_mode) {
			agl_prtx_ctl.s.dllrst = 0;
			agl_prtx_ctl.s.clktx_byp = 0;
		}
		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);
		cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */

		/* Wait for the DLL to lock. External 125 MHz
		 * reference clock must be stable at this point.
		 */
		ndelay(256 * NS_PER_PHY_CLK);

		/* Enable the interface */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);
		agl_prtx_ctl.s.enable = 1;
		cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64);

		/* Read the value back to force the previous write */
		agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl);

		/* Enable the compensation controller */
		agl_prtx_ctl.s.comp = 1;
		agl_prtx_ctl.s.drv_byp = 0;
		cvmx_write_csr(p->agl_prt_ctl,	agl_prtx_ctl.u64);
		/* Force write out before wait. */
		cvmx_read_csr(p->agl_prt_ctl);

		/* For compensation state to lock. */
		ndelay(1040 * NS_PER_PHY_CLK);

		/* Default Interframe Gaps are too small.  Recommended
		 * workaround is.
		 *
		 * AGL_GMX_TX_IFG[IFG1]=14
		 * AGL_GMX_TX_IFG[IFG2]=10
		 */
		cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae);
	}

	octeon_mgmt_rx_fill_ring(netdev);

	/* Clear statistics. */
	/* Clear on read. */
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0);
	cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0);

	cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0);
	cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0);

	/* Clear any pending interrupts */
	cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR));

	if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name,
			netdev)) {
		dev_err(p->dev, "request_irq(%d) failed.\n", p->irq);
		goto err_noirq;
	}

	/* Interrupt every single RX packet */
	mix_irhwm.u64 = 0;
	mix_irhwm.s.irhwm = 0;
	cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64);

	/* Interrupt when we have 1 or more packets to clean.  */
	mix_orhwm.u64 = 0;
	mix_orhwm.s.orhwm = 0;
	cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64);

	/* Enable receive and transmit interrupts */
	mix_intena.u64 = 0;
	mix_intena.s.ithena = 1;
	mix_intena.s.othena = 1;
	cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64);

	/* Enable packet I/O. */

	rxx_frm_ctl.u64 = 0;
	rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0;
	rxx_frm_ctl.s.pre_align = 1;
	/* When set, disables the length check for non-min sized pkts
	 * with padding in the client data.
	 */
	rxx_frm_ctl.s.pad_len = 1;
	/* When set, disables the length check for VLAN pkts */
	rxx_frm_ctl.s.vlan_len = 1;
	/* When set, PREAMBLE checking is  less strict */
	rxx_frm_ctl.s.pre_free = 1;
	/* Control Pause Frames can match station SMAC */
	rxx_frm_ctl.s.ctl_smac = 0;
	/* Control Pause Frames can match globally assign Multicast address */
	rxx_frm_ctl.s.ctl_mcst = 1;
	/* Forward pause information to TX block */
	rxx_frm_ctl.s.ctl_bck = 1;
	/* Drop Control Pause Frames */
	rxx_frm_ctl.s.ctl_drp = 1;
	/* Strip off the preamble */
	rxx_frm_ctl.s.pre_strp = 1;
	/* This port is configured to send PREAMBLE+SFD to begin every
	 * frame.  GMX checks that the PREAMBLE is sent correctly.
	 */
	rxx_frm_ctl.s.pre_chk = 1;
	cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64);

	/* Configure the port duplex, speed and enables */
	octeon_mgmt_disable_link(p);
	if (p->phydev)
		octeon_mgmt_update_link(p);
	octeon_mgmt_enable_link(p);

	p->last_link = 0;
	p->last_speed = 0;
	/* PHY is not present in simulator. The carrier is enabled
	 * while initializing the phy for simulator, leave it enabled.
	 */
	if (p->phydev) {
		netif_carrier_off(netdev);
		phy_start_aneg(p->phydev);
	}

	netif_wake_queue(netdev);
	napi_enable(&p->napi);

	return 0;
err_noirq:
	octeon_mgmt_reset_hw(p);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);
err_nomem:
	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);
	return -ENOMEM;
}
Example #29
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	ibmveth_debug_printk("open starting\n");

	napi_enable(&adapter->napi);

	for(i = 0; i<IbmVethNumBufferPools; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);

	if(!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		ibmveth_error_printk("unable to allocate filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) * rxq_entries;
	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len, GFP_KERNEL);

	if(!adapter->rx_queue.queue_addr) {
		ibmveth_error_printk("unable to allocate rx queue pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	dev = &adapter->vdev->dev;

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->rx_queue.queue_dma = dma_map_single(dev,
			adapter->rx_queue.queue_addr,
			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma)) ||
	    (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
		ibmveth_error_printk("unable to map filter or buffer list pages\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID | adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	ibmveth_debug_printk("buffer list @ 0x%p\n", adapter->buffer_list_addr);
	ibmveth_debug_printk("filter list @ 0x%p\n", adapter->filter_list_addr);
	ibmveth_debug_printk("receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if(lpar_rc != H_SUCCESS) {
		ibmveth_error_printk("h_register_logical_lan failed with %ld\n", lpar_rc);
		ibmveth_error_printk("buffer TCE:0x%llx filter TCE:0x%llx rxq desc:0x%llx MAC:0x%llx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     rxq_desc.desc,
				     mac_address);
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENONET;
	}

	for(i = 0; i<IbmVethNumBufferPools; i++) {
		if(!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			ibmveth_error_printk("unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			ibmveth_cleanup(adapter);
			napi_disable(&adapter->napi);
			return -ENOMEM ;
		}
	}

	ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
	if((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
		ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return rc;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		ibmveth_error_printk("unable to allocate bounce buffer\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		ibmveth_error_printk("unable to map bounce buffer\n");
		ibmveth_cleanup(adapter);
		napi_disable(&adapter->napi);
		return -ENOMEM;
	}

	ibmveth_debug_printk("initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	ibmveth_debug_printk("open complete\n");

	return 0;
}
Example #30
static int __wil_up(struct wil6210_priv *wil)
{
	struct net_device *ndev = wil_to_ndev(wil);
	struct wireless_dev *wdev = wil->wdev;

	int rc;
	int bi;
	u16 wmi_nettype = wil_iftype_nl2wmi(wdev->iftype);

	rc = wil_reset(wil);
	if (rc)
		return rc;

	/* Rx VRING. After MAC and beacon */
	rc = wil_rx_init(wil);
	if (rc)
		return rc;

	/* FIXME Firmware works now in PBSS mode(ToDS=0, FromDS=0) */
	wmi_nettype = wil_iftype_nl2wmi(NL80211_IFTYPE_ADHOC);
	switch (wdev->iftype) {
	case NL80211_IFTYPE_STATION:
		wil_dbg_misc(wil, "type: STATION\n");
		bi = 0;
		ndev->type = ARPHRD_ETHER;
		break;
	case NL80211_IFTYPE_AP:
		wil_dbg_misc(wil, "type: AP\n");
		bi = 100;
		ndev->type = ARPHRD_ETHER;
		break;
	case NL80211_IFTYPE_P2P_CLIENT:
		wil_dbg_misc(wil, "type: P2P_CLIENT\n");
		bi = 0;
		ndev->type = ARPHRD_ETHER;
		break;
	case NL80211_IFTYPE_P2P_GO:
		wil_dbg_misc(wil, "type: P2P_GO\n");
		bi = 100;
		ndev->type = ARPHRD_ETHER;
		break;
	case NL80211_IFTYPE_MONITOR:
		wil_dbg_misc(wil, "type: Monitor\n");
		bi = 0;
		ndev->type = ARPHRD_IEEE80211_RADIOTAP;
		/* ARPHRD_IEEE80211 or ARPHRD_IEEE80211_RADIOTAP ? */
		break;
	default:
		return -EOPNOTSUPP;
	}

	/* Apply profile in the following order: */
	/* SSID and channel for the AP */
	switch (wdev->iftype) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_P2P_GO:
		if (wdev->ssid_len > 0 &&
		    wdev->ssid_len <= IEEE80211_MAX_SSID_LEN) {
			rc = wmi_set_ssid(wil, wdev->ssid_len, wdev->ssid);
			if (rc)
				return rc;
		}
		break;
	default:
		break;
	}

	/* MAC address - pre-requisite for other commands */
	wmi_set_mac_address(wil, ndev->dev_addr);

	/* Set up beaconing if required. */
#if 0 	
	if (bi > 0) {

		 rc = wmi_pcp_start(wil, bi, wmi_nettype,
				   (channel ? channel->hw_value : 0));
		if (rc)
			return rc;
	} 
#endif
	napi_enable(&wil->napi_rx);
	napi_enable(&wil->napi_tx);

	return 0;
}