Example #1
static int epic_open(struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	long ioaddr = dev->base_addr;
	int i;
	int retval;

	/* Soft reset the chip. */
	outl(0x4001, ioaddr + GENCTL);

	napi_enable(&ep->napi);
	if ((retval = request_irq(dev->irq, &epic_interrupt, IRQF_SHARED, dev->name, dev))) {
		napi_disable(&ep->napi);
		return retval;
	}

	epic_init_ring(dev);

	outl(0x4000, ioaddr + GENCTL);
	/* This magic is documented in SMSC app note 7.15 */
	for (i = 16; i > 0; i--)
		outl(0x0008, ioaddr + TEST1);

	/* Pull the chip out of low-power mode, enable interrupts, and set for
	   PCI read multiple.  The MIIcfg setting and strange write order are
	   required by the details of which bits are reset and the transceiver
	   wiring on the Ositech CardBus card.
	*/
#if 0
	outl(dev->if_port == 1 ? 0x13 : 0x12, ioaddr + MIICfg);
#endif
	if (ep->chip_flags & MII_PWRDWN)
		outl((inl(ioaddr + NVCTL) & ~0x003C) | 0x4800, ioaddr + NVCTL);

#if defined(__powerpc__) || defined(__sparc__)		/* Big endian */
	outl(0x4432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
	inl(ioaddr + GENCTL);
	outl(0x0432 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#else
	outl(0x4412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
	inl(ioaddr + GENCTL);
	outl(0x0412 | (RX_FIFO_THRESH<<8), ioaddr + GENCTL);
#endif

	udelay(20); /* Looks like EPII needs that if you want reliable RX init. FIXME: pci posting bug? */

	for (i = 0; i < 3; i++)
		outl(cpu_to_le16(((u16*)dev->dev_addr)[i]), ioaddr + LAN0 + i*4);

	ep->tx_threshold = TX_FIFO_THRESH;
	outl(ep->tx_threshold, ioaddr + TxThresh);

	if (media2miictl[dev->if_port & 15]) {
		if (ep->mii_phy_cnt)
			mdio_write(dev, ep->phys[0], MII_BMCR, media2miictl[dev->if_port&15]);
		if (dev->if_port == 1) {
			if (debug > 1)
				printk(KERN_INFO "%s: Using the 10base2 transceiver, MII "
					   "status %4.4x.\n",
					   dev->name, mdio_read(dev, ep->phys[0], MII_BMSR));
		}
	} else {
		int mii_lpa = mdio_read(dev, ep->phys[0], MII_LPA);
		if (mii_lpa != 0xffff) {
			if ((mii_lpa & LPA_100FULL) || (mii_lpa & 0x01C0) == LPA_10FULL)
				ep->mii.full_duplex = 1;
			else if (! (mii_lpa & LPA_LPACK))
				mdio_write(dev, ep->phys[0], MII_BMCR, BMCR_ANENABLE|BMCR_ANRESTART);
			if (debug > 1)
				printk(KERN_INFO "%s: Setting %s-duplex based on MII xcvr %d"
					   " register read of %4.4x.\n", dev->name,
					   ep->mii.full_duplex ? "full" : "half",
					   ep->phys[0], mii_lpa);
		}
	}

	outl(ep->mii.full_duplex ? 0x7F : 0x79, ioaddr + TxCtrl);
	outl(ep->rx_ring_dma, ioaddr + PRxCDAR);
	outl(ep->tx_ring_dma, ioaddr + PTxCDAR);

	/* Start the chip's Rx process. */
	set_rx_mode(dev);
	outl(StartRx | RxQueued, ioaddr + COMMAND);

	netif_start_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	outl((ep->chip_flags & TYPE2_INTR ? PCIBusErr175 : PCIBusErr170)
		 | CntFull | TxUnderrun
		 | RxError | RxHeader | EpicNapiEvent, ioaddr + INTMASK);

	if (debug > 1)
		printk(KERN_DEBUG "%s: epic_open() ioaddr %lx IRQ %d status %4.4x "
			   "%s-duplex.\n",
			   dev->name, ioaddr, dev->irq, (int)inl(ioaddr + GENCTL),
			   ep->mii.full_duplex ? "full" : "half");

	/* Set the timer to check for link beat and perhaps switch
	   to an alternate media type. */
	init_timer(&ep->timer);
	ep->timer.expires = jiffies + 3*HZ;
	ep->timer.data = (unsigned long)dev;
	ep->timer.function = &epic_timer;				/* timer handler */
	add_timer(&ep->timer);

	return 0;
}
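Note: the open path above enables NAPI before requesting the IRQ and rolls that back if request_irq() fails. Below is a minimal sketch of that ordering only, assuming a hypothetical driver with foo_private and foo_interrupt (these names are not from the example):

static int foo_open(struct net_device *dev)
{
	struct foo_private *fp = netdev_priv(dev);	/* hypothetical private struct */
	int err;

	/* enable NAPI first so the IRQ handler may safely schedule polling */
	napi_enable(&fp->napi);

	err = request_irq(dev->irq, foo_interrupt, IRQF_SHARED, dev->name, dev);
	if (err) {
		napi_disable(&fp->napi);	/* undo the enable on failure */
		return err;
	}

	/* ... program the hardware, then allow transmits ... */
	netif_start_queue(dev);
	return 0;
}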
Example #2
static int qos_del_qdisc(ASF_uint32_t  ulVsgId,
			ASFQOSDeleteQdisc_t *qdisc)
{
	struct  asf_qdisc *root;
	uint32_t i;
	int	bLockFlag;

	root = qdisc->dev->asf_qdisc;
	if (!root) {
		asf_err("Qdisc not exists.\n");
		return ASFQOS_FAILURE;
	}

	if (qdisc->qdisc_type != ASF_QDISC_TBF) {
		asf_err("Unsupported Qdisc Delete Command.\n");
		return ASFQOS_SUCCESS;
	}
	ASF_RCU_READ_LOCK(bLockFlag);
	/* If root Qdisc is TBF then simply delete Shaper */
	if (root->qdisc_type == ASF_QDISC_TBF) {
		root->dev->asf_qdisc = NULL;
		kfree(root->priv);
		del_timer(&(root->timer));
		/* NAPI */
		napi_disable(&(root->qos_napi));
		netif_napi_del(&(root->qos_napi));

		if (root->pShaper)
			kfree(root->pShaper);
		for (i = 0; i < ASF_MAX_IFACES; i++) {
			if (qdisc_in_use[i] == root) {
				spin_lock(&cnt_lock);
				qdisc_in_use[i] = NULL;
				qdisc_cnt--;
				spin_unlock(&cnt_lock);
				asf_debug("Deleted Qdisc at index %d, qdisc_cnt %d\n",
					i, qdisc_cnt);
				break;
			}
		}
		kfree(root);
	} else {
		/* If the root is not TBF, it means a SCHEDULER has been
		   configured over the SHAPER */
		if (qdisc->parent != ROOT_ID) {
			/* Should be Queue Level Shaper */
			switch (root->qdisc_type) {
			case ASF_QDISC_PRIO:
			case ASF_QDISC_PRIO_DRR:
			case ASF_QDISC_DRR:
			{
				struct  asf_prio_sched_data *root_priv;
				/* Find out the Queue, to which shaper need to apply */
				i = qdisc->parent & MINOR_ID;
				i--; /* Index value */

				/* Deleting Per Queue Shaper */
				root_priv = root->priv;
				if (root_priv->q[i].shaper) {
					kfree(root_priv->q[i].shaper);
					root_priv->q[i].shaper = NULL;
				}

			}
			break;
			default:
				asf_err("Ohh.., Unsupported Parent Qdisc\n");
			}

			ASF_RCU_READ_UNLOCK(bLockFlag);
			return ASFQOS_SUCCESS;
		}
		/* ELSE Request for Deleting Root TBF Shaper */
		/* Delete Inactive Shaper Qdisc */
		for (i = 0; i < ASF_MAX_IFACES; i++) {
			struct  asf_qdisc *x = qdisc_in_use[i];
			if (!x)
				continue;
			if ((x->handle == qdisc->handle) &&
					(x->dev == qdisc->dev)) {
				spin_lock(&cnt_lock);
				qdisc_in_use[i] = NULL;
				qdisc_cnt--;
				spin_unlock(&cnt_lock);
				asf_debug("Deleted Qdisc at index %d, qdisc_cnt %d\n",
					i, qdisc_cnt);
				kfree(x);
				break;
			}
		}
		/* Now delete Child Qdisc */
		qos_flush_qdisc(ulVsgId, qdisc);
	}
	ASF_RCU_READ_UNLOCK(bLockFlag);
	return ASFQOS_SUCCESS;
}
Example #3
static int qos_flush_qdisc(ASF_uint32_t  ulVsgId,
			ASFQOSDeleteQdisc_t *qdisc)
{
	struct  asf_qdisc *root;
	int	i;
	int	bLockFlag;

	/* Root Qdisc  */
	root = qdisc->dev->asf_qdisc;
	if (!root) {
		asf_err("Qdisc not exists.\n");
		return ASFQOS_SUCCESS;
	}

	ASF_RCU_READ_LOCK(bLockFlag);

	qdisc->dev->asf_qdisc = NULL;

	/* Destroying Shaper */
	switch (root->qdisc_type) {
	case ASF_QDISC_PRIO_DRR:
	case ASF_QDISC_DRR:
	case ASF_QDISC_PRIO:
	{
		struct  asf_prio_sched_data *root_priv;

		root_priv = root->priv;
		for (i = 0; i < ASF_PRIO_MAX; i++) {
			if (root_priv->q[i].shaper)
				kfree(root_priv->q[i].shaper);
		}

	}
	break;
	default:
		asf_err("Ohh.., Unsupported Parent Qdisc\n");
	}

	kfree(root->priv);
	del_timer(&(root->timer));
	/* NAPI */
	napi_disable(&(root->qos_napi));
	netif_napi_del(&(root->qos_napi));

	for (i = 0; i < ASF_MAX_IFACES; i++) {
		if (qdisc_in_use[i] == root) {
			spin_lock(&cnt_lock);
			qdisc_in_use[i] = NULL;
			qdisc_cnt--;
			spin_unlock(&cnt_lock);
			asf_debug("Deleted Qdisc at index %d, qdisc_cnt %d\n",
					i, qdisc_cnt);
			break;
		}
	}
	/* IF Port Shaper exists, Set it as Root Qdisc */
	for (i = 0; i < ASF_MAX_IFACES; i++) {
		struct  asf_qdisc *x = qdisc_in_use[i];
		if (x && (x->dev == qdisc->dev)) {
			/* Configure De-queue NAPI */
			netif_napi_add(x->dev, &(x->qos_napi),
					prio_tx_napi, qos_budget);
			napi_enable(&(x->qos_napi));

			setup_timer(&x->timer, timer_handler,
					(unsigned long)x);
			qdisc->dev->asf_qdisc = x;
			break;
		}
	}

	kfree(root);
	ASF_RCU_READ_UNLOCK(bLockFlag);

	return ASFQOS_SUCCESS;
}
Example #4
static int cpmac_poll(struct napi_struct *napi, int budget)
{
	struct sk_buff *skb;
	struct cpmac_desc *desc, *restart;
	struct cpmac_priv *priv = container_of(napi, struct cpmac_priv, napi);
	int received = 0, processed = 0;

	spin_lock(&priv->rx_lock);
	if (unlikely(!priv->rx_head)) {
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx: polling, but no queue\n",
			       priv->dev->name);
		spin_unlock(&priv->rx_lock);
		napi_complete(napi);
		return 0;
	}

	desc = priv->rx_head;
	restart = NULL;
	while (((desc->dataflags & CPMAC_OWN) == 0) && (received < budget)) {
		processed++;

		if ((desc->dataflags & CPMAC_EOQ) != 0) {
			/* The last update to eoq->hw_next didn't happen
			 * soon enough, and the receiver stopped here.
			 * Remember this descriptor so we can restart
			 * the receiver after freeing some space.
			 */
			if (unlikely(restart)) {
				if (netif_msg_rx_err(priv))
					printk(KERN_ERR "%s: poll found a"
						" duplicate EOQ: %p and %p\n",
						priv->dev->name, restart, desc);
				goto fatal_error;
			}

			restart = desc->next;
		}

		skb = cpmac_rx_one(priv, desc);
		if (likely(skb)) {
			netif_receive_skb(skb);
			received++;
		}
		desc = desc->next;
	}

	if (desc != priv->rx_head) {
		/* We freed some buffers, but not the whole ring,
		 * add what we did free to the rx list */
		desc->prev->hw_next = (u32)0;
		priv->rx_head->prev->hw_next = priv->rx_head->mapping;
	}

	/* Optimization: If we did not actually process an EOQ (perhaps because
	 * of quota limits), check to see if the tail of the queue has EOQ set.
	 * We should immediately restart in that case so that the receiver can
	 * restart and run in parallel with more packet processing.
	 * This lets us handle slightly larger bursts before running
	 * out of ring space (assuming dev->weight < ring_size) */

	if (!restart &&
	     (priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
		    == CPMAC_EOQ &&
	     (priv->rx_head->dataflags & CPMAC_OWN) != 0) {
		/* reset EOQ so the poll loop (above) doesn't try to
		 * restart this when it eventually gets to this descriptor.
		 */
		priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
		restart = priv->rx_head;
	}

	if (restart) {
		priv->dev->stats.rx_errors++;
		priv->dev->stats.rx_fifo_errors++;
		if (netif_msg_rx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: rx dma ring overrun\n",
			       priv->dev->name);

		if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
			if (netif_msg_drv(priv))
				printk(KERN_ERR "%s: cpmac_poll is trying to "
					"restart rx from a descriptor that's "
					"not free: %p\n",
					priv->dev->name, restart);
			goto fatal_error;
		}

		cpmac_write(priv->regs, CPMAC_RX_PTR(0), restart->mapping);
	}

	priv->rx_head = desc;
	spin_unlock(&priv->rx_lock);
	if (unlikely(netif_msg_rx_status(priv)))
		printk(KERN_DEBUG "%s: poll processed %d packets\n",
		       priv->dev->name, received);
	if (processed == 0) {
		/* we ran out of packets to read,
		 * revert to interrupt-driven mode */
		napi_complete(napi);
		cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
		return 0;
	}

	return 1;

fatal_error:
	/* Something went horribly wrong.
	 * Reset hardware to try to recover rather than wedging. */

	if (netif_msg_drv(priv)) {
		printk(KERN_ERR "%s: cpmac_poll is confused. "
				"Resetting hardware\n", priv->dev->name);
		cpmac_dump_all_desc(priv->dev);
		printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
			priv->dev->name,
			cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
			cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
	}

	spin_unlock(&priv->rx_lock);
	napi_complete(napi);
	netif_tx_stop_all_queues(priv->dev);
	napi_disable(&priv->napi);

	atomic_inc(&priv->reset_pending);
	cpmac_hw_stop(priv->dev);
	if (!schedule_work(&priv->reset_work))
		atomic_dec(&priv->reset_pending);
	return 0;

}
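Note: cpmac_poll() above follows the standard NAPI poll contract: consume at most budget packets, and only leave polled mode (napi_complete() plus re-enabling the RX interrupt) once the ring is drained. A stripped-down sketch of that contract, with hypothetical foo_* helpers standing in for the driver-specific parts:

static int foo_poll(struct napi_struct *napi, int budget)
{
	struct foo_private *fp = container_of(napi, struct foo_private, napi);
	int work_done = 0;

	while (work_done < budget) {
		struct sk_buff *skb = foo_rx_one(fp);	/* hypothetical: pull one frame */
		if (!skb)
			break;				/* ring empty */
		netif_receive_skb(skb);
		work_done++;
	}

	if (work_done < budget) {
		/* ring drained before the budget ran out: leave polled mode */
		napi_complete(napi);
		foo_enable_rx_irq(fp);			/* hypothetical: unmask RX IRQ */
	}
	return work_done;
}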
Example #5
static int qos_create_sch(ASF_uint32_t  ulVsgId,
			ASFQOSCreateQdisc_t *qdisc)
{
	struct  asf_qdisc *prio_root, *root =
			qdisc->dev->asf_qdisc;
	int i, replace_qdisc = 0;

	if (root) {
		if ((qdisc->parent == ROOT_ID) ||
			(root->qdisc_type != ASF_QDISC_TBF)) {
			asf_err("Root Qdisc already exists on dev %s\n",
						qdisc->dev->name);
			return ASFQOS_FAILURE;
		} else
			replace_qdisc = 1;
	}
	if (qdisc_cnt  >= ASF_MAX_IFACES) {
		asf_err("NO more Qdisc supported: limit[%d] reached\n",
							ASF_MAX_IFACES);
		return ASFQOS_FAILURE;
	}
	/* Now allocate Root Qdisc  */
	prio_root = (struct asf_qdisc *)
		kzalloc(sizeof(struct  asf_qdisc), GFP_KERNEL);
	if (NULL == prio_root) {
		asf_err("OHHHH   NO Memory for Root Qdisc\n");
		return ASFQOS_FAILURE;
	}
	/* fill up the structure data */
	prio_root->enqueue = qos_enqueue;
	prio_root->dequeue = qos_dequeue;
	prio_root->qdisc_type = qdisc->qdisc_type;
	prio_root->handle = qdisc->handle;
	prio_root->parent = qdisc->parent;
	prio_root->state = SCH_READY;
	prio_root->dev = qdisc->dev;
	prio_root->pShaper = NULL;

	switch (qdisc->qdisc_type) {
	case ASF_QDISC_PRIO:
	{
		struct  asf_prio_sched_data *prio_priv;

		prio_priv = (struct  asf_prio_sched_data *)
				kzalloc(sizeof(struct  asf_prio_sched_data),
				GFP_KERNEL);
		if (NULL == prio_priv) {
			asf_err("OHHHH   NO Memory for PRIV\n");
			kfree(prio_root);
			return ASFQOS_FAILURE;
		}

		prio_priv->bands = qdisc->u.prio.bands;
		for (i = 0; i < ASF_PRIO_MAX; i++) {
			prio_priv->q[i].head = NULL;
			prio_priv->q[i].tail = NULL;
			prio_priv->q[i].queue_size = 0;
			prio_priv->q[i].max_queue_size = queue_len;
			prio_priv->q[i].shaper = NULL;


			spin_lock_init(&(prio_priv->q[i].lock));
		}
		prio_root->priv = prio_priv;

		/* Configure De-queue NAPI */
		netif_napi_add(qdisc->dev, &(prio_root->qos_napi),
					prio_tx_napi, qos_budget);
		napi_enable(&(prio_root->qos_napi));

		setup_timer(&prio_root->timer, timer_handler,
					(unsigned long)prio_root);
	}
	break;

	case ASF_QDISC_PRIO_DRR:
	case ASF_QDISC_DRR:
	{
		struct  asf_prio_drr_sched_data *prio_priv;

		prio_priv = (struct  asf_prio_drr_sched_data *)
				kzalloc(sizeof(struct  asf_prio_drr_sched_data),
				GFP_KERNEL);
		if (NULL == prio_priv) {
			asf_err("OHHHH   NO Memory for PRIV\n");
			kfree(prio_root);
			return ASFQOS_FAILURE;
		}

		prio_priv->bands = 0;
		for (i = 0; i < ASF_PRIO_MAX; i++) {
			prio_priv->q[i].head = NULL;
			prio_priv->q[i].tail = NULL;
			prio_priv->q[i].queue_size = 0;
			prio_priv->q[i].max_queue_size = queue_len;
			prio_priv->q[i].shaper = NULL;
			prio_priv->q[i].classid = 0;
			spin_lock_init(&(prio_priv->q[i].lock));
		}
		prio_root->priv = prio_priv;
		/* Configure De-queue NAPI */
		netif_napi_add(qdisc->dev, &(prio_root->qos_napi),
					prio_drr_tx_napi, qos_budget);
		napi_enable(&(prio_root->qos_napi));

		setup_timer(&prio_root->timer, timer_handler,
					(unsigned long)prio_root);
	}
	break;
	default:
		asf_err("OHHHH, INVALID Scheduler Qdisc Type\n");
		kfree(prio_root);
		return ASFQOS_FAILURE;
	}

	/* Telling net_device to use this root qdisc */
	if (replace_qdisc) {
		prio_root->pShaper = root->pShaper;
		del_timer(&(root->timer));
		/* NAPI */
		napi_disable(&(root->qos_napi));
		netif_napi_del(&(root->qos_napi));
	}
	prio_root->dev->asf_qdisc = prio_root;

	for (i = 0; i < ASF_MAX_IFACES; i++) {
		if (qdisc_in_use[i] == NULL) {
			spin_lock(&cnt_lock);
			qdisc_in_use[i] = prio_root;
			qdisc_cnt++;
			spin_unlock(&cnt_lock);
			break;
		}
	}
	asf_debug("CPU [%d]:ASF PRIO[%d][%s]: handle = 0x%X\n parent = 0x%X,"
			" bands = %d\n", smp_processor_id(), qdisc->qdisc_type,
			qdisc->dev->name, qdisc->handle,
			qdisc->parent, qdisc->u.prio.bands);

	return 0;
}
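Note: the registration sequence above (netif_napi_add(), napi_enable(), setup_timer()) is how the scheduler attaches its de-queue poll handler. A reduced sketch of the same pattern with hypothetical names, using the older netif_napi_add() signature that takes an explicit weight, as in the example:

static int foo_tx_napi(struct napi_struct *napi, int budget)
{
	/* dequeue and transmit up to budget frames here ... */
	napi_complete(napi);
	return 0;
}

static void foo_attach_napi(struct net_device *dev, struct napi_struct *napi)
{
	netif_napi_add(dev, napi, foo_tx_napi, 64);	/* 64 = poll weight */
	napi_enable(napi);
}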
Example #6
static int fjes_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	bool running = netif_running(netdev);
	struct fjes_hw *hw = &adapter->hw;
	unsigned long flags;
	int ret = -EINVAL;
	int idx, epidx;

	for (idx = 0; fjes_support_mtu[idx] != 0; idx++) {
		if (new_mtu <= fjes_support_mtu[idx]) {
			new_mtu = fjes_support_mtu[idx];
			if (new_mtu == netdev->mtu)
				return 0;

			ret = 0;
			break;
		}
	}

	if (ret)
		return ret;

	if (running) {
		spin_lock_irqsave(&hw->rx_status_lock, flags);
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;
			hw->ep_shm_info[epidx].tx.info->v1i.rx_status &=
				~FJES_RX_MTU_CHANGING_DONE;
		}
		spin_unlock_irqrestore(&hw->rx_status_lock, flags);

		netif_tx_stop_all_queues(netdev);
		netif_carrier_off(netdev);
		cancel_work_sync(&adapter->tx_stall_task);
		napi_disable(&adapter->napi);

		msleep(1000);

		netif_tx_stop_all_queues(netdev);
	}

	netdev->mtu = new_mtu;

	if (running) {
		for (epidx = 0; epidx < hw->max_epid; epidx++) {
			if (epidx == hw->my_epid)
				continue;

			spin_lock_irqsave(&hw->rx_status_lock, flags);
			fjes_hw_setup_epbuf(&hw->ep_shm_info[epidx].tx,
					    netdev->dev_addr,
					    netdev->mtu);

			hw->ep_shm_info[epidx].tx.info->v1i.rx_status |=
				FJES_RX_MTU_CHANGING_DONE;
			spin_unlock_irqrestore(&hw->rx_status_lock, flags);
		}

		netif_tx_wake_all_queues(netdev);
		netif_carrier_on(netdev);
		napi_enable(&adapter->napi);
		napi_schedule(&adapter->napi);
	}

	return ret;
}
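Note: fjes_change_mtu() quiesces TX and NAPI before touching the receive buffers, then re-enables them and kicks the poll loop so anything that arrived in the meantime is processed. Condensed into a sketch with hypothetical names:

static int foo_change_mtu(struct net_device *dev, int new_mtu)
{
	struct foo_private *fp = netdev_priv(dev);
	bool running = netif_running(dev);

	if (running) {
		netif_tx_stop_all_queues(dev);
		napi_disable(&fp->napi);	/* wait out any in-flight poll */
	}

	dev->mtu = new_mtu;
	foo_resize_rx_buffers(fp, new_mtu);	/* hypothetical helper */

	if (running) {
		napi_enable(&fp->napi);
		napi_schedule(&fp->napi);	/* pick up anything queued meanwhile */
		netif_tx_wake_all_queues(dev);
	}
	return 0;
}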
Example #7
void drv_close()
{
	dev_down();		/* bring the device/queues down */
	napi_disable();		/* quiesce the poll handler */
	free_irq();		/* then release the interrupt */
}
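Note: the schematic above abbreviates the usual teardown order. A fuller sketch of the same close path with hypothetical names, showing where the real arguments go:

static int foo_stop(struct net_device *dev)
{
	struct foo_private *fp = netdev_priv(dev);

	netif_stop_queue(dev);		/* the "dev_down" step */
	napi_disable(&fp->napi);	/* blocks until any running poll finishes */
	free_irq(dev->irq, dev);	/* release the interrupt last */
	return 0;
}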
Example #8
static int eth_close(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct msg msg;
	int buffs = RX_DESCS;
	int i;

	ports_open--;
	qmgr_disable_irq(port->plat->rxq);
	napi_disable(&port->napi);
	netif_stop_queue(dev);

	while (queue_get_desc(RXFREE_QUEUE(port->id), port, 0) >= 0)
		buffs--;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_SETLOOPBACK_MODE;
	msg.eth_id = port->id;
	msg.byte3 = 1;
	if (npe_send_recv_message(port->npe, &msg, "ETH_ENABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to enable loopback\n", dev->name);

	i = 0;
	do {
		while (queue_get_desc(port->plat->rxq, port, 0) >= 0)
			buffs--;
		if (!buffs)
			break;
		if (qmgr_stat_empty(TX_QUEUE(port->id))) {
			/* inject a dummy frame so the looped-back TX returns
			 * another RX buffer and the queue can drain */
			struct desc *desc;
			u32 phys;
			int n = queue_get_desc(port->plat->txreadyq, port, 1);
			BUG_ON(n < 0);
			desc = tx_desc_ptr(port, n);
			phys = tx_desc_phys(port, n);
			desc->buf_len = desc->pkt_len = 1;
			wmb();
			queue_put_desc(TX_QUEUE(port->id), phys, desc);
		}
		udelay(1);
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain RX queue, %i buffer(s)"
		       " left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining RX queue took %i cycles\n", i);
#endif

	buffs = TX_DESCS;
	while (queue_get_desc(TX_QUEUE(port->id), port, 1) >= 0)
		buffs--; 

	i = 0;
	do {
		while (queue_get_desc(port->plat->txreadyq, port, 1) >= 0)
			buffs--;
		if (!buffs)
			break;
	} while (++i < MAX_CLOSE_WAIT);

	if (buffs)
		printk(KERN_CRIT "%s: unable to drain TX queue, %i buffer(s) "
		       "left in NPE\n", dev->name, buffs);
#if DEBUG_CLOSE
	if (!buffs)
		printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif

	msg.byte3 = 0;
	if (npe_send_recv_message(port->npe, &msg, "ETH_DISABLE_LOOPBACK"))
		printk(KERN_CRIT "%s: unable to disable loopback\n",
		       dev->name);

	phy_stop(port->phydev);

	if (!ports_open)
		qmgr_disable_irq(TXDONE_QUEUE);
	destroy_queues(port);
	release_queues(port);
	return 0;
}
Example #9
void drv_close()
{
	int thread_id = corral_getThreadID();

	dev_down();
	napi_disable();
	free_irq();
}
Example #10
File: ibmveth.c  Project: 3null/fastsocket
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
						GFP_KERNEL);

	if (!adapter->rx_queue.queue_addr) {
		netdev_err(netdev, "unable to allocate rx queue pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	dev = &adapter->vdev->dev;

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->rx_queue.queue_dma = dma_map_single(dev,
			adapter->rx_queue.queue_addr,
			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma)) ||
	    (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
		netdev_err(netdev, "unable to map filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     rxq_desc.desc,
				     mac_address);
		rc = -ENONET;
		goto err_out;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto err_out;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(rc) || (rc == H_BUSY));

		goto err_out;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		netdev_err(netdev, "unable to allocate bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

err_out_free_irq:
	free_irq(netdev->irq, netdev);
err_out:
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return rc;
}