Code example #1
File: dl2k.c Project: 19Dan01/linux
static int
rio_open (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;
	const int irq = np->pdev->irq;
	int i;
	u16 macctrl;

	i = request_irq(irq, rio_interrupt, IRQF_SHARED, dev->name, dev);
	if (i)
		return i;

	/* Reset all logic functions */
	dw16(ASICCtrl + 2,
	     GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
	mdelay(10);

	/* DebugCtrl bits 4, 5 and 9 must be set */
	dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);

	/* Jumbo frame */
	if (np->jumbo != 0)
		dw16(MaxFrameSize, MAX_JUMBO+14);

	alloc_list (dev);

	/* Get station address */
	for (i = 0; i < 6; i++)
		dw8(StationAddr0 + i, dev->dev_addr[i]);

	set_multicast (dev);
	if (np->coalesce) {
		dw32(RxDMAIntCtrl, np->rx_coalesce | np->rx_timeout << 16);
	}
	/* Set RIO to poll every N*320nsec. */
	dw8(RxDMAPollPeriod, 0x20);
	dw8(TxDMAPollPeriod, 0xff);
	dw8(RxDMABurstThresh, 0x30);
	dw8(RxDMAUrgentThresh, 0x30);
	dw32(RmonStatMask, 0x0007ffff);
	/* clear statistics */
	clear_stats (dev);

	/* VLAN supported */
	if (np->vlan) {
		/* priority field in RxDMAIntCtrl  */
		dw32(RxDMAIntCtrl, dr32(RxDMAIntCtrl) | 0x7 << 10);
		/* VLANId */
		dw16(VLANId, np->vlan);
		/* Length/Type should be 0x8100 */
		dw32(VLANTag, 0x8100 << 16 | np->vlan);
		/* Enable AutoVLANuntagging, but disable AutoVLANtagging.
		   VLAN information is tagged by the TFC's VID and CFI fields. */
		dw32(MACCtrl, dr32(MACCtrl) | AutoVLANuntagging);
	}

	init_timer (&np->timer);
	np->timer.expires = jiffies + 1*HZ;
	np->timer.data = (unsigned long) dev;
	np->timer.function = rio_timer;
	add_timer (&np->timer);

	/* Start Tx/Rx */
	dw32(MACCtrl, dr32(MACCtrl) | StatsEnable | RxEnable | TxEnable);

	macctrl = 0;
	macctrl |= (np->vlan) ? AutoVLANuntagging : 0;
	macctrl |= (np->full_duplex) ? DuplexSelect : 0;
	macctrl |= (np->tx_flow) ? TxFlowControlEnable : 0;
	macctrl |= (np->rx_flow) ? RxFlowControlEnable : 0;
	dw16(MACCtrl, macctrl);

	netif_start_queue (dev);

	dl2k_enable_int(np);
	return 0;
}
Code example #2
static int myri_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct myri_eth *mp = netdev_priv(dev);
	struct sendq __iomem *sq = mp->sq;
	struct myri_txd __iomem *txd;
	unsigned long flags;
	unsigned int head, tail;
	int len, entry;
	u32 dma_addr;

	DTX(("myri_start_xmit: "));

	myri_tx(mp, dev);

	netif_stop_queue(dev);

	/* This is just to prevent multiple PIO reads for TX_BUFFS_AVAIL. */
	head = sbus_readl(&sq->head);
	tail = sbus_readl(&sq->tail);

	if (!TX_BUFFS_AVAIL(head, tail)) {
		DTX(("no buffs available, returning 1\n"));
		return 1;
	}

	spin_lock_irqsave(&mp->irq_lock, flags);

	DHDR(("xmit[skbdata(%p)]\n", skb->data));
#ifdef DEBUG_HEADER
	dump_ehdr_and_myripad(((unsigned char *) skb->data));
#endif

	/* XXX Maybe this can go as well. */
	len = skb->len;
	if (len & 3) {
		DTX(("len&3 "));
		len = (len + 4) & (~3);
	}

	entry = sbus_readl(&sq->tail);

	txd = &sq->myri_txd[entry];
	mp->tx_skbs[entry] = skb;

	/* Must do this before we sbus map it. */
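	/* Multicast/broadcast frames (low bit of the first destination byte
	 * set) get all-ones address words; unicast frames get 0xffff followed
	 * by the destination MAC in the remaining three halfwords.
	 */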
	if (skb->data[MYRI_PAD_LEN] & 0x1) {
		sbus_writew(0xffff, &txd->addr[0]);
		sbus_writew(0xffff, &txd->addr[1]);
		sbus_writew(0xffff, &txd->addr[2]);
		sbus_writew(0xffff, &txd->addr[3]);
	} else {
		sbus_writew(0xffff, &txd->addr[0]);
		sbus_writew((skb->data[0] << 8) | skb->data[1], &txd->addr[1]);
		sbus_writew((skb->data[2] << 8) | skb->data[3], &txd->addr[2]);
		sbus_writew((skb->data[4] << 8) | skb->data[5], &txd->addr[3]);
	}

	dma_addr = dma_map_single(&mp->myri_op->dev, skb->data,
				  len, DMA_TO_DEVICE);
	sbus_writel(dma_addr, &txd->myri_gathers[0].addr);
	sbus_writel(len, &txd->myri_gathers[0].len);
	sbus_writel(1, &txd->num_sg);
	sbus_writel(KERNEL_CHANNEL, &txd->chan);
	sbus_writel(len, &txd->len);
	sbus_writel((u32)-1, &txd->csum_off);
	sbus_writel(0, &txd->csum_field);

	sbus_writel(NEXT_TX(entry), &sq->tail);
	DTX(("BangTheChip "));
	bang_the_chip(mp);

	DTX(("tbusy=0, returning 0\n"));
	netif_start_queue(dev);
	spin_unlock_irqrestore(&mp->irq_lock, flags);
	return 0;
}
Code example #3
File: usbnet.c Project: Lance0312/osdi-exercise
int usbnet_open (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			retval;
	struct driver_info	*info = dev->driver_info;

	if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
		if (netif_msg_ifup (dev))
			devinfo (dev,
				"resumption fail (%d) usbnet usb-%s-%s, %s",
				retval,
				dev->udev->bus->bus_name, dev->udev->devpath,
			info->description);
		goto done_nopm;
	}

	// put into "known safe" state
	if (info->reset && (retval = info->reset (dev)) < 0) {
		if (netif_msg_ifup (dev))
			devinfo (dev,
				"open reset fail (%d) usbnet usb-%s-%s, %s",
				retval,
				dev->udev->bus->bus_name, dev->udev->devpath,
			info->description);
		goto done;
	}

	// insist peer be connected
	if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
		if (netif_msg_ifup (dev))
			devdbg (dev, "can't open; %d", retval);
		goto done;
	}

	/* start any status interrupt transfer */
	if (dev->interrupt) {
		retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
		if (retval < 0) {
			if (netif_msg_ifup (dev))
				deverr (dev, "intr submit %d", retval);
			goto done;
		}
	}

	netif_start_queue (net);
	if (netif_msg_ifup (dev)) {
		char	*framing;

		if (dev->driver_info->flags & FLAG_FRAMING_NC)
			framing = "NetChip";
		else if (dev->driver_info->flags & FLAG_FRAMING_GL)
			framing = "GeneSys";
		else if (dev->driver_info->flags & FLAG_FRAMING_Z)
			framing = "Zaurus";
		else if (dev->driver_info->flags & FLAG_FRAMING_RN)
			framing = "RNDIS";
		else if (dev->driver_info->flags & FLAG_FRAMING_AX)
			framing = "ASIX";
		else
			framing = "simple";

		devinfo (dev, "open: enable queueing "
				"(rx %d, tx %d) mtu %d %s framing",
			(int)RX_QLEN (dev), (int)TX_QLEN (dev), dev->net->mtu,
			framing);
	}

	// delay posting reads until we're fully open
	tasklet_schedule (&dev->bh);
	return retval;
done:
	usb_autopm_put_interface(dev->intf);
done_nopm:
	return retval;
}
Code example #4
File: u_ether.c Project: jekapaty/SebastianFM-kernel
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
                                  struct net_device *net)
{
    struct eth_dev		*dev = netdev_priv(net);
    int			length = skb->len;
    int			retval;
    struct usb_request	*req = NULL;
    unsigned long		flags;
    struct usb_ep		*in;
    u16			cdc_filter;

    spin_lock_irqsave(&dev->lock, flags);
    if (dev->port_usb) {
        in = dev->port_usb->in_ep;
        cdc_filter = dev->port_usb->cdc_filter;
    } else {
        in = NULL;
        cdc_filter = 0;
    }
    spin_unlock_irqrestore(&dev->lock, flags);

    if (!in) {
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
    }


    if (!is_promisc(cdc_filter)) {
        u8		*dest = skb->data;

        if (is_multicast_ether_addr(dest)) {
            u16	type;

            if (is_broadcast_ether_addr(dest))
                type = USB_CDC_PACKET_TYPE_BROADCAST;
            else
                type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
            if (!(cdc_filter & type)) {
                dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
            }
        }

    }

    spin_lock_irqsave(&dev->req_lock, flags);
    if (list_empty(&dev->tx_reqs)) {
        spin_unlock_irqrestore(&dev->req_lock, flags);
        return NETDEV_TX_BUSY;
    }

    req = container_of(dev->tx_reqs.next, struct usb_request, list);
    list_del(&req->list);


    if (list_empty(&dev->tx_reqs))
        netif_stop_queue(net);
    spin_unlock_irqrestore(&dev->req_lock, flags);

    if (dev->wrap) {
        unsigned long	flags;

        spin_lock_irqsave(&dev->lock, flags);
        if (dev->port_usb)
            skb = dev->wrap(dev->port_usb, skb);
        spin_unlock_irqrestore(&dev->lock, flags);
        if (!skb)
            goto drop;

        length = skb->len;
    }
    req->buf = skb->data;
    req->context = skb;
    req->complete = tx_complete;


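    /* fixed-length links (e.g. NCM at its maximum transfer size) must not
     * have a zero-length packet appended
     */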
    if (dev->port_usb->is_fixed &&
            length == dev->port_usb->fixed_in_len &&
            (length % in->maxpacket) == 0)
        req->zero = 0;
    else
        req->zero = 1;

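    /* zlp framing keeps strict CDC-Ether conformance, but some hardware
     * can't write zlps; pad such transfers by one byte instead
     */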
    if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
        length++;

    req->length = length;


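    /* at high speed, throttle the completion IRQ rate: request an interrupt
     * only once every qmult queued packets
     */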
    if (gadget_is_dualspeed(dev->gadget) &&
            (dev->gadget->speed == USB_SPEED_HIGH)) {
        dev->tx_qlen++;
        if (dev->tx_qlen == qmult) {
            req->no_interrupt = 0;
            dev->tx_qlen = 0;
        } else {
            req->no_interrupt = 1;
        }
    } else {
        req->no_interrupt = 0;
    }

    retval = usb_ep_queue(in, req, GFP_ATOMIC);
    switch (retval) {
    default:
        DBG(dev, "tx queue err %d\n", retval);
        break;
    case 0:
        net->trans_start = jiffies;
    }

    if (retval) {
        dev_kfree_skb_any(skb);
drop:
        dev->net->stats.tx_dropped++;
        spin_lock_irqsave(&dev->req_lock, flags);
        if (list_empty(&dev->tx_reqs))
            netif_start_queue(net);
        list_add(&req->list, &dev->tx_reqs);
        spin_unlock_irqrestore(&dev->req_lock, flags);
    }
    return NETDEV_TX_OK;
}
Code example #5
static int ks959_net_open(struct net_device *netdev)
{
	struct ks959_cb *kingsun = netdev_priv(netdev);
	int err = -ENOMEM;
	char hwname[16];

	/* At this point, urbs are NULL, and skb is NULL (see kingsun_probe) */
	kingsun->receiving = 0;

	/* Initialize for SIR to copy data directly into skb.  */
	kingsun->rx_unwrap_buff.in_frame = FALSE;
	kingsun->rx_unwrap_buff.state = OUTSIDE_FRAME;
	kingsun->rx_unwrap_buff.truesize = IRDA_SKB_MAX_MTU;
	kingsun->rx_unwrap_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
	if (!kingsun->rx_unwrap_buff.skb)
		goto free_mem;

	skb_reserve(kingsun->rx_unwrap_buff.skb, 1);
	kingsun->rx_unwrap_buff.head = kingsun->rx_unwrap_buff.skb->data;
	do_gettimeofday(&kingsun->rx_time);

	kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!kingsun->rx_urb)
		goto free_mem;

	kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!kingsun->tx_urb)
		goto free_mem;

	kingsun->speed_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!kingsun->speed_urb)
		goto free_mem;

	/* Initialize speed for dongle */
	kingsun->new_speed = 9600;
	err = ks959_change_speed(kingsun, 9600);
	if (err < 0)
		goto free_mem;

	/*
	 * Now that everything should be initialized properly,
	 * Open new IrLAP layer instance to take care of us...
	 */
	sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
	kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
	if (!kingsun->irlap) {
		err("ks959-sir: irlap_open failed");
		goto free_mem;
	}

	/* Start reception. Setup request already pre-filled in ks959_probe */
	usb_fill_control_urb(kingsun->rx_urb, kingsun->usbdev,
			     usb_rcvctrlpipe(kingsun->usbdev, 0),
			     (unsigned char *)kingsun->rx_setuprequest,
			     kingsun->rx_buf, KINGSUN_RCV_FIFO_SIZE,
			     ks959_rcv_irq, kingsun);
	kingsun->rx_urb->status = 0;
	err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
	if (err) {
		err("ks959-sir: first urb-submit failed: %d", err);
		goto close_irlap;
	}

	netif_start_queue(netdev);

	/* Situation at this point:
	   - all work buffers allocated
	   - urbs allocated and ready to fill
	   - max rx packet known (in max_rx)
	   - unwrap state machine initialized, in state outside of any frame
	   - receive request in progress
	   - IrLAP layer started, about to hand over packets to send
	 */

	return 0;

      close_irlap:
	irlap_close(kingsun->irlap);
      free_mem:
	usb_free_urb(kingsun->speed_urb);
	kingsun->speed_urb = NULL;
	usb_free_urb(kingsun->tx_urb);
	kingsun->tx_urb = NULL;
	usb_free_urb(kingsun->rx_urb);
	kingsun->rx_urb = NULL;
	if (kingsun->rx_unwrap_buff.skb) {
		kfree_skb(kingsun->rx_unwrap_buff.skb);
		kingsun->rx_unwrap_buff.skb = NULL;
		kingsun->rx_unwrap_buff.head = NULL;
	}
	return err;
}
Code example #6
static int internal_dev_open(struct net_device *netdev)
{
    netif_start_queue(netdev);
    return 0;
}
Code example #7
static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	int	transmit_from;
	int	len;
	int	tickssofar;
	u8	*buffer = skb->data;
	int	i;

	if (free_tx_pages <= 0) {	/* Do timeouts, to avoid hangs. */
		tickssofar = jiffies - dev->trans_start;
		if (tickssofar < 5)
			return 1;
		/* else */
		printk(KERN_WARNING "%s: transmit timed out (%d), %s?\n", dev->name, tickssofar, "network cable problem");
		/* Restart the adapter. */
		spin_lock_irqsave(&de600_lock, flags);
		if (adapter_init(dev)) {
			spin_unlock_irqrestore(&de600_lock, flags);
			return 1;
		}
		spin_unlock_irqrestore(&de600_lock, flags);
	}

	/* Start real output */
	PRINTK(("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages));

	if ((len = skb->len) < RUNT)
		len = RUNT;

	spin_lock_irqsave(&de600_lock, flags);
	select_nic();
	tx_fifo[tx_fifo_in] = transmit_from = tx_page_adr(tx_fifo_in) - len;
	tx_fifo_in = (tx_fifo_in + 1) % TX_PAGES; /* Next free tx page */

	if(check_lost)
	{
		/* This costs about 40 instructions per packet... */
		de600_setup_address(NODE_ADDRESS, RW_ADDR);
		de600_read_byte(READ_DATA, dev);
		if (was_down || (de600_read_byte(READ_DATA, dev) != 0xde)) {
			if (adapter_init(dev)) {
				spin_unlock_irqrestore(&de600_lock, flags);
				return 1;
			}
		}
	}

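	/* Copy the frame into the adapter's transmit page, zero-padding runts */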
	de600_setup_address(transmit_from, RW_ADDR);
	for (i = 0;  i < skb->len ; ++i, ++buffer)
		de600_put_byte(*buffer);
	for (; i < len; ++i)
		de600_put_byte(0);

	if (free_tx_pages-- == TX_PAGES) { /* No transmission going on */
		dev->trans_start = jiffies;
		netif_start_queue(dev); /* allow more packets into adapter */
		/* Send page and generate a faked interrupt */
		de600_setup_address(transmit_from, TX_ADDR);
		de600_put_command(TX_ENABLE);
	}
	else {
		if (free_tx_pages)
			netif_start_queue(dev);
		else
			netif_stop_queue(dev);
		select_prn();
	}
	spin_unlock_irqrestore(&de600_lock, flags);
	dev_kfree_skb(skb);
	return 0;
}
Code example #8
static int c2_up(struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_element *elem;
	struct c2_rxp_hdr *rxp_hdr;
	struct in_device *in_dev;
	size_t rx_size, tx_size;
	int ret, i;
	unsigned int netimr0;

	if (netif_msg_ifup(c2_port))
		pr_debug("%s: enabling interface\n", netdev->name);

	
	/* Set the Rx buffer size based on the MTU */
	c2_set_rxbufsize(c2_port);

	
	/* Size the DMA-able memory for the Rx and Tx host descriptor rings */
	rx_size = c2_port->rx_ring.count * sizeof(struct c2_rx_desc);
	tx_size = c2_port->tx_ring.count * sizeof(struct c2_tx_desc);

	c2_port->mem_size = tx_size + rx_size;
	c2_port->mem = pci_alloc_consistent(c2dev->pcidev, c2_port->mem_size,
					    &c2_port->dma);
	if (c2_port->mem == NULL) {
		pr_debug("Unable to allocate memory for "
			"host descriptor rings\n");
		return -ENOMEM;
	}

	memset(c2_port->mem, 0, c2_port->mem_size);

	
	/* Create the Rx host descriptor ring */
	if ((ret =
	     c2_rx_ring_alloc(&c2_port->rx_ring, c2_port->mem, c2_port->dma,
			      c2dev->mmio_rxp_ring))) {
		pr_debug("Unable to create RX ring\n");
		goto bail0;
	}

	
	/* Fill the Rx ring with receive buffers */
	if (c2_rx_fill(c2_port)) {
		pr_debug("Unable to fill RX ring\n");
		goto bail1;
	}

	
	/* Create the Tx host descriptor ring */
	if ((ret = c2_tx_ring_alloc(&c2_port->tx_ring, c2_port->mem + rx_size,
				    c2_port->dma + rx_size,
				    c2dev->mmio_txp_ring))) {
		pr_debug("Unable to create TX ring\n");
		goto bail1;
	}

	
	/* Initialize the Tx ring bookkeeping */
	c2_port->tx_avail = c2_port->tx_ring.count - 1;
	c2_port->tx_ring.to_use = c2_port->tx_ring.to_clean =
	    c2_port->tx_ring.start + c2dev->cur_tx;

	

	BUG_ON(c2_port->tx_ring.to_use != c2_port->tx_ring.to_clean);

	
	/* Reset the adapter */
	c2_reset(c2_port);

	
	/* Mark every Rx descriptor as ready for the hardware */
	for (i = 0, elem = c2_port->rx_ring.start; i < c2_port->rx_ring.count;
	     i++, elem++) {
		rxp_hdr = (struct c2_rxp_hdr *) elem->skb->data;
		rxp_hdr->flags = 0;
		__raw_writew((__force u16) cpu_to_be16(RXP_HRXD_READY),
			     elem->hw_desc + C2_RXP_FLAGS);
	}

	
	/* Allow the stack to hand us packets */
	netif_start_queue(netdev);

	
	/* Enable interrupts: clear the disable bit and unmask host Tx/Rx */
	writel(0, c2dev->regs + C2_IDIS);
	netimr0 = readl(c2dev->regs + C2_NIMR0);
	netimr0 &= ~(C2_PCI_HTX_INT | C2_PCI_HRX_INT);
	writel(netimr0, c2dev->regs + C2_NIMR0);

	in_dev = in_dev_get(netdev);
	IN_DEV_CONF_SET(in_dev, ARP_IGNORE, 1);
	in_dev_put(in_dev);

	return 0;

      bail1:
	c2_rx_clean(c2_port);
	kfree(c2_port->rx_ring.start);

      bail0:
	pci_free_consistent(c2dev->pcidev, c2_port->mem_size, c2_port->mem,
			    c2_port->dma);

	return ret;
}
Code example #9
static int eth_open(struct net_device *dev)
{
	struct port *port = netdev_priv(dev);
	struct npe *npe = port->npe;
	struct msg msg;
	int i, err;

	if (!npe_running(npe)) {
		err = npe_load_firmware(npe, npe_name(npe), &dev->dev);
		if (err)
			return err;

		if (npe_recv_message(npe, &msg, "ETH_GET_STATUS")) {
			printk(KERN_ERR "%s: %s not responding\n", dev->name,
			       npe_name(npe));
			return -EIO;
		}
		port->firmware[0] = msg.byte4;
		port->firmware[1] = msg.byte5;
		port->firmware[2] = msg.byte6;
		port->firmware[3] = msg.byte7;
	}

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_VLAN_SETRXQOSENTRY;
	msg.eth_id = port->id;
	msg.byte5 = port->plat->rxq | 0x80;
	msg.byte7 = port->plat->rxq << 4;
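	/* program the Rx queue assignment for each of the eight QoS classes */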
	for (i = 0; i < 8; i++) {
		msg.byte3 = i;
		if (npe_send_recv_message(port->npe, &msg, "ETH_SET_RXQ"))
			return -EIO;
	}

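	/* tell the NPE the MAC address assigned to this port */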
	msg.cmd = NPE_EDB_SETPORTADDRESS;
	msg.eth_id = PHYSICAL_ID(port->id);
	msg.byte2 = dev->dev_addr[0];
	msg.byte3 = dev->dev_addr[1];
	msg.byte4 = dev->dev_addr[2];
	msg.byte5 = dev->dev_addr[3];
	msg.byte6 = dev->dev_addr[4];
	msg.byte7 = dev->dev_addr[5];
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_MAC"))
		return -EIO;

	memset(&msg, 0, sizeof(msg));
	msg.cmd = NPE_FW_SETFIREWALLMODE;
	msg.eth_id = port->id;
	if (npe_send_recv_message(port->npe, &msg, "ETH_SET_FIREWALL_MODE"))
		return -EIO;

	if ((err = request_queues(port)) != 0)
		return err;

	if ((err = init_queues(port)) != 0) {
		destroy_queues(port);
		release_queues(port);
		return err;
	}

	port->speed = 0;	/* force "link up" message */
	phy_start(port->phydev);

	for (i = 0; i < ETH_ALEN; i++)
		__raw_writel(dev->dev_addr[i], &port->regs->hw_addr[i]);
	__raw_writel(0x08, &port->regs->random_seed);
	__raw_writel(0x12, &port->regs->partial_empty_threshold);
	__raw_writel(0x30, &port->regs->partial_full_threshold);
	__raw_writel(0x08, &port->regs->tx_start_bytes);
	__raw_writel(0x15, &port->regs->tx_deferral);
	__raw_writel(0x08, &port->regs->tx_2part_deferral[0]);
	__raw_writel(0x07, &port->regs->tx_2part_deferral[1]);
	__raw_writel(0x80, &port->regs->slot_time);
	__raw_writel(0x01, &port->regs->int_clock_threshold);

	/* Populate queues with buffers, no failure after this point */
	for (i = 0; i < TX_DESCS; i++)
		queue_put_desc(port->plat->txreadyq,
			       tx_desc_phys(port, i), tx_desc_ptr(port, i));

	for (i = 0; i < RX_DESCS; i++)
		queue_put_desc(RXFREE_QUEUE(port->id),
			       rx_desc_phys(port, i), rx_desc_ptr(port, i));

	__raw_writel(TX_CNTRL1_RETRIES, &port->regs->tx_control[1]);
	__raw_writel(DEFAULT_TX_CNTRL0, &port->regs->tx_control[0]);
	__raw_writel(0, &port->regs->rx_control[1]);
	__raw_writel(DEFAULT_RX_CNTRL0, &port->regs->rx_control[0]);

	napi_enable(&port->napi);
	eth_set_mcast_list(dev);
	netif_start_queue(dev);

	qmgr_set_irq(port->plat->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
		     eth_rx_irq, dev);
	if (!ports_open) {
		qmgr_set_irq(TXDONE_QUEUE, QUEUE_IRQ_SRC_NOT_EMPTY,
			     eth_txdone_irq, NULL);
		qmgr_enable_irq(TXDONE_QUEUE);
	}
	ports_open++;
	/* we may already have RX data, enables IRQ */
	napi_schedule(&port->napi);
	return 0;
}
Code example #10
static int my_open( struct net_device *dev ) {
   printk( KERN_INFO "! Hit: my_open(%s)\n", dev->name );
   /* start up the transmission queue */
   netif_start_queue( dev );
   return 0;
}
Code example #11
static int vnet_open(struct net_device *ndev)
{
	netif_start_queue(ndev);
	return 0;
}
Code example #12
File: sh_eth.c Project: andi34/Dhollmen_Kernel
static int sh_eth_dev_init(struct net_device *ndev)
{
	int ret = 0;
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u_int32_t rx_int_var, tx_int_var;
	u32 val;

	/* Soft Reset */
	sh_eth_reset(ndev);

	/* Descriptor format */
	sh_eth_ring_format(ndev);
	if (mdp->cd->rpadir)
		sh_eth_write(ndev, mdp->cd->rpadir_value, RPADIR);

	/* all sh_eth int mask */
	sh_eth_write(ndev, 0, EESIPR);

#if defined(__LITTLE_ENDIAN__)
	if (mdp->cd->hw_swap)
		sh_eth_write(ndev, EDMR_EL, EDMR);
	else
#endif
		sh_eth_write(ndev, 0, EDMR);

	/* FIFO size set */
	sh_eth_write(ndev, mdp->cd->fdr_value, FDR);
	sh_eth_write(ndev, 0, TFTR);

	/* Frame recv control */
	sh_eth_write(ndev, mdp->cd->rmcr_value, RMCR);

	rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
	tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
	sh_eth_write(ndev, rx_int_var | tx_int_var, TRSCER);

	if (mdp->cd->bculr)
		sh_eth_write(ndev, 0x800, BCULR);	/* Burst cycle set */

	sh_eth_write(ndev, mdp->cd->fcftr_value, FCFTR);

	if (!mdp->cd->no_trimd)
		sh_eth_write(ndev, 0, TRIMD);

	/* Recv frame limit set register */
	sh_eth_write(ndev, RFLR_VALUE, RFLR);

	sh_eth_write(ndev, sh_eth_read(ndev, EESR), EESR);
	sh_eth_write(ndev, mdp->cd->eesipr_value, EESIPR);

	/* PAUSE Prohibition */
	val = (sh_eth_read(ndev, ECMR) & ECMR_DM) |
		ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

	sh_eth_write(ndev, val, ECMR);

	if (mdp->cd->set_rate)
		mdp->cd->set_rate(ndev);

	/* E-MAC Status Register clear */
	sh_eth_write(ndev, mdp->cd->ecsr_value, ECSR);

	/* E-MAC Interrupt Enable register */
	sh_eth_write(ndev, mdp->cd->ecsipr_value, ECSIPR);

	/* Set MAC address */
	update_mac_address(ndev);

	/* mask reset */
	if (mdp->cd->apr)
		sh_eth_write(ndev, APR_AP, APR);
	if (mdp->cd->mpr)
		sh_eth_write(ndev, MPR_MP, MPR);
	if (mdp->cd->tpauser)
		sh_eth_write(ndev, TPAUSER_UNLIMITED, TPAUSER);

	/* Setting the Rx mode will start the Rx process. */
	sh_eth_write(ndev, EDRRR_R, EDRRR);

	netif_start_queue(ndev);

	return ret;
}
Code example #13
File: u_ether.c Project: KHATEEBNSIT/lsdk_ar9531
static int eth_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct pci_request	*req = NULL;
	unsigned long		flags;
	struct pci_ep		*in;
	u16			cdc_filter;


	total_tx_packet++;
	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_pci) {
		in = dev->port_pci->in_ep;
		cdc_filter = dev->port_pci->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		printk("Error: %s:%d\n", __FILE__, __LINE__);
		return 0;
	}
#if 0
	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
		printk("Error: %s:%d\n", __FILE__, __LINE__);
				return 0;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}
#endif

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		printk("Error: %s:%d\n", __FILE__, __LINE__);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct pci_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs)) {
		netif_stop_queue(net);
//		printk("Error: %s:%d\n", __FILE__, __LINE__);
		total_throttle++;
	}
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
#if 0
	if (dev->wrap) {
		struct sk_buff	*skb_new;

		skb_new = dev->wrap(skb);
		if (!skb_new) {
		printk("Error: %s:%d\n", __FILE__, __LINE__);
			goto drop;
		}
		dev_kfree_skb_any(skb);
		skb = skb_new;
		length = skb->len;
	}
#endif
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (!dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length + 2;

	retval = ath_pci_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
//drop:
		dev->net->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return 0;
}
Code example #14
static int de600_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	int	transmit_from;
	int	len;
	int	tickssofar;
	u8	*buffer = skb->data;
	int	i;

	if (free_tx_pages <= 0) {	/* Do timeouts, to avoid hangs. */
		tickssofar = jiffies - dev->trans_start;
		if (tickssofar < 5)
			return NETDEV_TX_BUSY;
		/* else */
		printk(KERN_WARNING "%s: transmit timed out (%d), %s?\n", dev->name, tickssofar, "network cable problem");
		/* Restart the adapter. */
		spin_lock_irqsave(&de600_lock, flags);
		if (adapter_init(dev)) {
			spin_unlock_irqrestore(&de600_lock, flags);
			return NETDEV_TX_BUSY;
		}
		spin_unlock_irqrestore(&de600_lock, flags);
	}

	
	/* Start real output */
	pr_debug("de600_start_xmit:len=%d, page %d/%d\n", skb->len, tx_fifo_in, free_tx_pages);

	if ((len = skb->len) < RUNT)
		len = RUNT;

	spin_lock_irqsave(&de600_lock, flags);
	select_nic();
	tx_fifo[tx_fifo_in] = transmit_from = tx_page_adr(tx_fifo_in) - len;
	tx_fifo_in = (tx_fifo_in + 1) % TX_PAGES; /* Next free tx page */

	if(check_lost)
	{
		
		/* This costs about 40 instructions per packet... */
		de600_setup_address(NODE_ADDRESS, RW_ADDR);
		de600_read_byte(READ_DATA, dev);
		if (was_down || (de600_read_byte(READ_DATA, dev) != 0xde)) {
			if (adapter_init(dev)) {
				spin_unlock_irqrestore(&de600_lock, flags);
				return NETDEV_TX_BUSY;
			}
		}
	}

	de600_setup_address(transmit_from, RW_ADDR);
	for (i = 0;  i < skb->len ; ++i, ++buffer)
		de600_put_byte(*buffer);
	for (; i < len; ++i)
		de600_put_byte(0);

	if (free_tx_pages-- == TX_PAGES) { /* No transmission going on */
		dev->trans_start = jiffies;
		netif_start_queue(dev); /* allow more packets into adapter */
		/* Send page and generate a faked interrupt */
		de600_setup_address(transmit_from, TX_ADDR);
		de600_put_command(TX_ENABLE);
	}
	else {
		if (free_tx_pages)
			netif_start_queue(dev);
		else
			netif_stop_queue(dev);
		select_prn();
	}
	spin_unlock_irqrestore(&de600_lock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
Code example #15
File: u_ether.c Project: dhs-shine/sprd_project
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval=0;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;
    struct rndis_msg    *msg = get_rndis_msg();
    struct pkt_msg      *r_q=NULL,*w_q = NULL;
    int index;
    struct sk_buff *skb_old;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}
    skb_old=skb;
    eth_alloc_req(net,&req,&w_q,skb);
    if(req == NULL)
        return NETDEV_TX_BUSY;
	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);

		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb)
			goto drop;

		length = skb->len;
	}
	/*
	 * Align data to 32bit if the dma controller requires it
	 */
	if (gadget_dma32(dev->gadget)) {
		unsigned long align = (unsigned long)skb->data & 3;
		if (WARN_ON(skb_headroom(skb) < align)) {
			dev_kfree_skb_any(skb);
			goto drop;
		} else if (align) {
			u8 *data = skb->data;
			size_t len = skb_headlen(skb);
			skb->data -= align;
			memmove(skb->data, data, len);
			skb_set_tail_pointer(skb, len);
		}
	}

    retval = 0;
    if(w_q==NULL)
		retval = save_to_queue(dev,msg,skb,req,&w_q);
	else if(skb!=skb_old)
		update_sbks_in_queue(w_q,skb,skb_old);

    if(retval == 0){
        spin_lock_irqsave(&msg->buffer_lock, flags);
        if(w_q && w_q->bit_map==FULL_BIT_MAP && (w_q->state != RNDIS_QUEUE_SEND)){
            msg->last_sent = w_q->q_idx;
            w_q->state = RNDIS_QUEUE_SEND;
            spin_unlock_irqrestore(&msg->buffer_lock, flags);
            pkt_msg_send(msg,w_q,net);
            return NETDEV_TX_OK;
        }

        w_q = msg->q + msg->w_idx;
        if(w_q->bit_map==FULL_BIT_MAP && w_q->state != RNDIS_QUEUE_SEND){
            msg->last_sent = w_q->q_idx;
            w_q->state = RNDIS_QUEUE_SEND;
            spin_unlock_irqrestore(&msg->buffer_lock, flags);
            pkt_msg_send(msg,w_q,net);
            return NETDEV_TX_OK;
        }
        if( msg->w_idx == msg->r_idx &&
            w_q->state == RNDIS_QUEUE_GATHER &&
            w_q->skb_idx == w_q->pkt_cnt){
			msg->last_sent = w_q->q_idx;
            w_q->state = RNDIS_QUEUE_SEND;
            spin_unlock_irqrestore(&msg->buffer_lock, flags);
            pkt_msg_send(msg,w_q,net);
            return NETDEV_TX_OK;
        }
        spin_unlock_irqrestore(&msg->buffer_lock, flags);
        return NETDEV_TX_OK;
    }
	if (retval) {
		dev_kfree_skb_any(skb);
drop:
        printk("drop req\n");
		dev->net->stats.tx_dropped++;
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}
Code example #16
File: sundance.c Project: liexusong/Linux-2.4.16
static int netdev_open(struct net_device *dev)
{
	struct netdev_private *np = dev->priv;
	long ioaddr = dev->base_addr;
	int i;

	/* Do we need to reset the chip??? */

	i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
	if (i)
		return i;

	if (debug > 1)
		printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
			   dev->name, dev->irq);

	init_ring(dev);

	writel(np->rx_ring_dma, ioaddr + RxListPtr);
	/* The Tx list pointer is written as packets are queued. */

	for (i = 0; i < 6; i++)
		writeb(dev->dev_addr[i], ioaddr + StationAddr + i);

	/* Initialize other registers. */
	/* Configure the PCI bus bursts and FIFO thresholds. */

	if (dev->if_port == 0)
		dev->if_port = np->default_port;

	np->mcastlock = (spinlock_t) SPIN_LOCK_UNLOCKED;

	set_rx_mode(dev);
	writew(0, ioaddr + IntrEnable);
	writew(0, ioaddr + DownCounter);
	/* Set the chip to poll every N*320nsec. */
	writeb(100, ioaddr + RxDescPoll);
	writeb(127, ioaddr + TxDescPoll);
	netif_start_queue(dev);

	/* Enable interrupts by setting the interrupt mask. */
	writew(IntrRxDone | IntrRxDMADone | IntrPCIErr | IntrDrvRqst | IntrTxDone
		   | StatsMax | LinkChange, ioaddr + IntrEnable);

	writew(StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

	if (debug > 2)
		printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
			   "MAC Control %x, %4.4x %4.4x.\n",
			   dev->name, readl(ioaddr + RxStatus), readb(ioaddr + TxStatus),
			   readl(ioaddr + MACCtrl0),
			   readw(ioaddr + MACCtrl1), readw(ioaddr + MACCtrl0));

	/* Set the timer to check for link beat. */
	init_timer(&np->timer);
	np->timer.expires = jiffies + 3*HZ;
	np->timer.data = (unsigned long)dev;
	np->timer.function = &netdev_timer;				/* timer handler */
	add_timer(&np->timer);

	return 0;
}
Code example #17
File: u_ether.c Project: dhs-shine/sprd_project
static int pkt_msg_send(struct rndis_msg *msg, struct pkt_msg *q,
                                struct net_device *net)
{
    struct eth_dev		*dev = netdev_priv(net);
    struct usb_ep		*in = dev->port_usb->in_ep;
    struct usb_request  *req = NULL;
    unsigned long   flags;
    int retval;
    int	length ;
    int i;
    struct sk_buff* skb;
    int offset = 0;

    req = q->req;
    req->context = q;
    req->complete = tx_complete;
	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
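    /* if more than one skb was gathered, coalesce them into the queue's
     * staging buffer and send the lot as a single request
     */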
    if(q->skb_idx > 1){
        for(i=0;i<q->skb_idx;i++){
            skb = q->skbs[i];
            memcpy(&q->data[q->len],skb->data,skb->len);
            q->len += skb->len;
        }
        req->buf = q->data;
        length = q->len;
    }else{
        skb = q->skbs[0];
        req->buf = skb->data;
        length = skb->len;
    }
    if(debug_enabled)
        printk("pkt_msg_send: quque[%d].len = %d map=0x%x\n",q->q_idx,length,q->bit_map);
    q->bit_map = 0;
    if (dev->port_usb->is_fixed &&
        length == dev->port_usb->fixed_in_len &&
        (length % in->maxpacket) == 0)
        req->zero = 0;
    else
        req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
    if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
        length++;

    req->length = length;
    //printk("pkt_msg_send: %p[%d] %i !\n",req->context,q->q_idx, q->pkt_cnt);
    /* throttle high/super speed IRQ rate back slightly*/
    if (gadget_is_dualspeed(dev->gadget))
        req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
                            dev->gadget->speed == USB_SPEED_SUPER)
                ? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
                : 0;

    retval = usb_ep_queue(in, req, GFP_ATOMIC);
    switch (retval) {
    default:
        printk("tx queue err %d\n", retval);
        break;
    case 0:

        net->trans_start = jiffies;
        atomic_inc(&dev->tx_qlen);
    }
    if (retval){
        printk("rndis gather, %i IP diagrams are lost! eth_start_xmit\n", q->pkt_cnt);
        dev->net->stats.tx_dropped += q->skb_idx;
        pkt_msg_clean(q);
        spin_lock_irqsave(&dev->req_lock, flags);
        if (list_empty(&dev->tx_reqs)){
            printk("tx_complete, netif_start_queue1 [%d,%d],[%d,%d]\n", msg->w_idx,msg->r_idx,msg->last_sent,msg->last_complete);
            netif_start_queue(dev->net);
            msg->flow_stop= 0;
        }
        list_add(&req->list, &dev->tx_reqs);
        spin_unlock_irqrestore(&dev->req_lock, flags);
    }
    return retval;
}
Code example #18
int tms380tr_open(struct net_device *dev)
{
	struct net_local *tp = netdev_priv(dev);
	int err;
	
	
	/* Initialize the driver spinlock and timer */
	spin_lock_init(&tp->lock);
	init_timer(&tp->timer);

	

	/* Put an ISA DMA channel, if one is assigned, into cascade mode */
#ifdef CONFIG_ISA
	if(dev->dma > 0) 
	{
		unsigned long flags=claim_dma_lock();
		disable_dma(dev->dma);
		set_dma_mode(dev->dma, DMA_MODE_CASCADE);
		enable_dma(dev->dma);
		release_dma_lock(flags);
	}
#endif
	
	err = tms380tr_chipset_init(dev);
  	if(err)
	{
		printk(KERN_INFO "%s: Chipset initialization error\n", 
			dev->name);
		return (-1);
	}

	tp->timer.expires	= jiffies + 30*HZ;
	tp->timer.function	= tms380tr_timer_end_wait;
	tp->timer.data		= (unsigned long)dev;
	add_timer(&tp->timer);

	printk(KERN_DEBUG "%s: Adapter RAM size: %dK\n", 
	       dev->name, tms380tr_read_ptr(dev));

	tms380tr_enable_interrupts(dev);
	tms380tr_open_adapter(dev);

	netif_start_queue(dev);
	
	
	/* Wait for the adapter-open interrupt; the 30 second timer armed
	   above provides the timeout */
	tp->Sleeping = 1;
	interruptible_sleep_on(&tp->wait_for_tok_int);
	del_timer(&tp->timer);

	
	/* If the adapter failed to open, give up */
	if(tp->AdapterVirtOpenFlag == 0)
	{
		tms380tr_disable_interrupts(dev);
		return (-1);
	}

	tp->StartTime = jiffies;

	
	/* Start the periodic check timer */
	tp->timer.expires	= jiffies + 2*HZ;
	tp->timer.function	= tms380tr_timer_chk;
	tp->timer.data		= (unsigned long)dev;
	add_timer(&tp->timer);

	return (0);
}
Code example #19
/*----------------------------------------------------------------
* p80211knetdev_hard_start_xmit
*
* Linux netdevice method for transmitting a frame.
*
* Arguments:
*	skb	Linux sk_buff containing the frame.
*	netdev	Linux netdevice.
*
* Side effects:
 *	If the lower layers report that buffers are full, netdev->tbusy
*	will be set to prevent higher layers from sending more traffic.
*
*	Note: If this function returns non-zero, higher layers retain
*	      ownership of the skb.
*
* Returns:
*	zero on success, non-zero on failure.
----------------------------------------------------------------*/
static int p80211knetdev_hard_start_xmit( struct sk_buff *skb, netdevice_t *netdev)
{
	int		result = 0;
	int		txresult = -1;
	wlandevice_t	*wlandev = netdev->ml_priv;
	p80211_hdr_t    p80211_hdr;
	p80211_metawep_t p80211_wep;

	DBFENTER;

	if (skb == NULL) {
		return 0;
	}

	if (wlandev->state != WLAN_DEVICE_OPEN) {
		result = 1;
		goto failed;
	}

	memset(&p80211_hdr, 0, sizeof(p80211_hdr_t));
	memset(&p80211_wep, 0, sizeof(p80211_metawep_t));

	if ( netif_queue_stopped(netdev) ) {
		WLAN_LOG_DEBUG(1, "called when queue stopped.\n");
		result = 1;
		goto failed;
	}

	netif_stop_queue(netdev);

	/* Check to see that a valid mode is set */
	switch( wlandev->macmode ) {
	case WLAN_MACMODE_IBSS_STA:
	case WLAN_MACMODE_ESS_STA:
	case WLAN_MACMODE_ESS_AP:
		break;
	default:
		/* Mode isn't set yet, just drop the frame
		 * and return success.
		 * TODO: we need a saner way to handle this
		 */
		if(skb->protocol != ETH_P_80211_RAW) {
			netif_start_queue(wlandev->netdev);
			WLAN_LOG_NOTICE(
				"Tx attempt prior to association, frame dropped.\n");
			wlandev->linux_stats.tx_dropped++;
			result = 0;
			goto failed;
		}
		break;
	}

	/* Check for raw transmits */
	if(skb->protocol == ETH_P_80211_RAW) {
		if (!capable(CAP_NET_ADMIN)) {
			result = 1;
			goto failed;
		}
		/* move the header over */
		memcpy(&p80211_hdr, skb->data, sizeof(p80211_hdr_t));
		skb_pull(skb, sizeof(p80211_hdr_t));
	} else {
		if ( skb_ether_to_p80211(wlandev, wlandev->ethconv, skb, &p80211_hdr, &p80211_wep) != 0 ) {
			/* convert failed */
			WLAN_LOG_DEBUG(1, "ether_to_80211(%d) failed.\n",
					wlandev->ethconv);
			result = 1;
			goto failed;
		}
	}
	if ( wlandev->txframe == NULL ) {
		result = 1;
		goto failed;
	}

	netdev->trans_start = jiffies;

	wlandev->linux_stats.tx_packets++;
	/* count only the packet payload */
	wlandev->linux_stats.tx_bytes += skb->len;

	txresult = wlandev->txframe(wlandev, skb, &p80211_hdr, &p80211_wep);

	if ( txresult == 0) {
		/* success and more buf */
		/* avail, re: hw_txdata */
		netif_wake_queue(wlandev->netdev);
		result = 0;
	} else if ( txresult == 1 ) {
		/* success, no more avail */
		WLAN_LOG_DEBUG(3, "txframe success, no more bufs\n");
		/* netdev->tbusy = 1;  don't set here, irqhdlr */
		/*   may have already cleared it */
		result = 0;
	} else if ( txresult == 2 ) {
		/* alloc failure, drop frame */
		WLAN_LOG_DEBUG(3, "txframe returned alloc_fail\n");
		result = 1;
	} else {
		/* buffer full or queue busy, drop frame. */
		WLAN_LOG_DEBUG(3, "txframe returned full or busy\n");
		result = 1;
	}

 failed:
	/* Free up the WEP buffer if it's not the same as the skb */
	if ((p80211_wep.data) && (p80211_wep.data != skb->data))
		kfree(p80211_wep.data);

	/* we always free the skb here, never in a lower level. */
	if (!result)
		dev_kfree_skb(skb);

	DBFEXIT;
	return result;
}
Code example #20
File: sh_eth.c Project: artm1248/linux
static int sh_eth_dev_init(struct net_device *ndev)
{
    int ret = 0;
    struct sh_eth_private *mdp = netdev_priv(ndev);
    u32 ioaddr = ndev->base_addr;
    u_int32_t rx_int_var, tx_int_var;
    u32 val;

    /* Soft Reset */
    sh_eth_reset(ndev);

    /* Descriptor format */
    sh_eth_ring_format(ndev);
    ctrl_outl(RPADIR_INIT, ioaddr + RPADIR);

    /* all sh_eth int mask */
    ctrl_outl(0, ioaddr + EESIPR);

#if defined(CONFIG_CPU_SUBTYPE_SH7763)
    ctrl_outl(EDMR_EL, ioaddr + EDMR);
#else
    ctrl_outl(0, ioaddr + EDMR);	/* Endian change */
#endif

    /* FIFO size set */
    ctrl_outl((FIFO_SIZE_T | FIFO_SIZE_R), ioaddr + FDR);
    ctrl_outl(0, ioaddr + TFTR);

    /* Frame recv control */
    ctrl_outl(0, ioaddr + RMCR);

    rx_int_var = mdp->rx_int_var = DESC_I_RINT8 | DESC_I_RINT5;
    tx_int_var = mdp->tx_int_var = DESC_I_TINT2;
    ctrl_outl(rx_int_var | tx_int_var, ioaddr + TRSCER);

#if defined(CONFIG_CPU_SUBTYPE_SH7763)
    /* Burst cycle set */
    ctrl_outl(0x800, ioaddr + BCULR);
#endif

    ctrl_outl((FIFO_F_D_RFF | FIFO_F_D_RFD), ioaddr + FCFTR);

#if !defined(CONFIG_CPU_SUBTYPE_SH7763)
    ctrl_outl(0, ioaddr + TRIMD);
#endif

    /* Recv frame limit set register */
    ctrl_outl(RFLR_VALUE, ioaddr + RFLR);

    ctrl_outl(ctrl_inl(ioaddr + EESR), ioaddr + EESR);
    ctrl_outl((DMAC_M_RFRMER | DMAC_M_ECI | 0x003fffff), ioaddr + EESIPR);

    /* PAUSE Prohibition */
    val = (ctrl_inl(ioaddr + ECMR) & ECMR_DM) |
          ECMR_ZPF | (mdp->duplex ? ECMR_DM : 0) | ECMR_TE | ECMR_RE;

    ctrl_outl(val, ioaddr + ECMR);

    /* E-MAC Status Register clear */
    ctrl_outl(ECSR_INIT, ioaddr + ECSR);

    /* E-MAC Interrupt Enable register */
    ctrl_outl(ECSIPR_INIT, ioaddr + ECSIPR);

    /* Set MAC address */
    update_mac_address(ndev);

    /* mask reset */
#if defined(CONFIG_CPU_SUBTYPE_SH7710) || defined(CONFIG_CPU_SUBTYPE_SH7763)
    ctrl_outl(APR_AP, ioaddr + APR);
    ctrl_outl(MPR_MP, ioaddr + MPR);
    ctrl_outl(TPAUSER_UNLIMITED, ioaddr + TPAUSER);
#endif
#if defined(CONFIG_CPU_SUBTYPE_SH7710)
    ctrl_outl(BCFR_UNLIMITED, ioaddr + BCFR);
#endif

    /* Setting the Rx mode will start the Rx process. */
    ctrl_outl(EDRRR_R, ioaddr + EDRRR);

    netif_start_queue(ndev);

    return ret;
}
Code example #21
static int
orinoco_cs_event(event_t event, int priority,
		       event_callback_args_t * args)
{
	dev_link_t *link = args->client_data;
	struct orinoco_private *priv = (struct orinoco_private *)link->priv;
	struct net_device *dev = &priv->ndev;

	TRACE_ENTER("orinoco");

	switch (event) {
	case CS_EVENT_CARD_REMOVAL:
		/* FIXME: Erg.. this whole hw_ready thing looks racy
		   to me.  This may not be fixable without changing the
		   PCMCIA subsystem, though */
		priv->hw_ready = 0;
		orinoco_shutdown(priv);
		link->state &= ~DEV_PRESENT;
		if (link->state & DEV_CONFIG) {
			netif_stop_queue(dev);
			netif_device_detach(dev);
			mod_timer(&link->release, jiffies + HZ / 20);
		}
		break;
	case CS_EVENT_CARD_INSERTION:
		link->state |= DEV_PRESENT | DEV_CONFIG_PENDING;
		orinoco_cs_config(link);
		break;
	case CS_EVENT_PM_SUSPEND:

		link->state |= DEV_SUSPEND;
		/* Fall through... */
	case CS_EVENT_RESET_PHYSICAL:
		orinoco_shutdown(priv);
		/* Mark the device as stopped, to block IO until later */

		if (link->state & DEV_CONFIG) {
			if (link->open) {
				netif_stop_queue(dev);
				netif_device_detach(dev);
			}
			CardServices(ReleaseConfiguration, link->handle);
		}
		break;
	case CS_EVENT_PM_RESUME:
		link->state &= ~DEV_SUSPEND;
		/* Fall through... */
	case CS_EVENT_CARD_RESET:
		if (link->state & DEV_CONFIG) {
			CardServices(RequestConfiguration, link->handle,
				     &link->conf);

			if (link->open) {
				if (orinoco_reset(priv) == 0) {
					netif_device_attach(dev);
					netif_start_queue(dev);
				} else {
					printk(KERN_ERR "%s: Error resetting device on PCMCIA event\n",
					       dev->name);
					orinoco_cs_stop(dev);
				}
			}
		}
		/*
		   In a normal driver, additional code may go here to restore
		   the device state and restart IO. 
		 */
		break;
	}

	TRACE_EXIT("orinoco");

	return 0;
}				/* orinoco_cs_event */
Code example #22
File: ibmveth.c Project: CSCLOG/beaglebone
static int ibmveth_open(struct net_device *netdev)
{
	struct ibmveth_adapter *adapter = netdev_priv(netdev);
	u64 mac_address = 0;
	int rxq_entries = 1;
	unsigned long lpar_rc;
	int rc;
	union ibmveth_buf_desc rxq_desc;
	int i;
	struct device *dev;

	netdev_dbg(netdev, "open starting\n");

	napi_enable(&adapter->napi);

	for(i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++)
		rxq_entries += adapter->rx_buff_pool[i].size;

	adapter->buffer_list_addr = (void*) get_zeroed_page(GFP_KERNEL);
	adapter->filter_list_addr = (void*) get_zeroed_page(GFP_KERNEL);

	if (!adapter->buffer_list_addr || !adapter->filter_list_addr) {
		netdev_err(netdev, "unable to allocate filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.queue_len = sizeof(struct ibmveth_rx_q_entry) *
						rxq_entries;
	adapter->rx_queue.queue_addr = kmalloc(adapter->rx_queue.queue_len,
						GFP_KERNEL);

	if (!adapter->rx_queue.queue_addr) {
		netdev_err(netdev, "unable to allocate rx queue pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	dev = &adapter->vdev->dev;

	adapter->buffer_list_dma = dma_map_single(dev,
			adapter->buffer_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->filter_list_dma = dma_map_single(dev,
			adapter->filter_list_addr, 4096, DMA_BIDIRECTIONAL);
	adapter->rx_queue.queue_dma = dma_map_single(dev,
			adapter->rx_queue.queue_addr,
			adapter->rx_queue.queue_len, DMA_BIDIRECTIONAL);

	if ((dma_mapping_error(dev, adapter->buffer_list_dma)) ||
	    (dma_mapping_error(dev, adapter->filter_list_dma)) ||
	    (dma_mapping_error(dev, adapter->rx_queue.queue_dma))) {
		netdev_err(netdev, "unable to map filter or buffer list "
			   "pages\n");
		rc = -ENOMEM;
		goto err_out;
	}

	adapter->rx_queue.index = 0;
	adapter->rx_queue.num_slots = rxq_entries;
	adapter->rx_queue.toggle = 1;

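	/* build the 64-bit MAC value that the hypervisor expects from the
	 * 6-byte device address
	 */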
	memcpy(&mac_address, netdev->dev_addr, netdev->addr_len);
	mac_address = mac_address >> 16;

	rxq_desc.fields.flags_len = IBMVETH_BUF_VALID |
					adapter->rx_queue.queue_len;
	rxq_desc.fields.address = adapter->rx_queue.queue_dma;

	netdev_dbg(netdev, "buffer list @ 0x%p\n", adapter->buffer_list_addr);
	netdev_dbg(netdev, "filter list @ 0x%p\n", adapter->filter_list_addr);
	netdev_dbg(netdev, "receive q   @ 0x%p\n", adapter->rx_queue.queue_addr);

	h_vio_signal(adapter->vdev->unit_address, VIO_IRQ_DISABLE);

	lpar_rc = ibmveth_register_logical_lan(adapter, rxq_desc, mac_address);

	if (lpar_rc != H_SUCCESS) {
		netdev_err(netdev, "h_register_logical_lan failed with %ld\n",
			   lpar_rc);
		netdev_err(netdev, "buffer TCE:0x%llx filter TCE:0x%llx rxq "
			   "desc:0x%llx MAC:0x%llx\n",
				     adapter->buffer_list_dma,
				     adapter->filter_list_dma,
				     rxq_desc.desc,
				     mac_address);
		rc = -ENONET;
		goto err_out;
	}

	for (i = 0; i < IBMVETH_NUM_BUFF_POOLS; i++) {
		if (!adapter->rx_buff_pool[i].active)
			continue;
		if (ibmveth_alloc_buffer_pool(&adapter->rx_buff_pool[i])) {
			netdev_err(netdev, "unable to alloc pool\n");
			adapter->rx_buff_pool[i].active = 0;
			rc = -ENOMEM;
			goto err_out;
		}
	}

	netdev_dbg(netdev, "registering irq 0x%x\n", netdev->irq);
	rc = request_irq(netdev->irq, ibmveth_interrupt, 0, netdev->name,
			 netdev);
	if (rc != 0) {
		netdev_err(netdev, "unable to request irq 0x%x, rc %d\n",
			   netdev->irq, rc);
		do {
			lpar_rc = h_free_logical_lan(adapter->vdev->unit_address);
		} while (H_IS_LONG_BUSY(lpar_rc) || (lpar_rc == H_BUSY));

		goto err_out;
	}

	adapter->bounce_buffer =
	    kmalloc(netdev->mtu + IBMVETH_BUFF_OH, GFP_KERNEL);
	if (!adapter->bounce_buffer) {
		netdev_err(netdev, "unable to allocate bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}
	adapter->bounce_buffer_dma =
	    dma_map_single(&adapter->vdev->dev, adapter->bounce_buffer,
			   netdev->mtu + IBMVETH_BUFF_OH, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, adapter->bounce_buffer_dma)) {
		netdev_err(netdev, "unable to map bounce buffer\n");
		rc = -ENOMEM;
		goto err_out_free_irq;
	}

	netdev_dbg(netdev, "initial replenish cycle\n");
	ibmveth_interrupt(netdev->irq, netdev);

	netif_start_queue(netdev);

	netdev_dbg(netdev, "open complete\n");

	return 0;

err_out_free_irq:
	free_irq(netdev->irq, netdev);
err_out:
	ibmveth_cleanup(adapter);
	napi_disable(&adapter->napi);
	return rc;
}
Code example #23
File: pci_main_dev.c Project: jing-git/rt-n56u
static int rt2860_resume(
	struct pci_dev *pci_dev)
{
	struct net_device *net_dev = pci_get_drvdata(pci_dev);
	PRTMP_ADAPTER pAd = (PRTMP_ADAPTER)NULL;

#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)
	INT32 retval;


	// set the power state of a PCI device
	// PCI has 4 power states, D0 (normal) ~ D3 (less power)
	// in include/linux/pci.h, you can find that
	// #define PCI_D0          ((pci_power_t __force) 0)
	// #define PCI_D1          ((pci_power_t __force) 1)
	// #define PCI_D2          ((pci_power_t __force) 2)
	// #define PCI_D3hot       ((pci_power_t __force) 3)
	// #define PCI_D3cold      ((pci_power_t __force) 4)
	// #define PCI_UNKNOWN     ((pci_power_t __force) 5)
	// #define PCI_POWER_ERROR ((pci_power_t __force) -1)
	retval = pci_set_power_state(pci_dev, PCI_D0);

	// restore the saved state of a PCI device
	pci_restore_state(pci_dev);

	// initialize device before it's used by a driver
	if (pci_enable_device(pci_dev))
	{
		printk("pci enable fail!\n");
		return 0;
	}
#endif

	DBGPRINT(RT_DEBUG_TRACE, ("===> rt2860_resume()\n"));

	if (net_dev == NULL)
	{
		DBGPRINT(RT_DEBUG_ERROR, ("net_dev == NULL!\n"));
	}
	else
		GET_PAD_FROM_NET_DEV(pAd, net_dev);

	if (pAd != NULL)
	{
		/* we can not use IFF_UP because ra0 down but ra1 up */
		/* and 1 suspend/resume function for 1 module, not for each interface */
		/* so Linux will call suspend/resume function once */
		if (VIRTUAL_IF_NUM(pAd) > 0)
		{
			// mark device as attached from system and restart if needed
			netif_device_attach(net_dev);

			if (rt28xx_open((PNET_DEV)net_dev) != 0)
			{
				// open fail
				DBGPRINT(RT_DEBUG_TRACE, ("<=== rt2860_resume()\n"));
				return 0;
			}

			// increase MODULE use count
			RT_MOD_INC_USE_COUNT();

			RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_HALT_IN_PROGRESS);
			RTMP_CLEAR_FLAG(pAd, fRTMP_ADAPTER_RADIO_OFF);

			netif_start_queue(net_dev);
			netif_carrier_on(net_dev);
			netif_wake_queue(net_dev);
		}
	}

	DBGPRINT(RT_DEBUG_TRACE, ("<=== rt2860_resume()\n"));
	return 0;
}
Code example #24
File: wrap_ev.c Project: johnny/CobraDroidBeta
void zfLnxConnectNotify(zdev_t* dev, u16_t status, u16_t* bssid)
{
    union iwreq_data wreq;
    u8_t *addr = (u8_t *) bssid;
    struct usbdrv_private *macp = dev->ml_priv;

    if (bssid != NULL)
    {
        memset(&wreq, 0, sizeof(wreq));
        if (status == ZM_STATUS_MEDIA_CONNECT)
            memcpy(wreq.addr.sa_data, bssid, ETH_ALEN);
        wreq.addr.sa_family = ARPHRD_ETHER;

        if (status == ZM_STATUS_MEDIA_CONNECT)
        {
#ifdef ZM_CONFIG_BIG_ENDIAN
            printk(KERN_DEBUG "Connected to AP, MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
                    addr[1], addr[0], addr[3], addr[2], addr[5], addr[4]);
#else
            printk(KERN_DEBUG "Connected to AP, MAC: %02x:%02x:%02x:%02x:%02x:%02x\n",
                    addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]);
#endif

            netif_start_queue(dev);
        }
        else if ((status == ZM_STATUS_MEDIA_DISCONNECT) ||
                 (status == ZM_STATUS_MEDIA_DISABLED) ||
                 (status == ZM_STATUS_MEDIA_CONNECTION_DISABLED) ||
	         (status == ZM_STATUS_MEDIA_CONNECTION_RESET) ||
	         (status == ZM_STATUS_MEDIA_RESET) ||
	         (status == ZM_STATUS_MEDIA_DISCONNECT_DEAUTH) ||
	         (status == ZM_STATUS_MEDIA_DISCONNECT_DISASOC) ||
	         (status == ZM_STATUS_MEDIA_DISCONNECT_BEACON_MISS) ||
                 (status == ZM_STATUS_MEDIA_DISCONNECT_NOT_FOUND) ||
	         (status == ZM_STATUS_MEDIA_DISCONNECT_TIMEOUT))
        {
            printk(KERN_DEBUG "Disconnection Notify\n");

            netif_stop_queue(dev);
        }

	/* Save the connected status */
	macp->adapterState = status;

        if(zfiWlanQueryWlanMode(dev) == ZM_MODE_INFRASTRUCTURE) {
        //            //wireless_send_event(dev, SIOCGIWSCAN, &wreq, NULL);
            wireless_send_event(dev, SIOCGIWAP, &wreq, NULL);
        }
#if WIRELESS_EXT >= 15
        else if(zfiWlanQueryWlanMode(dev) == ZM_MODE_AP) {
            //if (port == 0)
            //{
                wireless_send_event(dev, IWEVREGISTERED, &wreq, NULL);
            //}
            //else
            //{
            //    /* Check whether the VAP device is valid */
            //    if (vap[port].dev != NULL)
            //    {
            //        wireless_send_event(vap[port].dev, IWEVREGISTERED, &wreq, NULL);
            //    }
            //    else
            //    {
            //        printk(KERN_ERR "Can' find a valid VAP device, port: %d\n", port);
            //    }
            //}
        }
#endif
    }
    //return 0;
}
Code example #25
File: sa1100_ir.c Project: maraz/linux-2.6
static int sa1100_irda_start(struct net_device *dev)
{
	struct sa1100_irda *si = dev->priv;
	int err;

	si->speed = 9600;

	err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev);
	if (err)
		goto err_irq;

	err = sa1100_request_dma(DMA_Ser2HSSPRd, "IrDA receive",
				 NULL, NULL, &si->rxdma);
	if (err)
		goto err_rx_dma;

	err = sa1100_request_dma(DMA_Ser2HSSPWr, "IrDA transmit",
				 sa1100_irda_txdma_irq, dev, &si->txdma);
	if (err)
		goto err_tx_dma;

	/*
	 * The interrupt must remain disabled for now.
	 */
	disable_irq(dev->irq);

	/*
	 * Setup the serial port for the specified speed.
	 */
	err = sa1100_irda_startup(si);
	if (err)
		goto err_startup;

	/*
	 * Open a new IrLAP layer instance.
	 */
	si->irlap = irlap_open(dev, &si->qos, "sa1100");
	err = -ENOMEM;
	if (!si->irlap)
		goto err_irlap;

	/*
	 * Now enable the interrupt and start the queue
	 */
	si->open = 1;
	sa1100_set_power(si, power_level); /* low power mode */
	enable_irq(dev->irq);
	netif_start_queue(dev);
	return 0;

err_irlap:
	si->open = 0;
	sa1100_irda_shutdown(si);
err_startup:
	sa1100_free_dma(si->txdma);
err_tx_dma:
	sa1100_free_dma(si->rxdma);
err_rx_dma:
	free_irq(dev->irq, dev);
err_irq:
	return err;
}
Code example #26
/*
 * Linux interface functions
 */
static int
sb1000_open(struct net_device *dev)
{
	char *name;
	int ioaddr[2], status;
	struct sb1000_private *lp = netdev_priv(dev);
	const unsigned short FirmwareVersion[] = {0x01, 0x01};

	ioaddr[0] = dev->base_addr;
	/* mem_start holds the second I/O address */
	ioaddr[1] = dev->mem_start;
	name = dev->name;

	/* initialize sb1000 */
	if ((status = sb1000_reset(ioaddr, name)))
		return status;
	ssleep(1);
	if ((status = sb1000_check_CRC(ioaddr, name)))
		return status;

	/* initialize private data before board can catch interrupts */
	lp->rx_skb[0] = NULL;
	lp->rx_skb[1] = NULL;
	lp->rx_skb[2] = NULL;
	lp->rx_skb[3] = NULL;
	lp->rx_dlen[0] = 0;
	lp->rx_dlen[1] = 0;
	lp->rx_dlen[2] = 0;
	lp->rx_dlen[3] = 0;
	lp->rx_frames = 0;
	lp->rx_error_count = 0;
	lp->rx_error_dpc_count = 0;
	lp->rx_session_id[0] = 0x50;
	lp->rx_session_id[1] = 0x48;
	lp->rx_session_id[2] = 0x44;
	lp->rx_session_id[3] = 0x42;
	lp->rx_frame_id[0] = 0;
	lp->rx_frame_id[1] = 0;
	lp->rx_frame_id[2] = 0;
	lp->rx_frame_id[3] = 0;
	if (request_irq(dev->irq, sb1000_interrupt, 0, "sb1000", dev)) {
		return -EAGAIN;
	}

	if (sb1000_debug > 2)
		printk(KERN_DEBUG "%s: Opening, IRQ %d\n", name, dev->irq);

	/* Activate board and check firmware version */
	udelay(1000);
	if ((status = sb1000_activate(ioaddr, name)))
		return status;
	udelay(0);
	if ((status = sb1000_get_firmware_version(ioaddr, name, version, 0)))
		return status;
	if (version[0] != FirmwareVersion[0] || version[1] != FirmwareVersion[1])
		printk(KERN_WARNING "%s: found firmware version %x.%02x "
			"(should be %x.%02x)\n", name, version[0], version[1],
			FirmwareVersion[0], FirmwareVersion[1]);


	netif_start_queue(dev);
	return 0;					/* Always succeed */
}
Code example #27
File: u_ether.c Project: Banjo0917/mt6577_kernel3.4
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = skb->len;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (!is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = container_of(dev->tx_reqs.next, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb)
			goto drop;

		length = skb->len;
	}
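	/* hand the skb's data buffer straight to the USB request; tx_complete()
	 * frees the skb once the controller is done with it.
	 */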
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
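	/* padding the transfer by one byte avoids a ZLP when the hardware can't send one */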
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;

	/* throttle high/super speed IRQ rate back slightly */
	if (gadget_is_dualspeed(dev->gadget))
		req->no_interrupt = (dev->gadget->speed == USB_SPEED_HIGH ||
				     dev->gadget->speed == USB_SPEED_SUPER)
			? ((atomic_read(&dev->tx_qlen) % qmult) != 0)
			: 0;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}
Code example #28
File: kingsun-sir.c Project: AshishNamdev/linux
/*
 * Function kingsun_net_open (dev)
 *
 *    Network device is taken up. Usually this is done by "ifconfig irda0 up"
 */
static int kingsun_net_open(struct net_device *netdev)
{
	struct kingsun_cb *kingsun = netdev_priv(netdev);
	int err = -ENOMEM;
	char hwname[16];

	/* At this point, urbs are NULL, and skb is NULL (see kingsun_probe) */
	kingsun->receiving = 0;

	/* Initialize for SIR to copy data directly into skb.  */
	kingsun->rx_buff.in_frame = FALSE;
	kingsun->rx_buff.state = OUTSIDE_FRAME;
	kingsun->rx_buff.truesize = IRDA_SKB_MAX_MTU;
	kingsun->rx_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
	if (!kingsun->rx_buff.skb)
		goto free_mem;

	skb_reserve(kingsun->rx_buff.skb, 1);
	kingsun->rx_buff.head = kingsun->rx_buff.skb->data;

	kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!kingsun->rx_urb)
		goto free_mem;

	kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!kingsun->tx_urb)
		goto free_mem;

	/*
	 * Now that everything should be initialized properly,
	 * Open new IrLAP layer instance to take care of us...
	 */
	sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
	kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
	if (!kingsun->irlap) {
		dev_err(&kingsun->usbdev->dev, "irlap_open failed\n");
		goto free_mem;
	}

	/* Start first reception */
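	/* the final argument to usb_fill_int_urb() is the polling interval
	 * for the interrupt IN endpoint (here, 1).
	 */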
	usb_fill_int_urb(kingsun->rx_urb, kingsun->usbdev,
			  usb_rcvintpipe(kingsun->usbdev, kingsun->ep_in),
			  kingsun->in_buf, kingsun->max_rx,
			  kingsun_rcv_irq, kingsun, 1);
	kingsun->rx_urb->status = 0;
	err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
	if (err) {
		dev_err(&kingsun->usbdev->dev,
			"first urb-submit failed: %d\n", err);
		goto close_irlap;
	}

	netif_start_queue(netdev);

	/* Situation at this point:
	   - all work buffers allocated
	   - urbs allocated and ready to fill
	   - max rx packet known (in max_rx)
	   - unwrap state machine initialized, in state outside of any frame
	   - receive request in progress
	   - IrLAP layer started, about to hand over packets to send
	 */

	return 0;

 close_irlap:
	irlap_close(kingsun->irlap);
 free_mem:
	if (kingsun->tx_urb) {
		usb_free_urb(kingsun->tx_urb);
		kingsun->tx_urb = NULL;
	}
	if (kingsun->rx_urb) {
		usb_free_urb(kingsun->rx_urb);
		kingsun->rx_urb = NULL;
	}
	if (kingsun->rx_buff.skb) {
		kfree_skb(kingsun->rx_buff.skb);
		kingsun->rx_buff.skb = NULL;
		kingsun->rx_buff.head = NULL;
	}
	return err;
}
Code example #29
int usbnet_open (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			retval;
	struct driver_info	*info = dev->driver_info;

	if ((retval = usb_autopm_get_interface(dev->intf)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "resumption fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done_nopm;
	}

	// put into "known safe" state
	if (info->reset && (retval = info->reset (dev)) < 0) {
		netif_info(dev, ifup, dev->net,
			   "open reset fail (%d) usbnet usb-%s-%s, %s\n",
			   retval,
			   dev->udev->bus->bus_name,
			   dev->udev->devpath,
			   info->description);
		goto done;
	}

	// insist peer be connected
	if (info->check_connect && (retval = info->check_connect (dev)) < 0) {
		netif_dbg(dev, ifup, dev->net, "can't open; %d\n", retval);
		goto done;
	}

	/* start any status interrupt transfer */
	if (dev->interrupt) {
		retval = usb_submit_urb (dev->interrupt, GFP_KERNEL);
		if (retval < 0) {
			netif_err(dev, ifup, dev->net,
				  "intr submit %d\n", retval);
			goto done;
		}
	}

	set_bit(EVENT_DEV_OPEN, &dev->flags);
	netif_start_queue (net);
	netif_info(dev, ifup, dev->net,
		   "open: enable queueing (rx %d, tx %d) mtu %d %s framing\n",
		   (int)RX_QLEN(dev), (int)TX_QLEN(dev),
		   dev->net->mtu,
		   (dev->driver_info->flags & FLAG_FRAMING_NC) ? "NetChip" :
		   (dev->driver_info->flags & FLAG_FRAMING_GL) ? "GeneSys" :
		   (dev->driver_info->flags & FLAG_FRAMING_Z) ? "Zaurus" :
		   (dev->driver_info->flags & FLAG_FRAMING_RN) ? "RNDIS" :
		   (dev->driver_info->flags & FLAG_FRAMING_AX) ? "ASIX" :
		   "simple");

	// delay posting reads until we're fully open
	tasklet_schedule (&dev->bh);
	if (info->manage_power) {
		retval = info->manage_power(dev, 1);
		if (retval < 0)
			goto done;
		usb_autopm_put_interface(dev->intf);
	}
	return retval;

done:
	usb_autopm_put_interface(dev->intf);
done_nopm:
	return retval;
}
Code example #30
File: niit.c Project: SchallumPierre/fabfi
static int niit_open(struct net_device *dev)
{
	netif_start_queue(dev);
	return 0;
}