/*
 * ether1_xmit_done - retire completed transmit commands.
 *
 * Walks the on-card command chain starting at tx_tail, accounting the
 * status of each completed CMD_TX block, then tries to reclaim buffer
 * space and re-enable the transmit queue.  The chain alternates
 * TX -> NOP -> TX -> ... in card memory (read back via ether1_readbuffer).
 */
static void
ether1_xmit_done (struct net_device *dev)
{
	nop_t nop;
	int caddr, tst;

	caddr = priv(dev)->tx_tail;

again:
	/* Fetch the command block at caddr from card memory. */
	ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);

	switch (nop.nop_command & CMD_MASK) {
	case CMD_TDR:
		/* special case: a TDR (cable test) command completed.  If the
		 * SCB still points at a command list, restart the command and
		 * receive units and ring the channel-attention doorbell. */
		if (ether1_readw(dev, SCB_ADDR, scb_t, scb_cbl_offset, NORMALIRQS)
				!= (unsigned short)I82586_NULL) {
			ether1_writew(dev, SCB_CMDCUCSTART | SCB_CMDRXSTART, SCB_ADDR,
				      scb_t, scb_command, NORMALIRQS);
			writeb(CTRL_CA, REG_CONTROL);
		}
		/* Park the tail on the permanent NOP block. */
		priv(dev)->tx_tail = NOP_ADDR;
		return;

	case CMD_NOP:
		if (nop.nop_link == caddr) {
			/* Self-linked NOP: the chain is idle.  Only expected
			 * while the interface is initialising. */
			if (priv(dev)->initialising == 0)
				printk (KERN_WARNING "%s: strange command complete with no tx command!\n", dev->name);
			else
				priv(dev)->initialising = 0;
			return;
		}
		/* NOTE(review): unreachable - the self-link case already
		 * returned above; harmless duplicate guard. */
		if (caddr == nop.nop_link)
			return;
		caddr = nop.nop_link;
		goto again;

	case CMD_TX:
		if (nop.nop_status & STAT_COMPLETE)
			break;
		/* TX command at the tail is not complete - the chain is in
		 * an inconsistent state; ask for a restart. */
		printk (KERN_ERR "%s: strange command complete without completed command\n", dev->name);
		priv(dev)->restart = 1;
		return;

	default:
		printk (KERN_WARNING "%s: strange command %d complete! (offset %04X)",
			dev->name, nop.nop_command & CMD_MASK, caddr);
		priv(dev)->restart = 1;
		return;
	}

	/* Account every completed TX command, following TX -> NOP -> TX
	 * links until an incomplete or malformed block is found. */
	while (nop.nop_status & STAT_COMPLETE) {
		if (nop.nop_status & STAT_OK) {
			priv(dev)->stats.tx_packets ++;
			priv(dev)->stats.collisions += (nop.nop_status & STAT_COLLISIONS);
		} else {
			priv(dev)->stats.tx_errors ++;

			if (nop.nop_status & STAT_COLLAFTERTX)
				priv(dev)->stats.collisions ++;
			if (nop.nop_status & STAT_NOCARRIER)
				priv(dev)->stats.tx_carrier_errors ++;
			if (nop.nop_status & STAT_TXLOSTCTS)
				printk (KERN_WARNING "%s: cts lost\n", dev->name);
			if (nop.nop_status & STAT_TXSLOWDMA)
				priv(dev)->stats.tx_fifo_errors ++;
			if (nop.nop_status & STAT_COLLEXCESSIVE)
				priv(dev)->stats.collisions += 16;
		}

		/* A TX block must link onward to a NOP, never to itself. */
		if (nop.nop_link == caddr) {
			printk (KERN_ERR "%s: tx buffer chaining error: tx command points to itself\n", dev->name);
			break;
		}

		caddr = nop.nop_link;
		ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);
		if ((nop.nop_command & CMD_MASK) != CMD_NOP) {
			printk (KERN_ERR "%s: tx buffer chaining error: no nop after tx command\n", dev->name);
			break;
		}

		/* Self-linked NOP marks the end of the chain. */
		if (caddr == nop.nop_link)
			break;

		caddr = nop.nop_link;
		ether1_readbuffer (dev, &nop, caddr, NOP_SIZE);
		if ((nop.nop_command & CMD_MASK) != CMD_TX) {
			printk (KERN_ERR "%s: tx buffer chaining error: no tx command after nop\n", dev->name);
			break;
		}
	}
	priv(dev)->tx_tail = caddr;

	/* Probe for free buffer space without moving tx_head (txalloc
	 * advances it, so save and restore), and wake the queue if a
	 * maximum-sized frame would now fit. */
	caddr = priv(dev)->tx_head;
	tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN);
	priv(dev)->tx_head = caddr;
	if (tst != -1)
		netif_wake_queue(dev);
}
/*
 * rtusb_resume - USB resume handler for the Ralink RT28xx USB wifi adapter.
 *
 * Brings the radio back up after a system or autosuspend resume.  With
 * USB_SUPPORT_SELECTIVE_SUSPEND the function turns the radio on and
 * returns early; otherwise it either disables Wake-on-WLAN (if it was
 * armed) or performs a full radio-on + netif reattach sequence.
 * Always returns 0.
 */
static int rtusb_resume(struct usb_interface *intf)
{
	struct net_device *net_dev;
#if (defined(WOW_SUPPORT) && defined(RTMP_MAC_USB)) || defined(NEW_WOW_SUPPORT)
	UCHAR WOWRun;
#endif /* (defined(WOW_SUPPORT) && defined(RTMP_MAC_USB)) || defined(NEW_WOW_SUPPORT) */
#ifdef USB_SUPPORT_SELECTIVE_SUSPEND
	INT pm_usage_cnt;
	UCHAR Flag;
#endif /* USB_SUPPORT_SELECTIVE_SUSPEND */
	VOID *pAd = usb_get_intfdata(intf);

	DBGPRINT(RT_DEBUG_TRACE, ("%s()=>\n", __func__));

	/* Nothing to do if the adapter never completed start-up. */
	if (!RTMP_TEST_FLAG((PRTMP_ADAPTER)pAd, fRTMP_ADAPTER_START_UP))
		return 0;

#ifdef RESUME_WITH_USB_RESET_SUPPORT
	/* If the sync anchor moved, a real suspend (with USB reset)
	 * happened before; skip the normal resume path. */
	if (last_pm_cnt != os_get_sync_anchor()) {
		DBGPRINT(RT_DEBUG_ERROR, ("real suspend before\n"));
		return 0;
	}
#endif /* RESUME_WITH_USB_RESET_SUPPORT */

#ifdef USB_SUPPORT_SELECTIVE_SUSPEND
	/* Autosuspend resume: re-take the PM reference if it was dropped,
	 * switch the radio on, and return - the WOW/attach code below is
	 * compiled out of the fast path by this early return. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
	pm_usage_cnt = atomic_read(&intf->pm_usage_cnt);
#else
	pm_usage_cnt = intf->pm_usage_cnt;
#endif
	if(pm_usage_cnt <= 0)
		usb_autopm_get_interface(intf);

	RTMP_DRIVER_ADAPTER_RT28XX_CMD_RADIO_ON(pAd);
	DBGPRINT(RT_DEBUG_ERROR, ("%s():=>autosuspend\n", __func__));
	return 0;
#endif /* USB_SUPPORT_SELECTIVE_SUSPEND */

#if (defined(WOW_SUPPORT) && defined(RTMP_MAC_USB)) || defined(NEW_WOW_SUPPORT)
	/* If Wake-on-WLAN was armed, just disarm it; otherwise fall
	 * through to the full radio-on sequence below. */
	RTMP_DRIVER_ADAPTER_RT28XX_WOW_RUNSTATUS(pAd, &WOWRun);
	if (WOWRun)
		RTMP_DRIVER_ADAPTER_RT28XX_WOW_DISABLE(pAd);
	else
#endif /* (defined(WOW_SUPPORT) && defined(RTMP_MAC_USB)) || defined(NEW_WOW_SUPPORT) */
	{
		DBGPRINT(RT_DEBUG_ERROR, ("%s :radio_on \n", __func__));
		RTMP_DRIVER_ADAPTER_RT28XX_CMD_RADIO_ON(pAd);
		/* Reattach the net device and restart the stack's queue. */
		RTMP_DRIVER_NET_DEV_GET(pAd, &net_dev);
		netif_device_attach(net_dev);
		netif_start_queue(net_dev);
		netif_carrier_on(net_dev);
		netif_wake_queue(net_dev);
	}

	RTMP_DRIVER_USB_RESUME(pAd);

	DBGPRINT(RT_DEBUG_TRACE, ("<=%s()\n", __func__));
	return 0;
}
/*
 * el_interrupt - 3c501 interrupt handler.
 *
 * The 3c501 is half-duplex at the host interface: it is either in
 * transmit mode (lp->txing) or receive mode, never both.  This handler
 * reads the auxiliary status, services whichever mode the board is in
 * (retrying collisions, accounting errors, receiving frames), and
 * finally flips the board back into receive mode.
 */
static void el_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct net_local *lp;
	int ioaddr;
	int axsr;			/* Aux. status reg. */

	ioaddr = dev->base_addr;
	lp = (struct net_local *)dev->priv;

	spin_lock(&lp->lock);

	/*
	 *	What happened ?
	 */

	axsr = inb(AX_STATUS);

	/*
	 *	Log it
	 */

	if (el_debug > 3)
		printk(KERN_DEBUG "%s: el_interrupt() aux=%#02x", dev->name, axsr);

	/* loading==1 means a packet is being copied to the board, which
	 * only makes sense while txing - warn about the mismatch. */
	if(lp->loading==1 && !lp->txing)
		printk(KERN_WARNING "%s: Inconsistent state loading while not in tx\n",
			dev->name);

	if (lp->txing)
	{
		/*
		 *	Board in transmit mode. May be loading. If we are
		 *	loading we shouldn't have got this.
		 */

		int txsr = inb(TX_STATUS);

		if(lp->loading==1)
		{
			if(el_debug > 2)
			{
				printk(KERN_DEBUG "%s: Interrupt while loading [", dev->name);
				printk(KERN_DEBUG " txsr=%02x gp=%04x rp=%04x]\n", txsr, inw(GP_LOW),inw(RX_LOW));
			}
			/* The load was trampled on; the xmit path will see
			 * loading==2 and redo the copy. */
			lp->loading=2;		/* Force a reload */
			spin_unlock(&lp->lock);
			return;
		}

		if (el_debug > 6)
			printk(KERN_DEBUG " txsr=%02x gp=%04x rp=%04x", txsr, inw(GP_LOW),inw(RX_LOW));

		if ((axsr & 0x80) && (txsr & TX_READY) == 0)
		{
			/*
			 *	FIXME: is there a logic to whether to keep on trying or
			 *	reset immediately ?
			 */
			if(el_debug>1)
				printk(KERN_DEBUG "%s: Unusual interrupt during Tx, txsr=%02x axsr=%02x"
					" gp=%03x rp=%03x.\n", dev->name, txsr, axsr,
				inw(ioaddr + EL1_DATAPTR), inw(ioaddr + EL1_RXPTR));
			lp->txing = 0;
			netif_wake_queue(dev);
		}
		else if (txsr & TX_16COLLISIONS)
		{
			/*
			 *	Timed out
			 */
			if (el_debug)
				printk (KERN_DEBUG "%s: Transmit failed 16 times, Ethernet jammed?\n",dev->name);
			outb(AX_SYS, AX_CMD);
			lp->txing = 0;
			lp->stats.tx_aborted_errors++;
			netif_wake_queue(dev);
		}
		else if (txsr & TX_COLLISION)
		{
			/*
			 *	Retrigger xmit.
			 */

			if (el_debug > 6)
				printk(KERN_DEBUG " retransmitting after a collision.\n");
			/*
			 *	Poor little chip can't reset its own start pointer
			 */

			outb(AX_SYS, AX_CMD);
			outw(lp->tx_pkt_start, GP_LOW);
			outb(AX_XMIT, AX_CMD);
			lp->stats.collisions++;
			/* Stay in transmit mode - do not fall through to the
			 * receive-mode switch at the bottom. */
			spin_unlock(&lp->lock);
			return;
		}
		else
		{
			/*
			 *	It worked.. we will now fall through and receive
			 */
			lp->stats.tx_packets++;
			if (el_debug > 6)
				printk(KERN_DEBUG " Tx succeeded %s\n",
		 			(txsr & TX_RDY) ? "." : "but tx is busy!");
			/*
			 *	This is safe the interrupt is atomic WRT itself.
			 */

			lp->txing = 0;
			netif_wake_queue(dev);	/* In case more to transmit */
		}
	}
	else
	{
    		/*
    		 *	In receive mode.
    		 */

		int rxsr = inb(RX_STATUS);
		if (el_debug > 5)
			printk(KERN_DEBUG " rxsr=%02x txsr=%02x rp=%04x", rxsr, inb(TX_STATUS),inw(RX_LOW));
		/*
		 *	Just reading rx_status fixes most errors.
		 */
		if (rxsr & RX_MISSED)
			lp->stats.rx_missed_errors++;
		else if (rxsr & RX_RUNT)
		{	/* Handled to avoid board lock-up. */
			lp->stats.rx_length_errors++;
			if (el_debug > 5)
				printk(KERN_DEBUG " runt.\n");
		}
		else if (rxsr & RX_GOOD)
		{
			/*
			 *	Receive worked.
			 */
			el_receive(dev);
		}
		else
		{
			/*
			 *	Nothing?  Something is broken!
			 */
			if (el_debug > 2)
				printk(KERN_DEBUG "%s: No packet seen, rxsr=%02x **resetting 3c501***\n",
					dev->name, rxsr);
			el_reset(dev);
		}
		if (el_debug > 3)
			printk(KERN_DEBUG ".\n");
	}

	/*
	 *	Move into receive mode
	 */

	outb(AX_RX, AX_CMD);
	outw(0x00, RX_BUF_CLR);
	inb(RX_STATUS);		/* Be certain that interrupts are cleared. */
	inb(TX_STATUS);
	spin_unlock(&lp->lock);
	return;
}
/* work that cannot be done in interrupt context uses keventd.
 *
 * NOTE: with 2.5 we could do more of this using completion callbacks,
 * especially now that control transfers can be queued.
 */
/*
 * kevent - usbnet deferred-event worker.
 *
 * Services the EVENT_* bits set from interrupt/completion context:
 * clearing TX/RX endpoint halts (usb_clear_halt needs thread context),
 * refilling the RX ring after an allocation failure, and running the
 * minidriver's link_reset hook.  Each halt-clear path takes an autopm
 * reference around the USB traffic.
 *
 * NOTE(review): the fail_pipe/fail_halt/skip_reset labels jump INTO the
 * error-report branches, so an autopm failure reports a stale/zero
 * "status" unconditionally (the netif_msg_* checks are bypassed).  This
 * matches the historic upstream structure - confirm before restructuring.
 */
static void
kevent (struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit (EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt (dev->udev, dev->out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err (dev))
fail_pipe:
				netdev_err(dev->net, "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue (dev->net);
		}
	}
	if (test_bit (EVENT_RX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt (dev->udev, dev->in);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err (dev))
fail_halt:
				netdev_err(dev->net, "can't clear rx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_RX_HALT, &dev->flags);
			/* let the RX tasklet resubmit URBs */
			tasklet_schedule (&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;
		int resched = 1;

		if (netif_running (dev->net))
			urb = usb_alloc_urb (0, GFP_KERNEL);
		else
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
			status = usb_autopm_get_interface(dev->intf);
			if (status < 0) {
				usb_free_urb(urb);
				goto fail_lowmem;
			}
			/* -ENOLINK means rx processing is being torn down;
			 * don't reschedule the tasklet in that case. */
			if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
				resched = 0;
			usb_autopm_put_interface(dev->intf);
fail_lowmem:
			if (resched)
				tasklet_schedule (&dev->bh);
		}
	}

	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info	*info = dev->driver_info;
		int			retval = 0;

		clear_bit (EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
				    retval,
				    dev->udev->bus->bus_name,
				    dev->udev->devpath,
				    info->description);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (dev->flags)
		netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
}
/* Transmission of the current buffer is finished: let the network stack
 * queue further frames to this device. */
static inline void x25_asy_unlock(struct x25_asy *sl)
{
	netif_wake_queue(sl->dev);
}
/*
 * islpci_interrupt - prism54 (ISL38xx) shared-IRQ handler.
 *
 * Filters out interrupts belonging to other devices on the shared line
 * (device asleep, or no enabled source pending), acknowledges pending
 * sources, drains the management and data-low receive queues, wakes the
 * transmit queue when the data-low TX queue has room again, and tracks
 * the device's power-save state transitions (sleep <-> ready).
 */
irqreturn_t
islpci_interrupt(int irq, void *config, struct pt_regs *regs)
{
	u32 reg;
	islpci_private *priv = config;
	struct net_device *ndev = priv->ndev;
	void __iomem *device = priv->device_base;
	int powerstate = ISL38XX_PSM_POWERSAVE_STATE;

	/* lock the interrupt handler */
	spin_lock(&priv->slock);

	/* received an interrupt request on a shared IRQ line
	 * first check whether the device is in sleep mode */
	reg = readl(device + ISL38XX_CTRL_STAT_REG);
	if (reg & ISL38XX_CTRL_STAT_SLEEPMODE)
		/* device is in sleep mode, IRQ was generated by someone else */
	{
#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n");
#endif
		spin_unlock(&priv->slock);
		return IRQ_NONE;
	}

	/* check whether there is any source of interrupt on the device */
	reg = readl(device + ISL38XX_INT_IDENT_REG);

	/* also check the contents of the Interrupt Enable Register, because this
	 * will filter out interrupt sources from other devices on the same irq ! */
	reg &= readl(device + ISL38XX_INT_EN_REG);
	reg &= ISL38XX_INT_SOURCES;

	if (reg != 0) {
		if (islpci_get_state(priv) != PRV_STATE_SLEEP)
			powerstate = ISL38XX_PSM_ACTIVE_STATE;

		/* reset the request bits in the Identification register */
		isl38xx_w32_flush(device, reg, ISL38XX_INT_ACK_REG);

#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_FUNCTION_CALLS,
		      "IRQ: Identification register 0x%p 0x%x \n", device, reg);
#endif

		/* check for each bit in the register separately */
		if (reg & ISL38XX_INT_IDENT_UPDATE) {
#if VERBOSE > SHOW_ERROR_MESSAGES
			/* Queue has been updated */
			DEBUG(SHOW_TRACING, "IRQ: Update flag \n");

			DEBUG(SHOW_QUEUE_INDEXES,
			      "CB drv Qs: [%i][%i][%i][%i][%i][%i]\n",
			      le32_to_cpu(priv->control_block->
					  driver_curr_frag[0]),
			      le32_to_cpu(priv->control_block->
					  driver_curr_frag[1]),
			      le32_to_cpu(priv->control_block->
					  driver_curr_frag[2]),
			      le32_to_cpu(priv->control_block->
					  driver_curr_frag[3]),
			      le32_to_cpu(priv->control_block->
					  driver_curr_frag[4]),
			      le32_to_cpu(priv->control_block->
					  driver_curr_frag[5])
			    );

			DEBUG(SHOW_QUEUE_INDEXES,
			      "CB dev Qs: [%i][%i][%i][%i][%i][%i]\n",
			      le32_to_cpu(priv->control_block->
					  device_curr_frag[0]),
			      le32_to_cpu(priv->control_block->
					  device_curr_frag[1]),
			      le32_to_cpu(priv->control_block->
					  device_curr_frag[2]),
			      le32_to_cpu(priv->control_block->
					  device_curr_frag[3]),
			      le32_to_cpu(priv->control_block->
					  device_curr_frag[4]),
			      le32_to_cpu(priv->control_block->
					  device_curr_frag[5])
			    );
#endif

			/* cleanup the data low transmit queue */
			islpci_eth_cleanup_transmit(priv, priv->control_block);

			/* device is in active state, update the
			 * powerstate flag if necessary */
			powerstate = ISL38XX_PSM_ACTIVE_STATE;

			/* check all three queues in priority order
			 * call the PIMFOR receive function until the
			 * queue is empty */
			if (isl38xx_in_queue(priv->control_block,
						ISL38XX_CB_RX_MGMTQ) != 0) {
#if VERBOSE > SHOW_ERROR_MESSAGES
				DEBUG(SHOW_TRACING,
				      "Received frame in Management Queue\n");
#endif
				islpci_mgt_receive(ndev);

				islpci_mgt_cleanup_transmit(ndev);

				/* Refill slots in receive queue */
				islpci_mgmt_rx_fill(ndev);

				/* no need to trigger the device, next
                                   islpci_mgt_transaction does it */
			}

			while (isl38xx_in_queue(priv->control_block,
						ISL38XX_CB_RX_DATA_LQ) != 0) {
#if VERBOSE > SHOW_ERROR_MESSAGES
				DEBUG(SHOW_TRACING,
				      "Received frame in Data Low Queue \n");
#endif
				islpci_eth_receive(priv);
			}

			/* check whether the data transmit queues were full */
			if (priv->data_low_tx_full) {
				/* check whether the transmit is not full anymore */
				if (ISL38XX_CB_TX_QSIZE -
				    isl38xx_in_queue(priv->control_block,
						     ISL38XX_CB_TX_DATA_LQ) >=
				    ISL38XX_MIN_QTHRESHOLD) {
					/* nope, the driver is ready for more network frames */
					netif_wake_queue(priv->ndev);

					/* reset the full flag */
					priv->data_low_tx_full = 0;
				}
			}
		}

		if (reg & ISL38XX_INT_IDENT_INIT) {
			/* Device has been initialized */
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING,
			      "IRQ: Init flag, device initialized \n");
#endif
			/* firmware init complete: release whoever is blocked
			 * in the reset path */
			wake_up(&priv->reset_done);
		}

		if (reg & ISL38XX_INT_IDENT_SLEEP) {
			/* Device intends to move to powersave state */
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "IRQ: Sleep flag \n");
#endif
			isl38xx_handle_sleep_request(priv->control_block,
						     &powerstate,
						     priv->device_base);
		}

		if (reg & ISL38XX_INT_IDENT_WAKEUP) {
			/* Device has been woken up to active state */
#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "IRQ: Wakeup flag \n");
#endif

			isl38xx_handle_wakeup(priv->control_block,
					      &powerstate, priv->device_base);
		}
	} else {
#if VERBOSE > SHOW_ERROR_MESSAGES
		DEBUG(SHOW_TRACING, "Assuming someone else called the IRQ\n");
#endif
		spin_unlock(&priv->slock);
		return IRQ_NONE;
	}

	/* sleep -> ready */
	if (islpci_get_state(priv) == PRV_STATE_SLEEP
	    && powerstate == ISL38XX_PSM_ACTIVE_STATE)
		islpci_set_state(priv, PRV_STATE_READY);

	/* !sleep -> sleep */
	if (islpci_get_state(priv) != PRV_STATE_SLEEP
	    && powerstate == ISL38XX_PSM_POWERSAVE_STATE)
		islpci_set_state(priv, PRV_STATE_SLEEP);

	/* unlock the interrupt handler */
	spin_unlock(&priv->slock);

	return IRQ_HANDLED;
}
/*
 * elp_interrupt - 3c505 interrupt handler.
 *
 * Two jobs per pass: (1) finish any completed DMA transfer (free the TX
 * skb or hand the received skb up the stack, then drain one backlogged
 * receive if queued); (2) read Primary Command Blocks (PCBs) from the
 * adapter and dispatch on the command code - received packets, command
 * completions, statistics, and transmit completions.  Loops up to 5
 * times while the adapter still signals ACRF/DONE.
 */
static irqreturn_t elp_interrupt(int irq, void *dev_id)
{
	int len;
	int dlen;
	int icount = 0;
	struct net_device *dev = dev_id;
	elp_device *adapter = netdev_priv(dev);
	unsigned long timeout;

	spin_lock(&adapter->lock);

	do {
		/*
		 * has a DMA transfer finished?
		 */
		if (inb_status(dev->base_addr) & DONE) {
			if (!adapter->dmaing)
				pr_warning("%s: phantom DMA completed\n", dev->name);

			if (elp_debug >= 3)
				pr_debug("%s: %s DMA complete, status %02x\n", dev->name,
					adapter->current_dma.direction ? "tx" : "rx",
					inb_status(dev->base_addr));

			outb_control(adapter->hcr_val & ~(DMAE | TCEN | DIR), dev);
			if (adapter->current_dma.direction) {
				/* TX DMA finished: the data is on the wire,
				 * drop our reference to the skb. */
				dev_kfree_skb_irq(adapter->current_dma.skb);
			} else {
				/* RX DMA finished: copy out of the bounce
				 * buffer (if used) and push up the stack. */
				struct sk_buff *skb = adapter->current_dma.skb;
				if (skb) {
					if (adapter->current_dma.target) {
					/* have already done the skb_put() */
						memcpy(adapter->current_dma.target, adapter->dma_buffer, adapter->current_dma.length);
					}
					skb->protocol = eth_type_trans(skb,dev);
					dev->stats.rx_bytes += skb->len;
					netif_rx(skb);
				}
			}
			adapter->dmaing = 0;
			/* Service one backlogged receive, if any; otherwise
			 * the DMA engine is free again. */
			if (adapter->rx_backlog.in != adapter->rx_backlog.out) {
				int t = adapter->rx_backlog.length[adapter->rx_backlog.out];
				adapter->rx_backlog.out = backlog_next(adapter->rx_backlog.out);
				if (elp_debug >= 2)
					pr_debug("%s: receiving backlogged packet (%d)\n", dev->name, t);
				receive_packet(dev, t);
			} else {
				adapter->busy = 0;
			}
		} else {
			/* has one timed out? */
			check_3c505_dma(dev);
		}

		/*
		 * receive a PCB from the adapter
		 */
		timeout = jiffies + 3*HZ/100;
		while ((inb_status(dev->base_addr) & ACRF) != 0 && time_before(jiffies, timeout)) {
			if (receive_pcb(dev, &adapter->irx_pcb)) {
				switch (adapter->irx_pcb.command)
				{
				case 0:
					break;
					/*
					 * received a packet - this must be handled fast
					 */
				case 0xff:
				case CMD_RECEIVE_PACKET_COMPLETE:
					/* if the device isn't open, don't pass packets up the stack */
					if (!netif_running(dev))
						break;
					len = adapter->irx_pcb.data.rcv_resp.pkt_len;
					dlen = adapter->irx_pcb.data.rcv_resp.buf_len;
					if (adapter->irx_pcb.data.rcv_resp.timeout != 0) {
						pr_err("%s: interrupt - packet not received correctly\n", dev->name);
					} else {
						if (elp_debug >= 3) {
							pr_debug("%s: interrupt - packet received of length %i (%i)\n",
								dev->name, len, dlen);
						}
						/* 0xff marks a packet that must be
						 * queued (DMA engine busy). */
						if (adapter->irx_pcb.command == 0xff) {
							if (elp_debug >= 2)
								pr_debug("%s: adding packet to backlog (len = %d)\n",
									dev->name, dlen);
							adapter->rx_backlog.length[adapter->rx_backlog.in] = dlen;
							adapter->rx_backlog.in = backlog_next(adapter->rx_backlog.in);
						} else {
							receive_packet(dev, dlen);
						}
						if (elp_debug >= 3)
							pr_debug("%s: packet received\n", dev->name);
					}
					break;

					/*
					 * 82586 configured correctly
					 */
				case CMD_CONFIGURE_82586_RESPONSE:
					adapter->got[CMD_CONFIGURE_82586] = 1;
					if (elp_debug >= 3)
						pr_debug("%s: interrupt - configure response received\n", dev->name);
					break;

					/*
					 * Adapter memory configuration
					 */
				case CMD_CONFIGURE_ADAPTER_RESPONSE:
					adapter->got[CMD_CONFIGURE_ADAPTER_MEMORY] = 1;
					if (elp_debug >= 3)
						pr_debug("%s: Adapter memory configuration %s.\n", dev->name,
						       adapter->irx_pcb.data.failed ? "failed" : "succeeded");
					break;

					/*
					 * Multicast list loading
					 */
				case CMD_LOAD_MULTICAST_RESPONSE:
					adapter->got[CMD_LOAD_MULTICAST_LIST] = 1;
					if (elp_debug >= 3)
						pr_debug("%s: Multicast address list loading %s.\n", dev->name,
						       adapter->irx_pcb.data.failed ? "failed" : "succeeded");
					break;

					/*
					 * Station address setting
					 */
				case CMD_SET_ADDRESS_RESPONSE:
					adapter->got[CMD_SET_STATION_ADDRESS] = 1;
					if (elp_debug >= 3)
						pr_debug("%s: Ethernet address setting %s.\n", dev->name,
						       adapter->irx_pcb.data.failed ? "failed" : "succeeded");
					break;


					/*
					 * received board statistics
					 */
				case CMD_NETWORK_STATISTICS_RESPONSE:
					dev->stats.rx_packets += adapter->irx_pcb.data.netstat.tot_recv;
					dev->stats.tx_packets += adapter->irx_pcb.data.netstat.tot_xmit;
					dev->stats.rx_crc_errors += adapter->irx_pcb.data.netstat.err_CRC;
					dev->stats.rx_frame_errors += adapter->irx_pcb.data.netstat.err_align;
					dev->stats.rx_fifo_errors += adapter->irx_pcb.data.netstat.err_ovrrun;
					dev->stats.rx_over_errors += adapter->irx_pcb.data.netstat.err_res;
					adapter->got[CMD_NETWORK_STATISTICS] = 1;
					if (elp_debug >= 3)
						pr_debug("%s: interrupt - statistics response received\n", dev->name);
					break;

					/*
					 * sent a packet
					 */
				case CMD_TRANSMIT_PACKET_COMPLETE:
					if (elp_debug >= 3)
						pr_debug("%s: interrupt - packet sent\n", dev->name);
					if (!netif_running(dev))
						break;
					switch (adapter->irx_pcb.data.xmit_resp.c_stat) {
					case 0xffff:
						dev->stats.tx_aborted_errors++;
						pr_info("%s: transmit timed out, network cable problem?\n", dev->name);
						break;
					case 0xfffe:
						dev->stats.tx_fifo_errors++;
						pr_info("%s: transmit timed out, FIFO underrun\n", dev->name);
						break;
					}
					netif_wake_queue(dev);
					break;

					/*
					 * some unknown PCB
					 */
				default:
					pr_debug("%s: unknown PCB received - %2.2x\n",
						dev->name, adapter->irx_pcb.command);
					break;
				}
			} else {
				pr_warning("%s: failed to read PCB on interrupt\n", dev->name);
				adapter_reset(dev);
			}
		}

	} while (icount++ < 5 && (inb_status(dev->base_addr) & (ACRF | DONE)));

	/* Keep receive buffers posted on the adapter. */
	prime_rx(dev);

	/*
	 * indicate no longer in interrupt routine
	 */
	spin_unlock(&adapter->lock);
	return IRQ_HANDLED;
}
/*
 * Called when transmit work has completed: if the device queue was
 * stopped and the interface can be scheduled for receive again, allow
 * the stack to resume queueing packets.
 */
void xenvif_notify_tx_completion(struct xenvif *vif)
{
	if (!netif_queue_stopped(vif->dev))
		return;

	if (xenvif_rx_schedulable(vif))
		netif_wake_queue(vif->dev);
}
/* work that cannot be done in interrupt context uses keventd.
 *
 * NOTE: with 2.5 we could do more of this using completion callbacks,
 * especially now that control transfers can be queued.
 */
/*
 * kevent - usbnet deferred-event worker (vendor variant with HTC
 * instrumentation on the RX-halt path).
 *
 * Services the EVENT_* bits set from interrupt/completion context:
 * clearing TX/RX endpoint halts (usb_clear_halt needs thread context),
 * refilling the RX ring after an allocation failure, and running the
 * minidriver's link_reset hook.  The RX-halt path additionally takes a
 * CPU perf lock and schedules its delayed unlock (HTC additions).
 *
 * NOTE(review): the fail_pipe/fail_halt/skip_reset labels jump INTO the
 * error-report branches, so an autopm failure reports a stale/zero
 * "status" unconditionally (the netif_msg_* checks are bypassed).  This
 * matches the historic upstream structure - confirm before restructuring.
 */
static void
kevent (struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit (EVENT_TX_HALT, &dev->flags)) {
		unlink_urbs (dev, &dev->txq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_pipe;
		status = usb_clear_halt (dev->udev, dev->out);
		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_tx_err (dev))
fail_pipe:
				netdev_err(dev->net, "can't clear tx halt, status %d\n",
					   status);
		} else {
			clear_bit (EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue (dev->net);
		}
	}
	if (test_bit (EVENT_RX_HALT, &dev->flags)) {
		//HTC+++
		//lock cpu perf
		usbnet_lock_perf();

		//queue usbnet_unlock_perf_delayed_work
		usbnet_rx_len = 0;
		schedule_delayed_work(&usbnet_unlock_perf_delayed_work, msecs_to_jiffies(PM_QOS_USBNET_PERF_UNLOCK_TIMER));
		pr_info("%s(%d) [USBNET] EVENT_RX_HALT unlink_urbs !!!\n", __func__, __LINE__);
		pr_info("%s(%d) [USBNET] dev->rxq.qlen:%d\n", __func__, __LINE__, dev->rxq.qlen);
		//HTC---

		unlink_urbs (dev, &dev->rxq);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto fail_halt;
		status = usb_clear_halt (dev->udev, dev->in);

		//HTC+++
		pr_info("%s(%d) [USBNET] EVENT_RX_HALT usb_clear_halt:%d !!!\n", __func__, __LINE__, status);
		//HTC---

		usb_autopm_put_interface(dev->intf);
		if (status < 0 &&
		    status != -EPIPE &&
		    status != -ESHUTDOWN) {
			if (netif_msg_rx_err (dev))
fail_halt:
				netdev_err(dev->net, "can't clear rx halt, status %d\n",
					   status);
		} else {
			//HTC+++
			pr_info("%s(%d) [USBNET] clear_bit EVENT_RX_HALT !!!\n", __func__, __LINE__);
			//HTC---

			clear_bit (EVENT_RX_HALT, &dev->flags);
			/* let the RX tasklet resubmit URBs */
			tasklet_schedule (&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;
		int resched = 1;

		if (netif_running (dev->net))
			urb = usb_alloc_urb (0, GFP_KERNEL);
		else
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
			status = usb_autopm_get_interface(dev->intf);
			if (status < 0) {
				usb_free_urb(urb);
				goto fail_lowmem;
			}
			/* -ENOLINK means rx processing is being torn down;
			 * don't reschedule the tasklet in that case. */
			if (rx_submit (dev, urb, GFP_KERNEL) == -ENOLINK)
				resched = 0;
			usb_autopm_put_interface(dev->intf);
fail_lowmem:
			if (resched)
				tasklet_schedule (&dev->bh);
		}
	}

	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info	*info = dev->driver_info;
		int			retval = 0;

		clear_bit (EVENT_LINK_RESET, &dev->flags);
		status = usb_autopm_get_interface(dev->intf);
		if (status < 0)
			goto skip_reset;
		if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
			usb_autopm_put_interface(dev->intf);
skip_reset:
			netdev_info(dev->net, "link reset failed (%d) usbnet usb-%s-%s, %s\n",
				    retval,
				    dev->udev->bus->bus_name,
				    dev->udev->devpath,
				    info->description);
		} else {
			usb_autopm_put_interface(dev->intf);
		}
	}

	if (dev->flags)
		netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
}
/*
 * ri_tasklet - ifb (Intermediate Functional Block) forwarding tasklet.
 *
 * Moves packets staged on the receive queue (rq) to the work queue (tq)
 * under the device's tx lock, then reinjects each packet: egress
 * packets are re-transmitted via dev_queue_xmit(), ingress packets are
 * fed back into netif_receive_skb() on their original device.
 * Reschedules itself whenever the tx lock cannot be taken or more work
 * arrives while running.
 */
static void ri_tasklet(unsigned long dev)
{
	struct net_device *_dev = (struct net_device *)dev;
	struct ifb_private *dp = netdev_priv(_dev);
	struct netdev_queue *txq;
	struct sk_buff *skb;

	txq = netdev_get_tx_queue(_dev, 0);
	if ((skb = skb_peek(&dp->tq)) == NULL) {
		/* Work queue empty: pull everything staged on rq over to
		 * tq, but only if we can get the tx lock without waiting. */
		if (__netif_tx_trylock(txq)) {
			skb_queue_splice_tail_init(&dp->rq, &dp->tq);
			__netif_tx_unlock(txq);
		} else {
			/* reschedule */
			goto resched;
		}
	}

	while ((skb = __skb_dequeue(&dp->tq)) != NULL) {
		u32 from = G_TC_FROM(skb->tc_verd);

		/* Clear the tc verdict and mark "no classify" so the packet
		 * is not redirected back to us in a loop. */
		skb->tc_verd = 0;
		skb->tc_verd = SET_TC_NCLS(skb->tc_verd);

		u64_stats_update_begin(&dp->tsync);
		dp->tx_packets++;
		dp->tx_bytes += skb->len;
		u64_stats_update_end(&dp->tsync);

		/* Look up the packet's original device by ifindex; it may
		 * have disappeared since the packet was redirected. */
		rcu_read_lock();
		skb->dev = dev_get_by_index_rcu(&init_net, skb->skb_iif);
		if (!skb->dev) {
			rcu_read_unlock();
			dev_kfree_skb(skb);
			_dev->stats.tx_dropped++;
			if (skb_queue_len(&dp->tq) != 0)
				goto resched;
			break;
		}
		rcu_read_unlock();
		skb->skb_iif = _dev->ifindex;

		if (from & AT_EGRESS) {
			dev_queue_xmit(skb);
		} else if (from & AT_INGRESS) {
			skb_pull(skb, skb->dev->hard_header_len);
			netif_receive_skb(skb);
		} else
			BUG();
	}

	if (__netif_tx_trylock(txq)) {
		if ((skb = skb_peek(&dp->rq)) == NULL) {
			/* All caught up: clear the pending flag and restart
			 * the device queue if it was stopped. */
			dp->tasklet_pending = 0;
			if (netif_queue_stopped(_dev))
				netif_wake_queue(_dev);
		} else {
			__netif_tx_unlock(txq);
			goto resched;
		}
		__netif_tx_unlock(txq);
	} else {
resched:
		dp->tasklet_pending = 1;
		tasklet_schedule(&dp->ifb_tasklet);
	}

}
/* ----------------------------------------------------------------------------
mace_interrupt
	The interrupt handler.
---------------------------------------------------------------------------- */
/*
 * Services MACE interrupt sources in a loop (bounded by
 * MACE_MAX_IR_ITERATIONS): receive interrupts are handled by mace_rx(),
 * transmit completions update the per-cause statistics counters and
 * wake the queue, and the remaining error bits bump their respective
 * counters.  MACE_IR is a read-to-clear register, so each inb() both
 * samples and acknowledges pending sources.
 */
static irqreturn_t mace_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = (struct net_device *) dev_id;
	mace_private *lp = netdev_priv(dev);
	unsigned int ioaddr;
	int status;
	int IntrCnt = MACE_MAX_IR_ITERATIONS;

	if (dev == NULL) {
		pr_debug("mace_interrupt(): irq 0x%X for unknown device.\n",
			  irq);
		return IRQ_NONE;
	}

	ioaddr = dev->base_addr;

	if (lp->tx_irq_disabled) {
		const char *msg;
		/* NOTE(review): the inner test repeats the outer condition,
		 * so the "Re-entering" message is dead code - the guard was
		 * presumably meant to test a different re-entry flag. */
		if (lp->tx_irq_disabled)
			msg = "Interrupt with tx_irq_disabled";
		else
			msg = "Re-entering the interrupt handler";
		netdev_notice(dev, "%s [isr=%02X, imr=%02X]\n",
			      msg,
			      inb(ioaddr + AM2150_MACE_BASE + MACE_IR),
			      inb(ioaddr + AM2150_MACE_BASE + MACE_IMR));
		/* WARNING: MACE_IR has been read! */
		return IRQ_NONE;
	}

	if (!netif_device_present(dev)) {
		netdev_dbg(dev, "interrupt from dead card\n");
		return IRQ_NONE;
	}

	do {
		/* WARNING: MACE_IR is a READ/CLEAR port! */
		status = inb(ioaddr + AM2150_MACE_BASE + MACE_IR);

		pr_debug("mace_interrupt: irq 0x%X status 0x%X.\n", irq, status);

		if (status & MACE_IR_RCVINT) {
			mace_rx(dev, MACE_MAX_RX_ITERATIONS);
		}

		if (status & MACE_IR_XMTINT) {
			unsigned char fifofc;
			unsigned char xmtrc;
			unsigned char xmtfs;

			fifofc = inb(ioaddr + AM2150_MACE_BASE + MACE_FIFOFC);
			if ((fifofc & MACE_FIFOFC_XMTFC)==0) {
				/* No frame actually completed - count an
				 * error and skip the stale transmit slot. */
				lp->linux_stats.tx_errors++;
				outb(0xFF, ioaddr + AM2150_XMT_SKIP);
			}

			/* Transmit Retry Count (XMTRC, reg 4) */
			xmtrc = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTRC);
			if (xmtrc & MACE_XMTRC_EXDEF) lp->mace_stats.exdef++;
			lp->mace_stats.xmtrc += (xmtrc & MACE_XMTRC_XMTRC);

			if (
			  (xmtfs = inb(ioaddr + AM2150_MACE_BASE + MACE_XMTFS)) &
			  MACE_XMTFS_XMTSV /* Transmit Status Valid */
			) {
				lp->mace_stats.xmtsv++;

				if (xmtfs & ~MACE_XMTFS_XMTSV) {
					if (xmtfs & MACE_XMTFS_UFLO) {
						/* Underflow.  Indicates that the Transmit FIFO emptied before the
						   end of frame was reached. */
						lp->mace_stats.uflo++;
					}
					if (xmtfs & MACE_XMTFS_LCOL) {
						/* Late Collision */
						lp->mace_stats.lcol++;
					}
					if (xmtfs & MACE_XMTFS_MORE) {
						/* MORE than one retry was needed */
						lp->mace_stats.more++;
					}
					if (xmtfs & MACE_XMTFS_ONE) {
						/* Exactly ONE retry occurred */
						lp->mace_stats.one++;
					}
					if (xmtfs & MACE_XMTFS_DEFER) {
						/* Transmission was defered */
						lp->mace_stats.defer++;
					}
					if (xmtfs & MACE_XMTFS_LCAR) {
						/* Loss of carrier */
						lp->mace_stats.lcar++;
					}
					if (xmtfs & MACE_XMTFS_RTRY) {
						/* Retry error: transmit aborted after 16 attempts */
						lp->mace_stats.rtry++;
					}
				} /* if (xmtfs & ~MACE_XMTFS_XMTSV) */

			} /* if (xmtfs & MACE_XMTFS_XMTSV) */

			lp->linux_stats.tx_packets++;
			lp->tx_free_frames++;
			netif_wake_queue(dev);
		} /* if (status & MACE_IR_XMTINT) */

		if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) {
			if (status & MACE_IR_JAB) {
				/* Jabber Error.  Excessive transmit duration (20-150ms). */
				lp->mace_stats.jab++;
			}
			if (status & MACE_IR_BABL) {
				/* Babble Error.  >1518 bytes transmitted. */
				lp->mace_stats.babl++;
			}
			if (status & MACE_IR_CERR) {
				/* Collision Error.  CERR indicates the absence of the
				   Signal Quality Error Test message after a packet transmission. */
				lp->mace_stats.cerr++;
			}
			if (status & MACE_IR_RCVCCO) {
				/* Receive Collision Count Overflow; */
				lp->mace_stats.rcvcco++;
			}
			if (status & MACE_IR_RNTPCO) {
				/* Runt Packet Count Overflow */
				lp->mace_stats.rntpco++;
			}
			if (status & MACE_IR_MPCO) {
				/* Missed Packet Count Overflow */
				lp->mace_stats.mpco++;
			}
		} /* if (status & ~MACE_IMR_DEFAULT & ~MACE_IR_RCVINT & ~MACE_IR_XMTINT) */

	} while ((status & ~MACE_IMR_DEFAULT) && (--IntrCnt));

	return IRQ_HANDLED;
} /* mace_interrupt */
/*
 * ariadne_interrupt - Am79C960 (PCnet-ISA) interrupt handler for the
 * Ariadne Zorro board.
 *
 * Rejects interrupts not raised by the LANCE, then loops (bounded by a
 * bogus-interrupt counter) acknowledging CSR0 sources: RINT dispatches
 * to ariadne_rx(), TINT walks the transmit ring retiring completed
 * descriptors and accounting errors, and BABL/MISS/MERR bump the error
 * counters (MERR also restarts the chip).
 */
static irqreturn_t ariadne_interrupt(int irq, void *data)
{
	struct net_device *dev = (struct net_device *)data;
	volatile struct Am79C960 *lance = (struct Am79C960 *)dev->base_addr;
	struct ariadne_private *priv;
	int csr0, boguscnt;
	int handled = 0;

	lance->RAP = CSR0;	/* PCnet-ISA Controller Status */

	if (!(lance->RDP & INTR))	/* Check if any interrupt has been */
		return IRQ_NONE;	/* generated by the board */

	priv = netdev_priv(dev);

	boguscnt = 10;
	while ((csr0 = lance->RDP) & (ERR | RINT | TINT) && --boguscnt >= 0) {
		/* Acknowledge all of the current interrupt sources ASAP */
		lance->RDP = csr0 & ~(INEA | TDMD | STOP | STRT | INIT);

#ifdef DEBUG
		if (ariadne_debug > 5) {
			netdev_dbg(dev, "interrupt csr0=%#02x new csr=%#02x [",
				   csr0, lance->RDP);
			if (csr0 & INTR)
				pr_cont(" INTR");
			if (csr0 & INEA)
				pr_cont(" INEA");
			if (csr0 & RXON)
				pr_cont(" RXON");
			if (csr0 & TXON)
				pr_cont(" TXON");
			if (csr0 & TDMD)
				pr_cont(" TDMD");
			if (csr0 & STOP)
				pr_cont(" STOP");
			if (csr0 & STRT)
				pr_cont(" STRT");
			if (csr0 & INIT)
				pr_cont(" INIT");
			if (csr0 & ERR)
				pr_cont(" ERR");
			if (csr0 & BABL)
				pr_cont(" BABL");
			if (csr0 & CERR)
				pr_cont(" CERR");
			if (csr0 & MISS)
				pr_cont(" MISS");
			if (csr0 & MERR)
				pr_cont(" MERR");
			if (csr0 & RINT)
				pr_cont(" RINT");
			if (csr0 & TINT)
				pr_cont(" TINT");
			if (csr0 & IDON)
				pr_cont(" IDON");
			pr_cont(" ]\n");
		}
#endif

		if (csr0 & RINT) {	/* Rx interrupt */
			handled = 1;
			ariadne_rx(dev);
		}

		if (csr0 & TINT) {	/* Tx-done interrupt */
			int dirty_tx = priv->dirty_tx;

			handled = 1;
			/* Retire every descriptor the chip has given back. */
			while (dirty_tx < priv->cur_tx) {
				int entry = dirty_tx % TX_RING_SIZE;
				int status = lowb(priv->tx_ring[entry]->TMD1);

				if (status & TF_OWN)
					break;	/* It still hasn't been Txed */

				priv->tx_ring[entry]->TMD1 &= 0xff00;

				if (status & TF_ERR) {
					/* There was an major error, log it */
					int err_status = priv->tx_ring[entry]->TMD3;
					dev->stats.tx_errors++;
					if (err_status & EF_RTRY)
						dev->stats.tx_aborted_errors++;
					if (err_status & EF_LCAR)
						dev->stats.tx_carrier_errors++;
					if (err_status & EF_LCOL)
						dev->stats.tx_window_errors++;
					if (err_status & EF_UFLO) {
						/* Ackk!  On FIFO errors the Tx unit is turned off! */
						dev->stats.tx_fifo_errors++;
						/* Remove this verbosity later! */
						netdev_err(dev, "Tx FIFO error! Status %04x\n",
							   csr0);
						/* Restart the chip */
						lance->RDP = STRT;
					}
				} else {
					if (status & (TF_MORE | TF_ONE))
						dev->stats.collisions++;
					dev->stats.tx_packets++;
				}
				dirty_tx++;
			}

#ifndef final_version
			if (priv->cur_tx - dirty_tx >= TX_RING_SIZE) {
				netdev_err(dev, "out-of-sync dirty pointer, %d vs. %d, full=%d\n",
					   dirty_tx, priv->cur_tx,
					   priv->tx_full);
				dirty_tx += TX_RING_SIZE;
			}
#endif

			if (priv->tx_full && netif_queue_stopped(dev) &&
			    dirty_tx > priv->cur_tx - TX_RING_SIZE + 2) {
				/* The ring is no longer full */
				priv->tx_full = 0;
				netif_wake_queue(dev);
			}

			priv->dirty_tx = dirty_tx;
		}

		/* Log misc errors */
		if (csr0 & BABL) {
			handled = 1;
			dev->stats.tx_errors++;	/* Tx babble */
		}
		if (csr0 & MISS) {
			handled = 1;
			dev->stats.rx_errors++;	/* Missed a Rx frame */
		}
		if (csr0 & MERR) {
			handled = 1;
			netdev_err(dev, "Bus master arbitration failure, status %04x\n",
				   csr0);
			/* Restart the chip */
			lance->RDP = STRT;
		}
	}

	/* Clear any other interrupt, and set interrupt enable */
	lance->RAP = CSR0;	/* PCnet-ISA Controller Status */
	lance->RDP = INEA | BABL | CERR | MISS | MERR | IDON;

	if (ariadne_debug > 4)
		netdev_dbg(dev, "exiting interrupt, csr%d=%#04x\n",
			   lance->RAP, lance->RDP);

	return IRQ_RETVAL(handled);
}
/*
 * z8530_tx_begin - start transmitting the next queued frame on a Z8530
 * channel.
 *
 * Promotes tx_next_skb to the active tx_skb.  If there is nothing to
 * send, shuts down any running TX DMA (accounting a dropped frame if
 * the DMA did not complete).  Otherwise the frame is sent either by
 * programming the TX DMA channel or, in PIO mode, by stuffing bytes
 * into the data FIFO until it fills; the remainder goes out from the
 * TX interrupt.  Finally wakes the queue since tx_next_skb is free.
 */
static void z8530_tx_begin(struct z8530_channel *c)
{
	unsigned long flags;
	if(c->tx_skb)
		return;

	c->tx_skb=c->tx_next_skb;
	c->tx_next_skb=NULL;
	c->tx_ptr=c->tx_next_ptr;

	if(c->tx_skb==NULL)
	{
		/* Idle on */
		if(c->dma_tx)
		{
			flags=claim_dma_lock();
			disable_dma(c->txdma);
			/*
			 *	Check if we crapped out.
			 */
			if (get_dma_residue(c->txdma))
			{
				c->netdevice->stats.tx_dropped++;
				c->netdevice->stats.tx_fifo_errors++;
			}
			release_dma_lock(flags);
		}
		c->txcount=0;
	}
	else
	{
		c->txcount=c->tx_skb->len;

		if(c->dma_tx)
		{
			/*
			 *	FIXME. DMA is broken for the original 8530,
			 *	on the older parts we need to set a flag and
			 *	wait for a further TX interrupt to fire this
			 *	stage off
			 */

			flags=claim_dma_lock();
			disable_dma(c->txdma);

			/*
			 *	These two are needed by the 8530/85C30
			 *	and must be issued when idling.
			 */

			if(c->dev->type!=Z85230)
			{
				write_zsctrl(c, RES_Tx_CRC);
				write_zsctrl(c, RES_EOM_L);
			}
			/* Disable the TX-underrun abort while DMA runs. */
			write_zsreg(c, R10, c->regs[10]&~ABUNDER);
			clear_dma_ff(c->txdma);
			set_dma_addr(c->txdma, virt_to_bus(c->tx_ptr));
			set_dma_count(c->txdma, c->txcount);
			enable_dma(c->txdma);
			release_dma_lock(flags);
			write_zsctrl(c, RES_EOM_L);
			write_zsreg(c, R5, c->regs[R5]|TxENAB);
		}
		else
		{
			/* ABUNDER off */
			write_zsreg(c, R10, c->regs[10]);
			write_zsctrl(c, RES_Tx_CRC);

			/* PIO: fill the FIFO now; the TX interrupt handler
			 * sends the rest of the frame. */
			while(c->txcount && (read_zsreg(c,R0)&Tx_BUF_EMP))
			{
				write_zsreg(c, R8, *c->tx_ptr++);
				c->txcount--;
			}

		}
	}
	/*
	 *	Since we emptied tx_skb we can ask for more
	 */
	netif_wake_queue(c->netdevice);
}
/*
 * _netdev_open - ndo_open work for the 8192 USB wifi netdevice
 * @pnetdev: network device being opened
 *
 * Brings the adapter up on first open (hardware init, driver threads,
 * mlme extension, optional proc/cfg80211 hooks), arms the dynamic
 * check timer, optionally starts the power-state check timer, and
 * (re)starts the TX queue.  With CONFIG_BR_EXT the bridge MAC is
 * cached for the ethernet-bridge extension.
 *
 * Returns 0 on success, -1 on failure (queue stopped, carrier off).
 */
int _netdev_open(struct net_device *pnetdev)
{
	uint status;
	_adapter *padapter = (_adapter *)rtw_netdev_priv(pnetdev);
	struct pwrctrl_priv *pwrctrlpriv = &padapter->pwrctrlpriv;

	RT_TRACE(_module_os_intfs_c_,_drv_info_,("+871x_drv - dev_open\n"));
	DBG_8192C("+871x_drv - drv_open, bup=%d\n", padapter->bup);

	/* woken from power-save: skip the full bring-up */
	if(pwrctrlpriv->ps_flag == _TRUE){
		padapter->net_closed = _FALSE;
		goto netdev_open_normal_process;
	}

	if(padapter->bup == _FALSE)
	{
		padapter->bDriverStopped = _FALSE;
		padapter->bSurpriseRemoved = _FALSE;
		padapter->bCardDisableWOHSM = _FALSE;

		status = rtw_hal_init(padapter);
		if (status ==_FAIL)
		{
			RT_TRACE(_module_os_intfs_c_,_drv_err_,("rtl871x_hal_init(): Can't init h/w!\n"));
			goto netdev_open_error;
		}

		DBG_8192C("MAC Address = "MAC_FMT"\n", MAC_ARG(pnetdev->dev_addr));

		status=rtw_start_drv_threads(padapter);
		if(status ==_FAIL)
		{
			RT_TRACE(_module_os_intfs_c_,_drv_err_,("Initialize driver software resource Failed!\n"));
			goto netdev_open_error;
		}

		if (init_hw_mlme_ext(padapter) == _FAIL)
		{
			RT_TRACE(_module_os_intfs_c_,_drv_err_,("can't init mlme_ext_priv\n"));
			goto netdev_open_error;
		}

#ifdef CONFIG_DRVEXT_MODULE
		init_drvext(padapter);
#endif

		/* interface-specific start hook (e.g. enqueue RX urbs) */
		if(padapter->intf_start)
		{
			padapter->intf_start(padapter);
		}

#ifdef CONFIG_PROC_DEBUG
#ifndef RTK_DMP_PLATFORM
		rtw_proc_init_one(pnetdev);
#endif
#endif

#ifdef CONFIG_IOCTL_CFG80211
		rtw_cfg80211_init_wiphy(padapter);
#endif

		rtw_led_control(padapter, LED_CTL_NO_LINK);

		padapter->bup = _TRUE;
	}
	padapter->net_closed = _FALSE;

	/* periodic link/housekeeping check every 2s */
	_set_timer(&padapter->mlmepriv.dynamic_chk_timer, 2000);

	if(( pwrctrlpriv->power_mgnt != PS_MODE_ACTIVE ) ||(padapter->pwrctrlpriv.bHWPwrPindetect))
	{
		padapter->pwrctrlpriv.bips_processing = _FALSE;
		rtw_set_pwr_state_check_timer(&padapter->pwrctrlpriv);
	}

	//netif_carrier_on(pnetdev);//call this func when rtw_joinbss_event_callback return success
	if(!netif_queue_stopped(pnetdev))
		netif_start_queue(pnetdev);
	else
		netif_wake_queue(pnetdev);

#ifdef CONFIG_BR_EXT

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
	rcu_read_lock();
#endif	// (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
	//if(check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_ADHOC_STATE) == _TRUE)
	{
		//struct net_bridge *br = pnetdev->br_port->br;//->dev->dev_addr;
		/* remember the bridge MAC if this interface is enslaved */
#if (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
		if (pnetdev->br_port)
#else	// (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
		if (rcu_dereference(padapter->pnetdev->rx_handler_data))
#endif	// (LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 35))
		{
			struct net_device *br_netdev;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
			br_netdev = dev_get_by_name(CONFIG_BR_EXT_BRNAME);
#else	// (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
			struct net *devnet = NULL;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
			devnet = pnetdev->nd_net;
#else	// (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
			devnet = dev_net(pnetdev);
#endif	// (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))

			br_netdev = dev_get_by_name(devnet, CONFIG_BR_EXT_BRNAME);
#endif	// (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))

			if (br_netdev) {
				memcpy(padapter->br_mac, br_netdev->dev_addr, ETH_ALEN);
				dev_put(br_netdev);
			} else
				printk("%s()-%d: dev_get_by_name(%s) failed!", __FUNCTION__, __LINE__, CONFIG_BR_EXT_BRNAME);
		}

		padapter->ethBrExtInfo.addPPPoETag = 1;
	}

#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))
	rcu_read_unlock();
#endif	// (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 35))

#endif	// CONFIG_BR_EXT

netdev_open_normal_process:

	RT_TRACE(_module_os_intfs_c_,_drv_info_,("-871x_drv - dev_open\n"));
	DBG_8192C("-871x_drv - drv_open, bup=%d\n", padapter->bup);

	return 0;

netdev_open_error:

	padapter->bup = _FALSE;

	netif_carrier_off(pnetdev);
	netif_stop_queue(pnetdev);

	RT_TRACE(_module_os_intfs_c_,_drv_err_,("-871x_drv - dev_open, fail!\n"));
	DBG_8192C("-871x_drv - drv_open fail, bup=%d\n", padapter->bup);

	return (-1);
}
/* TX bulk-URB completion handler: the frame has been handed to the
 * hardware, so let the network stack queue the next one. */
static void zd1201_usbtx(struct urb *urb)
{
	struct zd1201 *drv = urb->context;

	netif_wake_queue(drv->dev);
}
/*
 * sirdev_config_fsm - workqueue-driven configuration state machine
 * @work: embedded delayed work, resolves to the owning sir_dev
 *
 * Runs the SIR device/dongle configuration state machine.  States are
 * processed in a tight loop until one of them requests a delay (the
 * work is then re-queued after that many milliseconds) or a terminal
 * state (COMPLETE/ERROR path) releases fsm->sem and returns.
 * Each state sets next_state; a state handler reporting a positive
 * value via ret asks to be resumed later with that delay.
 */
static void sirdev_config_fsm(struct work_struct *work)
{
	struct sir_dev *dev = container_of(work, struct sir_dev, fsm.work.work);
	struct sir_fsm *fsm = &dev->fsm;
	int next_state;
	int ret = -1;
	unsigned delay;

	IRDA_DEBUG(2, "%s(), <%ld>\n", __func__, jiffies);

	do {
		IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
			__func__, fsm->state, fsm->substate);

		next_state = fsm->state;
		delay = 0;

		switch(fsm->state) {

		case SIRDEV_STATE_DONGLE_OPEN:
			/* drop any previously attached dongle first */
			if (dev->dongle_drv != NULL) {
				ret = sirdev_put_dongle(dev);
				if (ret) {
					fsm->result = -EINVAL;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}

			/* Initialize dongle */
			ret = sirdev_get_dongle(dev, fsm->param);
			if (ret) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}

			/* Dongles are powered through the modem control lines which
			 * were just set during open. Before resetting, let's wait for
			 * the power to stabilize. This is what some dongle drivers did
			 * in open before, while others didn't - should be safe anyway.
			 */
			delay = 50;
			fsm->substate = SIRDEV_STATE_DONGLE_RESET;
			next_state = SIRDEV_STATE_DONGLE_RESET;

			/* dongles come up at 9600 baud after reset */
			fsm->param = 9600;

			break;

		case SIRDEV_STATE_DONGLE_CLOSE:
			/* shouldn't we just treat this as success=? */
			if (dev->dongle_drv == NULL) {
				fsm->result = -EINVAL;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}

			ret = sirdev_put_dongle(dev);
			if (ret) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_SET_DTR_RTS:
			/* param bit1 = DTR, bit0 = RTS */
			ret = sirdev_set_dtr_rts(dev,
				(fsm->param&0x02) ? TRUE : FALSE,
				(fsm->param&0x01) ? TRUE : FALSE);
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_SET_SPEED:
			fsm->substate = SIRDEV_STATE_WAIT_XMIT;
			next_state = SIRDEV_STATE_DONGLE_CHECK;
			break;

		case SIRDEV_STATE_DONGLE_CHECK:
			/* wait for pending transmit to drain first */
			ret = sirdev_tx_complete_fsm(dev);
			if (ret < 0) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
			if ((delay=ret) != 0)
				break;

			if (dev->dongle_drv) {
				fsm->substate = SIRDEV_STATE_DONGLE_RESET;
				next_state = SIRDEV_STATE_DONGLE_RESET;
			}
			else {
				dev->speed = fsm->param;
				next_state = SIRDEV_STATE_PORT_SPEED;
			}
			break;

		case SIRDEV_STATE_DONGLE_RESET:
			if (dev->dongle_drv->reset) {
				ret = dev->dongle_drv->reset(dev);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}
			else
				ret = 0;

			/* positive ret = dongle asked for a delay before
			 * the next step */
			if ((delay=ret) == 0) {
				/* set serial port according to dongle default speed */
				if (dev->drv->set_speed)
					dev->drv->set_speed(dev, dev->speed);
				fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
				next_state = SIRDEV_STATE_DONGLE_SPEED;
			}
			break;

		case SIRDEV_STATE_DONGLE_SPEED:
			if (dev->dongle_drv->reset) {
				ret = dev->dongle_drv->set_speed(dev, fsm->param);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}
			else
				ret = 0;

			if ((delay=ret) == 0)
				next_state = SIRDEV_STATE_PORT_SPEED;
			break;

		case SIRDEV_STATE_PORT_SPEED:
			/* Finally we are ready to change the serial port speed */
			if (dev->drv->set_speed)
				dev->drv->set_speed(dev, dev->speed);
			dev->new_speed = 0;
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_DONE:
			/* Signal network layer so it can send more frames */
			netif_wake_queue(dev->netdev);
			next_state = SIRDEV_STATE_COMPLETE;
			break;

		default:
			IRDA_ERROR("%s - undefined state\n", __func__);
			fsm->result = -EINVAL;
			/* fall thru */

		case SIRDEV_STATE_ERROR:
			IRDA_ERROR("%s - error: %d\n", __func__, fsm->result);

#if 0	/* don't enable this before we have netdev->tx_timeout to recover */
			netif_stop_queue(dev->netdev);
#else
			netif_wake_queue(dev->netdev);
#endif
			/* fall thru */

		case SIRDEV_STATE_COMPLETE:
			/* config change finished, so we are not busy any longer */
			sirdev_enable_rx(dev);
			up(&fsm->sem);
			return;
		}
		fsm->state = next_state;
	} while(!delay);

	/* a state asked for a pause - resume the FSM later */
	queue_delayed_work(irda_sir_wq, &fsm->work, msecs_to_jiffies(delay));
}
/*
 * sprdwl_tx_timeout - ndo_tx_timeout watchdog hook
 * @dev: network device whose TX queue stalled
 *
 * Logs the event, refreshes the transmit timestamp so the watchdog
 * re-arms, and wakes the queue so the stack can retry.
 */
static void sprdwl_tx_timeout(struct net_device *dev)
{
	dev_info(&dev->dev, "%s\n", __func__);
	dev->trans_start = jiffies;
	netif_wake_queue(dev);
}
/*
 * sirdev_write_complete - lower driver reports its TX buffer drained
 * @dev: SIR device
 *
 * Called with the hardware ready for more data.  Pushes any remainder
 * of tx_buff first; when the whole frame is out it accounts the skb,
 * triggers a pending speed change if one was scheduled, and otherwise
 * re-enables RX and wakes the network queue.  All under dev->tx_lock.
 */
void sirdev_write_complete(struct sir_dev *dev)
{
	unsigned long flags;
	struct sk_buff *skb;
	int actual = 0;
	int err;
	
	spin_lock_irqsave(&dev->tx_lock, flags);

	IRDA_DEBUG(3, "%s() - dev->tx_buff.len = %d\n",
		   __func__, dev->tx_buff.len);

	if (likely(dev->tx_buff.len > 0))  {
		/* Write data left in transmit buffer */
		actual = dev->drv->do_write(dev, dev->tx_buff.data,
					    dev->tx_buff.len);

		if (likely(actual>0)) {
			dev->tx_buff.data += actual;
			dev->tx_buff.len  -= actual;
		}
		else if (unlikely(actual<0)) {
			/* could be dropped later when we have tx_timeout to recover */
			IRDA_ERROR("%s: drv->do_write failed (%d)\n",
				   __func__, actual);
			if ((skb=dev->tx_skb) != NULL) {
				dev->tx_skb = NULL;
				dev_kfree_skb_any(skb);
				dev->netdev->stats.tx_errors++;
				dev->netdev->stats.tx_dropped++;
			}
			dev->tx_buff.len = 0;
		}
		if (dev->tx_buff.len > 0)
			goto done;	/* more data to send later */
	}

	if (unlikely(dev->raw_tx != 0)) {
		/* in raw mode we are just done now after the buffer was sent
		 * completely. Since this was requested by some dongle driver
		 * running under the control of the irda-thread we must take
		 * care here not to re-enable the queue. The queue will be
		 * restarted when the irda-thread has completed the request.
		 */
		IRDA_DEBUG(3, "%s(), raw-tx done\n", __func__);
		dev->raw_tx = 0;
		goto done;	/* no post-frame handling in raw mode */
	}

	/* we have finished now sending this skb.
	 * update statistics and free the skb.
	 * finally we check and trigger a pending speed change, if any.
	 * if not we switch to rx mode and wake the queue for further
	 * packets.
	 * note the scheduled speed request blocks until the lower
	 * client driver and the corresponding hardware has really
	 * finished sending all data (xmit fifo drained f.e.)
	 * before the speed change gets finally done and the queue
	 * re-activated.
	 */

	IRDA_DEBUG(5, "%s(), finished with frame!\n", __func__);
		
	if ((skb=dev->tx_skb) != NULL) {
		dev->tx_skb = NULL;
		dev->netdev->stats.tx_packets++;
		dev->netdev->stats.tx_bytes += skb->len;
		dev_kfree_skb_any(skb);
	}

	if (unlikely(dev->new_speed > 0)) {
		IRDA_DEBUG(5, "%s(), Changing speed!\n", __func__);
		err = sirdev_schedule_speed(dev, dev->new_speed);
		if (unlikely(err)) {
			/* should never happen
			 * forget the speed change and hope the stack recovers
			 */
			IRDA_ERROR("%s - schedule speed change failed: %d\n",
				   __func__, err);
			netif_wake_queue(dev->netdev);
		}
		/* else: success
		 *	speed change in progress now
		 *	on completion dev->new_speed gets cleared,
		 *	rx-reenabled and the queue restarted
		 */
	}
	else {
		sirdev_enable_rx(dev);
		netif_wake_queue(dev->netdev);
	}

done:
	spin_unlock_irqrestore(&dev->tx_lock, flags);
}
/*
 * greth_clean_tx_gbit - reclaim completed TX descriptors (gbit MAC)
 * @dev: GRETH network device
 *
 * Walks the TX ring from tx_last towards tx_next, releasing only SKBs
 * whose last fragment descriptor has been handed back by hardware
 * (GRETH_BD_EN cleared).  Unmaps the head and every fragment, updates
 * statistics, and wakes the queue once enough descriptors are free
 * for a maximally fragmented frame.
 */
static void greth_clean_tx_gbit(struct net_device *dev)
{
	struct greth_private *greth;
	struct greth_bd *bdp, *bdp_last_frag;
	struct sk_buff *skb = NULL;
	u32 stat;
	int nr_frags, i;
	u16 tx_last;

	greth = netdev_priv(dev);
	tx_last = greth->tx_last;

	while (tx_last != greth->tx_next) {

		skb = greth->tx_skbuff[tx_last];

		nr_frags = skb_shinfo(skb)->nr_frags;

		/* We only clean fully completed SKBs */
		bdp_last_frag = greth->tx_bd_base + SKIP_TX(tx_last, nr_frags);

		/* ack the TX interrupt status before sampling the BD so a
		 * completion after the read re-raises the interrupt */
		GRETH_REGSAVE(greth->regs->status, GRETH_INT_TE | GRETH_INT_TX);
		mb();
		stat = greth_read_bd(&bdp_last_frag->stat);

		/* still owned by hardware - stop here */
		if (stat & GRETH_BD_EN)
			break;

		greth->tx_skbuff[tx_last] = NULL;

		greth_update_tx_stats(dev, stat);
		dev->stats.tx_bytes += skb->len;

		bdp = greth->tx_bd_base + tx_last;

		tx_last = NEXT_TX(tx_last);

		dma_unmap_single(greth->dev,
				 greth_read_bd(&bdp->addr),
				 skb_headlen(skb),
				 DMA_TO_DEVICE);

		for (i = 0; i < nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			bdp = greth->tx_bd_base + tx_last;

			dma_unmap_page(greth->dev,
				       greth_read_bd(&bdp->addr),
				       skb_frag_size(frag),
				       DMA_TO_DEVICE);

			tx_last = NEXT_TX(tx_last);
		}
		dev_kfree_skb(skb);
	}
	if (skb) { /* skb is set only if the above while loop was entered */
		wmb();
		greth->tx_last = tx_last;

		if (netif_queue_stopped(dev) &&
		    (greth_num_free_bds(tx_last, greth->tx_next) > (MAX_SKB_FRAGS+1)))
			netif_wake_queue(dev);
	}
}
/*
 * Send an AX.25 frame via an ethernet interface
 *
 * Prepends the two-byte BPQ length header (frame length + 5, little
 * endian split over two bytes), re-targets the skb at the underlying
 * ethernet device, builds the ethernet header and queues the frame.
 * Returns 0 on success or a negative errno; the skb is always consumed.
 */
static int bpq_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sk_buff *newskb;
	unsigned char *ptr;
	struct bpqdev *bpq;
	int size;

	/*
	 * Just to be *really* sure not to send anything if the interface
	 * is down, the ethernet device may have gone.
	 */
	if (!netif_running(dev)) {
		bpq_check_devices(dev);
		kfree_skb(skb);
		return -ENODEV;
	}

	/* strip the KISS command byte */
	skb_pull(skb, 1);
	size = skb->len;

	/*
	 * The AX.25 code leaves enough room for the ethernet header, but
	 * sendto() does not.
	 */
	if (skb_headroom(skb) < AX25_BPQ_HEADER_LEN) {	/* Ough! */
		if ((newskb = skb_realloc_headroom(skb, AX25_BPQ_HEADER_LEN)) == NULL) {
			printk(KERN_WARNING "bpqether: out of memory\n");
			kfree_skb(skb);
			return -ENOMEM;
		}

		if (skb->sk != NULL)
			skb_set_owner_w(newskb, skb->sk);

		kfree_skb(skb);
		skb = newskb;
	}

	skb->protocol = htons(ETH_P_AX25);

	/* BPQ length header: length+5, low byte first */
	ptr = skb_push(skb, 2);

	*ptr++ = (size + 5) % 256;
	*ptr++ = (size + 5) / 256;

	bpq = (struct bpqdev *)dev->priv;

	if ((dev = bpq_get_ether_dev(dev)) == NULL) {
		bpq->stats.tx_dropped++;
		kfree_skb(skb);
		return -ENODEV;
	}

	skb->dev = dev;
	skb->nh.raw = skb->data;
	dev->hard_header(skb, dev, ETH_P_BPQ, bpq->dest_addr, NULL, 0);
	bpq->stats.tx_packets++;
	bpq->stats.tx_bytes+=skb->len;
  
	dev_queue_xmit(skb);
	netif_wake_queue(dev);
	return 0;
}
/*
 * usbnet_bh - bottom-half tasklet for the usbnet framework
 * @param: the struct usbnet, cast through unsigned long
 *
 * Drains the "done" queue (processing finished RX skbs and freeing
 * completed TX/RX cleanup entries), then either wakes an unlink/close
 * waiter once all queues are empty, or tops up the RX URB queue and
 * wakes the TX queue when there is room.
 */
static void usbnet_bh (unsigned long param)
{
	struct usbnet		*dev = (struct usbnet *) param;
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue (&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process (dev, skb);
			continue;
		case tx_done:
		case rx_cleanup:
			usb_free_urb (entry->urb);
			dev_kfree_skb (skb);
			continue;
		default:
			netdev_dbg(dev->net, "bogus skb state %d\n", entry->state);
		}
	}

	// waiting for all pending urbs to complete?
	if (dev->wait) {
		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
			wake_up (dev->wait);
		}

	// or are we maybe short a few urbs?
	} else if (netif_running (dev->net) &&
		   netif_device_present (dev->net) &&
		   !timer_pending (&dev->delay) &&
		   !test_bit (EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;
		int	qlen = RX_QLEN (dev);

		if (temp < qlen) {
			struct urb	*urb;
			int		i;

			// don't refill the queue all at once
			for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
				urb = usb_alloc_urb (0, GFP_ATOMIC);
				if (urb != NULL) {
					/* -ENOLINK: device unplugged, give up */
					if (rx_submit (dev, urb, GFP_ATOMIC) == -ENOLINK)
						return;
				}
			}
			if (temp != dev->rxq.qlen)
				netif_dbg(dev, link, dev->net,
					  "rxqlen %d --> %d\n",
					  temp, dev->rxq.qlen);
			/* not full yet - come back for another batch */
			if (dev->rxq.qlen < qlen)
				tasklet_schedule (&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN (dev))
			netif_wake_queue (dev->net);
	}
}
/*
 * softing_startstop - (re)start or stop the busses on a Softing card
 * @dev: the netdev that triggered the operation
 * @up: non-zero to bring @dev's bus up as part of the restart
 *
 * The two CAN busses share one firmware instance, so any start/stop
 * requires a full card reset: all running netdevs are closed, the
 * chip is reset, and every bus that was running (plus @dev when @up)
 * is re-initialized via firmware commands (bittiming, mode, filters,
 * output control, fifos) and reopened.  Other restarted busses get a
 * CAN_ERR_RESTARTED notification.
 *
 * Called with rtnl held by the caller paths; serializes on
 * card->fw.lock.  Returns 0 on success or a negative errno; on
 * failure all other interfaces are brought down.
 */
int softing_startstop(struct net_device *dev, int up)
{
	int ret;
	struct softing *card;
	struct softing_priv *priv;
	struct net_device *netdev;
	int bus_bitmask_start;
	int j, error_reporting;
	struct can_frame msg;
	const struct can_bittiming *bt;

	priv = netdev_priv(dev);
	card = priv->card;

	if (!card->fw.up)
		return -EIO;

	ret = mutex_lock_interruptible(&card->fw.lock);
	if (ret)
		return ret;

	bus_bitmask_start = 0;
	if (dev && up)
		/* prepare to start this bus as well */
		bus_bitmask_start |= (1 << priv->index);
	/* bring netdevs down */
	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
		netdev = card->net[j];
		if (!netdev)
			continue;
		priv = netdev_priv(netdev);

		if (dev != netdev)
			netif_stop_queue(netdev);

		if (netif_running(netdev)) {
			if (dev != netdev)
				/* remember to restart this bus later */
				bus_bitmask_start |= (1 << j);
			priv->tx.pending = 0;
			priv->tx.echo_put = 0;
			priv->tx.echo_get = 0;
			/*
			 * this bus' may just have called open_candev()
			 * which is rather stupid to call close_candev()
			 * already
			 * but we may come here from busoff recovery too
			 * in which case the echo_skb _needs_ flushing too.
			 * just be sure to call open_candev() again
			 */
			close_candev(netdev);
		}
		priv->can.state = CAN_STATE_STOPPED;
	}
	card->tx.pending = 0;

	softing_enable_irq(card, 0);
	ret = softing_reset_chip(card);
	if (ret)
		goto failed;
	if (!bus_bitmask_start)
		/* no busses to be brought up */
		goto card_done;

	/* the firmware has one global error-reporting flag, so both
	 * busses must agree on it */
	if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2)
			&& (softing_error_reporting(card->net[0])
				!= softing_error_reporting(card->net[1]))) {
		dev_alert(&card->pdev->dev,
				"err_reporting flag differs for busses\n");
		goto invalid;
	}
	error_reporting = 0;
	if (bus_bitmask_start & 1) {
		netdev = card->net[0];
		priv = netdev_priv(netdev);
		error_reporting += softing_error_reporting(netdev);
		/* init chip 1 */
		bt = &priv->can.bittiming;
		iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
		iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
		iowrite16(bt->phase_seg1 + bt->prop_seg,
				&card->dpram[DPRAM_FCT_PARAM + 6]);
		iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
		iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
				&card->dpram[DPRAM_FCT_PARAM + 10]);
		ret = softing_fct_cmd(card, 1, "initialize_chip[0]");
		if (ret < 0)
			goto failed;
		/* set mode */
		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
		ret = softing_fct_cmd(card, 3, "set_mode[0]");
		if (ret < 0)
			goto failed;
		/* set filter */
		/* 11bit id & mask */
		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
		iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
		/* 29bit id.lo & mask.lo & id.hi & mask.hi */
		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
		iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
		iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
		ret = softing_fct_cmd(card, 7, "set_filter[0]");
		if (ret < 0)
			goto failed;
		/* set output control */
		iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
		ret = softing_fct_cmd(card, 5, "set_output[0]");
		if (ret < 0)
			goto failed;
	}
	if (bus_bitmask_start & 2) {
		netdev = card->net[1];
		priv = netdev_priv(netdev);
		error_reporting += softing_error_reporting(netdev);
		/* init chip2 */
		bt = &priv->can.bittiming;
		iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]);
		iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]);
		iowrite16(bt->phase_seg1 + bt->prop_seg,
				&card->dpram[DPRAM_FCT_PARAM + 6]);
		iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]);
		iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0,
				&card->dpram[DPRAM_FCT_PARAM + 10]);
		ret = softing_fct_cmd(card, 2, "initialize_chip[1]");
		if (ret < 0)
			goto failed;
		/* set mode2 */
		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]);
		iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]);
		ret = softing_fct_cmd(card, 4, "set_mode[1]");
		if (ret < 0)
			goto failed;
		/* set filter2 */
		/* 11bit id & mask */
		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]);
		iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]);
		/* 29bit id.lo & mask.lo & id.hi & mask.hi */
		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]);
		iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]);
		iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]);
		iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]);
		ret = softing_fct_cmd(card, 8, "set_filter[1]");
		if (ret < 0)
			goto failed;
		/* set output control2 */
		iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]);
		ret = softing_fct_cmd(card, 6, "set_output[1]");
		if (ret < 0)
			goto failed;
	}
	/* enable_error_frame */
	/*
	 * Error reporting is switched off at the moment since
	 * the receiving of them is not yet 100% verified
	 * This should be enabled sooner or later
	 *
	if (error_reporting) {
		ret = softing_fct_cmd(card, 51, "enable_error_frame");
		if (ret < 0)
			goto failed;
	}
	*/
	/* initialize interface */
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 6]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 8]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 10]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 12]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 14]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 16]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 18]);
	iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 20]);
	ret = softing_fct_cmd(card, 17, "initialize_interface");
	if (ret < 0)
		goto failed;
	/* enable_fifo */
	ret = softing_fct_cmd(card, 36, "enable_fifo");
	if (ret < 0)
		goto failed;
	/* enable fifo tx ack */
	ret = softing_fct_cmd(card, 13, "fifo_tx_ack[0]");
	if (ret < 0)
		goto failed;
	/* enable fifo tx ack2 */
	ret = softing_fct_cmd(card, 14, "fifo_tx_ack[1]");
	if (ret < 0)
		goto failed;
	/* start_chip */
	ret = softing_fct_cmd(card, 11, "start_chip");
	if (ret < 0)
		goto failed;
	iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE]);
	iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE2]);
	if (card->pdat->generation < 2) {
		iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]);
		/* flush the DPRAM caches */
		wmb();
	}
	softing_initialize_timestamp(card);

	/*
	 * do socketcan notifications/status changes
	 * from here, no errors should occur, or the failed: part
	 * must be reviewed
	 */
	memset(&msg, 0, sizeof(msg));
	msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
	msg.can_dlc = CAN_ERR_DLC;
	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
		if (!(bus_bitmask_start & (1 << j)))
			continue;
		netdev = card->net[j];
		if (!netdev)
			continue;
		priv = netdev_priv(netdev);
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
		open_candev(netdev);
		if (dev != netdev) {
			/* notify other busses on the restart */
			softing_netdev_rx(netdev, &msg, ktime_set(0, 0));
			++priv->can.can_stats.restarts;
		}
		netif_wake_queue(netdev);
	}

	/* enable interrupts */
	ret = softing_enable_irq(card, 1);
	if (ret)
		goto failed;
card_done:
	mutex_unlock(&card->fw.lock);
	return 0;
invalid:
	ret = -EINVAL;
failed:
	softing_enable_irq(card, 0);
	softing_reset_chip(card);
	mutex_unlock(&card->fw.lock);
	/* bring all other interfaces down */
	for (j = 0; j < ARRAY_SIZE(card->net); ++j) {
		netdev = card->net[j];
		if (!netdev)
			continue;
		dev_close(netdev);
	}
	return ret;
}
/*
 * fs_enet_tx - reclaim transmitted buffer descriptors
 * @dev: network device
 *
 * Walks the TX ring from dirty_tx, accounting errors/collisions per
 * descriptor status, unmapping and freeing each transmitted skb.
 * Restarts the transmitter after fatal errors and wakes the queue
 * once at least one slot has been freed.  Runs under fep->lock from
 * the TX interrupt path.
 */
static void fs_enet_tx(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	cbd_t *bdp;
	struct sk_buff *skb;
	int dirtyidx, do_wake, do_restart;
	u16 sc;

	spin_lock(&fep->lock);
	bdp = fep->dirty_tx;

	do_wake = do_restart = 0;
	/* stop at the first descriptor still owned by hardware */
	while (((sc = CBDR_SC(bdp)) & BD_ENET_TX_READY) == 0) {

		dirtyidx = bdp - fep->tx_bd_base;

		if (fep->tx_free == fep->tx_ring)
			break;

		skb = fep->tx_skbuff[dirtyidx];

		/*
		 * Check for errors.
		 */
		if (sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
			  BD_ENET_TX_RL | BD_ENET_TX_UN | BD_ENET_TX_CSL)) {

			if (sc & BD_ENET_TX_HB)	/* No heartbeat */
				fep->stats.tx_heartbeat_errors++;
			if (sc & BD_ENET_TX_LC)	/* Late collision */
				fep->stats.tx_window_errors++;
			if (sc & BD_ENET_TX_RL)	/* Retrans limit */
				fep->stats.tx_aborted_errors++;
			if (sc & BD_ENET_TX_UN)	/* Underrun */
				fep->stats.tx_fifo_errors++;
			if (sc & BD_ENET_TX_CSL)	/* Carrier lost */
				fep->stats.tx_carrier_errors++;

			if (sc & (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
				fep->stats.tx_errors++;
				do_restart = 1;
			}
		} else
			fep->stats.tx_packets++;

		if (sc & BD_ENET_TX_READY)
			printk(KERN_WARNING DRV_MODULE_NAME
			       ": %s HEY! Enet xmit interrupt and TX_READY.\n",
			       dev->name);

		/*
		 * Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (sc & BD_ENET_TX_DEF)
			fep->stats.collisions++;

		/* unmap */
		dma_unmap_single(fep->dev, CBDR_BUFADDR(bdp),
				skb->len, DMA_TO_DEVICE);

		/*
		 * Free the sk buffer associated with this last transmit.
		 */
		dev_kfree_skb_irq(skb);
		fep->tx_skbuff[dirtyidx] = NULL;

		/*
		 * Update pointer to next buffer descriptor to be transmitted.
		 */
		if ((sc & BD_ENET_TX_WRAP) == 0)
			bdp++;
		else
			bdp = fep->tx_bd_base;

		/*
		 * Since we have freed up a buffer, the ring is no longer
		 * full.
		 */
		if (!fep->tx_free++)
			do_wake = 1;
	}

	fep->dirty_tx = bdp;

	if (do_restart)
		(*fep->ops->tx_restart)(dev);

	spin_unlock(&fep->lock);

	if (do_wake)
		netif_wake_queue(dev);
}
static int vector_send(struct vector_queue *qi) { struct vector_private *vp = netdev_priv(qi->dev); struct mmsghdr *send_from; int result = 0, send_len, queue_depth = qi->max_depth; if (spin_trylock(&qi->head_lock)) { if (spin_trylock(&qi->tail_lock)) { /* update queue_depth to current value */ queue_depth = qi->queue_depth; spin_unlock(&qi->tail_lock); while (queue_depth > 0) { /* Calculate the start of the vector */ send_len = queue_depth; send_from = qi->mmsg_vector; send_from += qi->head; /* Adjust vector size if wraparound */ if (send_len + qi->head > qi->max_depth) send_len = qi->max_depth - qi->head; /* Try to TX as many packets as possible */ if (send_len > 0) { result = uml_vector_sendmmsg( vp->fds->tx_fd, send_from, send_len, 0 ); vp->in_write_poll = (result != send_len); } /* For some of the sendmmsg error scenarios * we may end being unsure in the TX success * for all packets. It is safer to declare * them all TX-ed and blame the network. */ if (result < 0) { if (net_ratelimit()) netdev_err(vp->dev, "sendmmsg err=%i\n", result); result = send_len; } if (result > 0) { queue_depth = consume_vector_skbs(qi, result); /* This is equivalent to an TX IRQ. * Restart the upper layers to feed us * more packets. */ if (result > vp->estats.tx_queue_max) vp->estats.tx_queue_max = result; vp->estats.tx_queue_running_average = (vp->estats.tx_queue_running_average + result) >> 1; } netif_trans_update(qi->dev); netif_wake_queue(qi->dev); /* if TX is busy, break out of the send loop, * poll write IRQ will reschedule xmit for us */ if (result != send_len) { vp->estats.tx_restart_queue++; break; } } }
/*
 * rtusb_fast_probe - fast re-probe/resume path for the RT28xx USB adapter
 * @handle: OS-specific handle used to locate the driver cookie
 * @ppAd: in/out pointer to the adapter context
 * @intf: the USB interface being (re)attached
 *
 * Re-binds an already-initialized adapter to a re-enumerated USB
 * device: re-initializes the USB config, refreshes the sysfs netdev
 * binding, then turns the radio back on.  With selective suspend the
 * function returns right after the radio-on command; otherwise it
 * re-attaches the netdevice and restarts its queue (unless WOW was
 * active, in which case WOW is simply disabled).
 *
 * Returns NDIS_STATUS_SUCCESS or NDIS_STATUS_FAILURE.
 */
int rtusb_fast_probe(VOID *handle, VOID **ppAd, struct usb_interface *intf)
{
	VOID *pAd = *ppAd;
	VOID *pCookie = NULL;
	struct net_device *net_dev;
#if (defined(WOW_SUPPORT) && defined(RTMP_MAC_USB)) || defined(NEW_WOW_SUPPORT)
	UCHAR WOWRun;
#endif /* (defined(WOW_SUPPORT) && defined(RTMP_MAC_USB)) || defined(NEW_WOW_SUPPORT) */
#ifdef USB_SUPPORT_SELECTIVE_SUSPEND
	INT pm_usage_cnt;
#endif /* USB_SUPPORT_SELECTIVE_SUSPEND */
	struct usb_device *usb_dev = NULL;

	pCookie = RTMPCheckOsCookie(handle, &pAd);
	if (pCookie == NULL)
		return NDIS_STATUS_FAILURE;

	usb_dev = ((POS_COOKIE)pCookie)->pUsb_Dev;

	if (USBDevConfigInit(usb_dev, intf, pAd) == FALSE) {
		RTMPFreeAdapter(pAd);
		return NDIS_STATUS_FAILURE;
	}

	RTMP_DRIVER_USB_INIT(pAd, usb_dev, 0);

	/* netdevice-related structure set-up */
	netdev_sysfs_reinit(&pAd, usb_dev);

	if (RTMP_DRIVER_IOCTL_SANITY_CHECK(pAd, NULL) != NDIS_STATUS_SUCCESS) {
		DBGPRINT(RT_DEBUG_ERROR, ("Driver is not init, ignore %s\n", __func__));
		return NDIS_STATUS_SUCCESS;
	}

#ifdef USB_SUPPORT_SELECTIVE_SUSPEND
	/* hold an autopm reference while the radio comes back up */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,32)
	pm_usage_cnt = atomic_read(&intf->pm_usage_cnt);
#else
	pm_usage_cnt = intf->pm_usage_cnt;
#endif
	if(pm_usage_cnt <= 0)
		usb_autopm_get_interface(intf);

	RTMP_DRIVER_ADAPTER_RT28XX_CMD_RADIO_ON(pAd);
	DBGPRINT(RT_DEBUG_ERROR, ("%s(): <=autosuspend\n", __func__));
	return NDIS_STATUS_SUCCESS;
#endif /* USB_SUPPORT_SELECTIVE_SUSPEND */

#if (defined(WOW_SUPPORT) && defined(RTMP_MAC_USB)) || defined(NEW_WOW_SUPPORT)
	RTMP_DRIVER_ADAPTER_RT28XX_WOW_RUNSTATUS(pAd, &WOWRun);
	if (WOWRun)
		RTMP_DRIVER_ADAPTER_RT28XX_WOW_DISABLE(pAd);
	else
#endif /* (defined(WOW_SUPPORT) && defined(RTMP_MAC_USB)) || defined(NEW_WOW_SUPPORT) */
	{
		DBGPRINT(RT_DEBUG_ERROR, ("%s :radio_on \n", __func__));
		RTMP_DRIVER_ADAPTER_RT28XX_CMD_RADIO_ON(pAd);

		RTMP_DRIVER_NET_DEV_GET(pAd, &net_dev);
		netif_device_attach(net_dev);
		netif_start_queue(net_dev);
		netif_carrier_on(net_dev);
		netif_wake_queue(net_dev);
	}

	RTMP_DRIVER_USB_RESUME(pAd);

	DBGPRINT(RT_DEBUG_TRACE, ("<=%s()\n", __func__));
	return NDIS_STATUS_SUCCESS;
}
/*
 * elmc_send_packet - hard_start_xmit for the 3c523 (i82586) adapter
 * @skb: frame to transmit (consumed)
 * @dev: network device
 *
 * Copies the frame into the next on-card transmit buffer (padding
 * short frames to ETH_ZLEN) and chains the transmit command into the
 * i82586 command list.  Three compile-time variants exist: single
 * buffer with explicit CUC_START retries (NO_NOPCOMMANDS), single
 * buffer linked through two alternating NOP commands, or multiple
 * buffers linked through a NOP per buffer.  Always returns 0.
 */
static int elmc_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	int len;
	int i;
#ifndef NO_NOPCOMMANDS
	int next_nop;
#endif
	struct priv *p = (struct priv *) dev->priv;

	netif_stop_queue(dev);

	/* pad undersized frames to the ethernet minimum */
	len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;

	if (len != skb->len)
		memset((char *) p->xmit_cbuffs[p->xmit_count], 0, ETH_ZLEN);
	memcpy((char *) p->xmit_cbuffs[p->xmit_count], (char *) (skb->data), skb->len);

#if (NUM_XMIT_BUFFS == 1)
#ifdef NO_NOPCOMMANDS
	p->xmit_buffs[0]->size = TBD_LAST | len;
	/* retry CUC_START up to 16 times - the CU sometimes needs it */
	for (i = 0; i < 16; i++) {
		p->scb->cbl_offset = make16(p->xmit_cmds[0]);
		p->scb->cmd = CUC_START;

		p->xmit_cmds[0]->cmd_status = 0;

		elmc_attn586();
		dev->trans_start = jiffies;
		if (!i) {
			dev_kfree_skb(skb);
		}
		WAIT_4_SCB_CMD();
		if ((p->scb->status & CU_ACTIVE)) {	/* test it, because CU sometimes doesn't start immediately */
			break;
		}
		if (p->xmit_cmds[0]->cmd_status) {
			break;
		}
		if (i == 15) {
			printk(KERN_WARNING "%s: Can't start transmit-command.\n", dev->name);
		}
	}
#else
	/* link the TX command between the two alternating NOPs */
	next_nop = (p->nop_point + 1) & 0x1;
	p->xmit_buffs[0]->size = TBD_LAST | len;

	p->xmit_cmds[0]->cmd_link = p->nop_cmds[next_nop]->cmd_link
	    = make16((p->nop_cmds[next_nop]));
	p->xmit_cmds[0]->cmd_status = p->nop_cmds[next_nop]->cmd_status = 0;

	p->nop_cmds[p->nop_point]->cmd_link = make16((p->xmit_cmds[0]));
	dev->trans_start = jiffies;
	p->nop_point = next_nop;
	dev_kfree_skb(skb);
#endif
#else
	/* multi-buffer variant: one NOP per transmit buffer */
	p->xmit_buffs[p->xmit_count]->size = TBD_LAST | len;
	if ((next_nop = p->xmit_count + 1) == NUM_XMIT_BUFFS) {
		next_nop = 0;
	}
	p->xmit_cmds[p->xmit_count]->cmd_status = 0;
	p->xmit_cmds[p->xmit_count]->cmd_link = p->nop_cmds[next_nop]->cmd_link
	    = make16((p->nop_cmds[next_nop]));
	p->nop_cmds[next_nop]->cmd_status = 0;

	p->nop_cmds[p->xmit_count]->cmd_link = make16((p->xmit_cmds[p->xmit_count]));
	dev->trans_start = jiffies;
	p->xmit_count = next_nop;

	/* still a free buffer slot - keep the queue running */
	if (p->xmit_count != p->xmit_last)
		netif_wake_queue(dev);
	dev_kfree_skb(skb);
#endif
	return 0;
}
static void gs_usb_receive_bulk_callback(struct urb *urb) { struct gs_usb *usbcan = urb->context; struct gs_can *dev; struct net_device *netdev; int rc; struct net_device_stats *stats; struct gs_host_frame *hf = urb->transfer_buffer; struct gs_tx_context *txc; struct can_frame *cf; struct sk_buff *skb; BUG_ON(!usbcan); switch (urb->status) { case 0: /* success */ break; case -ENOENT: case -ESHUTDOWN: return; default: /* do not resubmit aborted urbs. eg: when device goes down */ return; } /* device reports out of range channel id */ if (hf->channel >= GS_MAX_INTF) goto resubmit_urb; dev = usbcan->canch[hf->channel]; netdev = dev->netdev; stats = &netdev->stats; if (!netif_device_present(netdev)) return; if (hf->echo_id == -1) { /* normal rx */ skb = alloc_can_skb(dev->netdev, &cf); if (!skb) return; cf->can_id = hf->can_id; cf->can_dlc = get_can_dlc(hf->can_dlc); memcpy(cf->data, hf->data, 8); /* ERROR frames tell us information about the controller */ if (hf->can_id & CAN_ERR_FLAG) gs_update_state(dev, cf); netdev->stats.rx_packets++; netdev->stats.rx_bytes += hf->can_dlc; netif_rx(skb); } else { /* echo_id == hf->echo_id */ if (hf->echo_id >= GS_MAX_TX_URBS) { netdev_err(netdev, "Unexpected out of range echo id %d\n", hf->echo_id); goto resubmit_urb; } netdev->stats.tx_packets++; netdev->stats.tx_bytes += hf->can_dlc; txc = gs_get_tx_context(dev, hf->echo_id); /* bad devices send bad echo_ids. 
*/ if (!txc) { netdev_err(netdev, "Unexpected unused echo id %d\n", hf->echo_id); goto resubmit_urb; } can_get_echo_skb(netdev, hf->echo_id); gs_free_tx_context(txc); netif_wake_queue(netdev); } if (hf->flags & GS_CAN_FLAG_OVERFLOW) { skb = alloc_can_err_skb(netdev, &cf); if (!skb) goto resubmit_urb; cf->can_id |= CAN_ERR_CRTL; cf->can_dlc = CAN_ERR_DLC; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_over_errors++; stats->rx_errors++; netif_rx(skb); } resubmit_urb: usb_fill_bulk_urb(urb, usbcan->udev, usb_rcvbulkpipe(usbcan->udev, GSUSB_ENDPOINT_IN), hf, sizeof(struct gs_host_frame), gs_usb_receive_bulk_callback, usbcan ); rc = usb_submit_urb(urb, GFP_ATOMIC); /* USB failure take down all interfaces */ if (rc == -ENODEV) { for (rc = 0; rc < GS_MAX_INTF; rc++) { if (usbcan->canch[rc]) netif_device_detach(usbcan->canch[rc]->netdev); } } }
/*
 * HP-SIR format interrupt service routines.
 *
 * sa1100_irda_hpsir_irq: handles one UART2 interrupt while in SIR
 * mode - drains receive errors first, clears sticky status bits,
 * unwraps any bytes waiting in the RX FIFO, then refills the TX FIFO.
 * When the last TX byte has left the wire it re-enables the receiver,
 * applies a pending speed change and wakes the network queue.
 */
static void sa1100_irda_hpsir_irq(struct net_device *dev)
{
	struct sa1100_irda *si = netdev_priv(dev);
	int status;

	status = Ser2UTSR0;

	/*
	 * Deal with any receive errors first.  The bytes in error may be
	 * the only bytes in the receive FIFO, so we do this first.
	 */
	while (status & UTSR0_EIF) {
		int stat, data;

		stat = Ser2UTSR1;
		data = Ser2UTDR;

		if (stat & (UTSR1_FRE | UTSR1_ROR)) {
			dev->stats.rx_errors++;
			if (stat & UTSR1_FRE)
				dev->stats.rx_frame_errors++;
			if (stat & UTSR1_ROR)
				dev->stats.rx_fifo_errors++;
		} else
			async_unwrap_char(dev, &dev->stats, &si->rx_buff, data);

		status = Ser2UTSR0;
	}

	/*
	 * We must clear certain bits.
	 */
	Ser2UTSR0 = status & (UTSR0_RID | UTSR0_RBB | UTSR0_REB);

	if (status & UTSR0_RFS) {
		/*
		 * There are at least 4 bytes in the FIFO.  Read 3 bytes
		 * and leave the rest to the block below.
		 */
		async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
		async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
		async_unwrap_char(dev, &dev->stats, &si->rx_buff, Ser2UTDR);
	}

	if (status & (UTSR0_RFS | UTSR0_RID)) {
		/*
		 * Fifo contains more than 1 character.
		 */
		do {
			async_unwrap_char(dev, &dev->stats, &si->rx_buff,
					  Ser2UTDR);
		} while (Ser2UTSR1 & UTSR1_RNE);

	}

	if (status & UTSR0_TFS && si->tx_buff.len) {
		/*
		 * Transmitter FIFO is not full
		 */
		do {
			Ser2UTDR = *si->tx_buff.data++;
			si->tx_buff.len -= 1;
		} while (Ser2UTSR1 & UTSR1_TNF && si->tx_buff.len);

		if (si->tx_buff.len == 0) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += si->tx_buff.data -
					      si->tx_buff.head;

			/*
			 * We need to ensure that the transmitter has
			 * finished.
			 */
			do
				rmb();
			while (Ser2UTSR1 & UTSR1_TBY);

			/*
			 * Ok, we've finished transmitting.  Now enable
			 * the receiver.  Sometimes we get a receive IRQ
			 * immediately after a transmit...
			 */
			Ser2UTSR0 = UTSR0_REB | UTSR0_RBB | UTSR0_RID;
			Ser2UTCR3 = UTCR3_RIE | UTCR3_RXE | UTCR3_TXE;

			if (si->newspeed) {
				sa1100_irda_set_speed(si, si->newspeed);
				si->newspeed = 0;
			}

			/* I'm hungry! */
			netif_wake_queue(dev);
		}
	}
}
/*
 * fec_enet_tx - reclaim completed transmit buffer descriptors.
 * @ndev: network device whose FEC transmit ring is to be cleaned.
 *
 * Walks the TX ring from dirty_tx, unmapping DMA buffers, updating
 * error/collision statistics, optionally delivering hardware TX
 * timestamps, and freeing the associated sk_buffs.  Runs under
 * fep->hw_lock; stops at the first descriptor the hardware still
 * owns (BD_ENET_TX_READY set), or at cur_tx when the ring is not
 * marked full (that descriptor has not been submitted yet).
 */
static void
fec_enet_tx(struct net_device *ndev)
{
	struct	fec_enet_private *fep;
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;

	fep = netdev_priv(ndev);
	spin_lock(&fep->hw_lock);
	bdp = fep->dirty_tx;

	/* Process every descriptor the controller has released back to us. */
	while (((status = bdp->cbd_sc) & BD_ENET_TX_READY) == 0) {
		/* dirty_tx caught up with cur_tx and the ring isn't full:
		 * this descriptor was never queued, so stop here. */
		if (bdp == fep->cur_tx && fep->tx_full == 0)
			break;

		/* The hardware is done with the buffer: release the DMA
		 * mapping before the skb is freed or the slot reused. */
		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_TO_DEVICE);
		bdp->cbd_bufaddr = 0;

		skb = fep->tx_skbuff[fep->skb_dirty];
		/* Check for errors. */
		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
				   BD_ENET_TX_RL | BD_ENET_TX_UN |
				   BD_ENET_TX_CSL)) {
			ndev->stats.tx_errors++;
			if (status & BD_ENET_TX_HB)  /* No heartbeat */
				ndev->stats.tx_heartbeat_errors++;
			if (status & BD_ENET_TX_LC)  /* Late collision */
				ndev->stats.tx_window_errors++;
			if (status & BD_ENET_TX_RL)  /* Retrans limit */
				ndev->stats.tx_aborted_errors++;
			if (status & BD_ENET_TX_UN)  /* Underrun */
				ndev->stats.tx_fifo_errors++;
			if (status & BD_ENET_TX_CSL) /* Carrier lost */
				ndev->stats.tx_carrier_errors++;
		} else {
			ndev->stats.tx_packets++;
		}

#ifdef CONFIG_FEC_PTP
		/* If the skb requested a hardware timestamp, convert the
		 * descriptor's cycle-counter value to nanoseconds under
		 * tmreg_lock and hand it back to the stack. */
		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS)) {
			struct skb_shared_hwtstamps shhwtstamps;
			unsigned long flags;

			memset(&shhwtstamps, 0, sizeof(shhwtstamps));
			spin_lock_irqsave(&fep->tmreg_lock, flags);
			shhwtstamps.hwtstamp = ns_to_ktime(
				timecounter_cyc2time(&fep->tc, bdp->ts));
			spin_unlock_irqrestore(&fep->tmreg_lock, flags);
			skb_tstamp_tx(skb, &shhwtstamps);
		}
#endif
		/* Should be impossible: the loop condition already
		 * excluded READY descriptors. Flag it if it happens. */
		if (status & BD_ENET_TX_READY)
			printk("HEY! Enet xmit interrupt and TX_READY.\n");

		/* Deferred means some collisions occurred during transmit,
		 * but we eventually sent the packet OK.
		 */
		if (status & BD_ENET_TX_DEF)
			ndev->stats.collisions++;

		/* Free the sk buffer associated with this last transmit */
		dev_kfree_skb_any(skb);
		fep->tx_skbuff[fep->skb_dirty] = NULL;
		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;

		/* Update pointer to next buffer descriptor to be transmitted */
		if (status & BD_ENET_TX_WRAP)
			bdp = fep->tx_bd_base;
		else
			bdp++;

		/* Since we have freed up a buffer, the ring is no longer full
		 */
		if (fep->tx_full) {
			fep->tx_full = 0;
			if (netif_queue_stopped(ndev))
				netif_wake_queue(ndev);
		}
	}
	fep->dirty_tx = bdp;
	spin_unlock(&fep->hw_lock);
}
void vRunCommand(struct work_struct *work) { struct vnt_private *pDevice = container_of(work, struct vnt_private, run_command_work.work); struct vnt_manager *pMgmt = &pDevice->vnt_mgmt; PWLAN_IE_SSID pItemSSID; PWLAN_IE_SSID pItemSSIDCurr; CMD_STATUS Status; struct sk_buff *skb; union iwreq_data wrqu; int ii; u8 byMask[8] = {1, 2, 4, 8, 0x10, 0x20, 0x40, 0x80}; u8 byData; if (pDevice->Flags & fMP_DISCONNECTED) return; if (pDevice->bCmdRunning != true) return; spin_lock_irq(&pDevice->lock); switch (pDevice->eCommandState) { case WLAN_CMD_SCAN_START: pDevice->byReAssocCount = 0; if (pDevice->bRadioOff == true) { s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } if (pMgmt->eCurrMode == WMAC_MODE_ESS_AP) { s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } pItemSSID = (PWLAN_IE_SSID)pMgmt->abyScanSSID; if (pMgmt->uScanChannel == 0) pMgmt->uScanChannel = pDevice->byMinChannel; if (pMgmt->uScanChannel > pDevice->byMaxChannel) { pDevice->eCommandState = WLAN_CMD_SCAN_END; s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } else { if (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel)) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Invalid channel pMgmt->uScanChannel = %d\n", pMgmt->uScanChannel); pMgmt->uScanChannel++; s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } if (pMgmt->uScanChannel == pDevice->byMinChannel) { // pMgmt->eScanType = WMAC_SCAN_ACTIVE; //mike mark pMgmt->abyScanBSSID[0] = 0xFF; pMgmt->abyScanBSSID[1] = 0xFF; pMgmt->abyScanBSSID[2] = 0xFF; pMgmt->abyScanBSSID[3] = 0xFF; pMgmt->abyScanBSSID[4] = 0xFF; pMgmt->abyScanBSSID[5] = 0xFF; pItemSSID->byElementID = WLAN_EID_SSID; // clear bssid list /* BSSvClearBSSList((void *) pDevice, pDevice->bLinkPass); */ pMgmt->eScanState = WMAC_IS_SCANNING; pDevice->byScanBBType = pDevice->byBBType; //lucas pDevice->bStopDataPkt = true; // Turn off RCR_BSSID filter every time MACvRegBitsOff(pDevice, MAC_REG_RCR, RCR_BSSID); 
pDevice->byRxMode &= ~RCR_BSSID; } //lucas vAdHocBeaconStop(pDevice); if ((pDevice->byBBType != BB_TYPE_11A) && (pMgmt->uScanChannel > CB_MAX_CHANNEL_24G)) { pDevice->byBBType = BB_TYPE_11A; CARDvSetBSSMode(pDevice); } else if ((pDevice->byBBType == BB_TYPE_11A) && (pMgmt->uScanChannel <= CB_MAX_CHANNEL_24G)) { pDevice->byBBType = BB_TYPE_11G; CARDvSetBSSMode(pDevice); } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning.... channel: [%d]\n", pMgmt->uScanChannel); // Set channel CARDbSetMediaChannel(pDevice, pMgmt->uScanChannel); // Set Baseband to be more sensitive. if (pDevice->bUpdateBBVGA) { BBvSetShortSlotTime(pDevice); BBvSetVGAGainOffset(pDevice, pDevice->abyBBVGA[0]); BBvUpdatePreEDThreshold(pDevice, true); } pMgmt->uScanChannel++; while (!ChannelValid(pDevice->byZoneType, pMgmt->uScanChannel) && pMgmt->uScanChannel <= pDevice->byMaxChannel){ pMgmt->uScanChannel++; } if (pMgmt->uScanChannel > pDevice->byMaxChannel) { // Set Baseband to be not sensitive and rescan pDevice->eCommandState = WLAN_CMD_SCAN_END; } if ((pMgmt->b11hEnable == false) || (pMgmt->uScanChannel < CB_MAX_CHANNEL_24G)) { s_vProbeChannel(pDevice); spin_unlock_irq(&pDevice->lock); vCommandTimerWait((void *) pDevice, 100); return; } else { spin_unlock_irq(&pDevice->lock); vCommandTimerWait((void *) pDevice, WCMD_PASSIVE_SCAN_TIME); return; } } break; case WLAN_CMD_SCAN_END: // Set Baseband's sensitivity back. 
if (pDevice->byBBType != pDevice->byScanBBType) { pDevice->byBBType = pDevice->byScanBBType; CARDvSetBSSMode(pDevice); } if (pDevice->bUpdateBBVGA) { BBvSetShortSlotTime(pDevice); BBvSetVGAGainOffset(pDevice, pDevice->byBBVGACurrent); BBvUpdatePreEDThreshold(pDevice, false); } // Set channel back vAdHocBeaconRestart(pDevice); // Set channel back CARDbSetMediaChannel(pDevice, pMgmt->uCurrChannel); // Set Filter if (pMgmt->bCurrBSSIDFilterOn) { MACvRegBitsOn(pDevice, MAC_REG_RCR, RCR_BSSID); pDevice->byRxMode |= RCR_BSSID; } DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Scanning, set back to channel: [%d]\n", pMgmt->uCurrChannel); pMgmt->uScanChannel = 0; pMgmt->eScanState = WMAC_NO_SCANNING; pDevice->bStopDataPkt = false; /*send scan event to wpa_Supplicant*/ PRINT_K("wireless_send_event--->SIOCGIWSCAN(scan done)\n"); memset(&wrqu, 0, sizeof(wrqu)); wireless_send_event(pDevice->dev, SIOCGIWSCAN, &wrqu, NULL); s_bCommandComplete(pDevice); break; case WLAN_CMD_DISASSOCIATE_START: pDevice->byReAssocCount = 0; if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState != WMAC_STATE_ASSOC)) { s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } else { pDevice->bwextstep0 = false; pDevice->bwextstep1 = false; pDevice->bwextstep2 = false; pDevice->bwextstep3 = false; pDevice->bWPASuppWextEnabled = false; pDevice->fWPA_Authened = false; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send Disassociation Packet..\n"); // reason = 8 : disassoc because sta has left vMgrDisassocBeginSta((void *) pDevice, pMgmt, pMgmt->abyCurrBSSID, (8), &Status); pDevice->bLinkPass = false; ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW); // unlock command busy pItemSSID = (PWLAN_IE_SSID)pMgmt->abyCurrSSID; pItemSSID->len = 0; memset(pItemSSID->abySSID, 0, WLAN_SSID_MAXLEN); pMgmt->eCurrState = WMAC_STATE_IDLE; pMgmt->sNodeDBTable[0].bActive = false; // pDevice->bBeaconBufReady = false; } netif_stop_queue(pDevice->dev); if (pDevice->bNeedRadioOFF 
== true) CARDbRadioPowerOff(pDevice); s_bCommandComplete(pDevice); break; case WLAN_CMD_SSID_START: pDevice->byReAssocCount = 0; if (pDevice->bRadioOff == true) { s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } memcpy(pMgmt->abyAdHocSSID, pMgmt->abyDesireSSID, ((PWLAN_IE_SSID)pMgmt->abyDesireSSID)->len + WLAN_IEHDR_LEN); pItemSSID = (PWLAN_IE_SSID)pMgmt->abyDesireSSID; pItemSSIDCurr = (PWLAN_IE_SSID)pMgmt->abyCurrSSID; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: desire ssid = %s\n", pItemSSID->abySSID); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" cmd: curr ssid = %s\n", pItemSSIDCurr->abySSID); if (pMgmt->eCurrState == WMAC_STATE_ASSOC) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Cmd pMgmt->eCurrState == WMAC_STATE_ASSOC\n"); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSID->len =%d\n", pItemSSID->len); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" pItemSSIDCurr->len = %d\n", pItemSSIDCurr->len); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" desire ssid = %s\n", pItemSSID->abySSID); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" curr ssid = %s\n", pItemSSIDCurr->abySSID); } if ((pMgmt->eCurrState == WMAC_STATE_ASSOC) || ((pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED))) { if (pItemSSID->len == pItemSSIDCurr->len) { if (memcmp(pItemSSID->abySSID, pItemSSIDCurr->abySSID, pItemSSID->len) == 0) { s_bCommandComplete(pDevice); spin_unlock_irq(&pDevice->lock); return; } } netif_stop_queue(pDevice->dev); pDevice->bLinkPass = false; ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW); } // set initial state pMgmt->eCurrState = WMAC_STATE_IDLE; pMgmt->eCurrMode = WMAC_MODE_STANDBY; PSvDisablePowerSaving((void *) pDevice); BSSvClearNodeDBTable(pDevice, 0); vMgrJoinBSSBegin((void *) pDevice, &Status); // if Infra mode if ((pMgmt->eCurrMode == WMAC_MODE_ESS_STA) && (pMgmt->eCurrState == WMAC_STATE_JOINTED)) { // Call mgr to begin the deauthentication // reason = (3) because sta has left ESS if (pMgmt->eCurrState >= 
WMAC_STATE_AUTH) { vMgrDeAuthenBeginSta((void *)pDevice, pMgmt, pMgmt->abyCurrBSSID, (3), &Status); } // Call mgr to begin the authentication vMgrAuthenBeginSta((void *) pDevice, pMgmt, &Status); if (Status == CMD_STATUS_SUCCESS) { pDevice->byLinkWaitCount = 0; pDevice->eCommandState = WLAN_AUTHENTICATE_WAIT; vCommandTimerWait((void *) pDevice, AUTHENTICATE_TIMEOUT); spin_unlock_irq(&pDevice->lock); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO" Set eCommandState = WLAN_AUTHENTICATE_WAIT\n"); return; } } // if Adhoc mode else if (pMgmt->eCurrMode == WMAC_MODE_IBSS_STA) { if (pMgmt->eCurrState == WMAC_STATE_JOINTED) { if (netif_queue_stopped(pDevice->dev)) netif_wake_queue(pDevice->dev); pDevice->bLinkPass = true; ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_INTER); pMgmt->sNodeDBTable[0].bActive = true; pMgmt->sNodeDBTable[0].uInActiveCount = 0; } else { // start own IBSS DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "CreateOwn IBSS by CurrMode = IBSS_STA\n"); vMgrCreateOwnIBSS((void *) pDevice, &Status); if (Status != CMD_STATUS_SUCCESS) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_IBSS_CREATE fail!\n"); } BSSvAddMulticastNode(pDevice); } s_bClearBSSID_SCAN(pDevice); } // if SSID not found else if (pMgmt->eCurrMode == WMAC_MODE_STANDBY) { if (pMgmt->eConfigMode == WMAC_CONFIG_IBSS_STA || pMgmt->eConfigMode == WMAC_CONFIG_AUTO) { // start own IBSS DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "CreateOwn IBSS by CurrMode = STANDBY\n"); vMgrCreateOwnIBSS((void *) pDevice, &Status); if (Status != CMD_STATUS_SUCCESS) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "WLAN_CMD_IBSS_CREATE fail!\n"); } BSSvAddMulticastNode(pDevice); s_bClearBSSID_SCAN(pDevice); /* pDevice->bLinkPass = true; ControlvMaskByte(pDevice,MESSAGE_REQUEST_MACREG,MAC_REG_PAPEDELAY,LEDSTS_STS,LEDSTS_INTER); if (netif_queue_stopped(pDevice->dev)){ netif_wake_queue(pDevice->dev); } s_bClearBSSID_SCAN(pDevice); */ } else { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Disconnect SSID none\n"); // 
if(pDevice->bWPASuppWextEnabled == true) { union iwreq_data wrqu; memset(&wrqu, 0, sizeof(wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; PRINT_K("wireless_send_event--->SIOCGIWAP(disassociated:vMgrJoinBSSBegin Fail !!)\n"); wireless_send_event(pDevice->dev, SIOCGIWAP, &wrqu, NULL); } } } s_bCommandComplete(pDevice); break; case WLAN_AUTHENTICATE_WAIT: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_AUTHENTICATE_WAIT\n"); if (pMgmt->eCurrState == WMAC_STATE_AUTH) { pDevice->byLinkWaitCount = 0; // Call mgr to begin the association DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_AUTH\n"); vMgrAssocBeginSta((void *) pDevice, pMgmt, &Status); if (Status == CMD_STATUS_SUCCESS) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState = WLAN_ASSOCIATE_WAIT\n"); pDevice->byLinkWaitCount = 0; pDevice->eCommandState = WLAN_ASSOCIATE_WAIT; vCommandTimerWait((void *) pDevice, ASSOCIATE_TIMEOUT); spin_unlock_irq(&pDevice->lock); return; } } else if (pMgmt->eCurrState < WMAC_STATE_AUTHPENDING) { printk("WLAN_AUTHENTICATE_WAIT:Authen Fail???\n"); } else if (pDevice->byLinkWaitCount <= 4) { //mike add:wait another 2 sec if authenticated_frame delay! 
pDevice->byLinkWaitCount++; printk("WLAN_AUTHENTICATE_WAIT:wait %d times!!\n", pDevice->byLinkWaitCount); spin_unlock_irq(&pDevice->lock); vCommandTimerWait((void *) pDevice, AUTHENTICATE_TIMEOUT/2); return; } pDevice->byLinkWaitCount = 0; s_bCommandComplete(pDevice); break; case WLAN_ASSOCIATE_WAIT: if (pMgmt->eCurrState == WMAC_STATE_ASSOC) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCurrState == WMAC_STATE_ASSOC\n"); if (pDevice->ePSMode != WMAC_POWER_CAM) { PSvEnablePowerSaving((void *) pDevice, pMgmt->wListenInterval); } /* if (pMgmt->eAuthenMode >= WMAC_AUTH_WPA) { KeybRemoveAllKey(pDevice, &(pDevice->sKey), pDevice->abyBSSID); } */ pDevice->byLinkWaitCount = 0; pDevice->byReAssocCount = 0; pDevice->bLinkPass = true; ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_INTER); s_bClearBSSID_SCAN(pDevice); if (netif_queue_stopped(pDevice->dev)) netif_wake_queue(pDevice->dev); } else if (pMgmt->eCurrState < WMAC_STATE_ASSOCPENDING) { printk("WLAN_ASSOCIATE_WAIT:Association Fail???\n"); } else if (pDevice->byLinkWaitCount <= 4) { //mike add:wait another 2 sec if associated_frame delay! 
pDevice->byLinkWaitCount++; printk("WLAN_ASSOCIATE_WAIT:wait %d times!!\n", pDevice->byLinkWaitCount); spin_unlock_irq(&pDevice->lock); vCommandTimerWait((void *) pDevice, ASSOCIATE_TIMEOUT/2); return; } s_bCommandComplete(pDevice); break; case WLAN_CMD_AP_MODE_START: DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"eCommandState == WLAN_CMD_AP_MODE_START\n"); if (pMgmt->eConfigMode == WMAC_CONFIG_AP) { cancel_delayed_work_sync(&pDevice->second_callback_work); pMgmt->eCurrState = WMAC_STATE_IDLE; pMgmt->eCurrMode = WMAC_MODE_STANDBY; pDevice->bLinkPass = false; ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_SLOW); if (pDevice->bEnableHostWEP == true) BSSvClearNodeDBTable(pDevice, 1); else BSSvClearNodeDBTable(pDevice, 0); pDevice->uAssocCount = 0; pMgmt->eCurrState = WMAC_STATE_IDLE; pDevice->bFixRate = false; vMgrCreateOwnIBSS((void *) pDevice, &Status); if (Status != CMD_STATUS_SUCCESS) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "vMgrCreateOwnIBSS fail!\n"); } // always turn off unicast bit MACvRegBitsOff(pDevice, MAC_REG_RCR, RCR_UNICAST); pDevice->byRxMode &= ~RCR_UNICAST; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "wcmd: rx_mode = %x\n", pDevice->byRxMode); BSSvAddMulticastNode(pDevice); if (netif_queue_stopped(pDevice->dev)) netif_wake_queue(pDevice->dev); pDevice->bLinkPass = true; ControlvMaskByte(pDevice, MESSAGE_REQUEST_MACREG, MAC_REG_PAPEDELAY, LEDSTS_STS, LEDSTS_INTER); schedule_delayed_work(&pDevice->second_callback_work, HZ); } s_bCommandComplete(pDevice); break; case WLAN_CMD_TX_PSPACKET_START: // DTIM Multicast tx if (pMgmt->sNodeDBTable[0].bRxPSPoll) { while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[0].sTxPSQueue)) != NULL) { if (skb_queue_empty(&pMgmt->sNodeDBTable[0].sTxPSQueue)) { pMgmt->abyPSTxMap[0] &= ~byMask[0]; pDevice->bMoreData = false; } else { pDevice->bMoreData = true; } if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) != 0) DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Multicast ps tx fail\n"); pMgmt->sNodeDBTable[0].wEnQueueCnt--; } 
} // PS nodes tx for (ii = 1; ii < (MAX_NODE_NUM + 1); ii++) { if (pMgmt->sNodeDBTable[ii].bActive && pMgmt->sNodeDBTable[ii].bRxPSPoll) { DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d Enqueu Cnt= %d\n", ii, pMgmt->sNodeDBTable[ii].wEnQueueCnt); while ((skb = skb_dequeue(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) != NULL) { if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) { // clear tx map pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &= ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7]; pDevice->bMoreData = false; } else { pDevice->bMoreData = true; } if (nsDMA_tx_packet(pDevice, TYPE_AC0DMA, skb) != 0) DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "sta ps tx fail\n"); pMgmt->sNodeDBTable[ii].wEnQueueCnt--; // check if sta ps enable, wait next pspoll // if sta ps disable, send all pending buffers. if (pMgmt->sNodeDBTable[ii].bPSEnable) break; } if (skb_queue_empty(&pMgmt->sNodeDBTable[ii].sTxPSQueue)) { // clear tx map pMgmt->abyPSTxMap[pMgmt->sNodeDBTable[ii].wAID >> 3] &= ~byMask[pMgmt->sNodeDBTable[ii].wAID & 7]; DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "Index=%d PS queue clear\n", ii); }