Esempio n. 1
0
/* e1000_txavail: Driver callback invoked when new TX data is available.
 * Polls the network for new XMIT data if the interface is up and the
 * hardware TX ring can accept another packet.  Always returns OK.
 */

static int e1000_txavail(struct net_driver_s *dev)
{
    struct e1000_dev *e1000 = (struct e1000_dev *)dev->d_private;
    irqstate_t flags;
    int tail;

    /* Disable interrupts because this function may be called from interrupt
     * level processing.
     */

    flags = irqsave();

    /* Ignore the notification if the interface is not yet up */

    if (e1000->bifup)
    {
        /* Sample the TX ring tail inside the critical section.  Reading it
         * before irqsave() (as the original code did) could act on a stale
         * index if interrupt-level processing advanced the ring in between.
         */

        tail = e1000->tx_ring.tail;

        /* Check if there is room in the hardware to hold another outgoing
         * packet.
         */

        if (e1000->tx_ring.desc[tail].desc_status)
        {
            /* If so, then poll the network for new XMIT data */

            (void)devif_poll(&e1000->netdev, e1000_txpoll);
        }
    }

    irqrestore(flags);
    return OK;
}
Esempio n. 2
0
/* bcmf_txavail_work: Deferred TX-available work.  If the interface is up
 * and a TX frame can be allocated, set up the device buffer and poll the
 * network for new XMIT data.
 */

static void bcmf_txavail_work(FAR void *arg)
{
  FAR struct bcmf_dev_s *priv = (FAR struct bcmf_dev_s *)arg;

  /* Lock the network and serialize driver operations if necessary.
   * NOTE: Serialization is only required in the case where the driver work
   * is performed on an LP worker thread and where more than one LP worker
   * thread has been configured.
   */

  net_lock();

  /* Ignore the notification unless the interface is up AND the hardware has
   * room for another outgoing frame (the allocation is only attempted when
   * the interface is up, exactly as before).
   */

  if (priv->bc_bifup && !bcmf_netdev_alloc_tx_frame(priv))
    {
      /* Point the device buffer at the newly allocated TX frame and poll
       * the network for new XMIT data.
       */

      priv->bc_dev.d_buf = priv->cur_tx_frame->data;
      priv->bc_dev.d_len = 0;
      (void)devif_poll(&priv->bc_dev, bcmf_txpoll);
    }

  net_unlock();
}
Esempio n. 3
0
/* skel_txdone: TX-completion handler for the skeleton driver.  Updates
 * statistics, cancels the TX timeout watchdog, restarts the poll timer,
 * and polls the network for more TX data.
 */

static void skel_txdone(FAR struct skel_driver_s *priv)
{
  /* Update statistics (skeleton: a real driver would also check for and
   * count TX errors here).
   */

  NETDEV_TXDONE(priv->sk_dev);

  /* Check if there are pending transmissions */

  /* If no further transmissions are pending, then cancel the TX timeout and
   * disable further Tx interrupts.
   */

  wd_cancel(priv->sk_txtimeout);

  /* Then make sure that the TX poll timer is running (if it is already
   * running, the following would restart it).  This is necessary to
   * avoid certain race conditions where the polling sequence can be
   * interrupted.
   */

  (void)wd_start(priv->sk_txpoll, skeleton_WDDELAY, skel_poll_expiry, 1,
                 (wdparm_t)priv);

  /* And disable further TX interrupts (skeleton placeholder: no hardware
   * access is performed here).
   */

  /* In any event, poll the network for new TX data */

  (void)devif_poll(&priv->sk_dev, skel_txpoll);
}
Esempio n. 4
0
/* vnet_txavail: Driver callback invoked when new TX data is available.
 * Polls the network for new XMIT data if the interface is up and the TX
 * buffer is not full.  Always returns OK.
 */

static int vnet_txavail(struct net_driver_s *dev)
{
  FAR struct vnet_driver_s *vnet = (FAR struct vnet_driver_s *)dev->d_private;
  irqstate_t flags;

  /* This function may be called from interrupt level processing, so perform
   * the checks with interrupts disabled.
   */

  flags = enter_critical_section();

  /* Do nothing unless the interface is up */

  if (vnet->sk_bifup)
    {
      /* Poll the network for new XMIT data only if the hardware can hold
       * another outgoing packet.
       */

      if (vnet_is_txbuff_full(vnet->vnet))
        {
#ifdef CONFIG_DEBUG
          cprintf("VNET: TX buffer is full\n");
#endif
        }
      else
        {
          (void)devif_poll(&vnet->sk_dev, vnet_txpoll);
        }
    }

  leave_critical_section(flags);
  return OK;
}
Esempio n. 5
0
/* lo_poll_work: Periodic poll work for the loopback driver.  Runs the
 * timer poll, keeps re-polling while packets are looped back, then re-arms
 * the poll watchdog.
 */

static void lo_poll_work(FAR void *arg)
{
  FAR struct lo_driver_s *priv = (FAR struct lo_driver_s *)arg;
  net_lock_t state;

  /* Perform the timer-driven poll with the network locked */

  state = net_lock();
  priv->lo_txdone = false;
  (void)devif_timer(&priv->lo_dev, lo_txpoll);

  /* As long as the previous poll looped a packet back, poll again */

  for (; ; )
    {
      if (!priv->lo_txdone)
        {
          break;
        }

      /* Yes, poll again for more TX data */

      priv->lo_txdone = false;
      (void)devif_poll(&priv->lo_dev, lo_txpoll);
    }

  /* Setup the watchdog poll timer again */

  (void)wd_start(priv->lo_polldog, LO_WDDELAY, lo_poll_expiry, 1, priv);
  net_unlock(state);
}
Esempio n. 6
0
/* emac_txavail: Driver callback invoked when new TX data is available.
 * Polls the network for new XMIT data if the interface is up.  Always
 * returns OK.
 */

static int emac_txavail(struct net_driver_s *dev)
{
  FAR struct emac_driver_s *priv = (FAR struct emac_driver_s *)dev->d_private;
  irqstate_t flags;

  /* This function may be called from interrupt level processing, so perform
   * the interface-state test with interrupts disabled.
   */

  flags = enter_critical_section();

  /* Ignore the notification if the interface is not yet up */

  if (!priv->d_bifup)
    {
      leave_critical_section(flags);
      return OK;
    }

  /* Poll the network for new XMIT data */

  (void)devif_poll(&priv->d_dev, emac_txpoll);

  leave_critical_section(flags);
  return OK;
}
Esempio n. 7
0
/* vnet_txtimeout: TX watchdog timeout handler.  Restarts transmission by
 * polling the network for new XMIT data.
 */

static void vnet_txtimeout(int argc, uint32_t arg, ...)
{
  FAR struct vnet_driver_s *driver = (FAR struct vnet_driver_s *)arg;

  /* Poll the network for new XMIT data */

  (void)devif_poll(&driver->sk_dev, vnet_txpoll);
}
Esempio n. 8
0
/* skel_txtimeout_process: TX watchdog timeout handler (skeleton).  A real
 * driver would increment statistics, dump debug info, and reset the
 * hardware here; this skeleton only restarts transmission.
 */

static inline void skel_txtimeout_process(FAR struct skel_driver_s *priv)
{
  /* Increment statistics and dump debug info (placeholder) */

  /* Then reset the hardware (placeholder) */

  /* Then poll the network for new XMIT data to restart transmission */

  (void)devif_poll(&priv->sk_dev, skel_txpoll);
}
Esempio n. 9
0
/* emac_txtimeout: TX watchdog timeout handler.  Statistics and hardware
 * reset are placeholders in this driver; transmission is simply restarted
 * by polling the network for new XMIT data.
 */

static void emac_txtimeout(int argc, uint32_t arg, ...)
{
  FAR struct emac_driver_s *emac = (FAR struct emac_driver_s *)arg;

  /* Poll the network for new XMIT data */

  (void)devif_poll(&emac->d_dev, emac_txpoll);
}
Esempio n. 10
0
/* skel_txavail_process: Handle a TX-available notification (skeleton).
 * Polls the network for new XMIT data if the interface is up.
 */

static inline void skel_txavail_process(FAR struct skel_driver_s *priv)
{
  /* Ignore the notification if the interface is not yet up */

  if (!priv->sk_bifup)
    {
      return;
    }

  /* A real driver would verify there is room in the hardware for another
   * outgoing packet here.  Poll the network for new XMIT data.
   */

  (void)devif_poll(&priv->sk_dev, skel_txpoll);
}
Esempio n. 11
0
/* emac_txdone: TX-completion handler.  Cancels the TX timeout watchdog and
 * polls the network for more TX data.  Error checking / statistics are
 * placeholders in this driver.
 */

static void emac_txdone(FAR struct emac_driver_s *priv)
{
  /* Check for errors and update statistics (placeholder) */

  /* If no further xmits are pending, then cancel the TX timeout and
   * disable further Tx interrupts.
   */

  wd_cancel(priv->d_txtimeout);

  /* Then poll the network for new XMIT data */

  (void)devif_poll(&priv->d_dev, emac_txpoll);
}
Esempio n. 12
0
/* e1000_txtimeout: TX watchdog timeout handler.  Recovers by
 * re-initializing the hardware, then restarts transmission.
 */

static void e1000_txtimeout(int argc, uint32_t arg, ...)
{
    struct e1000_dev *priv = (struct e1000_dev *)arg;

    /* The TX watchdog expired: reset the hardware */

    e1000_init(priv);

    /* Then poll uIP for new XMIT data */

    (void)devif_poll(&priv->netdev, e1000_txpoll);
}
Esempio n. 13
0
/* misoc_net_txtimeout_work: Deferred TX watchdog timeout work.  Records
 * the timeout in the statistics and restarts transmission by polling the
 * network for new XMIT data.  (Hardware reset is a placeholder.)
 */

static void misoc_net_txtimeout_work(FAR void *arg)
{
  FAR struct misoc_net_driver_s *misoc = (FAR struct misoc_net_driver_s *)arg;

  /* Update timeout statistics with the network locked */

  net_lock();
  NETDEV_TXTIMEOUTS(misoc->misoc_net_dev);

  /* Then poll the network for new XMIT data */

  (void)devif_poll(&misoc->misoc_net_dev, misoc_net_txpoll);
  net_unlock();
}
Esempio n. 14
0
/* misoc_net_txavail_work: Deferred TX-available work.  Polls the network
 * for new XMIT data if the interface is up and the SRAM reader status test
 * indicates the hardware can take another outgoing packet.
 */

static void misoc_net_txavail_work(FAR void *arg)
{
  FAR struct misoc_net_driver_s *misoc = (FAR struct misoc_net_driver_s *)arg;

  /* Perform the check and poll with the network locked */

  net_lock();

  /* The short-circuit ensures the hardware status register is only read
   * when the interface is up, exactly as in the nested form.
   */

  if (misoc->misoc_net_bifup && !ethmac_sram_reader_ready_read())
    {
      /* There is room for another outgoing packet; poll the network for
       * new XMIT data.
       */

      (void)devif_poll(&misoc->misoc_net_dev, misoc_net_txpoll);
    }

  net_unlock();
}
Esempio n. 15
0
/* misoc_net_txdone: TX-completion handler.  Updates statistics, cancels
 * the TX timeout watchdog, disables further TX interrupts, and polls the
 * network for more TX data.
 */

static void misoc_net_txdone(FAR struct misoc_net_driver_s *priv)
{
  /* Update statistics (error checking is not implemented here) */

  NETDEV_TXDONE(priv->misoc_net_dev);

  /* Check if there are pending transmissions */

  /* If no further transmissions are pending, then cancel the TX timeout and
   * disable further Tx interrupts.
   */

  wd_cancel(priv->misoc_net_txtimeout);

  /* And disable further TX interrupts by disabling the SRAM reader event */

  ethmac_sram_reader_ev_enable_write(0);

  /* In any event, poll the network for new TX data */

  (void)devif_poll(&priv->misoc_net_dev, misoc_net_txpoll);
}
Esempio n. 16
0
/* lo_txavail_work: Deferred TX-available work for the loopback driver.
 * While the interface is up, repeatedly polls the network for new XMIT
 * data until a poll pass completes without looping a packet back.
 */

static void lo_txavail_work(FAR void *arg)
{
  FAR struct lo_driver_s *priv = (FAR struct lo_driver_s *)arg;
  net_lock_t state;

  /* Ignore the notification if the interface is not yet up */

  state = net_lock();
  if (priv->lo_bifup)
    {
      for (; ; )
        {
          /* Poll the network for new XMIT data */

          priv->lo_txdone = false;
          (void)devif_poll(&priv->lo_dev, lo_txpoll);

          /* Stop once a poll pass looped nothing back */

          if (!priv->lo_txdone)
            {
              break;
            }
        }
    }

  net_unlock(state);
}
Esempio n. 17
0
/* netdriver_loop: One pass of the simulated network driver loop: poll the
 * network for TX data, read one frame (or time out), dispatch received
 * frames to the IPv4/IPv6/ARP input paths, and run the periodic timer poll
 * on timeout.
 */

void netdriver_loop(void)
{
  FAR struct eth_hdr_s *eth;

  /* Check for new frames.  If so, then poll the network for new XMIT data */

  net_lock();
  (void)devif_poll(&g_sim_dev, sim_txpoll);
  net_unlock();

  /* netdev_read will return 0 on a timeout event and >0 on a data received
   * event
   */

  g_sim_dev.d_len = netdev_read((FAR unsigned char *)g_sim_dev.d_buf,
                                CONFIG_NET_ETH_MTU);

  /* Disable preemption through to the following so that it behaves a little
   * more like an interrupt (otherwise, the following logic gets pre-empted
   * and behaves oddly).
   */

  sched_lock();
  if (g_sim_dev.d_len > 0)
    {
      /* Data received event.  Check for a valid Ethernet header with
       * destination == our MAC address.  (BUF presumably aliases the
       * device buffer as an Ethernet header -- defined elsewhere.)
       */

      eth = BUF;
      if (g_sim_dev.d_len > ETH_HDRLEN)
        {
         int is_ours;

         /* Figure out if this ethernet frame is addressed to us.  This
          * affects what we're willing to receive.  Note that in promiscuous
          * mode, up_comparemac will always return 0.
          */

         is_ours = (up_comparemac(eth->dest, &g_sim_dev.d_mac.ether) == 0);

#ifdef CONFIG_NET_PKT
          /* When packet sockets are enabled, feed the frame into the packet
           * tap.
           */

          if (is_ours)
            {
              pkt_input(&g_sim_dev);
            }
#endif /* CONFIG_NET_PKT */

          /* We only accept IP packets of the configured type and ARP packets */

#ifdef CONFIG_NET_IPv4
          if (eth->type == HTONS(ETHTYPE_IP) && is_ours)
            {
              ninfo("IPv4 frame\n");

              /* Handle ARP on input then give the IPv4 packet to the network
               * layer
               */

              arp_ipin(&g_sim_dev);
              ipv4_input(&g_sim_dev);

              /* If the above function invocation resulted in data that
               * should be sent out on the network, the global variable
               * d_len is set to a value > 0.
               */

              if (g_sim_dev.d_len > 0)
                {
                  /* Update the Ethernet header with the correct MAC address */

#ifdef CONFIG_NET_IPv6
                  if (IFF_IS_IPv4(g_sim_dev.d_flags))
#endif
                    {
                      arp_out(&g_sim_dev);
                    }
#ifdef CONFIG_NET_IPv6
                  else
                    {
                      neighbor_out(&g_sim_dev);
                    }
#endif

                  /* And send the packet */

                  netdev_send(g_sim_dev.d_buf, g_sim_dev.d_len);
                }
            }
          else
#endif /* CONFIG_NET_IPv4 */
#ifdef CONFIG_NET_IPv6
          if (eth->type == HTONS(ETHTYPE_IP6) && is_ours)
            {
              ninfo("Iv6 frame\n");

              /* Give the IPv6 packet to the network layer */

              ipv6_input(&g_sim_dev);

              /* If the above function invocation resulted in data that
               * should be sent out on the network, the global variable
               * d_len is set to a value > 0.
               */

              if (g_sim_dev.d_len > 0)
               {
                  /* Update the Ethernet header with the correct MAC address */

#ifdef CONFIG_NET_IPv4
                  if (IFF_IS_IPv4(g_sim_dev.d_flags))
                    {
                      arp_out(&g_sim_dev);
                    }
                  else
#endif
#ifdef CONFIG_NET_IPv6
                    {
                      neighbor_out(&g_sim_dev);
                    }
#endif /* CONFIG_NET_IPv6 */

                  /* And send the packet */

                  netdev_send(g_sim_dev.d_buf, g_sim_dev.d_len);
                }
            }
          else
#endif/* CONFIG_NET_IPv6 */
#ifdef CONFIG_NET_ARP
          if (eth->type == htons(ETHTYPE_ARP))
            {
              arp_arpin(&g_sim_dev);

              /* If the above function invocation resulted in data that
               * should be sent out on the network, the global variable
               * d_len is set to a value > 0.
               */

              if (g_sim_dev.d_len > 0)
                {
                  netdev_send(g_sim_dev.d_buf, g_sim_dev.d_len);
                }
            }
          else
#endif
           {
             /* NOTE(review): eth->type is still in network byte order here */

             nwarn("WARNING: Unsupported Ethernet type %u\n", eth->type);
           }
        }
    }

  /* Otherwise, it must be a timeout event */

  else if (timer_expired(&g_periodic_timer))
    {
      timer_reset(&g_periodic_timer);
      devif_timer(&g_sim_dev, sim_txpoll);
    }

  sched_unlock();
}
Esempio n. 18
0
/* e1000_interrupt_handler: Dispatch e1000 interrupt causes.  Reads and
 * acknowledges the Interrupt Cause Register (ICR), then handles each set
 * cause bit.  Bit names below follow the Intel 8254x ICR layout, which
 * matches the existing per-branch comments.
 */

static irqreturn_t e1000_interrupt_handler(int irq, void *dev_id)
{
    struct e1000_dev *e1000 = (struct e1000_dev *)dev_id;

    /* Get and clear interrupt status bits (write-back acknowledges them) */

    int intr_cause = e1000_inl(e1000, E1000_ICR);
    e1000_outl(e1000, E1000_ICR, intr_cause);

    /* No cause bits set: the interrupt was not for this device */

    if (intr_cause == 0)
    {
        return IRQ_NONE;
    }

    /* Handle interrupts according to status bit settings */

    /* Bit 2 (LSC): Link status change -- mirror the link-up state from the
     * STATUS register (bit 1 = LU, link up) into bifup.
     */

    if (intr_cause & (1 << 2))
    {
        if (e1000_inl(e1000, E1000_STATUS) & 2)
        {
            e1000->bifup = true;
        }
        else
        {
            e1000->bifup = false;
        }
    }

    /* Bit 7 (RXT0): Rx-descriptor timer expired -- process received packets */

    if (intr_cause & (1 << 7))
    {
        e1000_receive(e1000);
    }

    /* Bit 1 (TXQE): Tx queue empty -- nothing pending, cancel the TX
     * timeout watchdog.
     */

    if (intr_cause & (1 << 1))
    {
        wd_cancel(e1000->txtimeout);
    }

    /* Bit 0 (TXDW): Tx-descriptor written back -- a transmission completed,
     * so poll the network for new TX data.
     */

    if (intr_cause & (1 << 0))
    {
        devif_poll(&e1000->netdev, e1000_txpoll);
    }

    /* Bit 4 (RXDMT0): Rx-descriptors running low -- return the freed
     * descriptors to the hardware by advancing the RX ring tail (RDT).
     */

    if (intr_cause & (1 << 4))
    {
        int tail;

        tail = e1000->rx_ring.tail + e1000->rx_ring.free;
        tail %= CONFIG_E1000_N_RX_DESC;
        e1000->rx_ring.tail = tail;
        e1000->rx_ring.free = 0;
        e1000_outl(e1000, E1000_RDT, tail);
    }

    return IRQ_HANDLED;
}
Esempio n. 19
0
/* vnet_txdone: TX-completion handler.  Polls the network for new XMIT data
 * so any queued outgoing packets can be sent.
 */

static void vnet_txdone(FAR struct vnet_driver_s *vnet)
{
  /* Poll the network for new XMIT data */

  (void)devif_poll(&vnet->sk_dev, vnet_txpoll);
}
Esempio n. 20
0
/* slip_txtask: TX kernel thread for one SLIP interface.  Sleeps for the
 * poll delay (or proceeds immediately when txnodelay is set), then, while
 * the interface is up, performs either a half-second timer poll or a
 * normal TX poll of the networking layer.  Never returns.
 */

static void slip_txtask(int argc, FAR char *argv[])
{
  FAR struct slip_driver_s *priv;
  unsigned int index = *(argv[1]) - '0';  /* Interface number, passed as a one-digit string */
  net_lock_t flags;
  systime_t msec_start;
  systime_t msec_now;
  unsigned int hsec;

  nerr("index: %d\n", index);
  DEBUGASSERT(index < CONFIG_NET_SLIP_NINTERFACES);

  /* Get our private data structure instance and wake up the waiting
   * initialization logic.
   */

  priv = &g_slip[index];
  slip_semgive(priv);

  /* Loop forever */

  msec_start = clock_systimer() * MSEC_PER_TICK;
  for (;  ; )
    {
      /* Wait for the timeout to expire (or proceed immediately if the
       * txnodelay flag was set by other driver logic).
       */

      slip_semtake(priv);
      if (!priv->txnodelay)
        {
          slip_semgive(priv);
          usleep(SLIP_WDDELAY);
        }
      else
        {
          priv->txnodelay = false;
          slip_semgive(priv);
        }

      /* Is the interface up? */

      if (priv->bifup)
        {
          /* Get exclusive access to the network (if it is already being
           * used by slip_rxtask, then we have to wait).
           */

          slip_semtake(priv);

          /* Poll the networking layer for new XMIT data. */

          flags = net_lock();
          priv->dev.d_buf = priv->txbuf;

          /* Has a half second elapsed since the last timer poll? */

          msec_now = clock_systimer() * MSEC_PER_TICK;
          hsec = (unsigned int)(msec_now - msec_start) / (MSEC_PER_SEC / 2);
          if (hsec)
            {
              /* Yes, perform the timer poll and advance the reference time
               * by the whole half-seconds consumed.
               */

              (void)devif_timer(&priv->dev, slip_txpoll);
              msec_start += hsec * (MSEC_PER_SEC / 2);
            }
          else
            {
              /* No, perform the normal TX poll */

              (void)devif_poll(&priv->dev, slip_txpoll);
            }

          net_unlock(flags);
          slip_semgive(priv);
        }
    }
}