Example 1
/* Count the packets on the socket's timestamp queue between [head] and
 * [extract]; these have been consumed and can be reaped. */
static int ci_sock_cmn_timestamp_q_reapable(ci_netif* ni, ci_sock_cmn* s)
{
  int count = 0;
  oo_pkt_p p = s->timestamp_q.head;
  while( ! OO_PP_EQ(p, s->timestamp_q_extract) ) {
    ++count;
    p = PKT_CHK(ni, p)->tsq_next;
  }
  return count;
}
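The head-to-extract walk above is a common pattern for queues whose consumer and reaper sides advance independently. Below is a minimal, self-contained sketch of the same counting idea; pkt_t and tsq_t are hypothetical stand-ins, not the real ci_ip_pkt_fmt or timestamp-queue types.

#include <stdio.h>
#include <stddef.h>

typedef struct pkt { struct pkt* tsq_next; } pkt_t;

typedef struct {
  pkt_t* head;     /* oldest packet still on the queue */
  pkt_t* extract;  /* next packet the app will consume */
} tsq_t;

/* Packets between head and extract have already been consumed and
 * may be reaped; count them without modifying the queue. */
static int tsq_reapable(const tsq_t* q)
{
  const pkt_t* p = q->head;
  int count = 0;
  while( p != q->extract ) {
    ++count;
    p = p->tsq_next;
  }
  return count;
}

int main(void)
{
  pkt_t a, b, c;
  a.tsq_next = &b;  b.tsq_next = &c;  c.tsq_next = NULL;
  tsq_t q = { &a, &c };
  printf("reapable: %d\n", tsq_reapable(&q));  /* 2: a and b */
  return 0;
}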
Example 2
/* Release consumed packets from the head of the timestamp queue up to,
 * but not including, [extract].  Caller must hold the netif lock. */
void ci_sock_cmn_timestamp_q_reap(ci_netif* ni, ci_sock_cmn* s)
{
  ci_assert(ci_netif_is_locked(ni));
  while( ! OO_PP_EQ(s->timestamp_q.head, s->timestamp_q_extract) ) {
    ci_ip_pkt_fmt* pkt = PKT_CHK(ni, s->timestamp_q.head);
    oo_pkt_p next = pkt->tsq_next;  /* read link before releasing pkt */

    ci_netif_pkt_release(ni, pkt);
    --s->timestamp_q.num;
    s->timestamp_q.head = next;
  }
}
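One detail worth calling out: the loop reads tsq_next before releasing the packet, because the buffer may be recycled as soon as it is released. A sketch of the same pattern, reusing the hypothetical types from the previous example and substituting malloc/free for the netif packet allocator:

#include <stdlib.h>

typedef struct pkt { struct pkt* tsq_next; } pkt_t;
typedef struct { pkt_t* head; pkt_t* extract; int num; } tsq_t;

static void tsq_reap(tsq_t* q)
{
  while( q->head != q->extract ) {
    pkt_t* pkt = q->head;
    pkt_t* next = pkt->tsq_next;  /* must read before free() */
    free(pkt);                    /* stand-in for ci_netif_pkt_release */
    --q->num;
    q->head = next;
  }
}

int main(void)
{
  tsq_t q = { NULL, NULL, 0 };
  pkt_t* a = malloc(sizeof *a);
  pkt_t* b = malloc(sizeof *b);
  a->tsq_next = b;  b->tsq_next = NULL;
  q.head = a;  q.num = 2;
  tsq_reap(&q);     /* frees both packets; head reaches extract */
  return 0;
}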
Example 3
/* Drop the entire timestamp queue, releasing every packet.  In debug
 * builds the loop is bounded by [qu->num] to catch list corruption. */
void ci_sock_cmn_timestamp_q_drop(ci_netif* netif, ci_sock_cmn* s)
{
  ci_ip_pkt_queue* qu = &s->timestamp_q;
  ci_ip_pkt_fmt* p;
  CI_DEBUG(int i = qu->num);

  ci_assert(netif);
  ci_assert(qu);

  while( OO_PP_NOT_NULL(qu->head) CI_DEBUG( && i-- > 0) ) {
    p = PKT_CHK(netif, qu->head);
    qu->head = p->tsq_next;
    ci_netif_pkt_release(netif, p);
  }
  ci_assert_equal(i, 0);
  ci_assert(OO_PP_IS_NULL(qu->head));
  qu->num = 0;
}
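The CI_DEBUG() wrapper is what lets the loop bound and the final length check vanish in release builds. Here is a hedged reconstruction of how such a macro can work; MY_DEBUG is a hypothetical stand-in, and the real CI_DEBUG definition may differ.

#include <assert.h>
#include <stdlib.h>

#ifdef NDEBUG
# define MY_DEBUG(...)              /* release: expands to nothing */
#else
# define MY_DEBUG(...) __VA_ARGS__  /* debug: code is kept */
#endif

typedef struct pkt { struct pkt* tsq_next; } pkt_t;
typedef struct { pkt_t* head; int num; } tsq_t;

static void tsq_drop(tsq_t* q)
{
  MY_DEBUG(int i = q->num;)
  /* In debug builds the loop also stops after [num] iterations, so a
   * corrupt (e.g. cyclic) list fails the assertion instead of hanging. */
  while( q->head != NULL MY_DEBUG( && i-- > 0 ) ) {
    pkt_t* p = q->head;
    q->head = p->tsq_next;
    free(p);
  }
  MY_DEBUG(assert(i == 0);)
  q->num = 0;
}

int main(void)
{
  tsq_t q = { NULL, 0 };
  pkt_t* a = malloc(sizeof *a);
  a->tsq_next = NULL;
  q.head = a;  q.num = 1;
  tsq_drop(&q);
  return 0;
}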
Example 4
void ci_udp_all_fds_gone(ci_netif* netif, oo_sp sock_id, int do_free)
{
  /* All process references to this socket have gone.  So we should
   * shutdown() if necessary, and arrange for all resources to eventually
   * get cleaned up.
   *
   * This is called by the driver only.  [sock_id] is trusted.
   */
  ci_udp_state* us = SP_TO_UDP(netif, sock_id);

  ci_assert(ci_netif_is_locked(netif));
  ci_assert(us->s.b.state == CI_TCP_STATE_UDP);

  LOG_UC(ci_log("ci_udp_all_fds_gone: "NTS_FMT,
                NTS_PRI_ARGS(netif, us)));

  if( UDP_GET_FLAG(us, CI_UDPF_FILTERED) ) {
    UDP_CLR_FLAG(us, CI_UDPF_FILTERED);
    ci_tcp_ep_clear_filters(netif, S_SP(us), 0);
  }
  ci_udp_recv_q_drop(netif, &us->recv_q);
  ci_ni_dllist_remove(netif, &us->s.reap_link);

  if( OO_PP_NOT_NULL(us->zc_kernel_datagram) ) {
    ci_netif_pkt_release_rx(netif, PKT_CHK(netif, us->zc_kernel_datagram));
    us->zc_kernel_datagram = OO_PP_NULL;
    us->zc_kernel_datagram_count = 0;
  }

  /* Only free state if no outstanding tx packets: otherwise it'll get
   * freed by the tx completion event.
   */
  if( do_free ) {
    if( us->tx_count == 0 )
      ci_udp_state_free(netif, us);
    else
      CITP_STATS_NETIF_INC(netif, udp_free_with_tx_active);
  }
}
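The do_free branch implements a deferred-free handshake: if transmits are still in flight, the state is freed later by the tx completion path rather than here. A minimal sketch of that idea follows; udp_state_t, udp_all_fds_gone and on_tx_complete are hypothetical illustrations, not the Onload API.

#include <stdbool.h>
#include <stdlib.h>

typedef struct {
  int  tx_count;   /* packets still owned by the NIC */
  bool fds_gone;   /* no process references remain */
} udp_state_t;

static void udp_state_free(udp_state_t* us) { free(us); }

/* Called when the last fd referencing the socket is closed. */
static void udp_all_fds_gone(udp_state_t* us)
{
  us->fds_gone = true;
  if( us->tx_count == 0 )
    udp_state_free(us);   /* nothing in flight: free immediately */
  /* else: on_tx_complete() frees the state later */
}

/* Called from the tx completion event for each completed packet. */
static void on_tx_complete(udp_state_t* us)
{
  if( --us->tx_count == 0 && us->fds_gone )
    udp_state_free(us);
}

int main(void)
{
  udp_state_t* us = calloc(1, sizeof *us);
  us->tx_count = 1;        /* one packet still in flight */
  udp_all_fds_gone(us);    /* does not free yet */
  on_tx_complete(us);      /* tx drains: state freed here */
  return 0;
}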
Example 5
/* Deep-copy a packet queue from [ni_from] into [ni_to].  Everything from
 * the [pay_len] member to the end of each packet buffer is copied with a
 * single memcpy; the queue-link fields that precede it are rebuilt by
 * ci_ip_queue_enqueue() for the destination queue. */
static void efab_ip_queue_copy(ci_netif *ni_to, ci_ip_pkt_queue *q_to,
                               ci_netif *ni_from, ci_ip_pkt_queue *q_from)
{
  ci_ip_pkt_fmt *pkt_to, *pkt_from;
  oo_pkt_p pp;

  ci_ip_queue_init(q_to);
  if( q_from->num == 0 )
    return;

  ci_assert( OO_PP_NOT_NULL(q_from->head) );
  pp = q_from->head;
  do {
    pkt_from = PKT_CHK(ni_from, pp);
    pkt_to = ci_netif_pkt_alloc(ni_to);
    memcpy(&pkt_to->pay_len, &pkt_from->pay_len,
           CI_CFG_PKT_BUF_SIZE - CI_MEMBER_OFFSET(ci_ip_pkt_fmt, pay_len));
    ci_ip_queue_enqueue(ni_to, q_to, pkt_to);
    if( pp == q_from->tail )
      break;
    pp = pkt_from->next;
  } while(1);
}
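The single memcpy relies on the packet layout: every field from pay_len to the end of the buffer is copied in one go, while the link fields laid out before pay_len are deliberately excluded. Here is a self-contained sketch of the offsetof trick, with a hypothetical pkt_t layout standing in for ci_ip_pkt_fmt:

#include <stddef.h>
#include <string.h>
#include <stdio.h>

typedef struct pkt {
  struct pkt* next;        /* queue link: must NOT be copied */
  int         pay_len;     /* first field of the copied region */
  char        payload[64];
} pkt_t;

static void pkt_copy_payload(pkt_t* to, const pkt_t* from)
{
  /* One memcpy covering [pay_len .. end of struct), the analogue of
   * CI_CFG_PKT_BUF_SIZE - CI_MEMBER_OFFSET(ci_ip_pkt_fmt, pay_len). */
  memcpy(&to->pay_len, &from->pay_len,
         sizeof(pkt_t) - offsetof(pkt_t, pay_len));
}

int main(void)
{
  pkt_t from = { NULL, 5, "hello" };
  pkt_t to   = { &to, 0, "" };
  pkt_copy_payload(&to, &from);
  printf("%d %s\n", to.pay_len, to.payload);  /* 5 hello */
  printf("%d\n", to.next == &to);             /* 1: link untouched */
  return 0;
}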
Example 6
/* Handle UDP ioctls that can be serviced while holding the netif lock;
 * anything unrecognised falls through to ci_udp_ioctl_slow(). */
static int ci_udp_ioctl_locked(ci_netif* ni, ci_udp_state* us,
                               ci_fd_t fd, int request, void* arg)
{
  switch( request ) {
  case FIONREAD: /* synonym of SIOCINQ */
    if( ! CI_IOCTL_ARG_OK(int, arg) )
      return -EFAULT;
    {
      /* Return the size of the datagram at the head of the receive queue.
       *
       * Careful: extract side of receive queue is owned by sock lock,
       * which we don't have.  However, freeing of bufs is owned by netif
       * lock, which we do have.  So we're safe so long as we only read
       * [extract] once.
       */
      oo_pkt_p extract = us->recv_q.extract;
      if( OO_PP_NOT_NULL(extract) ) {
        ci_ip_pkt_fmt* pkt = PKT_CHK(ni, extract);
        if( (pkt->rx_flags & CI_PKT_RX_FLAG_RECV_Q_CONSUMED) &&
            OO_PP_NOT_NULL(pkt->udp_rx_next) )
          pkt = PKT_CHK(ni, pkt->udp_rx_next);
        if( !(pkt->rx_flags & CI_PKT_RX_FLAG_RECV_Q_CONSUMED) ) {
          *(int*) arg = pkt->pf.udp.pay_len;
          return 0;
        }
      }
    }
    /* Nothing in the user-level receive queue, so take the value
     * returned by the O/S socket.
     */
    if( !(us->s.os_sock_status & OO_OS_STATUS_RX) ) {
      *(int*)arg = 0;
      return 0;
    }
    goto sys_ioctl;

  case TIOCOUTQ: /* synonym of SIOCOUTQ */
    if( ! CI_IOCTL_ARG_OK(int, arg) )
      return -EFAULT;

    *(int*)arg = us->tx_count + oo_atomic_read(&us->tx_async_q_level);
    return 0;

  case SIOCGSTAMP:
#if defined(__linux__) && defined(__KERNEL__)
/* The following code assumes the width of the timespec and timeval fields */
# error "Need to consider 32-on-64 bit setting of timeval arg"
#endif
    if( ! (us->udpflags & CI_UDPF_LAST_RECV_ON) )
      return oo_os_sock_ioctl(ni, us->s.b.bufid, request, arg, NULL);
    return ci_udp_ioctl_siocgstamp(ni, us, arg, 1);
  case SIOCGSTAMPNS:
    if( ! (us->udpflags & CI_UDPF_LAST_RECV_ON) )
      return oo_os_sock_ioctl(ni, us->s.b.bufid, request, arg, NULL);
    return ci_udp_ioctl_siocgstamp(ni, us, arg, 0);
  }

  return ci_udp_ioctl_slow(ni, us, fd, request, arg);

 sys_ioctl:
  return oo_os_sock_ioctl(ni, us->s.b.bufid, request, arg, NULL);
}
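The FIONREAD comment encodes a lock-split rule: the extract side of the receive queue is advanced under the sock lock (not held here), while buffer freeing requires the netif lock (held here), so the code must snapshot extract exactly once and then only walk forward. Below is a sketch of that read-once discipline with hypothetical types; a production version would use the stack's own accessors rather than plain C11 atomics.

#include <stdatomic.h>
#include <stddef.h>

typedef struct pkt {
  int consumed;             /* set once the app has read this datagram */
  int pay_len;
  struct pkt* next;
} pkt_t;

typedef struct { _Atomic(pkt_t*) extract; } recv_q_t;

/* Return the length of the next unconsumed datagram, or 0 if none.
 * [extract] is read exactly once: another thread may advance it, but
 * since we hold the lock that owns freeing, our snapshot stays valid. */
static int next_datagram_len(recv_q_t* q)
{
  pkt_t* pkt = atomic_load(&q->extract);
  if( pkt == NULL )
    return 0;
  if( pkt->consumed && pkt->next != NULL )
    pkt = pkt->next;        /* head was consumed; look one ahead */
  return pkt->consumed ? 0 : pkt->pay_len;
}

int main(void)
{
  pkt_t b = { 0, 1500, NULL };
  pkt_t a = { 1, 100, &b };   /* already consumed */
  recv_q_t q;
  atomic_init(&q.extract, &a);
  return next_datagram_len(&q) == 1500 ? 0 : 1;
}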