Example #1
int
efab_eplock_unlock_and_wake(ci_netif *ni, int in_dl_context)
{
    int l = ni->state->lock.lock;
    tcp_helper_resource_t *rs = netif2tcp_helper_resource(ni);

    /* Allocate more packets if necessary. */
    if( !in_dl_context && OO_STACK_NEEDS_MORE_PACKETS(ni) )
        efab_tcp_helper_more_bufs(rs);

    /* We use in_dl_context from now on, and we should remove
     * CI_NETIF_FLAG_IN_DL_CONTEXT under the stack lock. */
    if( in_dl_context )
        ni->flags &= ~CI_NETIF_FLAG_IN_DL_CONTEXT;

again:

#ifndef NDEBUG
    if( (~l & CI_EPLOCK_LOCKED) || (l & CI_EPLOCK_UNLOCKED) ) {
        OO_DEBUG_ERR(ci_log("efab_eplock_unlock_and_wake:  corrupt"
                            " (value is %x)", (unsigned) l));
        OO_DEBUG_ERR(dump_stack());
        return -EIO;
    }
#endif

    if( l & CI_EPLOCK_CALLBACK_FLAGS ) {
        /* Invoke the callback while we've still got the lock.  The callback
        ** is responsible for either
        **  - dropping the lock using ef_eplock_try_unlock(), and returning
        **    the lock value prior to unlocking, OR
        **  - keeping the eplock locked and returning CI_EPLOCK_LOCKED
        */
        l = efab_tcp_helper_netif_lock_callback(&ni->eplock_helper, l, in_dl_context);
    }
    else if( ci_cas32_fail(&ni->state->lock.lock, l, CI_EPLOCK_UNLOCKED) ) {
        /* Someone (probably) set a flag when we tried to unlock, so we'd
        ** better handle the flag(s).
        */
        l = ni->state->lock.lock;
        goto again;
    }

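    /* If a waiter set the NEED_WAKE flag while we held the lock, wake the
     * wait queue now that the lock has been released. */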
    if( l & CI_EPLOCK_FL_NEED_WAKE ) {
        CITP_STATS_NETIF_INC(ni, lock_wakes);
        wake_up_interruptible(&ni->eplock_helper.wq);
    }

    return 0;
}
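
The unlock path above is a compare-and-swap retry loop: read the lock word, try to swap it to CI_EPLOCK_UNLOCKED, and if another thread set a flag in the meantime, go round again and deal with the new flags before retrying. Below is a minimal, self-contained sketch of that retry pattern using C11 atomics rather than ci_cas32_fail(); the MY_LOCK_* flags and my_unlock() are hypothetical stand-ins, and the callback contract and the kernel wake_up_interruptible() call are left out.

#include <stdatomic.h>
#include <stdio.h>

#define MY_LOCK_LOCKED    0x1u   /* lock is held */
#define MY_LOCK_NEED_WAKE 0x2u   /* a waiter asked to be woken on unlock */

/* Swap the lock word to fully unlocked.  If another thread changed the
 * word since we read it, the compare-and-swap fails, `seen` is refreshed
 * with the current value and we retry -- the `goto again` path above. */
static unsigned my_unlock(_Atomic unsigned *lock)
{
  unsigned seen = atomic_load(lock);
  while( ! atomic_compare_exchange_weak(lock, &seen, 0u) )
    ;  /* `seen` now holds the refreshed value; loop and retry */
  return seen;  /* flags observed at the moment the lock was dropped */
}

int main(void)
{
  _Atomic unsigned lock = MY_LOCK_LOCKED | MY_LOCK_NEED_WAKE;
  unsigned flags = my_unlock(&lock);
  if( flags & MY_LOCK_NEED_WAKE )
    printf("would wake waiters here\n");  /* cf. wake_up_interruptible() */
  return 0;
}

Returning the value observed at the instant of unlocking is what lets the caller honour CI_EPLOCK_FL_NEED_WAKE after the lock is already free, exactly as the function above does.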
Example #2
void ci_udp_all_fds_gone(ci_netif* netif, oo_sp sock_id, int do_free)
{
    /* All process references to this socket have gone.  So we should
     * shutdown() if necessary, and arrange for all resources to eventually
     * get cleaned up.
     *
     * This is called by the driver only.  [sock_id] is trusted.
     */
    ci_udp_state* us = SP_TO_UDP(netif, sock_id);

    ci_assert(ci_netif_is_locked(netif));
    ci_assert(us->s.b.state == CI_TCP_STATE_UDP);

    LOG_UC(ci_log("ci_udp_all_fds_gone: "NTS_FMT,
                  NTS_PRI_ARGS(netif, us)));

    if( UDP_GET_FLAG(us, CI_UDPF_FILTERED) ) {
        UDP_CLR_FLAG(us, CI_UDPF_FILTERED);
        ci_tcp_ep_clear_filters(netif, S_SP(us), 0);
    }
    ci_udp_recv_q_drop(netif, &us->recv_q);
    ci_ni_dllist_remove(netif, &us->s.reap_link);

    if( OO_PP_NOT_NULL(us->zc_kernel_datagram) ) {
        ci_netif_pkt_release_rx(netif, PKT_CHK(netif, us->zc_kernel_datagram));
        us->zc_kernel_datagram = OO_PP_NULL;
        us->zc_kernel_datagram_count = 0;
    }

    /* Only free state if no outstanding tx packets: otherwise it'll get
     * freed by the tx completion event.
     */
    if( do_free ) {
        if( us->tx_count == 0 )
            ci_udp_state_free(netif, us);
        else
            CITP_STATS_NETIF_INC(netif, udp_free_with_tx_active);
    }
}
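
The do_free branch illustrates a deferred-free pattern: socket state cannot be released while transmit packets are still owned by the hardware, so the free either happens immediately or is left to the tx completion path, as the comment above notes. The sketch below shows that idea in isolation; struct demo_sock, demo_all_fds_gone() and demo_tx_complete() are hypothetical, and in the code above the deferred case only bumps the udp_free_with_tx_active counter, with the actual free happening later from the tx completion event.

#include <stdbool.h>
#include <stdlib.h>

/* Hypothetical socket state: tx_count tracks packets still owned by the
 * NIC; want_free records that the last fd has gone away. */
struct demo_sock {
  int  tx_count;        /* packets queued for transmit, not yet completed */
  bool want_free;       /* all fds gone; free as soon as tx drains */
};

/* Called when the last file descriptor goes away. */
static void demo_all_fds_gone(struct demo_sock *s)
{
  if( s->tx_count == 0 )
    free(s);             /* nothing in flight: free immediately */
  else
    s->want_free = true; /* defer: tx completion will free the state */
}

/* Called from the tx completion path for each completed packet. */
static void demo_tx_complete(struct demo_sock *s)
{
  if( --s->tx_count == 0 && s->want_free )
    free(s);
}

int main(void)
{
  struct demo_sock *s = calloc(1, sizeof(*s));
  s->tx_count = 1;          /* one packet still owned by the NIC */
  demo_all_fds_gone(s);     /* defers the free */
  demo_tx_complete(s);      /* tx drains: state is freed here */
  return 0;
}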
Example #3
/* Insert for either TCP or UDP */
int ci_netif_filter_insert(ci_netif* netif, oo_sp tcp_id,
                           unsigned laddr, unsigned lport,
                           unsigned raddr, unsigned rport, unsigned protocol)
{
  ci_netif_filter_table_entry* entry;
  unsigned hash1, hash2;
  ci_netif_filter_table* tbl;
#if !defined(NDEBUG) || CI_CFG_STATS_NETIF
  unsigned hops = 1;
#endif
  unsigned first;

  ci_assert(netif);
  ci_assert(ci_netif_is_locked(netif));
  ci_assert(netif->filter_table);
  tbl = netif->filter_table;

  hash1 = tcp_hash1(tbl, laddr, lport, raddr, rport, protocol);
  hash2 = tcp_hash2(tbl, laddr, lport, raddr, rport, protocol);
  first = hash1;

  /* Find a free slot. */
  while( 1 ) {
    entry = &tbl->table[hash1];
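    /* A negative id marks a free slot (never used, or left behind by a
     * removed entry), so we can claim it. */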
    if( entry->id < 0 )  break;

    ++entry->route_count;
#if !defined(NDEBUG) || CI_CFG_STATS_NETIF
    ++hops;
#endif

    /* A socket can only have multiple entries in the filter table if each
     * entry has a different [laddr].
     */
    ci_assert(
      !((entry->id == OO_SP_TO_INT(tcp_id)) && (laddr == entry->laddr)) );

    hash1 = (hash1 + hash2) & tbl->table_size_mask;

    if( hash1 == first ) {
      ci_sock_cmn *s = SP_TO_SOCK_CMN(netif, tcp_id);
      if( ! (s->s_flags & CI_SOCK_FLAG_SW_FILTER_FULL) ) {
        LOG_E(ci_log(FN_FMT "%d FULL %s %s:%u->%s:%u hops=%u",
                     FN_PRI_ARGS(netif),
                     OO_SP_FMT(tcp_id), CI_IP_PROTOCOL_STR(protocol),
                     ip_addr_str(laddr), (unsigned) CI_BSWAP_BE16(lport),
                     ip_addr_str(raddr), (unsigned) CI_BSWAP_BE16(rport),
                     hops));
        s->s_flags |= CI_SOCK_FLAG_SW_FILTER_FULL;
      }

      CITP_STATS_NETIF_INC(netif, sw_filter_insert_table_full);
      return -ENOBUFS;
    }
  }

  /* Now insert the new entry. */
  LOG_TC(ci_log(FN_FMT "%d INSERT %s %s:%u->%s:%u hash=%u:%u at=%u "
                "over=%d hops=%u", FN_PRI_ARGS(netif), OO_SP_FMT(tcp_id),
                CI_IP_PROTOCOL_STR(protocol),
                ip_addr_str(laddr), (unsigned) CI_BSWAP_BE16(lport),
                ip_addr_str(raddr), (unsigned) CI_BSWAP_BE16(rport),
                first, hash2, hash1, entry->id, hops));

#if CI_CFG_STATS_NETIF
  if( hops > netif->state->stats.table_max_hops )
    netif->state->stats.table_max_hops = hops;
  /* Keep a rolling average of the number of hops per entry. */
  if( netif->state->stats.table_mean_hops == 0 )
    netif->state->stats.table_mean_hops = 1;
  netif->state->stats.table_mean_hops =
    (netif->state->stats.table_mean_hops * 9 + hops) / 10;

  if( entry->id == EMPTY )
    ++netif->state->stats.table_n_slots;
  ++netif->state->stats.table_n_entries;
#endif

  entry->id = OO_SP_TO_INT(tcp_id);
  entry->laddr = laddr;
  return 0;
}
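
The insert loop above is open addressing with double hashing: the probe sequence starts at hash1 and advances by hash2 modulo the power-of-two table size, and arriving back at the first slot means the table is full. A minimal, self-contained sketch of that probe follows; demo_table, demo_insert() and the hash constants are hypothetical stand-ins, and the route_count, hops accounting and per-socket flags of the real function are omitted.

#include <stdio.h>
#include <string.h>

#define TABLE_SIZE 8                 /* must be a power of two for the mask */
#define EMPTY_ID   (-1)

/* Hypothetical stand-in for ci_netif_filter_table: open addressing with
 * double hashing, probing slot = (hash1 + k*hash2) & mask. */
struct demo_table {
  int      id[TABLE_SIZE];
  unsigned key[TABLE_SIZE];
};

static unsigned demo_hash1(unsigned key) { return key * 2654435761u; }
static unsigned demo_hash2(unsigned key) { return (key * 40503u) | 1u; }  /* odd step */

static int demo_insert(struct demo_table *t, unsigned key, int id)
{
  unsigned mask = TABLE_SIZE - 1;
  unsigned h1 = demo_hash1(key) & mask;
  unsigned h2 = demo_hash2(key) & mask;
  unsigned first = h1;

  while( t->id[h1] != EMPTY_ID ) {
    h1 = (h1 + h2) & mask;           /* same probe step as the code above */
    if( h1 == first )
      return -1;                     /* wrapped back to start: table full */
  }
  t->id[h1] = id;
  t->key[h1] = key;
  return 0;
}

int main(void)
{
  struct demo_table t;
  memset(t.id, 0xff, sizeof(t.id));  /* every slot starts as EMPTY_ID (-1) */
  printf("insert rc=%d\n", demo_insert(&t, 0x7f000001u, 3));
  return 0;
}

In the sketch the step is forced odd, so with a power-of-two table the probe visits every slot before returning to first, which is what makes the wrap-around test a valid "table full" check.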