Example #1
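citp_waitable_print() prints a netstat-style summary line for a socket. For a connected TCP socket it reads the send and receive queue lengths directly; for a UDP socket it takes the receive-queue packet count from ci_udp_recv_q_pkts() and the transmit backlog from tx_count plus the asynchronous send-queue level.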
void citp_waitable_print(citp_waitable* w)
{
  /* Output socket using netstat style output:
   *   TCP 2 0 0.0.0.0:12865 0.0.0.0:0 LISTEN
   *   UDP 0 0 172.16.129.131:57521 0.0.0.0:0 UDP
   */
  if( CI_TCP_STATE_IS_SOCKET(w->state) ) {
    ci_sock_cmn* s = CI_CONTAINER(ci_sock_cmn, b, w);
    citp_waitable_obj* wo = CI_CONTAINER(citp_waitable_obj, waitable, w);
    int tq = 0;
    int rq = 0;
    
    if( (w->state & CI_TCP_STATE_TCP) &&
       !(w->state & CI_TCP_STATE_NOT_CONNECTED) ) {
      tq = ci_tcp_sendq_n_pkts(&wo->tcp);
      rq = wo->tcp.recv1.num + wo->tcp.recv2.num;
    }
    else if( w->state == CI_TCP_STATE_UDP ) {
      tq = wo->udp.tx_count + oo_atomic_read(&wo->udp.tx_async_q_level);
      rq = ci_udp_recv_q_pkts(&wo->udp.recv_q);
    }
    log("%s %d %d "OOF_IP4PORT" "OOF_IP4PORT" %s",
        citp_waitable_type_str(w), rq, tq,
        OOFA_IP4PORT(sock_laddr_be32(s), sock_lport_be16(s)),
        OOFA_IP4PORT(sock_raddr_be32(s), sock_rport_be16(s)),
        ci_tcp_state_str(w->state));
  }
}
Example #2
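ci_udp_recvq_dump() logs a one-line summary of a UDP receive queue: the packets currently queued (ci_udp_recv_q_pkts()), the packets that can be reaped, and the total packets ever added.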
void ci_udp_recvq_dump(ci_netif* ni, ci_udp_recv_q* q,
                       const char* pf1, const char* pf2,
                       oo_dump_log_fn_t logger, void* log_arg)
{
  logger(log_arg,
         "%s%s q_pkts=%d reap=%d tot_pkts=%u", pf1, pf2,
         ci_udp_recv_q_pkts(q), ci_udp_recv_q_reapable(q),
         q->pkts_added);
}
Example #3
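ci_udp_state_dump() dumps the complete per-socket UDP state: socket flags, the receive path (queue bytes and depth, the packet count from ci_udp_recv_q_pkts(), overflow and memory drops), and the send path (lock contention, multicast settings, and the IP caches used for unconnected and connected sends).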
void ci_udp_state_dump(ci_netif* ni, ci_udp_state* us, const char* pf,
                      oo_dump_log_fn_t logger, void* log_arg)
{
  ci_udp_socket_stats uss = us->stats;
  unsigned rx_added = us->recv_q.pkts_added;
  unsigned rx_os = uss.n_rx_os + uss.n_rx_os_slow;
  unsigned rx_total = rx_added + uss.n_rx_mem_drop + uss.n_rx_overflow + rx_os;
  unsigned n_tx_onload = uss.n_tx_onload_uc + uss.n_tx_onload_c;
  unsigned tx_total = n_tx_onload + uss.n_tx_os;
  ci_ip_cached_hdrs* ipcache;

  (void) rx_total;  /* unused on 32-bit builds in kernel */
  (void) tx_total;

  if( us->s.timestamping_flags & ONLOAD_SOF_TIMESTAMPING_TX_HARDWARE )
    ci_timestamp_q_dump(ni, &us->timestamp_q, pf, logger, log_arg);

  /* General. */
  logger(log_arg, "%s  udpflags: "CI_UDP_STATE_FLAGS_FMT, pf,
         CI_UDP_STATE_FLAGS_PRI_ARG(us));

  /* Receive path. */
  logger(log_arg,
         "%s  rcv: q_bytes=%d q_depth=%d q_pkts=%d reap=%d tot_bytes=%u"
         " tot_pkts=%u", pf, ci_udp_recv_q_bytes(&us->recv_q),
         UDP_RECVQ_DEPTH(us), ci_udp_recv_q_pkts(&us->recv_q),
         ci_udp_recv_q_reapable(&us->recv_q),
         (unsigned) us->recv_q.bytes_added, rx_added);
#if CI_CFG_ZC_RECV_FILTER
  logger(log_arg,
         "%s  rcv: filtered=%d unfiltered=%d "
         "tot_filt_rej=%d tot_filt_pass=%d ",
         pf,
         us->recv_q.pkts_filter_passed - us->recv_q.pkts_delivered,
         us->recv_q.pkts_added - us->recv_q.pkts_filter_passed - 
         us->recv_q.pkts_filter_dropped,
         us->recv_q.pkts_filter_dropped, us->recv_q.pkts_filter_passed);
#endif
  logger(log_arg,
         "%s  rcv: oflow_drop=%u(%u%%) mem_drop=%u eagain=%u pktinfo=%u "
         "q_max=%u", pf, uss.n_rx_overflow,
         percent(uss.n_rx_overflow, rx_total),
         uss.n_rx_mem_drop, uss.n_rx_eagain, uss.n_rx_pktinfo, 
         uss.max_recvq_depth);
  logger(log_arg, "%s  rcv: os=%u(%u%%) os_slow=%u os_error=%u", pf,
         rx_os, percent(rx_os, rx_total), uss.n_rx_os_slow, uss.n_rx_os_error);

  /* Send path. */
  logger(log_arg, "%s  snd: q=%u+%u ul=%u os=%u(%u%%)", pf,
         us->tx_count, oo_atomic_read(&us->tx_async_q_level),
         n_tx_onload, uss.n_tx_os, percent(uss.n_tx_os, tx_total));
  logger(log_arg,
         "%s  snd: LOCK cp=%u(%u%%) pkt=%u(%u%%) snd=%u(%u%%) poll=%u(%u%%) "
         "defer=%u(%u%%)", pf,
         uss.n_tx_lock_cp,  percent(uss.n_tx_lock_cp,  n_tx_onload),
         uss.n_tx_lock_pkt,  percent(uss.n_tx_lock_pkt,  n_tx_onload),
         uss.n_tx_lock_snd,  percent(uss.n_tx_lock_snd,  n_tx_onload),
         uss.n_tx_lock_poll, percent(uss.n_tx_lock_poll, n_tx_onload),
         uss.n_tx_lock_defer, percent(uss.n_tx_lock_defer, n_tx_onload));

  logger(log_arg, "%s  snd: MCAST if=%d src="OOF_IP4" ttl=%d", pf,
         us->s.cp.ip_multicast_if,
         OOFA_IP4(us->s.cp.ip_multicast_if_laddr_be32),
         (int) us->s.cp.ip_mcast_ttl);

  /* State relating to unconnected sends. */
  ipcache = &us->ephemeral_pkt;
  logger(log_arg,
         "%s  snd: TO n=%u match=%u(%u%%) "
         "lookup=%u+%u(%u%%) "OOF_IPCACHE_STATE,
         pf, uss.n_tx_onload_uc,
         uss.n_tx_cp_match, percent(uss.n_tx_cp_match, uss.n_tx_onload_uc),
         uss.n_tx_cp_uc_lookup, uss.n_tx_cp_a_lookup,
         percent(uss.n_tx_cp_uc_lookup + uss.n_tx_cp_a_lookup,
                 uss.n_tx_onload_uc),
         OOFA_IPCACHE_STATE(ni, ipcache));
  logger(log_arg, "%s  snd: TO "OOF_IPCACHE_DETAIL, pf,
         OOFA_IPCACHE_DETAIL(ipcache));
  logger(log_arg, "%s  snd: TO "OOF_IP4PORT" => "OOF_IP4PORT, pf,
         OOFA_IP4PORT(ipcache->ip_saddr_be32, udp_lport_be16(us)),
         OOFA_IP4PORT(ipcache->ip.ip_daddr_be32, ipcache->dport_be16));
   
  /* State relating to connected sends. */
  ipcache = &us->s.pkt;
  logger(log_arg, "%s  snd: CON n=%d lookup=%d "OOF_IPCACHE_STATE, pf,
         uss.n_tx_onload_c, uss.n_tx_cp_c_lookup,
         OOFA_IPCACHE_STATE(ni,ipcache));
  logger(log_arg, "%s  snd: CON "OOF_IPCACHE_DETAIL, pf,
         OOFA_IPCACHE_DETAIL(ipcache));

  logger(log_arg, "%s  snd: eagain=%d spin=%d block=%d", pf,
         uss.n_tx_eagain, uss.n_tx_spin, uss.n_tx_block);
  logger(log_arg, "%s  snd: poll_avoids_full=%d fragments=%d confirm=%d", pf,
         uss.n_tx_poll_avoids_full, uss.n_tx_fragments, uss.n_tx_msg_confirm);
  logger(log_arg,
         "%s  snd: os_slow=%d os_late=%d unconnect_late=%d nomac=%u(%u%%)", pf,
         uss.n_tx_os_slow, uss.n_tx_os_late, uss.n_tx_unconnect_late,
         uss.n_tx_cp_no_mac, percent(uss.n_tx_cp_no_mac, tx_total));
}
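The last two examples emit their output through a caller-supplied oo_dump_log_fn_t callback, invoked as logger(log_arg, fmt, ...). The exact typedef is not shown above, so the signature below is inferred from those call sites; the name stderr_logger and the use of a FILE* as log_arg are illustrative assumptions, not part of the Onload API. A minimal sketch:

#include <stdarg.h>
#include <stdio.h>

/* Sketch of a logger callback matching the calling convention seen in
 * the dump functions above: an opaque log_arg followed by a printf-style
 * format string and its arguments.  Assumed, not taken from Onload. */
static void stderr_logger(void* log_arg, const char* fmt, ...)
{
  FILE* out = log_arg ? (FILE*) log_arg : stderr;
  va_list va;

  va_start(va, fmt);
  vfprintf(out, fmt, va);
  va_end(va);
  fputc('\n', out);  /* each logger() call is one line of the dump */
}

/* Hypothetical invocation, assuming ni and us were obtained elsewhere:
 *   ci_udp_state_dump(ni, us, "", stderr_logger, stderr);
 */

Keeping the output sink behind a callback like this lets the same dump code serve stderr, a kernel log, or an in-memory buffer, which is why every line above goes through logger() rather than a fixed log macro.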