Example #1
static void refresh_ip_list(void)
{
  struct ifconf ifc;
  int sock;

  CI_TRY(sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP));
  ifc.ifc_len = sizeof(ip_list);
  ifc.ifc_req = ip_list;
  CI_TRY(ioctl(sock, SIOCGIFCONF, &ifc));
  ip_list_n = ifc.ifc_len / sizeof(ip_list[0]);
  close(sock);
}
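
The globals this function fills are not shown in the excerpt. A minimal sketch
of plausible declarations follows; the names come from the code above, but the
array bound is an assumption:

/* Hypothetical declarations assumed by refresh_ip_list(). */
#include <net/if.h>              /* struct ifreq, struct ifconf */

#define MAX_LOCAL_IPS  32        /* assumed bound, not from the source */

static struct ifreq ip_list[MAX_LOCAL_IPS];
static int          ip_list_n;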
Example #2
static void sock_fmt(int fd, char* buf, int* buf_n, int buf_len)
{
  struct sockaddr_in sa;
  socklen_t sa_len;
  int type;
  bprintf("socket[");
  sa_len = sizeof(type);
  CI_TRY(ci_sys_getsockopt(fd, SOL_SOCKET, SO_TYPE, &type, &sa_len));
  sa_len = sizeof(sa);
  if( ci_sys_getsockname(fd, (struct sockaddr*) &sa, &sa_len) == 0 ) {
    switch( sa.sin_family ) {
    case AF_INET:
      bprintf("%s,"CI_IP_PRINTF_FORMAT":%u", type_to_proto(type),
              CI_IP_PRINTF_ARGS(&sa.sin_addr.s_addr),
              CI_BSWAP_BE16(sa.sin_port));
      break;
    case AF_UNIX:
      bprintf("UNIX,%s", type_to_string(type));
      break;
    default:
      bprintf("%d,%s", sa.sin_family, type_to_string(type));
      break;
    }
    sa_len = sizeof(sa);
    if( sa.sin_family == AF_INET &&
        ci_sys_getpeername(fd, (struct sockaddr*) &sa, &sa_len) == 0 )
      bprintf(","CI_IP_PRINTF_FORMAT":%u",
              CI_IP_PRINTF_ARGS(&sa.sin_addr.s_addr),
              CI_BSWAP_BE16(sa.sin_port));
  }
  bprintf("]");
}
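
Note that bprintf() here must be a macro that captures buf, buf_n and buf_len
from the enclosing scope, since the calls never pass them explicitly. A
minimal sketch of such a macro (an assumption, not the project's actual
definition):

#include <stdio.h>

/* Hypothetical: append formatted text to buf at offset *buf_n, never
 * writing past buf_len.  Once truncated, further calls become no-ops. */
#define bprintf(...)                                                        \
  do {                                                                      \
    if( *buf_n < buf_len )                                                  \
      *buf_n += snprintf(buf + *buf_n, buf_len - *buf_n, __VA_ARGS__);      \
  } while( 0 )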
Example #3
/* Looks up the user-level 'FD info' for a given file descriptor.
** Returns a pointer to the 'FD info', or NULL if the FD is not
** user-level.
** NOTE: The reference count of the 'FD info' is incremented, the
**       caller should ensure the reference is dropped when no
**       longer needed by calling citp_fdinfo_release_ref().
*/
citp_fdinfo* citp_fdtable_lookup_noprobe(unsigned fd)
{
  /* We need to be initialised before we can grab the lock at the
  ** moment.  TODO: make this more efficient by using a trylock to grab
  ** the fdtable lock, and on failure check whether we need to
  ** initialise it.
  */
  if( CI_UNLIKELY(citp.init_level < CITP_INIT_FDTABLE) ) {
    if( _citp_do_init_inprogress == 0 )
      CI_TRY(citp_do_init(CITP_INIT_ALL));
    else
      CI_TRY(citp_do_init(CITP_INIT_FDTABLE)); /* get what we need */

    return NULL;
  }

  if( fd < citp_fdtable.inited_count ) {

    volatile citp_fdinfo_p* p_fdip = &citp_fdtable.table[fd].fdip;
    citp_fdinfo_p fdip;

  again:
    /* Swap in the busy marker. */
    fdip = *p_fdip;
    if( fdip_is_normal(fdip) ) {
      if( fdip_cas_succeed(p_fdip, fdip, fdip_busy) ) {
        /* Bump the reference count. */
        citp_fdinfo* fdi = fdip_to_fdi(fdip);
        citp_fdinfo_ref(fdi);
        /* Swap the busy marker out again. */
        citp_fdtable_busy_clear(fd, fdip, 0);
        return fdi;
      }
      goto again;
    }
    /* Not normal! */
    else if( fdip_is_busy(fdip) ) {
      citp_fdtable_busy_wait(fd, 0);
      goto again;
    }

  }

  return NULL;
}
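
Per the NOTE in the header comment, every successful lookup must be paired
with a reference drop. A sketch of the expected calling pattern (the second
argument to citp_fdinfo_release_ref() mirrors the flag passed to the other
helpers above):

/* Caller honouring the reference-counting contract. */
citp_fdinfo* fdi = citp_fdtable_lookup_noprobe(fd);
if( fdi != NULL ) {
  /* ... use the FD info ... */
  citp_fdinfo_release_ref(fdi, 0);  /* drop the ref taken by the lookup */
}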
Example #4
static int interface_to_ifindex(const char* intf_name)
{
  struct ifreq ifr;
  int rc, sock;
  interface_get_basename(intf_name, ifr.ifr_name);
  CI_TRY(sock = socket(PF_INET, SOCK_DGRAM, IPPROTO_UDP));
  rc = ioctl(sock, SIOCGIFINDEX, &ifr);
  close(sock);
  if( rc == 0 )
    return ifr.ifr_ifindex;
  else
    return rc;
}
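
On failure this returns the raw ioctl() result (-1, with errno set) rather
than an ifindex, so callers must test the sign. For example (the interface
name is purely illustrative):

int ifindex = interface_to_ifindex("eth0");
if( ifindex < 0 )
  ci_log("interface lookup failed (errno=%d)", errno);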
Example #5
int main(int argc, char* argv[])
{
  stack_ni_fn_t* do_stack_ofe = NULL;

  ci_app_usage = usage;
  cfg_lock = 0; /* don't lock when attaching */

  ci_app_getopt("info | stats ... | command ...",
                &argc, argv, cfg_opts, N_CFG_OPTS);
  --argc; ++argv;

  cfg_stack_id = (ci_uint32)(-1);
  if( cfg_stack_name != NULL ) {
    char *str;
    cfg_stack_id = strtoul(cfg_stack_name, &str, 0);
    if( str[0] != '\0' )
      cfg_stack_id = (ci_uint32)(-1);
  }

  if( argc == 0 ) {
    do_stack_ofe = do_ofe_info;
  }
  else if( strcmp(argv[0], "info") == 0 ) {
    do_stack_ofe = do_ofe_info;
    if( argc != 1 )
      ci_app_usage("Do not understand parameters to the \"info\" command");
  }
  else if( strcmp(argv[0], "stats" ) == 0 ) {
    do_stack_ofe = do_ofe_stats;
    cfg_argv = argv + 1;
    cfg_argc = argc - 1;
  }
  else if( strcmp(argv[0], "command") == 0 ||
           strcmp(argv[0], "cmd") == 0 ) {
    do_stack_ofe = do_ofe_command;
    cfg_argv = argv + 1;
    cfg_argc = argc - 1;
  }
  else
    ci_app_usage("info | stats ... | command ...");

  CI_TRY(libstack_init(NULL));
  list_all_stacks2(stackfilter_match_name, do_stack_ofe, NULL, NULL);
  return 0;
}
Example #6
static void onload_fmt(int fd, char* buf, int* buf_n, int buf_len)
{
  ci_ep_info_t info;
  CI_TRY(oo_ep_info(fd, &info));
  switch( info.fd_type ) {
  case CI_PRIV_TYPE_NONE:
    bprintf("onload[]");
    break;
  case CI_PRIV_TYPE_NETIF:
    bprintf("onload[stack,%u]", info.resource_id);
    break;
  case CI_PRIV_TYPE_TCP_EP:
    bprintf("onload[TCP,%u,%d]", info.resource_id, OO_SP_FMT(info.sock_id));
    break;
  case CI_PRIV_TYPE_UDP_EP:
    bprintf("onload[UDP,%u,%d]", info.resource_id, OO_SP_FMT(info.sock_id));
    break;
  default:
    bprintf("onload[type=%d,%u,%d,%lu]", info.fd_type, info.resource_id,
            OO_SP_FMT(info.sock_id), (unsigned long) info.mem_mmap_bytes);
    break;
  }
}
Example #7
static int ci_tcp_shutdown_listen(citp_socket* ep, int how, ci_fd_t fd)
{
  ci_tcp_socket_listen* tls = SOCK_TO_TCP_LISTEN(ep->s);

  if( how == SHUT_WR )
    return 0;

  ci_sock_lock(ep->netif, &tls->s.b);
  ci_netif_lock(ep->netif);
  LOG_TC(ci_log(SK_FMT" shutdown(SHUT_RD)", SK_PRI_ARGS(ep)));
  __ci_tcp_listen_shutdown(ep->netif, tls, fd);
  __ci_tcp_listen_to_normal(ep->netif, tls);
  {
    ci_fd_t os_sock = ci_get_os_sock_fd(ep, fd);
    int flags = ci_sys_fcntl(os_sock, F_GETFL);
    flags &= (~O_NONBLOCK);
    CI_TRY(ci_sys_fcntl(os_sock, F_SETFL, flags));
    ci_rel_os_sock_fd(os_sock);
  }
  ci_netif_unlock(ep->netif);
  ci_sock_unlock(ep->netif, &tls->s.b);
  return 0;
}
Example #8
int citp_fdtable_ctor()
{
  struct rlimit rlim;
  int rc;

  Log_S(log("%s:", __FUNCTION__));

  /* How big should our fdtable be by default?  It's pretty arbitrary,
   * but we have seen a few apps that use setrlimit to set the fdtable to
   * 4096 entries on start-up (see bugs 3253 and 3373), so we choose that.
   * (Note: we can't grow the table if the app later does setrlimit, and
   * unused entries consume virtual space only, so it's worth allocating a
   * table of reasonable size.)
   */
  citp_fdtable.size = 4096;

  if( getrlimit(RLIMIT_NOFILE, &rlim) == 0 ) {
    citp_fdtable.size = rlim.rlim_max;
    if( CITP_OPTS.fdtable_size != 0 &&
        CITP_OPTS.fdtable_size != rlim.rlim_max ) {
      Log_S(ci_log("Set the limits for the number of opened files "
                   "to EF_FDTABLE_SIZE=%u value.",
                   CITP_OPTS.fdtable_size));
      rlim.rlim_max = CITP_OPTS.fdtable_size;
      if( rlim.rlim_cur > rlim.rlim_max )
        rlim.rlim_cur = rlim.rlim_max;
      if( ci_sys_setrlimit(RLIMIT_NOFILE, &rlim) == 0 )
        citp_fdtable.size = rlim.rlim_max;
      else {
        /* Most probably we got EPERM. */
        ci_assert_lt(citp_fdtable.size, CITP_OPTS.fdtable_size);
        ci_log("Can't set EF_FDTABLE_SIZE=%u; using %u",
               CITP_OPTS.fdtable_size, citp_fdtable.size);
        rlim.rlim_max = rlim.rlim_cur = citp_fdtable.size;
        CI_TRY(ci_sys_setrlimit(RLIMIT_NOFILE, &rlim));
      }
    }
  }
  else
    Log_S(ci_log("Assume EF_FDTABLE_SIZE=%u", citp_fdtable.size));

  citp_fdtable.inited_count = 0;

  citp_fdtable.table = ci_libc_malloc(sizeof (citp_fdtable_entry) *
                                      citp_fdtable.size);
  if( ! citp_fdtable.table ) {
    Log_U(log("%s: failed to allocate fdtable (0x%x)", __FUNCTION__,
              citp_fdtable.size));
    return -1;
  }

  /* The whole table is not initialised at start-of-day, but is initialised
  ** on demand.  citp_fdtable.inited_count counts the number of initialised
  ** entries.
  */

  if( (rc = oo_rwlock_ctor(&citp_ul_lock)) != 0 ) {
    Log_E(log("%s: oo_rwlock_ctor %d", __FUNCTION__, rc));
    return -1;
  }

  /* Install SIGONLOAD handler */
  {
    struct sigaction sa;
    memset(&sa, 0, sizeof(sa)); /* sa_flags and sa_mask = 0 */
    sa.sa_handler = sighandler_do_nothing;
    sigaction(SIGONLOAD, &sa, NULL);
  }

  return 0;
}
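
sighandler_do_nothing() is referenced but not defined in this excerpt.
Presumably it is a no-op whose only purpose is to let SIGONLOAD interrupt
blocking system calls without terminating the process; a plausible shape:

/* Assumed definition: an empty handler, so SIGONLOAD is delivered (waking
 * blocking syscalls with EINTR) but otherwise has no effect. */
static void sighandler_do_nothing(int sig)
{
  (void) sig;
}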
Example #9
citp_fdinfo*
citp_fdtable_lookup_fast(citp_lib_context_t* ctx, unsigned fd)
{
  /* Note that if we haven't yet initialised this module, then
  ** [inited_count] will be zero, and the following test will fail.  So the
  ** test for initialisation is done further down...
  **
  ** This is highly performance critical.  DO NOT add any code between here
  ** and the first [return] statement.
  */
  citp_fdinfo* fdi;

  /* Try to avoid entering lib. */
  ctx->thread = NULL;

  if( fd < citp_fdtable.inited_count ) {
    volatile citp_fdinfo_p* p_fdip = &citp_fdtable.table[fd].fdip;
    citp_fdinfo_p fdip;

  again:
    fdip = *p_fdip;
    if( fdip_is_normal(fdip) ) {

      citp_enter_lib_if(ctx);
      if( citp_fdtable_is_mt_safe() ) {
        /* No need to use atomic ops or add a ref to the fdi when MT-safe.
         * The definition of "fds_mt_safe" is that the app does not change
         * the meaning of a file descriptor in one thread when it is being
         * used in another thread.
         */
        fdi = fdip_to_fdi(fdip);
        if( ! citp_fdinfo_is_consistent(fdi) )
          fdi = citp_reprobe_moved(fdi, CI_TRUE, CI_FALSE);

        return fdi;
      }
      else {
        /* Swap in the busy marker. */
        if( fdip_cas_succeed(p_fdip, fdip, fdip_busy) ) {
          fdi = fdip_to_fdi(fdip);

          ci_assert(fdi);
          ci_assert_gt(oo_atomic_read(&fdi->ref_count), 0);
          ci_assert(fdip_is_closing(fdip) || fdip_is_reserved(fdip) ||
                    fdi->fd == fd);
          /* Bump the reference count. */
          citp_fdinfo_ref(fdi);

          if( ! citp_fdinfo_is_consistent(fdi) )
            fdi = citp_reprobe_moved(fdi, CI_FALSE, CI_TRUE);
          else {
            /* Swap the busy marker out again. */
            citp_fdtable_busy_clear(fd, fdip, 0);
          }
          return fdi;
        }
        goto again;
      }
    }

    /* Not normal! */
    if( fdip_is_passthru(fdip) )
      return NULL;

    citp_enter_lib_if(ctx);
    if( fdip_is_busy(fdip) ) {
      citp_fdtable_busy_wait(fd, 0);
      goto again;
    }

    ci_assert(fdip_is_unknown(fdip));
    goto probe;
  }

  if( citp.init_level < CITP_INIT_FDTABLE ) {
    if( _citp_do_init_inprogress == 0 )
      CI_TRY(citp_do_init(CITP_INIT_ALL));
    else
      CI_TRY(citp_do_init(CITP_INIT_FDTABLE)); /* get what we need */
  }

  if( fd >= citp_fdtable.size )
    return NULL;

 probe:
  citp_enter_lib_if(ctx);
  fdi = citp_fdtable_probe(fd);
  if( fdi && citp_fdtable_is_mt_safe() )
    citp_fdinfo_release_ref(fdi, 0);
  return fdi;
}
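
The busy-marker protocol above hinges on fdip_cas_succeed() atomically
swapping the table entry. Its implementation is not shown; one plausible
shape, assuming citp_fdinfo_p is a word-sized tagged value, is a thin wrapper
over a compare-and-swap builtin:

/* Illustrative only: the real helper may use the project's own atomics.
 * Returns non-zero iff *p still held old_v and was replaced by new_v. */
static inline int fdip_cas_succeed(volatile citp_fdinfo_p* p,
                                   citp_fdinfo_p old_v, citp_fdinfo_p new_v)
{
  return __sync_bool_compare_and_swap(p, old_v, new_v);
}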
Example #10
citp_fdinfo *
citp_fdtable_lookup(unsigned fd)
{
  /* Note that if we haven't yet initialised this module, then
  ** [inited_count] will be zero, and the following test will fail.  So the
  ** test for initialisation is done further down...
  **
  ** This is highly performance critical.  DO NOT add any code between here
  ** and the first [return] statement.
  */
  citp_fdinfo* fdi;

  /* In some cases, we'll lock fdtable.  Assert that it is possible: */
  ci_assert(oo_per_thread_get()->sig.inside_lib);

  if( fd < citp_fdtable.inited_count ) {

    volatile citp_fdinfo_p* p_fdip = &citp_fdtable.table[fd].fdip;
    citp_fdinfo_p fdip;

  again:
    /* Swap in the busy marker. */
    fdip = *p_fdip;

    if( fdip_is_normal(fdip) ) {
      if( citp_fdtable_not_mt_safe() ) {
        if( fdip_cas_succeed(p_fdip, fdip, fdip_busy) ) {
          fdi = fdip_to_fdi(fdip);
          ci_assert(fdi);
          ci_assert_gt(oo_atomic_read(&fdi->ref_count), 0);
          ci_assert(fdip_is_closing(fdip) || fdip_is_reserved(fdip) ||
                    fdi->fd == fd);
          /* Bump the reference count. */
          citp_fdinfo_ref(fdi);

          if( ! citp_fdinfo_is_consistent(fdi) ) {
            /* Something is wrong.  Re-probe. */
            fdi = citp_reprobe_moved(fdi, CI_FALSE, CI_TRUE);
          }
          else {
            /* Swap the busy marker out again. */
            citp_fdtable_busy_clear(fd, fdip, 0);
          }
          return fdi;
        }
        goto again;
      }
      else {
        /* No need to use atomic ops when single-threaded.  The definition
         * of "fds_mt_safe" is that the app does not change the meaning of
         * a file descriptor in one thread when it is being used in another
         * thread.  In that case I'm hoping this should be safe, but at the
         * time of writing I'm really not confident.  (FIXME).
         */
        fdi = fdip_to_fdi(fdip);
        if( ci_is_multithreaded() )
          citp_fdinfo_ref(fdi);
        else
          ++fdi->ref_count.n;

        if( ! citp_fdinfo_is_consistent(fdi) )
          fdi = citp_reprobe_moved(fdi, CI_FALSE, CI_FALSE);

        return fdi;
      }
    }

    /* Not normal! */
    if( fdip_is_passthru(fdip) )  return NULL;

    if( fdip_is_busy(fdip) ) {
      citp_fdtable_busy_wait(fd, 0);
      goto again;
    }

    ci_assert(fdip_is_unknown(fdip));
    goto probe;
  }

  if( citp.init_level < CITP_INIT_FDTABLE ) {
    if( _citp_do_init_inprogress == 0 )
      CI_TRY(citp_do_init(CITP_INIT_ALL));
    else
      CI_TRY(citp_do_init(CITP_INIT_FDTABLE)); /* get what we need */
  }

  if( fd >= citp_fdtable.size )  return NULL;

 probe:
  fdi = citp_fdtable_probe(fd);

  return fdi;
}
Example #11
static int ci_tcp_connect_ul_syn_sent(ci_netif *ni, ci_tcp_state *ts)
{
  int rc = 0;

  if( ts->s.b.state == CI_TCP_SYN_SENT ) {
    ci_netif_poll(ni);
    if( OO_SP_NOT_NULL(ts->local_peer) ) {
      /* No reason to sleep: the listener has evidently dropped our SYN
       * for some reason.  Bail out. */
      ci_tcp_drop(ni, ts, EBUSY);
      RET_WITH_ERRNO(EBUSY);
    }
    CI_TCP_SLEEP_WHILE(ni, ts, CI_SB_FLAG_WAKE_RX,
                       ts->s.so.sndtimeo_msec,
                       ts->s.b.state == CI_TCP_SYN_SENT, &rc); 
  }

  if( rc == -EAGAIN ) {
    LOG_TC(log(LNT_FMT "timeout on sleep: %d",
               LNT_PRI_ARGS(ni, ts), -rc));
    if( ! (ts->tcpflags & CI_TCPT_FLAG_NONBLOCK_CONNECT) ) {
      ts->tcpflags |= CI_TCPT_FLAG_NONBLOCK_CONNECT;
      CI_SET_ERROR(rc, EINPROGRESS);
    }
    else
      CI_SET_ERROR(rc, EALREADY);
    return rc;
  }
  else if( rc == -EINTR ) {
    LOG_TC(log(LNT_FMT "connect() was interrupted by a signal", 
               LNT_PRI_ARGS(ni, ts)));
    ts->tcpflags |= CI_TCPT_FLAG_NONBLOCK_CONNECT;
    CI_SET_ERROR(rc, EINTR);
    return rc;
  }

  /*! \TODO propagate the correct error code: CONNREFUSED, NOROUTE, etc. */

  if( ts->s.b.state == CI_TCP_CLOSED ) {
    /* Bug 3558:
     * Set the OS socket state to allow/disallow the next bind().
     * This is a Linux-specific hack. */
#ifdef __ci_driver__
    CI_TRY(efab_tcp_helper_set_tcp_close_os_sock(netif2tcp_helper_resource(ni),
                                                 S_SP(ts)));
#else
    CI_TRY(ci_tcp_helper_set_tcp_close_os_sock(ni, S_SP(ts)));
#endif

    /* We should re-bind the socket on next use if the port was chosen by
     * the OS.
     */
    if( ! (ts->s.s_flags & CI_SOCK_FLAG_PORT_BOUND) )
      ts->s.s_flags |= CI_SOCK_FLAG_CONNECT_MUST_BIND;

    /* - if SO_ERROR is set, handle it and return this value;
     * - else if rx_errno is set, return it;
     * - else (TCP_RX_ERRNO==0, socket is CI_SHUT_RD) return ECONNABORTED */
    if( (rc = ci_tcp_connect_handle_so_error(&ts->s)) == 0 )
      rc = TCP_RX_ERRNO(ts) ? TCP_RX_ERRNO(ts) : ECONNABORTED;
    CI_SET_ERROR(rc, rc);

    if( ! (ts->s.s_flags & CI_SOCK_FLAG_ADDR_BOUND) ) {
      ts->s.pkt.ip.ip_saddr_be32 = 0;
      ts->s.cp.ip_laddr_be32 = 0;
    }
    return rc;
  }

  return 0;
}
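
From the application's side, the EINPROGRESS/EALREADY handling above yields
the standard non-blocking connect() pattern. A sketch of a caller using the
plain sockets API (not Onload internals):

#include <errno.h>
#include <poll.h>
#include <sys/socket.h>

/* Sketch: the first connect() fails with EINPROGRESS, repeats fail with
 * EALREADY, and completion is detected by polling for writability.  A
 * production caller would also check SO_ERROR once writable. */
static int nonblock_connect(int fd, const struct sockaddr* sa,
                            socklen_t sa_len, int timeout_ms)
{
  if( connect(fd, sa, sa_len) == 0 )
    return 0;                              /* connected immediately */
  if( errno != EINPROGRESS && errno != EALREADY )
    return -1;                             /* hard failure */
  struct pollfd pfd = { .fd = fd, .events = POLLOUT };
  return poll(&pfd, 1, timeout_ms) == 1 ? 0 : -1;
}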