Example #1
/* Format a human-readable description of the Onload object behind [fd]
 * into [buf].  Output goes through the bprintf() macro, which is assumed
 * to consume the buf/buf_n/buf_len arguments implicitly. */
static void onload_fmt(int fd, char* buf, int* buf_n, int buf_len)
{
  ci_ep_info_t info;
  CI_TRY(oo_ep_info(fd, &info));
  switch( info.fd_type ) {
  case CI_PRIV_TYPE_NONE:
    bprintf("onload[]");
    break;
  case CI_PRIV_TYPE_NETIF:
    bprintf("onload[stack,%u]", info.resource_id);
    break;
  case CI_PRIV_TYPE_TCP_EP:
    bprintf("onload[TCP,%u,%d]", info.resource_id, OO_SP_FMT(info.sock_id));
    break;
  case CI_PRIV_TYPE_UDP_EP:
    bprintf("onload[UDP,%u,%d]", info.resource_id, OO_SP_FMT(info.sock_id));
    break;
  default:
    bprintf("onload[type=%d,%u,%d,%lu]", info.fd_type, info.resource_id,
            OO_SP_FMT(info.sock_id), (unsigned long) info.mem_mmap_bytes);
    break;
  }
}
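
The buf, buf_n and buf_len parameters are never referenced directly in the
body above: they are consumed implicitly by the bprintf() macro, whose
definition is not part of this example.  A minimal sketch of what such a
macro presumably looks like (a bounded, appending snprintf() wrapper; the
real Onload definition may differ) is:

#include <stdio.h>

/* Hypothetical reconstruction of bprintf(): append formatted text at
 * *buf_n, never writing past buf_len.  Relies on buf, buf_n and buf_len
 * being in scope, exactly as in onload_fmt() above.  (##__VA_ARGS__ is a
 * GNU extension, in keeping with this codebase's gcc heritage.) */
#define bprintf(fmt, ...)                                        \
  do {                                                           \
    if( *buf_n < buf_len )                                       \
      *buf_n += snprintf(buf + *buf_n, buf_len - *buf_n,         \
                         fmt, ##__VA_ARGS__);                    \
  } while( 0 )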
Example #2
/* Find out what sort of thing [fd] is, and if it is a user-level socket
 * then map in the user-level state.
 */
static citp_fdinfo * citp_fdtable_probe_locked(unsigned fd, int print_banner,
                                               int fdip_is_already_busy)
{
  citp_fdinfo* fdi = NULL;
  struct stat64 st;
  ci_ep_info_t info;

  if( ! fdip_is_already_busy ) {
    volatile citp_fdinfo_p* p_fdip;
    citp_fdinfo_p fdip;
    /* ?? We're repeating some effort already expended in lookup() here, but
    ** this keeps it cleaner.  May optimise down the line when I understand
    ** what other code needs to call this.
    */
    
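    /* Claim this fdtable slot by atomically swapping in the "busy"
     * marker; any other thread that observes "busy" spins in
     * citp_fdtable_busy_wait() until we publish a final state. */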
    p_fdip = &citp_fdtable.table[fd].fdip;
   again:
    fdip = *p_fdip;
    if( fdip_is_busy(fdip) )  fdip = citp_fdtable_busy_wait(fd, 1);
    if( ! fdip_is_unknown(fdip) && ! fdip_is_normal(fdip) )  goto exit;
    if( fdip_cas_fail(p_fdip, fdip, fdip_busy) )  goto again;
    
    if( fdip_is_normal(fdip) ) {
      fdi = fdip_to_fdi(fdip);
      citp_fdinfo_ref(fdi);
      citp_fdtable_busy_clear(fd, fdip, 1);
      goto exit;
    }
  }

  if( ci_sys_fstat64(fd, &st) != 0 ) {
    /* fstat() failed.  Must be a bad (closed) file descriptor, so
    ** leave this entry as unknown.  Return citp_the_closed_fd to avoid the
    ** caller passing through to an fd that is created asynchronously.
    */
    citp_fdtable_busy_clear(fd, fdip_unknown, 1);
    fdi = &citp_the_closed_fd;
    citp_fdinfo_ref(fdi);
    goto exit;
  }

  /* oo_get_st_rdev() and oo_onloadfs_dev_t() open and close an fd
   * internally, so the fdtable must be locked if strict mode is in
   * effect. */
  if( fdtable_strict() )  { CITP_FDTABLE_ASSERT_LOCKED(1); }

  if( st.st_dev == oo_onloadfs_dev_t() ) {
    /* Retrieve user-level endpoint info */
    if( oo_ep_info(fd, &info) < 0 ) {
      /* oo_ep_info() failed, so [info] may not have been filled in:
       * don't log info.fd_type here. */
      Log_V(log("%s: fd=%d oo_ep_info failed", __FUNCTION__, fd));
      citp_fdtable_busy_clear(fd, fdip_passthru, 1);
      goto exit;
    }

    switch( info.fd_type ) {
    case CI_PRIV_TYPE_TCP_EP:
    case CI_PRIV_TYPE_UDP_EP:
    case CI_PRIV_TYPE_PASSTHROUGH_EP:
    case CI_PRIV_TYPE_ALIEN_EP:
#if CI_CFG_USERSPACE_PIPE
    case CI_PRIV_TYPE_PIPE_READER:
    case CI_PRIV_TYPE_PIPE_WRITER:
#endif
    {
      citp_fdinfo_p fdip;

      Log_V(log("%s: fd=%d %s restore", __FUNCTION__, fd,
                info.fd_type == CI_PRIV_TYPE_TCP_EP ? "TCP" :
#if CI_CFG_USERSPACE_PIPE
                info.fd_type != CI_PRIV_TYPE_UDP_EP ? "PIPE" :
#endif
                "UDP"));
      fdip = citp_fdtable_probe_restore(fd, &info, print_banner);
      if( fdip_is_normal(fdip) )
        fdi = fdip_to_fdi(fdip);
      else
        citp_fdtable_busy_clear(fd, fdip, 1);
      goto exit;
    }

    case CI_PRIV_TYPE_NETIF:
      /* This should never happen, because netif fds are close-on-exec.
      ** But let's leave this code here just in case my reasoning is bad.
      */
      Log_U(log("%s: fd=%d NETIF reserved", __FUNCTION__, fd));
      citp_fdtable_busy_clear(fd, fdip_reserved, 1);
      fdi = &citp_the_reserved_fd;
      citp_fdinfo_ref(fdi);
      goto exit;

    case CI_PRIV_TYPE_NONE:
      /* This happens if a thread gets at an onload driver fd that has just
       * been created, but not yet specialised.  On Linux I think this
       * means it will shortly be a new netif internal fd.  (fds associated
       * with sockets and pipes are never unspecialised).
       */
      Log_V(log("%s: fd=%d TYPE_NONE", __FUNCTION__, fd));
      citp_fdtable_busy_clear(fd, fdip_passthru, 1);
      goto exit;

    default:
      CI_TEST(0);
      break;
    }
  }
  else if( ci_major(st.st_rdev) == ci_major(oo_get_st_rdev(OO_EPOLL_DEV)) ) {
    citp_epollb_fdi *epi = CI_ALLOC_OBJ(citp_epollb_fdi);
    if( ! epi ) {
      Log_E(log("%s: out of memory (epoll_fdi)", __FUNCTION__));
      citp_fdtable_busy_clear(fd, fdip_passthru, 1);
      goto exit;
    }
    oo_epollb_ctor(epi);
    fdi = &epi->fdinfo;
    citp_fdinfo_init(fdi, &citp_epollb_protocol_impl);
    citp_fdinfo_ref(fdi);
    citp_fdtable_insert(fdi, fd, 1);
    goto exit;
  }

#ifndef NDEBUG
  /* Only netif fds should still be open on /dev/onload at this point;
   * they are closed on fork or exec, so log if we see one. */
  if( ci_major(st.st_rdev) == ci_major(oo_get_st_rdev(OO_STACK_DEV)) )
    Log_U(log("%s: %d is /dev/onload", __FUNCTION__, fd));
#endif

  /* Not one of ours, so pass-through. */
  Log_V(log("%s: fd=%u non-efab", __FUNCTION__, fd));
  citp_fdtable_busy_clear(fd, fdip_passthru, 1);

 exit:
  return fdi;
}
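
The probe depends on a small lock-free protocol around each fdtable slot:
a slot holds a normal fdinfo pointer, a sentinel state (unknown, passthru,
reserved), or a transient "busy" marker that the probing thread installs
with a compare-and-swap, as in the again: loop above.  The fdip_* helpers
themselves are not shown in this example; a minimal standalone sketch of
the same claim/publish pattern, using C11 atomics and hypothetical names,
would be:

#include <stdatomic.h>
#include <stdint.h>

/* Hypothetical slot states, standing in for the real fdip encodings;
 * any other value is taken to be an fdinfo pointer. */
enum { SLOT_UNKNOWN = 0, SLOT_BUSY = 1 };

typedef struct {
  _Atomic uintptr_t fdip;
} fd_slot;

/* Claim a slot: wait while it is busy, then CAS in the busy marker.
 * Mirrors the again:/fdip_cas_fail() loop in citp_fdtable_probe_locked()
 * (the real code blocks in citp_fdtable_busy_wait() instead of
 * spinning). */
static uintptr_t slot_claim(fd_slot* s)
{
  uintptr_t cur;
  for( ; ; ) {
    cur = atomic_load(&s->fdip);
    if( cur == SLOT_BUSY )
      continue;                 /* another thread is probing this fd */
    if( atomic_compare_exchange_weak(&s->fdip, &cur, SLOT_BUSY) )
      return cur;               /* we own the slot; probe the fd now */
  }
}

/* Publish the probe result, releasing any waiters: the analogue of
 * citp_fdtable_busy_clear() above. */
static void slot_publish(fd_slot* s, uintptr_t new_state)
{
  atomic_store(&s->fdip, new_state);
}

The invariant matches the real code: a thread that observes "busy" must
wait rather than use the slot, and the owner always publishes a terminal
state (a normal fdinfo, unknown, passthru or reserved) before returning.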