void __citp_fdinfo_ref_count_zero(citp_fdinfo* fdi, int fdt_locked)
{
  Log_V(log("%s: fd=%d on_rcz=%d", __FUNCTION__, fdi->fd,
            fdi->on_ref_count_zero));

  citp_fdinfo_assert_valid(fdi);
  ci_assert(oo_atomic_read(&fdi->ref_count) == 0);
  ci_assert_ge(fdi->fd, 0);
  ci_assert_lt(fdi->fd, citp_fdtable.inited_count);
  ci_assert_nequal(fdi_to_fdip(fdi), citp_fdtable.table[fdi->fd].fdip);

  switch( fdi->on_ref_count_zero ) {
  case FDI_ON_RCZ_CLOSE:
#if CI_CFG_FD_CACHING
    if( citp_fdinfo_get_ops(fdi)->cache(fdi) == 1 ) {
      if( ! fdt_locked && fdtable_strict() )  CITP_FDTABLE_LOCK();
      fdtable_swap(fdi->fd, fdip_closing, fdip_unknown,
                   fdt_locked | fdtable_strict());
      citp_fdinfo_get_ops(fdi)->dtor(fdi, fdt_locked | fdtable_strict());
      if( ! fdt_locked && fdtable_strict() )  CITP_FDTABLE_UNLOCK();
      citp_fdinfo_free(fdi);
      break;
    }
    else
#endif
    {
      if( ! fdt_locked && fdtable_strict() )  CITP_FDTABLE_LOCK();
      ci_tcp_helper_close_no_trampoline(fdi->fd);
      /* The swap must occur after the close, otherwise another thread could
       * cause a probe of the old endpoint info, which is about to be freed.
       */
      fdtable_swap(fdi->fd, fdip_closing, fdip_unknown,
                   fdt_locked | fdtable_strict());
      citp_fdinfo_get_ops(fdi)->dtor(fdi, fdt_locked | fdtable_strict());
      if( ! fdt_locked && fdtable_strict() )  CITP_FDTABLE_UNLOCK();
      citp_fdinfo_free(fdi);
      break;
    }
  case FDI_ON_RCZ_DUP2:
    dup2_complete(fdi, fdi_to_fdip(fdi), fdt_locked);
    break;
  case FDI_ON_RCZ_HANDOVER:
    citp_fdinfo_do_handover(fdi, fdt_locked);
    break;
  case FDI_ON_RCZ_MOVED:
    citp_fdinfo_get_ops(fdi)->dtor(fdi, fdt_locked);
    citp_fdinfo_free(fdi);
    break;
  default:
    CI_DEBUG(ci_log("%s: fd=%d on_ref_count_zero=%d", __FUNCTION__,
                    fdi->fd, fdi->on_ref_count_zero));
    ci_assert(0);
  }
}
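/* For illustration only: a minimal sketch (not the actual Onload
 * implementation) of how dropping the last reference is expected to reach
 * __citp_fdinfo_ref_count_zero().  The helper name below is hypothetical,
 * and it assumes an oo_atomic_dec_and_test()-style primitive exists for
 * oo_atomic_t; the real release helper lives elsewhere.
 */
#if 0
static inline void example_fdinfo_release_ref(citp_fdinfo* fdi, int fdt_locked)
{
  /* Drop one reference; whoever drops the last one runs the
   * on_ref_count_zero action chosen when the fd was retired. */
  if( oo_atomic_dec_and_test(&fdi->ref_count) )
    __citp_fdinfo_ref_count_zero(fdi, fdt_locked);
}
#endif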
/* Find out what sort of thing [fd] is, and if it is a user-level socket
 * then map in the user-level state.
 */
static citp_fdinfo * citp_fdtable_probe_locked(unsigned fd, int print_banner,
                                               int fdip_is_already_busy)
{
  citp_fdinfo* fdi = NULL;
  struct stat64 st;
  ci_ep_info_t info;

  if( ! fdip_is_already_busy ) {
    volatile citp_fdinfo_p* p_fdip;
    citp_fdinfo_p fdip;
    /* ?? We're repeating some effort already expended in lookup() here, but
    ** this keeps it cleaner.  May optimise down the line when I understand
    ** what other code needs to call this.
    */
    p_fdip = &citp_fdtable.table[fd].fdip;
   again:
    fdip = *p_fdip;
    if( fdip_is_busy(fdip) )
      fdip = citp_fdtable_busy_wait(fd, 1);
    if( ! fdip_is_unknown(fdip) && ! fdip_is_normal(fdip) )
      goto exit;
    if( fdip_cas_fail(p_fdip, fdip, fdip_busy) )
      goto again;

    if( fdip_is_normal(fdip) ) {
      fdi = fdip_to_fdi(fdip);
      citp_fdinfo_ref(fdi);
      citp_fdtable_busy_clear(fd, fdip, 1);
      goto exit;
    }
  }

  if( ci_sys_fstat64(fd, &st) != 0 ) {
    /* fstat() failed.  Must be a bad (closed) file descriptor, so
    ** leave this entry as unknown.  Return citp_the_closed_fd to avoid the
    ** caller passing through to an fd that is created asynchronously.
    */
    citp_fdtable_busy_clear(fd, fdip_unknown, 1);
    fdi = &citp_the_closed_fd;
    citp_fdinfo_ref(fdi);
    goto exit;
  }

  /* oo_get_st_rdev() and oo_onloadfs_dev_t() open-and-close fd, so
   * fdtable should be locked if strict mode requested.
   */
  if( fdtable_strict() ) { CITP_FDTABLE_ASSERT_LOCKED(1); }

  if( st.st_dev == oo_onloadfs_dev_t() ) {
    /* Retrieve user-level endpoint info */
    if( oo_ep_info(fd, &info) < 0 ) {
      Log_V(log("%s: fd=%d type=%d unknown", __FUNCTION__, fd, info.fd_type));
      citp_fdtable_busy_clear(fd, fdip_passthru, 1);
      goto exit;
    }

    switch( info.fd_type ) {
    case CI_PRIV_TYPE_TCP_EP:
    case CI_PRIV_TYPE_UDP_EP:
    case CI_PRIV_TYPE_PASSTHROUGH_EP:
    case CI_PRIV_TYPE_ALIEN_EP:
#if CI_CFG_USERSPACE_PIPE
    case CI_PRIV_TYPE_PIPE_READER:
    case CI_PRIV_TYPE_PIPE_WRITER:
#endif
    {
      citp_fdinfo_p fdip;

      Log_V(log("%s: fd=%d %s restore", __FUNCTION__, fd,
                info.fd_type == CI_PRIV_TYPE_TCP_EP ? "TCP":
#if CI_CFG_USERSPACE_PIPE
                info.fd_type != CI_PRIV_TYPE_UDP_EP ? "PIPE" :
#endif
                "UDP"));
      fdip = citp_fdtable_probe_restore(fd, &info, print_banner);
      if( fdip_is_normal(fdip) )
        fdi = fdip_to_fdi(fdip);
      else
        citp_fdtable_busy_clear(fd, fdip, 1);
      goto exit;
    }

    case CI_PRIV_TYPE_NETIF:
      /* This should never happen, because netif fds are close-on-exec.
      ** But let's leave this code here just in case my reasoning is bad.
      */
      Log_U(log("%s: fd=%d NETIF reserved", __FUNCTION__, fd));
      citp_fdtable_busy_clear(fd, fdip_reserved, 1);
      fdi = &citp_the_reserved_fd;
      citp_fdinfo_ref(fdi);
      goto exit;

    case CI_PRIV_TYPE_NONE:
      /* This happens if a thread gets at an onload driver fd that has just
       * been created, but not yet specialised.  On Linux I think this means
       * it will shortly be a new netif internal fd.  (fds associated with
       * sockets and pipes are never unspecialised.)
       */
      Log_V(log("%s: fd=%d TYPE_NONE", __FUNCTION__, fd));
      citp_fdtable_busy_clear(fd, fdip_passthru, 1);
      goto exit;

    default:
      CI_TEST(0);
      break;
    }
  }
  else if( ci_major(st.st_rdev) == ci_major(oo_get_st_rdev(OO_EPOLL_DEV)) ) {
    citp_epollb_fdi *epi = CI_ALLOC_OBJ(citp_epollb_fdi);
    if( ! epi ) {
      Log_E(log("%s: out of memory (epoll_fdi)", __FUNCTION__));
      citp_fdtable_busy_clear(fd, fdip_passthru, 1);
      goto exit;
    }
    oo_epollb_ctor(epi);
    fdi = &epi->fdinfo;
    citp_fdinfo_init(fdi, &citp_epollb_protocol_impl);
    citp_fdinfo_ref(fdi);
    citp_fdtable_insert(fdi, fd, 1);
    goto exit;
  }

#ifndef NDEBUG
  /* /dev/onload may be netif only; they are closed on fork or exec */
  if( ci_major(st.st_rdev) == ci_major(oo_get_st_rdev(OO_STACK_DEV)) )
    Log_U(log("%s: %d is /dev/onload", __FUNCTION__, fd));
#endif

  /* Not one of ours, so pass-through. */
  Log_V(log("%s: fd=%u non-efab", __FUNCTION__, fd));
  citp_fdtable_busy_clear(fd, fdip_passthru, 1);

 exit:
  return fdi;
}
int citp_ep_close(unsigned fd)
{
  volatile citp_fdinfo_p* p_fdip;
  citp_fdinfo_p fdip;
  int rc, got_lock;
  citp_fdinfo* fdi;

  /* Do not touch shared fdtable when in vfork child. */
  if( oo_per_thread_get()->in_vfork_child )
    return ci_tcp_helper_close_no_trampoline(fd);

  /* Interlock against other closes, against the fdtable being extended,
  ** and against select and poll.
  */
  CITP_FDTABLE_LOCK();
  got_lock = 1;

  __citp_fdtable_extend(fd);

  if( fd >= citp_fdtable.inited_count ) {
    rc = ci_sys_close(fd);
    goto done;
  }

  p_fdip = &citp_fdtable.table[fd].fdip;
 again:
  fdip = *p_fdip;
  if( fdip_is_busy(fdip) )
    fdip = citp_fdtable_busy_wait(fd, 1);

  if( fdip_is_closing(fdip) | fdip_is_reserved(fdip) ) {
    /* Concurrent close or attempt to close reserved. */
    Log_V(ci_log("%s: fd=%d closing=%d reserved=%d", __FUNCTION__, fd,
                 fdip_is_closing(fdip), fdip_is_reserved(fdip)));
    errno = EBADF;
    rc = -1;
    goto done;
  }

#if CI_CFG_FD_CACHING
  /* Need to check in case this sucker's cached */
  if( fdip_is_unknown(fdip) ) {
    fdi = citp_fdtable_probe_locked(fd, CI_FALSE, CI_FALSE);
    if( fdi == &citp_the_closed_fd ) {
      citp_fdinfo_release_ref(fdi, CI_TRUE);
      errno = EBADF;
      rc = -1;
      goto done;
    }
    if( fdi )
      citp_fdinfo_release_ref(fdi, CI_TRUE);
  }
#endif

  ci_assert(fdip_is_normal(fdip) | fdip_is_passthru(fdip) |
            fdip_is_unknown(fdip));

  /* Swap in the "closed" pseudo-fdinfo.  This lets any other thread know
  ** that we're in the middle of closing this fd.
  */
  if( fdip_cas_fail(p_fdip, fdip, fdip_closing) )
    goto again;

  if( fdip_is_normal(fdip) ) {
    fdi = fdip_to_fdi(fdip);

    CITP_FDTABLE_UNLOCK();
    got_lock = 0;

    if( fdi->is_special ) {
      Log_V(ci_log("%s: fd=%d is_special, returning EBADF", __FUNCTION__, fd));
      errno = EBADF;
      rc = -1;
      fdtable_swap(fd, fdip_closing, fdip, 0);
      goto done;
    }

    Log_V(ci_log("%s: fd=%d u/l socket", __FUNCTION__, fd));
    ci_assert_equal(fdi->fd, fd);
    ci_assert_equal(fdi->on_ref_count_zero, FDI_ON_RCZ_NONE);
    fdi->on_ref_count_zero = FDI_ON_RCZ_CLOSE;

    if( fdi->epoll_fd >= 0 ) {
      citp_fdinfo* epoll_fdi = citp_epoll_fdi_from_member(fdi, 0);
      if( epoll_fdi ) {
        if( epoll_fdi->protocol->type == CITP_EPOLL_FD )
          citp_epoll_on_close(epoll_fdi, fdi, 0);
        citp_fdinfo_release_ref(epoll_fdi, 0);
      }
    }

    citp_fdinfo_release_ref(fdi, 0);
    rc = 0;
  }
  else {
    ci_assert(fdip_is_passthru(fdip) || fdip_is_unknown(fdip));
    if( ! fdtable_strict() ) {
      CITP_FDTABLE_UNLOCK();
      got_lock = 0;
    }
    Log_V(ci_log("%s: fd=%d passthru=%d unknown=%d", __FUNCTION__, fd,
                 fdip_is_passthru(fdip), fdip_is_unknown(fdip)));
    fdtable_swap(fd, fdip_closing, fdip_unknown, fdtable_strict());
    rc = ci_tcp_helper_close_no_trampoline(fd);
  }

 done:
  if( got_lock )
    CITP_FDTABLE_UNLOCK();
  FDTABLE_ASSERT_VALID();
  return rc;
}
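/* For illustration only: a hedged sketch of how an intercepted close() might
 * route through citp_ep_close().  The wrapper name and the exact readiness
 * check are hypothetical, not the actual Onload entry point.
 */
#if 0
int example_close_intercept(int fd)
{
  if( fd < 0 ) {
    errno = EBADF;
    return -1;
  }
  /* If the user-level state is usable, citp_ep_close() handles accelerated,
   * passthrough and unknown descriptors alike; otherwise fall straight
   * through to the kernel. */
  if( citp.init_level >= CITP_INIT_FDTABLE )
    return citp_ep_close(fd);
  return ci_sys_close(fd);
}
#endif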
/*
** Why do these live here?  Because they need to hack into the low-level
** dirty nastiness of the fdtable.
*/
int citp_ep_dup(unsigned oldfd, int (*syscall)(int oldfd, long arg), long arg)
{
  /* This implements dup(oldfd) and fcntl(oldfd, F_DUPFD, arg). */

  volatile citp_fdinfo_p* p_oldfdip;
  citp_fdinfo_p oldfdip;
  citp_fdinfo* newfdi = 0;
  citp_fdinfo* oldfdi;
  int newfd;

  Log_V(log("%s(%d)", __FUNCTION__, oldfd));

  if(CI_UNLIKELY( citp.init_level < CITP_INIT_FDTABLE ||
                  oo_per_thread_get()->in_vfork_child ))
    /* Lib not initialised, so no U/L state, and therefore system dup()
    ** will do just fine.
    */
    return syscall(oldfd, arg);

  if( oldfd >= citp_fdtable.inited_count ) {
    /* NB. We can't just pass through in this case because we need to worry
    ** about other threads racing with us.  So we need to be able to lock
    ** this fd while we do the dup.
    */
    ci_assert(oldfd < citp_fdtable.size);
    CITP_FDTABLE_LOCK();
    __citp_fdtable_extend(oldfd);
    CITP_FDTABLE_UNLOCK();
  }

  p_oldfdip = &citp_fdtable.table[oldfd].fdip;
 again:
  oldfdip = *p_oldfdip;
  if( fdip_is_busy(oldfdip) )
    oldfdip = citp_fdtable_busy_wait(oldfd, 0);
  if( fdip_is_closing(oldfdip) | fdip_is_reserved(oldfdip) ) {
    errno = EBADF;
    return -1;
  }
#if CI_CFG_FD_CACHING
  /* Need to check in case this sucker's cached */
  if( fdip_is_unknown(oldfdip) ) {
    CITP_FDTABLE_LOCK();
    oldfdi = citp_fdtable_probe_locked(oldfd, CI_FALSE, CI_FALSE);
    CITP_FDTABLE_UNLOCK();
    if( oldfdi == &citp_the_closed_fd ) {
      citp_fdinfo_release_ref(oldfdi, CI_TRUE);
      errno = EBADF;
      return -1;
    }
    if( oldfdi )
      citp_fdinfo_release_ref(oldfdi, CI_TRUE);
  }
#endif
  if( fdip_cas_fail(p_oldfdip, oldfdip, fdip_busy) )
    goto again;

#if CI_CFG_FD_CACHING
  /* May end up with multiple refs to this, don't allow it to be cached. */
  if( fdip_is_normal(oldfdip) )
    fdip_to_fdi(oldfdip)->can_cache = 0;
#endif

  if( fdip_is_normal(oldfdip) &&
      (((oldfdi = fdip_to_fdi(oldfdip))->protocol->type) == CITP_EPOLL_FD) ) {
    newfdi = citp_fdinfo_get_ops(oldfdi)->dup(oldfdi);
    if( ! newfdi ) {
      citp_fdtable_busy_clear(oldfd, oldfdip, 0);
      errno = ENOMEM;
      return -1;
    }

    if( fdtable_strict() )  CITP_FDTABLE_LOCK();
    newfd = syscall(oldfd, arg);
    if( newfd >= 0 )
      citp_fdtable_new_fd_set(newfd, fdip_busy, fdtable_strict());
    if( fdtable_strict() )  CITP_FDTABLE_UNLOCK();
    if( newfd >= 0 ) {
      citp_fdtable_insert(newfdi, newfd, 0);
      newfdi = 0;
    }
  }
  else {
    if( fdtable_strict() )  CITP_FDTABLE_LOCK();
    newfd = syscall(oldfd, arg);
    if( newfd >= 0 && newfd < citp_fdtable.inited_count ) {
      /* Mark newfd as unknown.  When used, it'll get probed.
       *
       * We are not just being lazy here: Setting to unknown rather than
       * installing a proper fdi (when oldfd is accelerated) is essential to
       * vfork()+dup()+exec() working properly.  Reason is that child and
       * parent share address space, so child is modifying the parent's
       * fdtable.  Setting an entry to unknown is safe.
       */
      citp_fdtable_new_fd_set(newfd, fdip_unknown, fdtable_strict());
    }
    if( fdtable_strict() )  CITP_FDTABLE_UNLOCK();
  }

  citp_fdtable_busy_clear(oldfd, oldfdip, 0);
  if( newfdi )
    citp_fdinfo_free(newfdi);
  return newfd;
}
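/* For illustration only: citp_ep_dup() takes a callback that performs the
 * underlying duplication, so that the same fdtable bookkeeping can serve both
 * dup() and fcntl(F_DUPFD).  The two thunks below are a hedged sketch with
 * hypothetical names; the real wrappers may differ.
 */
#if 0
static int example_dup_via_dup(int oldfd, long arg_unused)
{
  (void) arg_unused;
  return ci_sys_dup(oldfd);                  /* plain dup(oldfd) */
}

static int example_dup_via_fcntl(int oldfd, long arg)
{
  return ci_sys_fcntl(oldfd, F_DUPFD, arg);  /* fcntl(oldfd, F_DUPFD, arg) */
}

/* e.g. an intercepted dup() would call:
 *   citp_ep_dup(oldfd, example_dup_via_dup, 0);
 * and an intercepted fcntl(F_DUPFD):
 *   citp_ep_dup(oldfd, example_dup_via_fcntl, arg);
 */
#endif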
/* we don't register protocol impl */
int citp_pipe_create(int fds[2], int flags)
{
  citp_pipe_fdi* epi_read;
  citp_pipe_fdi* epi_write;
  struct oo_pipe* p = NULL;    /* make compiler happy */
  ci_netif* ni;
  int rc = -1;
  ef_driver_handle fd = -1;

  Log_V(log(LPF "pipe()"));

  /* citp_netif_exists() does not need citp_ul_lock here */
  if( CITP_OPTS.ul_pipe == CI_UNIX_PIPE_ACCELERATE_IF_NETIF &&
      ! citp_netif_exists() ) {
    return CITP_NOT_HANDLED;
  }

  rc = citp_netif_alloc_and_init(&fd, &ni);
  if( rc != 0 ) {
    if( rc == CI_SOCKET_HANDOVER ) {
      /* This implies EF_DONT_ACCELERATE is set, so we handover
       * regardless of CITP_OPTS.no_fail */
      return CITP_NOT_HANDLED;
    }
    /* may be lib mismatch - errno will be ELIBACC */
    goto fail1;
  }
  rc = -1;

  CI_MAGIC_CHECK(ni, NETIF_MAGIC);

  /* add another reference as we have 2 fdis */
  citp_netif_add_ref(ni);

  epi_read = citp_pipe_epi_alloc(ni, O_RDONLY);
  if( epi_read == NULL )
    goto fail2;
  epi_write = citp_pipe_epi_alloc(ni, O_WRONLY);
  if( epi_write == NULL )
    goto fail3;

  /* oo_pipe init code */
  if( fdtable_strict() )  CITP_FDTABLE_LOCK();

  rc = oo_pipe_ctor(ni, &p, fds, flags);
  if( rc < 0 )
    goto fail4;

  citp_fdtable_new_fd_set(fds[0], fdip_busy, fdtable_strict());
  citp_fdtable_new_fd_set(fds[1], fdip_busy, fdtable_strict());

  if( fdtable_strict() )  CITP_FDTABLE_UNLOCK();

  LOG_PIPE("%s: pipe=%p id=%d", __FUNCTION__, p, p->b.bufid);

  /* as pipe is created it should be attached to the end-points */
  epi_read->pipe = p;
  epi_write->pipe = p;

  /* We're ready.  Unleash us onto the world! */
  ci_assert(epi_read->pipe->b.sb_aflags & CI_SB_AFLAG_NOT_READY);
  ci_assert(epi_write->pipe->b.sb_aflags & CI_SB_AFLAG_NOT_READY);
  ci_atomic32_and(&epi_read->pipe->b.sb_aflags, ~CI_SB_AFLAG_NOT_READY);
  ci_atomic32_and(&epi_write->pipe->b.sb_aflags, ~CI_SB_AFLAG_NOT_READY);
  citp_fdtable_insert(&epi_read->fdinfo, fds[0], 0);
  citp_fdtable_insert(&epi_write->fdinfo, fds[1], 0);

  CI_MAGIC_CHECK(ni, NETIF_MAGIC);

  return 0;

 fail4:
  if( fdtable_strict() )  CITP_FDTABLE_UNLOCK();
 fail3:
  CI_FREE_OBJ(epi_write);
 fail2:
  CI_FREE_OBJ(epi_read);
  citp_netif_release_ref(ni, 0);
  citp_netif_release_ref(ni, 0);
 fail1:
  if( CITP_OPTS.no_fail && errno != ELIBACC ) {
    Log_U(ci_log("%s: failed (errno:%d) - PASSING TO OS", __FUNCTION__,
                 errno));
    return CITP_NOT_HANDLED;
  }
  return rc;
}
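/* For illustration only: citp_pipe_create() returns CITP_NOT_HANDLED when
 * acceleration should not be attempted, in which case the caller is expected
 * to fall back to the kernel pipe.  Hedged sketch with hypothetical names;
 * the real pipe()/pipe2() intercept and its flags handling may differ.
 */
#if 0
int example_pipe_intercept(int fds[2], int flags)
{
  int rc = citp_pipe_create(fds, flags);
  if( rc == CITP_NOT_HANDLED )
    rc = ci_sys_pipe(fds);   /* plain kernel pipe(); flags ignored here */
  return rc;
}
#endif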
static int citp_udp_socket(int domain, int type, int protocol)
{
  citp_fdinfo* fdi;
  citp_sock_fdi* epi;
  ef_driver_handle fd;
  int rc;
  ci_netif* ni;

  Log_V(log(LPF "socket(%d, %d, %d)", domain, type, protocol));

  epi = CI_ALLOC_OBJ(citp_sock_fdi);
  if( ! epi ) {
    Log_U(ci_log(LPF "socket: failed to allocate epi"));
    errno = ENOMEM;
    goto fail1;
  }
  fdi = &epi->fdinfo;
  citp_fdinfo_init(fdi, &citp_udp_protocol_impl);

  rc = citp_netif_alloc_and_init(&fd, &ni);
  if( rc != 0 ) {
    if( rc == CI_SOCKET_HANDOVER ) {
      /* This implies EF_DONT_ACCELERATE is set, so we handover
       * regardless of CITP_OPTS.no_fail */
      CI_FREE_OBJ(epi);
      return rc;
    }
    goto fail2;
  }

  /* Protect the fdtable entry until we're done initialising. */
  if( fdtable_strict() )  CITP_FDTABLE_LOCK();

  if( (fd = ci_udp_ep_ctor(&epi->sock, ni, domain, type)) < 0 ) {
    /*! ?? \TODO unpick the ci_udp_ep_ctor according to how failed */
    Log_U(ci_log(LPF "socket: udp_ep_ctor failed"));
    errno = -fd;
    goto fail3;
  }

  citp_fdtable_new_fd_set(fd, fdip_busy, fdtable_strict());
  if( fdtable_strict() )  CITP_FDTABLE_UNLOCK();

  CI_DEBUG(epi->sock.s->pid = getpid());

  /* We're ready.  Unleash us onto the world! */
  ci_assert(epi->sock.s->b.sb_aflags & CI_SB_AFLAG_NOT_READY);
  ci_atomic32_and(&epi->sock.s->b.sb_aflags, ~CI_SB_AFLAG_NOT_READY);
  citp_fdtable_insert(fdi, fd, 0);

  Log_VSS(log(LPF "socket(%d, %d, %d) = "EF_FMT, domain, type, protocol,
              EF_PRI_ARGS(epi, fd)));
  return fd;

 fail3:
  /* Drop the fdtable lock taken above before bailing out. */
  if( fdtable_strict() )  CITP_FDTABLE_UNLOCK();
  if( CITP_OPTS.no_fail && errno != ELIBACC )
    CITP_STATS_NETIF(++ni->state->stats.udp_handover_socket);
  citp_netif_release_ref(ni, 0);
 fail2:
  CI_FREE_OBJ(epi);
 fail1:
  /* BUG1408: Graceful failure.  We'll only fail outright if there's a
   * driver/library mismatch */
  if( CITP_OPTS.no_fail && errno != ELIBACC ) {
    Log_U(ci_log("%s: failed (errno:%d) - PASSING TO OS", __FUNCTION__,
                 errno));
    return CI_SOCKET_HANDOVER;
  }
  return -1;
}
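/* For illustration only: how a socket() intercept might consume the three
 * return classes of citp_udp_socket() above (>= 0: accelerated fd,
 * CI_SOCKET_HANDOVER: create an ordinary kernel socket instead, -1: hard
 * failure with errno set).  The wrapper below is a hypothetical sketch, not
 * the real entry point.
 */
#if 0
int example_udp_socket_intercept(int domain, int type, int protocol)
{
  int rc = citp_udp_socket(domain, type, protocol);
  if( rc == CI_SOCKET_HANDOVER )
    /* Acceleration declined; hand the socket over to the kernel. */
    rc = ci_sys_socket(domain, type, protocol);
  return rc;  /* accelerated fd, kernel fd, or -1 with errno set */
}
#endif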