void __citp_fdtable_reserve(int fd, int protect)
{
  /* Must be holding the lock. */
  CITP_FDTABLE_ASSERT_LOCKED(1);
  ci_assert_lt((unsigned) fd, citp_fdtable.size);

  if( protect )
    citp_fdtable_new_fd_set(fd, fdip_reserved, 1);
  else
    fdtable_swap(fd, fdip_reserved, fdip_unknown, 1);
}
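#if 0  /* Illustrative sketch, not compiled. */
/* A minimal example of how a caller might use __citp_fdtable_reserve():
 * protect a private fd while it is in use, then return the entry to
 * "unknown" before closing it, as citp_epoll_create() below does for
 * ep->epfd_os.  my_open_private_fd() is a hypothetical helper, not part
 * of this file. */
static void reserve_example(void)
{
  int fd = my_open_private_fd();          /* hypothetical */
  if( fd < 0 )
    return;
  CITP_FDTABLE_LOCK();
  __citp_fdtable_reserve(fd, 1);          /* protect: entry -> reserved */
  CITP_FDTABLE_UNLOCK();
  /* ... use fd; the fdtable will neither probe it nor hand it out ... */
  CITP_FDTABLE_LOCK();
  __citp_fdtable_reserve(fd, 0);          /* unprotect: entry -> unknown */
  CITP_FDTABLE_UNLOCK();
  ci_sys_close(fd);
}
#endif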
/*
** Why do these live here?  Because they need to hack into the low-level
** dirty nastiness of the fdtable.
*/

int citp_ep_dup(unsigned oldfd, int (*syscall)(int oldfd, long arg),
                long arg)
{
  /* This implements dup(oldfd) and fcntl(oldfd, F_DUPFD, arg). */
  volatile citp_fdinfo_p* p_oldfdip;
  citp_fdinfo_p oldfdip;
  citp_fdinfo* newfdi = 0;
  citp_fdinfo* oldfdi;
  int newfd;

  Log_V(log("%s(%d)", __FUNCTION__, oldfd));

  if(CI_UNLIKELY( citp.init_level < CITP_INIT_FDTABLE ||
                  oo_per_thread_get()->in_vfork_child ))
    /* Lib not initialised, so no U/L state, and therefore system dup()
    ** will do just fine. */
    return syscall(oldfd, arg);

  if( oldfd >= citp_fdtable.inited_count ) {
    /* NB. We can't just pass through in this case because we need to worry
    ** about other threads racing with us.  So we need to be able to lock
    ** this fd while we do the dup. */
    ci_assert(oldfd < citp_fdtable.size);
    CITP_FDTABLE_LOCK();
    __citp_fdtable_extend(oldfd);
    CITP_FDTABLE_UNLOCK();
  }

  p_oldfdip = &citp_fdtable.table[oldfd].fdip;
 again:
  oldfdip = *p_oldfdip;
  if( fdip_is_busy(oldfdip) )
    oldfdip = citp_fdtable_busy_wait(oldfd, 0);
  if( fdip_is_closing(oldfdip) | fdip_is_reserved(oldfdip) ) {
    errno = EBADF;
    return -1;
  }
#if CI_CFG_FD_CACHING
  /* Need to check in case this sucker's cached */
  if( fdip_is_unknown(oldfdip) ) {
    CITP_FDTABLE_LOCK();
    oldfdi = citp_fdtable_probe_locked(oldfd, CI_FALSE, CI_FALSE);
    CITP_FDTABLE_UNLOCK();
    if( oldfdi == &citp_the_closed_fd ) {
      citp_fdinfo_release_ref(oldfdi, CI_TRUE);
      errno = EBADF;
      return -1;
    }
    if( oldfdi )
      citp_fdinfo_release_ref(oldfdi, CI_TRUE);
  }
#endif
  if( fdip_cas_fail(p_oldfdip, oldfdip, fdip_busy) )
    goto again;

#if CI_CFG_FD_CACHING
  /* May end up with multiple refs to this, don't allow it to be cached. */
  if( fdip_is_normal(oldfdip) )
    fdip_to_fdi(oldfdip)->can_cache = 0;
#endif

  if( fdip_is_normal(oldfdip) &&
      (((oldfdi = fdip_to_fdi(oldfdip))->protocol->type) == CITP_EPOLL_FD) ) {
    newfdi = citp_fdinfo_get_ops(oldfdi)->dup(oldfdi);
    if( ! newfdi ) {
      citp_fdtable_busy_clear(oldfd, oldfdip, 0);
      errno = ENOMEM;
      return -1;
    }

    if( fdtable_strict() )  CITP_FDTABLE_LOCK();
    newfd = syscall(oldfd, arg);
    if( newfd >= 0 )
      citp_fdtable_new_fd_set(newfd, fdip_busy, fdtable_strict());
    if( fdtable_strict() )  CITP_FDTABLE_UNLOCK();
    if( newfd >= 0 ) {
      citp_fdtable_insert(newfdi, newfd, 0);
      newfdi = 0;
    }
  }
  else {
    if( fdtable_strict() )  CITP_FDTABLE_LOCK();
    newfd = syscall(oldfd, arg);
    if( newfd >= 0 && newfd < citp_fdtable.inited_count ) {
      /* Mark newfd as unknown.  When used, it'll get probed.
       *
       * We are not just being lazy here: Setting to unknown rather than
       * installing a proper fdi (when oldfd is accelerated) is essential to
       * vfork()+dup()+exec() working properly.  Reason is that child and
       * parent share address space, so child is modifying the parent's
       * fdtable.  Setting an entry to unknown is safe.
       */
      citp_fdtable_new_fd_set(newfd, fdip_unknown, fdtable_strict());
    }
    if( fdtable_strict() )  CITP_FDTABLE_UNLOCK();
  }

  citp_fdtable_busy_clear(oldfd, oldfdip, 0);
  if( newfdi )
    citp_fdinfo_free(newfdi);
  return newfd;
}
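#if 0  /* Illustrative sketch, not compiled. */
/* How interposed dup() and fcntl(F_DUPFD) entry points might delegate to
 * citp_ep_dup().  ci_sys_fcntl() appears elsewhere in this file;
 * ci_sys_dup() is assumed to exist by analogy with the other ci_sys_*()
 * passthroughs and is not defined here. */
static int dup_via_sys_dup(int oldfd, long arg)
{
  (void) arg;                   /* plain dup() takes no extra argument */
  return ci_sys_dup(oldfd);     /* assumed passthrough */
}

static int dup_via_sys_fcntl(int oldfd, long arg)
{
  return ci_sys_fcntl(oldfd, F_DUPFD, arg);
}

int onload_dup_example(int oldfd)
{
  return citp_ep_dup(oldfd, dup_via_sys_dup, 0);
}

int onload_fcntl_dupfd_example(int oldfd, long minfd)
{
  return citp_ep_dup(oldfd, dup_via_sys_fcntl, minfd);
}
#endif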
/* we don't register protocol impl */
int citp_pipe_create(int fds[2], int flags)
{
  citp_pipe_fdi* epi_read;
  citp_pipe_fdi* epi_write;
  struct oo_pipe* p = NULL;     /* make compiler happy */
  ci_netif* ni;
  int rc = -1;
  ef_driver_handle fd = -1;

  Log_V(log(LPF "pipe()"));

  /* citp_netif_exists() does not need citp_ul_lock here */
  if( CITP_OPTS.ul_pipe == CI_UNIX_PIPE_ACCELERATE_IF_NETIF &&
      ! citp_netif_exists() ) {
    return CITP_NOT_HANDLED;
  }

  rc = citp_netif_alloc_and_init(&fd, &ni);
  if( rc != 0 ) {
    if( rc == CI_SOCKET_HANDOVER ) {
      /* This implies EF_DONT_ACCELERATE is set, so we handover
       * regardless of CITP_OPTS.no_fail */
      return CITP_NOT_HANDLED;
    }
    /* may be lib mismatch - errno will be ELIBACC */
    goto fail1;
  }

  rc = -1;
  CI_MAGIC_CHECK(ni, NETIF_MAGIC);

  /* add another reference as we have 2 fdis */
  citp_netif_add_ref(ni);

  epi_read = citp_pipe_epi_alloc(ni, O_RDONLY);
  if( epi_read == NULL )
    goto fail2;
  epi_write = citp_pipe_epi_alloc(ni, O_WRONLY);
  if( epi_write == NULL )
    goto fail3;

  /* oo_pipe init code */
  if( fdtable_strict() )  CITP_FDTABLE_LOCK();
  rc = oo_pipe_ctor(ni, &p, fds, flags);
  if( rc < 0 )
    goto fail4;
  citp_fdtable_new_fd_set(fds[0], fdip_busy, fdtable_strict());
  citp_fdtable_new_fd_set(fds[1], fdip_busy, fdtable_strict());
  if( fdtable_strict() )  CITP_FDTABLE_UNLOCK();

  LOG_PIPE("%s: pipe=%p id=%d", __FUNCTION__, p, p->b.bufid);

  /* Now the pipe exists, attach it to both end-points. */
  epi_read->pipe = p;
  epi_write->pipe = p;

  /* We're ready.  Unleash us onto the world! */
  ci_assert(epi_read->pipe->b.sb_aflags & CI_SB_AFLAG_NOT_READY);
  ci_assert(epi_write->pipe->b.sb_aflags & CI_SB_AFLAG_NOT_READY);
  ci_atomic32_and(&epi_read->pipe->b.sb_aflags, ~CI_SB_AFLAG_NOT_READY);
  ci_atomic32_and(&epi_write->pipe->b.sb_aflags, ~CI_SB_AFLAG_NOT_READY);
  citp_fdtable_insert(&epi_read->fdinfo, fds[0], 0);
  citp_fdtable_insert(&epi_write->fdinfo, fds[1], 0);

  CI_MAGIC_CHECK(ni, NETIF_MAGIC);

  return 0;

 fail4:
  if( fdtable_strict() )  CITP_FDTABLE_UNLOCK();
 fail3:
  CI_FREE_OBJ(epi_write);
 fail2:
  CI_FREE_OBJ(epi_read);
  /* drop both netif references taken above */
  citp_netif_release_ref(ni, 0);
  citp_netif_release_ref(ni, 0);
 fail1:
  if( CITP_OPTS.no_fail && errno != ELIBACC ) {
    Log_U(ci_log("%s: failed (errno:%d) - PASSING TO OS", __FUNCTION__,
                 errno));
    return CITP_NOT_HANDLED;
  }
  return rc;
}
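#if 0  /* Illustrative sketch, not compiled. */
/* How an interposed pipe() might call citp_pipe_create() and fall back
 * to the kernel when the call is not handled (no netif yet, or
 * EF_DONT_ACCELERATE set).  ci_sys_pipe() is an assumed passthrough,
 * named by analogy with ci_sys_open()/ci_sys_close() used above. */
int onload_pipe_example(int fds[2], int flags)
{
  int rc = citp_pipe_create(fds, flags);
  if( rc == CITP_NOT_HANDLED )
    rc = ci_sys_pipe(fds);      /* assumed passthrough; ignores flags */
  return rc;
}
#endif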
int citp_epoll_create(int size, int flags)
{
  citp_fdinfo* fdi;
  citp_epoll_fdi* epi;
  struct citp_epoll_fd* ep;
  int fd;

  if( (epi = CI_ALLOC_OBJ(citp_epoll_fdi)) == NULL )
    goto fail0;
  if( (ep = CI_ALLOC_OBJ(struct citp_epoll_fd)) == NULL )
    goto fail1;
  fdi = &epi->fdinfo;
  citp_fdinfo_init(fdi, &citp_epoll_protocol_impl);

  /* Create the epoll fd. */
  CITP_FDTABLE_LOCK();
  if( (fd = ci_sys_epoll_create_compat(size, flags, 0)) < 0 )
    goto fail2;
  citp_fdtable_new_fd_set(fd, fdip_busy, TRUE);

  /* Init epfd_os */
#ifdef O_CLOEXEC
  ep->epfd_os = ci_sys_open(OO_EPOLL_DEV, O_RDWR | O_CLOEXEC);
#else
  ep->epfd_os = ci_sys_open(OO_EPOLL_DEV, O_RDWR);
  if( ep->epfd_os >= 0 )
    ci_sys_fcntl(ep->epfd_os, F_SETFD, FD_CLOEXEC);
#endif
  if( ep->epfd_os < 0 ) {
    Log_E(ci_log("%s: ERROR: failed to open(%s) errno=%d",
                 __FUNCTION__, OO_EPOLL_DEV, errno));
    goto fail3;
  }
  __citp_fdtable_reserve(ep->epfd_os, 1);
  ep->shared = mmap(NULL, sizeof(*ep->shared), PROT_READ, MAP_SHARED,
                    ep->epfd_os, 0);
  if( ep->shared == MAP_FAILED ) {
    Log_E(ci_log("%s: ERROR: failed to mmap shared segment errno=%d",
                 __FUNCTION__, errno));
    goto fail4;
  }
  __citp_fdtable_reserve(ep->shared->epfd, 1);
  CITP_FDTABLE_UNLOCK();

  epi->epoll = ep;
  ep->size = size;
  oo_wqlock_init(&ep->lock);
  ep->not_mt_safe = ! CITP_OPTS.ul_epoll_mt_safe;
  ci_dllist_init(&ep->oo_sockets);
  ep->oo_sockets_n = 0;
  ci_dllist_init(&ep->dead_sockets);
  oo_atomic_set(&ep->refcount, 1);
  ep->epfd_syncs_needed = 0;
  ep->blocking = 0;
  citp_fdtable_insert(fdi, fd, 0);
  Log_POLL(ci_log("%s: fd=%d driver_fd=%d epfd=%d", __FUNCTION__,
                  fd, ep->epfd_os, (int) ep->shared->epfd));
  return fd;

 fail4:
  __citp_fdtable_reserve(ep->epfd_os, 0);
  ci_sys_close(ep->epfd_os);
 fail3:
  ci_sys_close(fd);
  citp_fdtable_busy_clear(fd, fdip_unknown, 1);
 fail2:
  CITP_FDTABLE_UNLOCK();
  CI_FREE_OBJ(ep);
 fail1:
  CI_FREE_OBJ(epi);
 fail0:
  return -1;
}
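#if 0  /* Illustrative sketch, not compiled. */
/* How an interposed epoll_create() might try the accelerated path first
 * and fall back to the kernel on failure.  ci_sys_epoll_create() is an
 * assumed passthrough, named by analogy with ci_sys_epoll_create_compat()
 * used above. */
int onload_epoll_create_example(int size)
{
  int fd = citp_epoll_create(size, 0);
  if( fd < 0 )
    fd = ci_sys_epoll_create(size);   /* assumed passthrough */
  return fd;
}
#endif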
static int citp_udp_socket(int domain, int type, int protocol)
{
  citp_fdinfo* fdi;
  citp_sock_fdi* epi;
  ef_driver_handle fd;
  int rc;
  ci_netif* ni;

  Log_V(log(LPF "socket(%d, %d, %d)", domain, type, protocol));

  epi = CI_ALLOC_OBJ(citp_sock_fdi);
  if( ! epi ) {
    Log_U(ci_log(LPF "socket: failed to allocate epi"));
    errno = ENOMEM;
    goto fail1;
  }
  fdi = &epi->fdinfo;
  citp_fdinfo_init(fdi, &citp_udp_protocol_impl);

  rc = citp_netif_alloc_and_init(&fd, &ni);
  if( rc != 0 ) {
    if( rc == CI_SOCKET_HANDOVER ) {
      /* This implies EF_DONT_ACCELERATE is set, so we handover
       * regardless of CITP_OPTS.no_fail */
      CI_FREE_OBJ(epi);
      return rc;
    }
    goto fail2;
  }

  /* Protect the fdtable entry until we're done initialising. */
  if( fdtable_strict() )  CITP_FDTABLE_LOCK();

  if( (fd = ci_udp_ep_ctor(&epi->sock, ni, domain, type)) < 0 ) {
    /*! ?? \TODO unpick the ci_udp_ep_ctor according to how failed */
    Log_U(ci_log(LPF "socket: udp_ep_ctor failed"));
    errno = -fd;
    goto fail3;
  }

  citp_fdtable_new_fd_set(fd, fdip_busy, fdtable_strict());
  if( fdtable_strict() )  CITP_FDTABLE_UNLOCK();

  CI_DEBUG(epi->sock.s->pid = getpid());

  /* We're ready.  Unleash us onto the world! */
  ci_assert(epi->sock.s->b.sb_aflags & CI_SB_AFLAG_NOT_READY);
  ci_atomic32_and(&epi->sock.s->b.sb_aflags, ~CI_SB_AFLAG_NOT_READY);
  citp_fdtable_insert(fdi, fd, 0);

  Log_VSS(log(LPF "socket(%d, %d, %d) = "EF_FMT, domain, type,
              protocol, EF_PRI_ARGS(epi,fd)));
  return fd;

 fail3:
  if( CITP_OPTS.no_fail && errno != ELIBACC )
    CITP_STATS_NETIF(++ni->state->stats.udp_handover_socket);
  citp_netif_release_ref(ni, 0);
 fail2:
  CI_FREE_OBJ(epi);
 fail1:
  /* BUG1408: Graceful failure.  We'll only fail outright if there's a
   * driver/library mismatch */
  if( CITP_OPTS.no_fail && errno != ELIBACC ) {
    Log_U(ci_log("%s: failed (errno:%d) - PASSING TO OS", __FUNCTION__,
                 errno));
    return CI_SOCKET_HANDOVER;
  }
  return -1;
}
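#if 0  /* Illustrative sketch, not compiled. */
/* How a socket() interposer might consume citp_udp_socket()'s return
 * values: a CI_SOCKET_HANDOVER result means "create this socket via the
 * OS instead".  ci_sys_socket() is an assumed passthrough, following the
 * ci_sys_*() naming used elsewhere in this file. */
int socket_udp_example(int domain, int type, int protocol)
{
  int rc = citp_udp_socket(domain, type, protocol);
  if( rc == CI_SOCKET_HANDOVER )
    rc = ci_sys_socket(domain, type, protocol);   /* assumed */
  return rc;
}
#endif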