/* Destructor for a user-level pipe fdinfo.
 *
 * Drops the reference this pipe endpoint holds on its netif.  [fdt_locked]
 * is non-zero when the caller already holds the fd-table lock and is passed
 * straight through to citp_netif_release_ref().
 */
static void citp_pipe_dtor(citp_fdinfo* fdinfo, int fdt_locked)
{
  citp_pipe_fdi* pipe_fdi = fdi_to_pipe_fdi(fdinfo);

  LOG_PIPE("%s: fdinfo=%p epi=%p", __FUNCTION__, fdinfo, pipe_fdi);
  citp_netif_release_ref(pipe_fdi->ni, fdt_locked);
  LOG_PIPE("%s: done", __FUNCTION__);
}
/* Destructor for a pass-through (alien) fdinfo.
 *
 * Closes the underlying OS socket without trampolining, returns the
 * reserved fd slot to the fd-table, and drops the netif reference held by
 * this endpoint.
 *
 * [fdt_locked] is non-zero when the caller already holds the fd-table lock;
 * it is forwarded to citp_netif_release_ref().  Note we still take
 * CITP_FDTABLE_LOCK() here for the close/unreserve pair, matching the
 * original behaviour.
 */
static void citp_passthrough_dtor(citp_fdinfo* fdi, int fdt_locked)
{
  citp_alien_fdi* epi = fdi_to_alien_fdi(fdi);

  CITP_FDTABLE_LOCK();
  ci_tcp_helper_close_no_trampoline(epi->os_socket);
  __citp_fdtable_reserve(epi->os_socket, 0);
  CITP_FDTABLE_UNLOCK();
  /* Fix: reuse the conversion done above instead of redundantly calling
   * fdi_to_alien_fdi(fdi) a second time. */
  citp_netif_release_ref(epi->netif, fdt_locked);
}
/* Restore user-level state for [fd], which the driver reports (via [info])
 * as an Onload endpoint that this process has not yet probed -- e.g. after
 * exec(), or when the fd was created elsewhere.
 *
 * Returns an fdip for a new fdinfo (with a reference taken for the caller),
 * fdip_passthru if the fd has been handed back to the OS, or fdip_unknown
 * on failure.
 *
 * Must be called with the fd-table writer lock held.
 */
static citp_fdinfo_p
citp_fdtable_probe_restore(int fd, ci_ep_info_t * info, int print_banner)
{
  citp_protocol_impl* proto = 0;
  citp_fdinfo* fdi = 0;
  ci_netif* ni;
  int rc;
  int c_sock_fdi = 1;   /* true for the common socket-fdinfo cases */

  /* Must be holding the FD table writer lock */
  CITP_FDTABLE_ASSERT_LOCKED(1);
  ci_assert_nequal(info->resource_id, CI_ID_POOL_ID_NONE);

  /* Will need to review this function if the following assert fires */
  switch( info->fd_type ) {
  case CI_PRIV_TYPE_TCP_EP:
    proto = &citp_tcp_protocol_impl;
    break;
  case CI_PRIV_TYPE_UDP_EP:
    proto = &citp_udp_protocol_impl;
    break;
  case CI_PRIV_TYPE_PASSTHROUGH_EP:
    proto = &citp_passthrough_protocol_impl;
    c_sock_fdi = 0;
    break;
  case CI_PRIV_TYPE_ALIEN_EP:
    /* proto is chosen later, from the state of the moved-to socket. */
    proto = NULL;
    c_sock_fdi = 0;
    break;
#if CI_CFG_USERSPACE_PIPE
  case CI_PRIV_TYPE_PIPE_READER:
    proto = &citp_pipe_read_protocol_impl;
    c_sock_fdi = 0;
    break;
  case CI_PRIV_TYPE_PIPE_WRITER:
    proto = &citp_pipe_write_protocol_impl;
    c_sock_fdi = 0;
    break;
#endif
  default:
    ci_assert(0);
  }

  /* Attempt to find the user-level netif for this endpoint */
  ni = citp_find_ul_netif(info->resource_id, 1);
  if( ! ni ) {
    ef_driver_handle netif_fd;

    /* Not found, rebuild/restore the netif for this endpoint */
    rc = citp_netif_recreate_probed(fd, &netif_fd, &ni);
    if ( rc < 0 ) {
      Log_E(log("%s: citp_netif_recreate_probed failed! (%d)",
                __FUNCTION__, rc));
      goto fail;
    }
    if( print_banner ) {
      ci_log("Importing "ONLOAD_PRODUCT" "ONLOAD_VERSION" "ONLOAD_COPYRIGHT
             " [%s]", ni->state->pretty_name);
    }
  }
  else
    citp_netif_add_ref(ni);

  /* There is a race condition where the fd can have been created, but it has
   * not yet been initialised, as we can't put a busy marker in the right place
   * in the fdtable until we know what the fd is. In this case we don't want
   * to probe this new info, so return the closed fd. */
  if( SP_TO_WAITABLE(ni, info->sock_id)->sb_aflags & CI_SB_AFLAG_NOT_READY ) {
    citp_fdtable_busy_clear(fd, fdip_unknown, 1);
    fdi = &citp_the_closed_fd;
    citp_fdinfo_ref(fdi);
    return fdi_to_fdip(fdi);
  }

  if (c_sock_fdi) {
    /* Common case: TCP/UDP socket in this stack. */
    citp_sock_fdi* sock_fdi;

    sock_fdi = CI_ALLOC_OBJ(citp_sock_fdi);
    if( ! sock_fdi ) {
      Log_E(log("%s: out of memory (sock_fdi)", __FUNCTION__));
      goto fail;
    }
    fdi = &sock_fdi->fdinfo;
    sock_fdi->sock.s = SP_TO_SOCK_CMN(ni, info->sock_id);
    sock_fdi->sock.netif = ni;
  }
  else if( info->fd_type == CI_PRIV_TYPE_PASSTHROUGH_EP ) {
    citp_waitable* w = SP_TO_WAITABLE(ni, info->sock_id);
    citp_alien_fdi* alien_fdi;

    /* If possible, swap the kernel file under this fd and hand the fd back
     * to the OS instead of building an alien fdinfo. */
    if( ~w->sb_aflags & CI_SB_AFLAG_MOVED_AWAY_IN_EPOLL &&
        fdtable_fd_move(fd, OO_IOC_FILE_MOVED) == 0 ) {
      citp_netif_release_ref(ni, 1);
      return fdip_passthru;
    }
    alien_fdi = CI_ALLOC_OBJ(citp_alien_fdi);
    if( ! alien_fdi ) {
      Log_E(log("%s: out of memory (alien_fdi)", __FUNCTION__));
      goto fail;
    }
    fdi = &alien_fdi->fdinfo;
    alien_fdi->netif = ni;
    alien_fdi->ep = SP_TO_WAITABLE(ni, info->sock_id);
    citp_passthrough_init(alien_fdi);
  }
  else if( info->fd_type == CI_PRIV_TYPE_ALIEN_EP ) {
    /* Socket has been moved to another stack: resolve the destination
     * stack/socket and build an fdinfo referring to it. */
    citp_waitable* w = SP_TO_WAITABLE(ni, info->sock_id);
    citp_sock_fdi* sock_fdi;
    ci_netif* alien_ni;

    sock_fdi = CI_ALLOC_OBJ(citp_sock_fdi);
    if( ! sock_fdi ) {
      Log_E(log("%s: out of memory (alien sock_fdi)", __FUNCTION__));
      goto fail;
    }
    rc = citp_netif_by_id(w->moved_to_stack_id, &alien_ni, 1);
    if( rc != 0 ) {
      /* Fix: free the fdinfo we just allocated -- the original code leaked
       * sock_fdi on this path. */
      CI_FREE_OBJ(sock_fdi);
      goto fail;
    }
    fdi = &sock_fdi->fdinfo;
    sock_fdi->sock.s = SP_TO_SOCK_CMN(alien_ni, w->moved_to_sock_id);
    sock_fdi->sock.netif = alien_ni;
    /* We now hold a ref on the alien stack; drop the ref on the old one. */
    citp_netif_release_ref(ni, 1);

    /* Replace the file under this fd if possible */
    if( ~w->sb_aflags & CI_SB_AFLAG_MOVED_AWAY_IN_EPOLL )
      fdtable_fd_move(fd, OO_IOC_FILE_MOVED);

    if( sock_fdi->sock.s->b.state & CI_TCP_STATE_TCP )
      proto = &citp_tcp_protocol_impl;
    else if( sock_fdi->sock.s->b.state == CI_TCP_STATE_UDP )
      proto = &citp_udp_protocol_impl;
    else {
      CI_TEST(0);
    }
  }
#if CI_CFG_USERSPACE_PIPE
  else {
    /* Remaining cases are the pipe reader/writer endpoint types. */
    citp_pipe_fdi* pipe_fdi;

    pipe_fdi = CI_ALLOC_OBJ(citp_pipe_fdi);
    if( ! pipe_fdi ) {
      Log_E(log("%s: out of memory (pipe_fdi)", __FUNCTION__));
      goto fail;
    }
    fdi = &pipe_fdi->fdinfo;
    pipe_fdi->pipe = SP_TO_PIPE(ni, info->sock_id);
    pipe_fdi->ni = ni;
  }
#endif

  citp_fdinfo_init(fdi, proto);

  /* We're returning a reference to the caller. */
  citp_fdinfo_ref(fdi);
  citp_fdtable_insert(fdi, fd, 1);
  return fdi_to_fdip(fdi);

 fail:
  if( ni )
    citp_netif_release_ref(ni, 1);
  return fdip_unknown;
}
/* we don't register protocol impl */ int citp_pipe_create(int fds[2], int flags) { citp_pipe_fdi* epi_read; citp_pipe_fdi* epi_write; struct oo_pipe* p = NULL; /* make compiler happy */ ci_netif* ni; int rc = -1; ef_driver_handle fd = -1; Log_V(log(LPF "pipe()")); /* citp_netif_exists() does not need citp_ul_lock here */ if( CITP_OPTS.ul_pipe == CI_UNIX_PIPE_ACCELERATE_IF_NETIF && ! citp_netif_exists() ) { return CITP_NOT_HANDLED; } rc = citp_netif_alloc_and_init(&fd, &ni); if( rc != 0 ) { if( rc == CI_SOCKET_HANDOVER ) { /* This implies EF_DONT_ACCELERATE is set, so we handover * regardless of CITP_OPTS.no_fail */ return CITP_NOT_HANDLED; } /* may be lib mismatch - errno will be ELIBACC */ goto fail1; } rc = -1; CI_MAGIC_CHECK(ni, NETIF_MAGIC); /* add another reference as we have 2 fdis */ citp_netif_add_ref(ni); epi_read = citp_pipe_epi_alloc(ni, O_RDONLY); if( epi_read == NULL ) goto fail2; epi_write = citp_pipe_epi_alloc(ni, O_WRONLY); if( epi_write == NULL ) goto fail3; /* oo_pipe init code */ if( fdtable_strict() ) CITP_FDTABLE_LOCK(); rc = oo_pipe_ctor(ni, &p, fds, flags); if( rc < 0 ) goto fail4; citp_fdtable_new_fd_set(fds[0], fdip_busy, fdtable_strict()); citp_fdtable_new_fd_set(fds[1], fdip_busy, fdtable_strict()); if( fdtable_strict() ) CITP_FDTABLE_UNLOCK(); LOG_PIPE("%s: pipe=%p id=%d", __FUNCTION__, p, p->b.bufid); /* as pipe is created it should be attached to the end-points */ epi_read->pipe = p; epi_write->pipe = p; /* We're ready. Unleash us onto the world! 
*/ ci_assert(epi_read->pipe->b.sb_aflags & CI_SB_AFLAG_NOT_READY); ci_assert(epi_write->pipe->b.sb_aflags & CI_SB_AFLAG_NOT_READY); ci_atomic32_and(&epi_read->pipe->b.sb_aflags, ~CI_SB_AFLAG_NOT_READY); ci_atomic32_and(&epi_read->pipe->b.sb_aflags, ~CI_SB_AFLAG_NOT_READY); citp_fdtable_insert(&epi_read->fdinfo, fds[0], 0); citp_fdtable_insert(&epi_write->fdinfo, fds[1], 0); CI_MAGIC_CHECK(ni, NETIF_MAGIC); return 0; fail4: if( fdtable_strict() ) CITP_FDTABLE_UNLOCK(); fail3: CI_FREE_OBJ(epi_write); fail2: CI_FREE_OBJ(epi_read); citp_netif_release_ref(ni, 0); citp_netif_release_ref(ni, 0); fail1: if( CITP_OPTS.no_fail && errno != ELIBACC ) { Log_U(ci_log("%s: failed (errno:%d) - PASSING TO OS", __FUNCTION__, errno)); return CITP_NOT_HANDLED; } return rc; }
static int citp_udp_socket(int domain, int type, int protocol) { citp_fdinfo* fdi; citp_sock_fdi* epi; ef_driver_handle fd; int rc; ci_netif* ni; Log_V(log(LPF "socket(%d, %d, %d)", domain, type, protocol)); epi = CI_ALLOC_OBJ(citp_sock_fdi); if( ! epi ) { Log_U(ci_log(LPF "socket: failed to allocate epi")); errno = ENOMEM; goto fail1; } fdi = &epi->fdinfo; citp_fdinfo_init(fdi, &citp_udp_protocol_impl); rc = citp_netif_alloc_and_init(&fd, &ni); if( rc != 0 ) { if( rc == CI_SOCKET_HANDOVER ) { /* This implies EF_DONT_ACCELERATE is set, so we handover * regardless of CITP_OPTS.no_fail */ CI_FREE_OBJ(epi); return rc; } goto fail2; } /* Protect the fdtable entry until we're done initialising. */ if( fdtable_strict() ) CITP_FDTABLE_LOCK(); if((fd = ci_udp_ep_ctor(&epi->sock, ni, domain, type)) < 0) { /*! ?? \TODO unpick the ci_udp_ep_ctor according to how failed */ Log_U(ci_log(LPF "socket: udp_ep_ctor failed")); errno = -fd; goto fail3; } citp_fdtable_new_fd_set(fd, fdip_busy, fdtable_strict()); if( fdtable_strict() ) CITP_FDTABLE_UNLOCK(); CI_DEBUG(epi->sock.s->pid = getpid()); /* We're ready. Unleash us onto the world! */ ci_assert(epi->sock.s->b.sb_aflags & CI_SB_AFLAG_NOT_READY); ci_atomic32_and(&epi->sock.s->b.sb_aflags, ~CI_SB_AFLAG_NOT_READY); citp_fdtable_insert(fdi, fd, 0); Log_VSS(log(LPF "socket(%d, %d, %d) = "EF_FMT, domain, type, protocol, EF_PRI_ARGS(epi,fd))); return fd; fail3: if( CITP_OPTS.no_fail && errno != ELIBACC ) CITP_STATS_NETIF(++ni->state->stats.udp_handover_socket); citp_netif_release_ref(ni, 0); fail2: CI_FREE_OBJ(epi); fail1: /* BUG1408: Graceful failure. We'll only fail outright if there's a * driver/library mismatch */ if( CITP_OPTS.no_fail && errno != ELIBACC ) { Log_U(ci_log("%s: failed (errno:%d) - PASSING TO OS", __FUNCTION__, errno)); return CI_SOCKET_HANDOVER; } return -1; }
/* Destructor for a UDP socket fdinfo: drops the reference this endpoint
 * holds on its netif.  [fdt_locked] tells citp_netif_release_ref() whether
 * the caller already holds the fd-table lock. */
static void citp_udp_dtor(citp_fdinfo* fdinfo, int fdt_locked)
{
  citp_socket* sock = fdi_to_socket(fdinfo);

  citp_netif_release_ref(sock->netif, fdt_locked);
}