/* Initialise a freshly-allocated waitable [w] within netif [ni], giving it
 * buffer id [id].  Sets up its shared-state list links (self-linked, i.e.
 * not on any list), clears its lock, and leaves it in the ORPHAN|NOT_READY
 * state before handing off to citp_waitable_reinit().
 */
void citp_waitable_init(ci_netif* ni, citp_waitable* w, int id)
{
  /* NB. Some members initialised in citp_waitable_obj_free(). */
  oo_p sp;

  /* Record the buffer id; representation differs depending on whether
   * socket pointers are real pointers or encoded integers. */
#if CI_CFG_SOCKP_IS_PTR
  w->bufid = id;
#else
  w->bufid = OO_SP_FROM_INT(ni, id);
#endif
  w->sb_flags = 0;
  /* Start orphaned (no fd attached) and not yet ready for use. */
  w->sb_aflags = CI_SB_AFLAG_ORPHAN | CI_SB_AFLAG_NOT_READY;

  /* Compute the shared-state offset of post_poll_link and initialise it
   * self-linked (i.e. not a member of any list). */
  sp = oo_sockp_to_statep(ni, W_SP(w));
  OO_P_ADD(sp, CI_MEMBER_OFFSET(citp_waitable, post_poll_link));
  ci_ni_dllist_link_init(ni, &w->post_poll_link, sp, "ppll");
  ci_ni_dllist_self_link(ni, &w->post_poll_link);

  /* Likewise for ready_link. */
  sp = oo_sockp_to_statep(ni, W_SP(w));
  OO_P_ADD(sp, CI_MEMBER_OFFSET(citp_waitable, ready_link));
  ci_ni_dllist_link_init(ni, &w->ready_link, sp, "rll");
  ci_ni_dllist_self_link(ni, &w->ready_link);

  w->lock.wl_val = 0;
  /* Free-list links are poisoned in debug builds only; they are set for
   * real when the waitable is freed. */
  CI_DEBUG(w->wt_next = OO_SP_NULL);
  CI_DEBUG(w->next_id = CI_ILL_END);

  citp_waitable_reinit(ni, w);
}
/* Allocate a waitable object from [netif]'s free list.  Caller must hold
 * the netif lock.  First drains the lock-free deferred-free list (populated
 * by citp_waitable_obj_free_nnl()) back onto the free list; if the free
 * list is still empty, tries to provision more socket buffers and then to
 * reap timed-out endpoints.  Returns NULL if no buffer can be obtained.
 */
citp_waitable_obj* citp_waitable_obj_alloc(ci_netif* netif)
{
  citp_waitable_obj* wo;

  ci_assert(netif);
  ci_assert(ci_netif_is_locked(netif));

  if( netif->state->deferred_free_eps_head != CI_ILL_END ) {
    ci_uint32 link;
    /* Atomically detach the whole deferred list: retry the CAS until we
     * swap the current head for CI_ILL_END.  Concurrent pushers use the
     * same CAS discipline (see citp_waitable_obj_free_nnl()). */
    do
      link = netif->state->deferred_free_eps_head;
    while( ci_cas32_fail(&netif->state->deferred_free_eps_head,
                         link, CI_ILL_END));
    /* Walk the detached chain, moving each waitable onto the ordinary
     * free list (safe: we hold the netif lock). */
    while( link != CI_ILL_END ) {
      citp_waitable* w = ID_TO_WAITABLE(netif, link);
      link = w->next_id;
      CI_DEBUG(w->next_id = CI_ILL_END);
      ci_assert_equal(w->state, CI_TCP_STATE_FREE);
      ci_assert(OO_SP_IS_NULL(w->wt_next));
      w->wt_next = netif->state->free_eps_head;
      netif->state->free_eps_head = W_SP(w);
    }
  }

  if( OO_SP_IS_NULL(netif->state->free_eps_head) ) {
    /* Free list empty: ask the driver for more socket buffers, and if
     * that doesn't help, try reclaiming endpoints stuck in timeout
     * states. */
    ci_tcp_helper_more_socks(netif);

    if( OO_SP_IS_NULL(netif->state->free_eps_head) )
      ci_netif_timeout_reap(netif);
  }

  if( OO_SP_IS_NULL(netif->state->free_eps_head) )
    return NULL;

  LOG_TV(ci_log("%s: allocating %d", __FUNCTION__,
                OO_SP_FMT(netif->state->free_eps_head)));

  ci_assert(IS_VALID_SOCK_P(netif, netif->state->free_eps_head));
#if !defined(__KERNEL__) && !defined (CI_HAVE_OS_NOPAGE)
  /* Without nopage support, ensure the shared-memory buffer containing
   * this endpoint is actually mapped before we touch it. */
  ci_netif_mmap_shmbuf(netif,
                       (netif->state->free_eps_head >> EP_BUF_BLOCKSHIFT) + 1);
#endif
  wo = SP_TO_WAITABLE_OBJ(netif, netif->state->free_eps_head);

  /* Sanity: the buffer at the head of the free list must really be free
   * and in the state citp_waitable_init()/citp_waitable_obj_free() leave
   * it in. */
  ci_assert(OO_SP_EQ(W_SP(&wo->waitable), netif->state->free_eps_head));
  ci_assert_equal(wo->waitable.state, CI_TCP_STATE_FREE);
  ci_assert_equal(wo->waitable.sb_aflags,
                  (CI_SB_AFLAG_ORPHAN | CI_SB_AFLAG_NOT_READY));
  ci_assert_equal(wo->waitable.lock.wl_val, 0);

  /* Pop it off the free list. */
  netif->state->free_eps_head = wo->waitable.wt_next;
  CI_DEBUG(wo->waitable.wt_next = OO_SP_NULL);
  ci_assert_equal(wo->waitable.state, CI_TCP_STATE_FREE);

  return wo;
}
/* Free waitable [w] back to [ni]'s free list.  Caller must hold the netif
 * lock.  In the kernel, if the endpoint is currently queued for non-atomic
 * work we must not free it here: instead flag it NEED_FREE and let
 * tcp_helper_do_non_atomic() complete the free later.
 */
void citp_waitable_obj_free(ci_netif* ni, citp_waitable* w)
{
  ci_assert(ci_netif_is_locked(ni));

#ifdef __KERNEL__
  {
    /* Avoid racing with tcp_helper_do_non_atomic(). */
    tcp_helper_endpoint_t* ep = ci_netif_get_valid_ep(ni, w->bufid);
    unsigned ep_aflags;
  again:
    if( (ep_aflags = ep->ep_aflags) & OO_THR_EP_AFLAG_NON_ATOMIC ) {
      /* Endpoint is on the non-atomic work queue: defer the free by
       * setting NEED_FREE.  CAS retries if ep_aflags changed under us. */
      ci_assert(!(ep_aflags & OO_THR_EP_AFLAG_NEED_FREE));
      if( ci_cas32_fail(&ep->ep_aflags, ep_aflags,
                        ep_aflags | OO_THR_EP_AFLAG_NEED_FREE) )
        goto again;
      return;
    }
    /* Read barrier: order the ep_aflags read above against subsequent
     * reads of the endpoint state we are about to tear down. */
    ci_rmb();
  }
#endif

  __citp_waitable_obj_free(ni, w);
  /* Push onto the free list (netif lock held, so plain stores suffice). */
  w->wt_next = ni->state->free_eps_head;
  ni->state->free_eps_head = W_SP(w);
  /* Must be last, as may result in stack going away. */
  ci_drop_orphan(ni);
}
/* Free waitable [w] without requiring the netif lock ("nnl" = no netif
 * lock).  The object is pushed onto the lock-free deferred-free list via
 * CAS; citp_waitable_obj_alloc() (which does hold the lock) later drains
 * that list back onto the ordinary free list.
 */
void citp_waitable_obj_free_nnl(ci_netif* ni, citp_waitable* w)
{
  /* Stack lock is probably not held (but not guaranteed). */
  __citp_waitable_obj_free(ni, w);
  /* Lock-free push: link w to the current head, then CAS the head to w;
   * retry on contention with concurrent pushers/the drainer. */
  do
    w->next_id = ni->state->deferred_free_eps_head;
  while( ci_cas32_fail(&ni->state->deferred_free_eps_head,
                       w->next_id, OO_SP_TO_INT(W_SP(w))) );
  /* Must be last, as may result in stack going away. */
  ci_drop_orphan(ni);
}
/* Construct an accelerated pipe in [netif]: grab a pipe buffer, apply the
 * O_NONBLOCK flag to both ends if requested, and attach it to a pair of
 * file descriptors via the driver.  On success *out_pipe is set and the
 * attach result (>= 0) is returned; on failure returns -1 with errno set.
 * The netif lock is taken and released internally.
 */
static int oo_pipe_ctor(ci_netif* netif, struct oo_pipe** out_pipe,
                        int fds[2], int flags)
{
  struct oo_pipe* pipe_buf;
  int rc;

  ci_assert(netif);

  ci_netif_lock(netif);

  pipe_buf = oo_pipe_buf_get(netif);
  if( pipe_buf == NULL ) {
    /* No pipe buffers available in this stack. */
    rc = -1;
    errno = ENOMEM;
    goto unlock_out;
  }

  if( flags & O_NONBLOCK )
    /* Mark both the reader and writer ends non-blocking. */
    pipe_buf->aflags =
        (CI_PFD_AFLAG_NONBLOCK << CI_PFD_AFLAG_READER_SHIFT) |
        (CI_PFD_AFLAG_NONBLOCK << CI_PFD_AFLAG_WRITER_SHIFT);

  /* attach */
  rc = ci_tcp_helper_pipe_attach(ci_netif_get_driver_handle(netif),
                                 W_SP(&pipe_buf->b), flags, fds);
  if( rc < 0 ) {
    LOG_E(ci_log("%s: ci_tcp_helper_pipe_attach %d", __FUNCTION__, rc));
    /* Convert the negative-errno return into the -1/errno convention. */
    errno = -rc;
    rc = -1;
    goto unlock_out;
  }

  *out_pipe = pipe_buf;

 unlock_out:
  ci_netif_unlock(netif);
  return rc;
}
/* Cross-stack loopback connect: connect socket [c_id] in stack [c_ni] to
 * listening socket [l_id] in stack [l_ni].
 *
 * Strategy: build a temporary "shadow" listening socket in c_ni that
 * mirrors l_id, connect to it same-stack, steal the accepted endpoint,
 * then place a reference object on l_id's real accept queue so that the
 * listener's accept() finds it.  [dst] is currently unused here.
 *
 * c_ni is assumed to be locked on entrance and is always unlocked on
 * exit.  Returns 0 (or the samestack-connect result) on success, or a
 * negative errno on failure.
 */
int ci_tcp_connect_lo_toconn(ci_netif *c_ni, oo_sp c_id, ci_uint32 dst,
                             ci_netif *l_ni, oo_sp l_id)
{
  ci_tcp_state *ts;
  ci_tcp_socket_listen *tls, *alien_tls;
  citp_waitable_obj *wo;
  citp_waitable *w;
  int rc;

  ci_assert(ci_netif_is_locked(c_ni));
  ci_assert(OO_SP_NOT_NULL(c_id));
  ci_assert(OO_SP_NOT_NULL(l_id));

  LOG_TC(log("%s: connect %d:%d to %d:%d", __FUNCTION__,
             c_ni->state->stack_id, OO_SP_TO_INT(c_id),
             l_ni->state->stack_id, OO_SP_TO_INT(l_id)));

  /* Racy early check that the remote accept queue has room; re-validated
   * under l_ni's lock below. */
  alien_tls = SP_TO_TCP_LISTEN(l_ni, l_id);
  if( (int)ci_tcp_acceptq_n(alien_tls) >= alien_tls->acceptq_max ) {
    ci_netif_unlock(c_ni);
    return -EBUSY;
  }

  /* In c_ni, create shadow listening socket tls (copy l_id) */
  ts = ci_tcp_get_state_buf(c_ni);
  if( ts == NULL ) {
    ci_netif_unlock(c_ni);
    LOG_E(ci_log("%s: [%d] out of socket buffers", __FUNCTION__, NI_ID(c_ni)));
    return -ENOMEM;
  }

  /* init common tcp fields */
  ts->s.so = alien_tls->s.so;
  ts->s.cp.ip_ttl = alien_tls->s.cp.ip_ttl;
  S_TCP_HDR(&ts->s)->tcp_source_be16 =
      S_TCP_HDR(&alien_tls->s)->tcp_source_be16;
  ts->s.domain = alien_tls->s.domain;
  ts->c = alien_tls->c;
  /* Don't let the real listener's defer-accept setting delay promotion
   * on the shadow socket. */
  ts->c.tcp_defer_accept = OO_TCP_DEFER_ACCEPT_OFF;

  /* make sure nobody will ever connect to our "shadow" socket
   * except us */
  ci_bit_set(&ts->s.b.sb_aflags, CI_SB_AFLAG_ORPHAN_BIT);

  ci_tcp_set_slow_state(c_ni, ts, CI_TCP_LISTEN);
  tls = SOCK_TO_TCP_LISTEN(&ts->s);
  /* no timer: */
  tls->s.s_flags = alien_tls->s.s_flags | CI_SOCK_FLAG_BOUND_ALIEN;

  tls->acceptq_max = 1;
  rc = ci_tcp_listen_init(c_ni, tls);
  if( rc != 0 ) {
    citp_waitable_obj_free(c_ni, &tls->s.b);
    /* BUG FIX: this path previously returned with c_ni still locked,
     * violating the function's contract that c_ni is always unlocked on
     * exit (every other return path unlocks it). */
    ci_netif_unlock(c_ni);
    return rc;
  }

  /* Connect c_id to tls */
  ts = SP_TO_TCP(c_ni, c_id);
  rc = ci_tcp_connect_lo_samestack(c_ni, ts, tls->s.b.bufid);

  /* Accept as from tls */
  if( !ci_tcp_acceptq_not_empty(tls) ) {
    /* it is possible, for example, if ci_tcp_listenq_try_promote() failed
     * because there are no endpoints */
    ci_tcp_listenq_drop_all(c_ni, tls);
    citp_waitable_obj_free(c_ni, &tls->s.b);
    ci_netif_unlock(c_ni);
    return -EBUSY;
  }
  w = ci_tcp_acceptq_get(c_ni, tls);
  ci_assert(w);
  LOG_TV(ci_log("%s: %d:%d to %d:%d shadow %d:%d accepted %d:%d",
                __FUNCTION__,
                c_ni->state->stack_id, OO_SP_TO_INT(c_id),
                l_ni->state->stack_id, OO_SP_TO_INT(l_id),
                c_ni->state->stack_id, tls->s.b.bufid,
                c_ni->state->stack_id, w->bufid));
  ci_assert(w->state & CI_TCP_STATE_TCP);
  ci_assert(w->state != CI_TCP_LISTEN);

  /* Destroy tls.
   * NB: nobody could possibly connect to it, so no need to do proper
   * shutdown. */
  ci_assert_equal(ci_tcp_acceptq_n(tls), 0);
  ci_tcp_listenq_drop_all(c_ni, tls);
  citp_waitable_obj_free(c_ni, &tls->s.b);
  ci_netif_unlock(c_ni);

  /* Keep a port reference: give the accepted endpoint a reference to the
   * listener's OS socket so the local port stays reserved. */
  {
    tcp_helper_endpoint_t *l_ep, *a_ep;
    struct oo_file_ref* os_sock_ref;
    ci_irqlock_state_t lock_flags;

    l_ep = ci_trs_ep_get(netif2tcp_helper_resource(l_ni), l_id);
    a_ep = ci_trs_ep_get(netif2tcp_helper_resource(c_ni), W_SP(w));
    ci_irqlock_lock(&l_ep->thr->lock, &lock_flags);
    os_sock_ref = l_ep->os_socket;
    ci_assert_equal(a_ep->os_port_keeper, NULL);
    if( os_sock_ref != NULL ) {
      os_sock_ref = oo_file_ref_add(os_sock_ref);
      os_sock_ref = oo_file_ref_xchg(&a_ep->os_port_keeper, os_sock_ref);
      ci_irqlock_unlock(&l_ep->thr->lock, &lock_flags);
      /* Drop whatever reference the xchg displaced (normally NULL per the
       * assert above, but be safe). */
      if( os_sock_ref != NULL )
        oo_file_ref_drop(os_sock_ref);
    }
    else {
      /* Listener has no OS socket: it can't be the socket we matched on;
       * bail out. */
      ci_irqlock_unlock(&l_ep->thr->lock, &lock_flags);
      goto cleanup;
    }
  }

  /* lock l_ni: Check that l_id is the same socket it used to be */
  /* create ref-sock in l_ni, put it into acc q */
  if( ci_netif_lock(l_ni) != 0 )
    goto cleanup;
  if( alien_tls->s.b.state != CI_TCP_LISTEN ||
      (alien_tls->s.b.sb_aflags & CI_SB_AFLAG_ORPHAN) ||
      S_TCP_HDR(&alien_tls->s)->tcp_source_be16 !=
          TS_TCP(ts)->tcp_dest_be16 ||
      (alien_tls->s.pkt.ip.ip_saddr_be32 != INADDR_ANY &&
       alien_tls->s.pkt.ip.ip_saddr_be32 != ts->s.pkt.ip.ip_daddr_be32) ) {
    ci_netif_unlock(l_ni);
    goto cleanup;
  }
  /* Mark the accepted endpoint as sitting (by proxy) on an accept queue. */
  ci_bit_mask_set(&w->sb_aflags,
                  CI_SB_AFLAG_TCP_IN_ACCEPTQ | CI_SB_AFLAG_ORPHAN);
  wo = citp_waitable_obj_alloc(l_ni);
  if( wo == NULL ) {
    ci_netif_unlock(l_ni);
    goto cleanup;
  }
  /* The reference object in l_ni just points at the real endpoint, which
   * lives in c_ni. */
  wo->waitable.state = CI_TCP_CLOSED;
  wo->waitable.sb_aflags |= CI_SB_AFLAG_MOVED_AWAY;
  wo->waitable.moved_to_stack_id = c_ni->state->stack_id;
  wo->waitable.moved_to_sock_id = W_SP(w);
  LOG_TC(log("%s: put to acceptq %d:%d referencing %d:%d", __func__,
             l_ni->state->stack_id, OO_SP_TO_INT(W_SP(&wo->waitable)),
             c_ni->state->stack_id, OO_SP_TO_INT(W_SP(w))));

  ci_tcp_acceptq_put(l_ni, alien_tls, &wo->waitable);
  citp_waitable_wake_not_in_poll(l_ni, &alien_tls->s.b, CI_SB_FLAG_WAKE_RX);
  ci_netif_unlock(l_ni);

  return rc;

cleanup:
  ci_assert(w->sb_aflags & CI_SB_AFLAG_ORPHAN);
  ci_bit_mask_clear(&w->sb_aflags,
                    CI_SB_AFLAG_TCP_IN_ACCEPTQ | CI_SB_AFLAG_ORPHAN);
  efab_tcp_helper_close_endpoint(netif2tcp_helper_resource(c_ni), w->bufid);
  /* we can not guarantee c_ni lock, so we can't call
   * ci_tcp_drop(c_ni, ts).  So, we return error; UL will handover
   * and close ts endpoint. */
  return -EBUSY;
}