/*! Mark a signal as pending.
 * Should be called from signal handler only: the code is async-signal-safe
 * (no locks; per-slot CAS to claim an entry in the pending array).
 *
** \param signum   Signal number
** \param info     Saved info for sa_sigaction handler
** \param context  Saved context for sa_sigaction handler
** \param our_info Our signal info (per-thread state)
*/
ci_inline void citp_signal_set_pending(int signum, siginfo_t *info,
                                       void *context,
                                       citp_signal_info *our_info)
{
  int i;

  /* We only defer signals that arrive while the thread is inside the lib. */
  ci_assert(our_info->inside_lib);

  for( i = 0; i < OO_SIGNAL_MAX_PENDING; i++ ) {
    /* Cheap pre-check before the CAS: skip slots that look occupied. */
    if( our_info->signals[i].signum )
      continue;
    /* Claim the slot atomically; on a lost race move on to the next one. */
    if( ci_cas32_fail(&our_info->signals[i].signum, 0, signum) )
      continue;
    LOG_SIG(log("%s: signal %d pending", __FUNCTION__, signum));
    /* NOTE(review): info/context are only checked by assertions, but are
     * dereferenced/stored unconditionally below -- in NDEBUG builds a NULL
     * info would crash here.  Confirm all callers pass both; the consumer
     * (citp_signal_run_pending) treats saved_context==NULL as "no info". */
    ci_assert(info);
    ci_assert(context);
    memcpy(&our_info->signals[i].saved_info, info, sizeof(siginfo_t));
    /* saved_context is written last; the reader uses it to decide whether
     * saved_info is valid. */
    our_info->signals[i].saved_context = context;

    /* Hack: in case of SA_ONESHOT, make sure that we intercept
     * the signal.  At the end of citp_signal_run_app_handler,
     * we will reset the signal handler properly.
     * NOTE(review): this sigaction() call passes NULL for both act and
     * oldact, which is a query/no-op as written -- presumably it exists to
     * re-arm interception after the kernel's one-shot reset; confirm
     * intended arguments. */
    if( citp_signal_data[signum-1].flags & SA_ONESHOT )
      sigaction(signum, NULL, NULL);

    /* Publish "there is pending work" only after the slot is fully filled. */
    ci_atomic32_or(&our_info->aflags, OO_SIGNAL_FLAG_HAVE_PENDING);
    return;
  }

  /* All slots busy: the signal is dropped (logged, not queued). */
  log("%s: no empty slot to set pending signal %d", __FUNCTION__, signum);
}
/* Free a waitable object back onto the stack's free-endpoint list.
 *
 * The stack lock must be held.  In the kernel build, the free may have to
 * be deferred: if tcp_helper_do_non_atomic() still has non-atomic work
 * outstanding for this endpoint we must not free it underneath that work,
 * so we instead set OO_THR_EP_AFLAG_NEED_FREE and let the non-atomic
 * handler perform the free when it finishes.
 *
 * \param ni  The stack (netif); must be locked by the caller.
 * \param w   Waitable to free.
 */
void citp_waitable_obj_free(ci_netif* ni, citp_waitable* w)
{
  ci_assert(ci_netif_is_locked(ni));

#ifdef __KERNEL__
  {
    /* Avoid racing with tcp_helper_do_non_atomic(). */
    tcp_helper_endpoint_t* ep = ci_netif_get_valid_ep(ni, w->bufid);
    unsigned ep_aflags;
  again:
    if( (ep_aflags = ep->ep_aflags) & OO_THR_EP_AFLAG_NON_ATOMIC ) {
      /* NEED_FREE should not already be set: nobody else frees this ep. */
      ci_assert(!(ep_aflags & OO_THR_EP_AFLAG_NEED_FREE));
      /* Hand the free over to the non-atomic worker; retry on CAS race. */
      if( ci_cas32_fail(&ep->ep_aflags, ep_aflags,
                        ep_aflags | OO_THR_EP_AFLAG_NEED_FREE) )
        goto again;
      return;
    }
    /* Order the flag read above before touching the endpoint state below. */
    ci_rmb();
  }
#endif

  __citp_waitable_obj_free(ni, w);
  /* Push onto the free list (lock held, so plain stores are fine here). */
  w->wt_next = ni->state->free_eps_head;
  ni->state->free_eps_head = W_SP(w);
  /* Must be last, as may result in stack going away. */
  ci_drop_orphan(ni);
}
/* Allocate a waitable object from the stack's free list.
 *
 * Requires the stack lock.  Before allocating, any endpoints parked on the
 * lock-free deferred-free list are migrated onto the ordinary free list.
 * If the free list is empty we first try to grow it, then to reap
 * timed-out endpoints.
 *
 * \param netif  The stack; must be locked.
 * \return       A freshly allocated object, or NULL if none available.
 */
citp_waitable_obj* citp_waitable_obj_alloc(ci_netif* netif)
{
  citp_waitable_obj* obj;

  ci_assert(netif);
  ci_assert(ci_netif_is_locked(netif));

  if( netif->state->deferred_free_eps_head != CI_ILL_END ) {
    ci_uint32 id;

    /* Atomically detach the whole deferred-free chain. */
    for( ; ; ) {
      id = netif->state->deferred_free_eps_head;
      if( !ci_cas32_fail(&netif->state->deferred_free_eps_head,
                         id, CI_ILL_END) )
        break;
    }

    /* Walk the detached chain, pushing each entry onto the free list. */
    while( id != CI_ILL_END ) {
      citp_waitable* wt = ID_TO_WAITABLE(netif, id);
      id = wt->next_id;
      CI_DEBUG(wt->next_id = CI_ILL_END);
      ci_assert_equal(wt->state, CI_TCP_STATE_FREE);
      ci_assert(OO_SP_IS_NULL(wt->wt_next));
      wt->wt_next = netif->state->free_eps_head;
      netif->state->free_eps_head = W_SP(wt);
    }
  }

  /* Empty free list: try to obtain more sockets, then reap. */
  if( OO_SP_IS_NULL(netif->state->free_eps_head) ) {
    ci_tcp_helper_more_socks(netif);
    if( OO_SP_IS_NULL(netif->state->free_eps_head) )
      ci_netif_timeout_reap(netif);
  }
  if( OO_SP_IS_NULL(netif->state->free_eps_head) )
    return NULL;

  LOG_TV(ci_log("%s: allocating %d", __FUNCTION__,
                OO_SP_FMT(netif->state->free_eps_head)));
  ci_assert(IS_VALID_SOCK_P(netif, netif->state->free_eps_head));

#if !defined(__KERNEL__) && !defined (CI_HAVE_OS_NOPAGE)
  /* Without nopage support the buffer must be mapped before use. */
  ci_netif_mmap_shmbuf(netif,
                       (netif->state->free_eps_head >> EP_BUF_BLOCKSHIFT) + 1);
#endif

  obj = SP_TO_WAITABLE_OBJ(netif, netif->state->free_eps_head);
  ci_assert(OO_SP_EQ(W_SP(&obj->waitable), netif->state->free_eps_head));
  ci_assert_equal(obj->waitable.state, CI_TCP_STATE_FREE);
  ci_assert_equal(obj->waitable.sb_aflags,
                  (CI_SB_AFLAG_ORPHAN | CI_SB_AFLAG_NOT_READY));
  ci_assert_equal(obj->waitable.lock.wl_val, 0);

  /* Pop the head of the free list. */
  netif->state->free_eps_head = obj->waitable.wt_next;
  CI_DEBUG(obj->waitable.wt_next = OO_SP_NULL);
  ci_assert_equal(obj->waitable.state, CI_TCP_STATE_FREE);
  return obj;
}
int efrm_port_sniff(struct efrm_resource *rs, int enable, int promiscuous, int rss_context) { int rc; ci_int32 owner; struct efhw_nic *nic = rs->rs_client->nic; if( enable && !capable(CAP_NET_ADMIN) ) return -EPERM; /* Check that the current sniff owner is valid for the operation we're * doing, and mark the op as in progress. */ if( enable ) { if( ci_cas32_fail(&efrm_nic(nic)->rx_sniff_rxq, EFRM_PORT_SNIFF_NO_OWNER, EFRM_PORT_SNIFF_OP_IN_PROGRESS) ) return -EBUSY; } else { if( ci_cas32_fail(&efrm_nic(nic)->rx_sniff_rxq, rs->rs_instance, EFRM_PORT_SNIFF_OP_IN_PROGRESS) ) return -EBUSY; } EFRM_RESOURCE_ASSERT_VALID(rs, 0); rc = efhw_nic_set_port_sniff(nic, rs->rs_instance, enable, promiscuous, rss_context); if( (enable && rc == 0) || (!enable && rc != 0) ) owner = rs->rs_instance; else owner = EFRM_PORT_SNIFF_NO_OWNER; EFRM_VERIFY_EQ(ci_cas32_fail(&efrm_nic(nic)->rx_sniff_rxq, EFRM_PORT_SNIFF_OP_IN_PROGRESS, owner), 0); return rc; }
/* Free a waitable object without requiring the stack lock.
 *
 * The object is pushed onto the lock-free deferred-free list; the next
 * locked allocation (citp_waitable_obj_alloc) migrates it onto the real
 * free list.
 *
 * \param ni  The stack.
 * \param w   Waitable to free.
 */
void citp_waitable_obj_free_nnl(ci_netif* ni, citp_waitable* w)
{
  /* Stack lock is probably not held (but not guaranteed). */
  __citp_waitable_obj_free(ni, w);

  /* Lock-free push onto the deferred-free list: snapshot the head, link
   * ourselves to it, and retry until the CAS swings the head to us. */
  for( ; ; ) {
    ci_uint32 head = ni->state->deferred_free_eps_head;
    w->next_id = head;
    if( !ci_cas32_fail(&ni->state->deferred_free_eps_head, head,
                       OO_SP_TO_INT(W_SP(w))) )
      break;
  }

  /* Must be last, as may result in stack going away. */
  ci_drop_orphan(ni);
}
/* Drop the stack (ep) lock, running any deferred work flagged in the lock
 * word first, and wake waiters if requested.
 *
 * \param ni             The stack whose eplock is held by the caller.
 * \param in_dl_context  Non-zero if called from driverlink (atomic) context.
 * \return               0 on success; -EIO if the lock word is corrupt
 *                       (debug builds only).
 */
int efab_eplock_unlock_and_wake(ci_netif *ni, int in_dl_context)
{
  int l = ni->state->lock.lock;
  tcp_helper_resource_t *rs = netif2tcp_helper_resource(ni);

  /* Allocate more packets if necessary. */
  if( !in_dl_context && OO_STACK_NEEDS_MORE_PACKETS(ni) )
    efab_tcp_helper_more_bufs(rs);

  /* We use in_dl_context from now on, and we should remove
   * CI_NETIF_FLAG_IN_DL_CONTEXT under the stack lock. */
  if( in_dl_context )
    ni->flags &= ~CI_NETIF_FLAG_IN_DL_CONTEXT;

 again:

#ifndef NDEBUG
  /* Sanity: we must hold the lock, and it must not claim to be unlocked. */
  if( (~l & CI_EPLOCK_LOCKED) || (l & CI_EPLOCK_UNLOCKED) ) {
    OO_DEBUG_ERR(ci_log("efab_eplock_unlock_and_wake: corrupt"
                        " (value is %x)", (unsigned) l));
    OO_DEBUG_ERR(dump_stack());
    return -EIO;
  }
#endif

  if( l & CI_EPLOCK_CALLBACK_FLAGS ) {
    /* Invoke the callback while we've still got the lock.  The callback
    ** is responsible for either
    ** - dropping the lock using ef_eplock_try_unlock(), and returning
    **   the lock value prior to unlocking, OR
    ** - keeping the eplock locked and returning CI_EPLOCK_LOCKED
    */
    l = efab_tcp_helper_netif_lock_callback(&ni->eplock_helper, l,
                                            in_dl_context);
  }
  else if( ci_cas32_fail(&ni->state->lock.lock, l, CI_EPLOCK_UNLOCKED) ) {
    /* Someone (probably) set a flag when we tried to unlock, so we'd
    ** better handle the flag(s).
    */
    l = ni->state->lock.lock;
    goto again;
  }

  /* Wake any sleepers recorded in the (pre-unlock) lock value. */
  if( l & CI_EPLOCK_FL_NEED_WAKE ) {
    CITP_STATS_NETIF_INC(ni, lock_wakes);
    wake_up_interruptible(&ni->eplock_helper.wq);
  }

  return 0;
}
/*! Run any pending signal handlers
** \param our_info  Thread-specific context for current thread
**
** Drains the per-thread pending-signal slots filled by
** citp_signal_set_pending().  Each slot is claimed with a CAS before its
** handler is invoked, so a concurrent signal delivery to the same slot is
** handled safely.  errno is preserved across the whole function.
*/
void citp_signal_run_pending(citp_signal_info *our_info)
{
  /* preserve errno across calls to this function, as it's often
     called at error time as a result of EXIT_LIB */
  int old_errno = errno;
  int i;

  LOG_SIG(log("%s: start", __FUNCTION__));
  /* Make our earlier state changes visible before processing signals. */
  ci_wmb();

  /* We must be outside the lib, with pending work advertised. */
  ci_assert_equal(our_info->inside_lib, 0);
  ci_assert(our_info->aflags & OO_SIGNAL_FLAG_HAVE_PENDING);

  /* Clear the "have pending" flag before scanning: a signal arriving
   * during the scan re-sets it after filling its slot. */
  ci_atomic32_and(&our_info->aflags, ~OO_SIGNAL_FLAG_HAVE_PENDING);

  for( i = 0; i < OO_SIGNAL_MAX_PENDING; i++ ) {
    siginfo_t saved_info;
    void *saved_context;
    int signum;

    /* Slots are filled in order; an empty slot ends the scan. */
    if (our_info->signals[i].signum == 0)
      break;

    /* Copy the payload out BEFORE releasing the slot with the CAS below;
     * saved_context==NULL means no siginfo was saved for this signal. */
    saved_context = our_info->signals[i].saved_context;
    if( our_info->signals[i].saved_context )
      memcpy(&saved_info, &our_info->signals[i].saved_info,
             sizeof(saved_info));
    signum = our_info->signals[i].signum;
    /* Claim/release the slot; if we lose the race, stop processing. */
    if( ci_cas32_fail(&our_info->signals[i].signum, signum, 0) )
      break;

    /* Run the application's handler; it reports whether the interrupted
     * syscall should be restarted. */
    if( citp_signal_run_app_handler(
                signum,
                saved_context == NULL ? NULL : &saved_info,
                saved_context) )
      ci_atomic32_or(&our_info->aflags, OO_SIGNAL_FLAG_NEED_RESTART);
    else
      ci_atomic32_and(&our_info->aflags, ~OO_SIGNAL_FLAG_NEED_RESTART);
  }

  LOG_SIG(log("%s: end", __FUNCTION__));
  errno = old_errno;
}
/* Substitute signal handler by our variant. */ static int efab_signal_substitute(int sig, struct sigaction *new_act, struct mm_signal_data *tramp_data) { int rc; __sighandler_t handler; struct k_sigaction *k; int type; __user struct oo_sigaction *user_data; struct oo_sigaction *signal_data = &(tramp_data->signal_data[sig - 1]); ci_int32 old_type; ci_int32 seq; user_data = &(((struct oo_sigaction *) (CI_USER_PTR_GET(tramp_data->user_data)))[sig - 1]); if( !access_ok(VERIFY_WRITE, user_data, sizeof(struct oo_sigaction) ) ) return -EFAULT; do { old_type = signal_data->type; seq = (old_type & OO_SIGHANGLER_SEQ_MASK) + (1 << OO_SIGHANGLER_SEQ_SHIFT); } while( ci_cas32_fail(&signal_data->type, old_type, OO_SIGHANGLER_BUSY | seq) ); /* We are going to change signal handler: UL should wait until we've * finished */ rc = __put_user(signal_data->type, &user_data->type); if( rc != 0 ) { signal_data->type = old_type; return -EFAULT; } spin_lock_irq(¤t->sighand->siglock); k = ¤t->sighand->action[sig - 1]; if( new_act ) k->sa = *new_act; type = efab_signal_handler_type(sig, k->sa.sa_handler); handler = type <= OO_SIGHANGLER_DFL_MAX ? 
tramp_data->handlers[type] : NULL; BUILD_BUG_ON(SIG_DFL != NULL); /* We do not handle this signal: */ if( type != OO_SIGHANGLER_USER && handler == NULL ) { spin_unlock_irq(¤t->sighand->siglock); signal_data->type = old_type | OO_SIGHANGLER_IGN_BIT | seq; ci_verify(__put_user(signal_data->type, &user_data->type) == 0); return 0; } OO_DEBUG_SIGNAL(ci_log("%s: %d change sig=%d handler %p flags %lx " "restorer %p type %d", __func__, current->pid, sig, k->sa.sa_handler, k->sa.sa_flags, k->sa.sa_restorer, type)); signal_data->flags = k->sa.sa_flags; k->sa.sa_flags |= SA_SIGINFO; if( type == OO_SIGHANGLER_USER ) CI_USER_PTR_SET(signal_data->handler, k->sa.sa_handler); else { CI_USER_PTR_SET(signal_data->handler, handler); if( tramp_data->sarestorer ) { k->sa.sa_flags |= SA_RESTORER; k->sa.sa_restorer = tramp_data->sarestorer; } } k->sa.sa_handler = tramp_data->handler_postpone; spin_unlock_irq(¤t->sighand->siglock); OO_DEBUG_SIGNAL(ci_log("%s: %d set sig=%d handler %p flags %lx restorer %p", __func__, current->pid, sig, k->sa.sa_handler, k->sa.sa_flags, k->sa.sa_restorer)); /* Copy signal_data to UL; type BUSY */ rc = __copy_to_user(user_data, signal_data, sizeof(*signal_data)); signal_data->type = type | seq; if( rc != 0 ) return -EFAULT; /* Fill in the real type */ ci_verify(__put_user(signal_data->type, &user_data->type) == 0); return 0; }