/* Receive a message on the O/S backing socket for [sock_p] by handing the
 * request to the driver via ioctl.
 *
 * On success returns the byte count reported by the kernel (req.rc) and
 * copies the returned flags, name length and control length back into the
 * caller's msghdr.  On failure returns the (negative) resource-op result.
 */
int oo_os_sock_recvmsg(ci_netif* ni, oo_sp sock_p, struct msghdr* msg,
                       int flags)
{
  oo_os_sock_recvmsg_t req;
  int rc;

  /* Marshal the msghdr into the ioctl argument block. */
  req.sock_id = OO_SP_TO_INT(sock_p);
  req.sizeof_ptr = sizeof(void*);
  req.flags = flags;
  CI_USER_PTR_SET(req.msg_iov, msg->msg_iov);
  req.msg_iovlen = msg->msg_iovlen;
  CI_USER_PTR_SET(req.msg_name, msg->msg_name);
  req.msg_namelen = msg->msg_namelen;
  CI_USER_PTR_SET(req.msg_control, msg->msg_control);
  req.msg_controllen = msg->msg_controllen;

  rc = oo_resource_op(ci_netif_get_driver_handle(ni),
                      OO_IOC_OS_SOCK_RECVMSG, &req);
  ci_assert(req.rc >= 0 || rc < 0);
  if( rc != 0 )
    return rc;

  /* Propagate results back to the caller's msghdr.  msg_controllen is only
   * updated if the caller supplied control space. */
  msg->msg_flags = req.flags;
  msg->msg_namelen = req.msg_namelen;
  if( msg->msg_controllen )
    msg->msg_controllen = req.msg_controllen;
  return req.rc;
}
/* Accept a connection on the O/S backing socket for [sock_p].
 *
 * [addr]/[addrlen] receive the peer address exactly as accept4() would
 * fill them; [flags] are passed through to the kernel.  Returns the
 * kernel's accept result (req.rc) on success, else the negative
 * resource-op result.
 */
int oo_os_sock_accept(ci_netif* ni, oo_sp sock_p, struct sockaddr *addr,
                      socklen_t *addrlen, int flags)
{
  int rc;
  oo_os_sock_accept_t req;

  req.sock_id = OO_SP_TO_INT(sock_p);
  req.flags = flags;
  CI_USER_PTR_SET(req.addr, addr);
  CI_USER_PTR_SET(req.addrlen, addrlen);

  rc = oo_resource_op(ci_netif_get_driver_handle(ni),
                      OO_IOC_OS_SOCK_ACCEPT, &req);
  if( rc != 0 )
    return rc;
  return req.rc;
}
/* Partially reset a waitable that is being returned to the endpoint cache
 * rather than fully freed.  Unlike __citp_waitable_obj_free() the state
 * stays CI_TCP_CLOSED, the endpoint is not orphaned, and cache-related
 * aflags are preserved (see comment below).
 */
void citp_waitable_obj_free_to_cache(ci_netif* ni, citp_waitable* w)
{
#if defined (__KERNEL__) && !defined(NDEBUG)
  /* There should be no non-atomic work queued for endpoints going to cache -
   * they don't get their filters removed. */
  tcp_helper_endpoint_t* ep = ci_netif_get_valid_ep(ni, w->bufid);
  ci_assert(!(ep->ep_aflags & OO_THR_EP_AFLAG_NON_ATOMIC));
#endif
  /* Preconditions: still attached (not orphaned), marked not-ready and
   * in-cache, fully closed, and unlinked from poll/accept lists. */
  ci_assert(!(w->sb_aflags & CI_SB_AFLAG_ORPHAN));
  ci_assert(w->sb_aflags & CI_SB_AFLAG_NOT_READY);
  ci_assert(w->sb_aflags & CI_SB_AFLAG_IN_CACHE);
  ci_assert(w->state == CI_TCP_CLOSED);
  ci_assert(ci_ni_dllist_is_self_linked(ni, &w->post_poll_link));
  ci_assert(OO_SP_IS_NULL(w->wt_next));

  /* This resets a subset of the state done by __citp_waitable_obj_free.
   * We do not set the orphan flag, as cached endpoints remain attached.
   * We do not alter the state, as that too remains accurate.
   *
   * We preserve cache related aflags.  If the endpoint is freed before being
   * accepted from the cache then these will be cleared when
   * __citp_waitable_obj_free is called, otherwise they'll be checked for
   * correctness, and updated if necessary when the socket is accepted.
   */
  w->wake_request = 0;
  w->sb_flags = 0;
  /* Atomically drop all aflags except NOT_READY and the cache-preserve
   * set; other threads may be manipulating aflags concurrently. */
  ci_atomic32_and(&w->sb_aflags,
                  CI_SB_AFLAG_NOT_READY | CI_SB_AFLAG_CACHE_PRESERVE);
  w->lock.wl_val = 0;
  w->ready_list_id = 0;
  CI_USER_PTR_SET(w->eitem, NULL);
}
/* Send a message on the O/S backing socket for [sock_p], passing the
 * caller's msghdr to the kernel unmodified ("raw" path).
 *
 * Returns the resource-op result directly.
 */
int oo_os_sock_sendmsg_raw(ci_netif* ni, oo_sp sock_p,
                           const struct msghdr* msg, int flags)
{
  unsigned long sc_args[8];
  oo_os_sock_sendmsg_raw_t req;
  int rc;

  req.sock_id = OO_SP_TO_INT(sock_p);
  req.sizeof_ptr = sizeof(void*);
  req.flags = flags;
  CI_USER_PTR_SET(req.msg, msg);
  /* Scratch space for the kernel to build socketcall arguments in. */
  CI_USER_PTR_SET(req.socketcall_args, sc_args);

  /* Hold the dup2 read-lock across the op so the fd table cannot be
   * shuffled underneath us. */
  oo_rwlock_lock_read(&citp_dup2_lock);
  rc = oo_resource_op(ci_netif_get_driver_handle(ni),
                      OO_IOC_OS_SOCK_SENDMSG_RAW, &req);
  oo_rwlock_unlock_read(&citp_dup2_lock);
  return rc;
}
/* Send a message on the O/S backing socket for [sock_p] by marshalling the
 * msghdr fields into an ioctl argument block.
 *
 * On i386 compat control messages are not handled here, so the caller must
 * pass msg_controllen == 0.  Returns the resource-op result directly.
 */
int oo_os_sock_sendmsg(ci_netif* ni, oo_sp sock_p, const struct msghdr* msg,
                       int flags)
{
  oo_os_sock_sendmsg_t req;

  req.sock_id = OO_SP_TO_INT(sock_p);
  req.sizeof_ptr = sizeof(void*);
  req.flags = flags;
  CI_USER_PTR_SET(req.msg_iov, msg->msg_iov);
  req.msg_iovlen = msg->msg_iovlen;
  CI_USER_PTR_SET(req.msg_name, msg->msg_name);
  req.msg_namelen = msg->msg_namelen;
#ifdef __i386__
  /* compat cmsg is not handled in this function */
  ci_assert_equal(msg->msg_controllen, 0);
  CI_USER_PTR_SET(req.msg_control, NULL);
  req.msg_controllen = 0;
#else
  CI_USER_PTR_SET(req.msg_control, msg->msg_control);
  req.msg_controllen = msg->msg_controllen;
#endif

  return oo_resource_op(ci_netif_get_driver_handle(ni),
                        OO_IOC_OS_SOCK_SENDMSG, &req);
}
/* Reset a waitable to the free state.  The caller must already have
 * orphaned it and unlinked it from the post-poll list.
 */
static void __citp_waitable_obj_free(ci_netif* ni, citp_waitable* w)
{
  /* Preconditions: orphaned, not already free, and fully unlinked. */
  ci_assert(w->sb_aflags & CI_SB_AFLAG_ORPHAN);
  ci_assert(w->state != CI_TCP_STATE_FREE);
  ci_assert(ci_ni_dllist_is_self_linked(ni, &w->post_poll_link));
  ci_assert(OO_SP_IS_NULL(w->wt_next));

  /* Scrub per-endpoint state.  ORPHAN and NOT_READY are the only aflags
   * that carry over into the free state. */
  w->sb_flags = 0;
  w->wake_request = 0;
  w->ready_list_id = 0;
  w->lock.wl_val = 0;
  w->sb_aflags = CI_SB_AFLAG_ORPHAN | CI_SB_AFLAG_NOT_READY;
  w->state = CI_TCP_STATE_FREE;
  CI_USER_PTR_SET(w->eitem, NULL);
}
void efab_signal_process_init(struct mm_signal_data *tramp_data) { int sig; int rc; OO_DEBUG_SIGNAL(ci_log("%s(%p) pid %d", __func__, tramp_data, current->pid)); /* At start-of-day, we intercept all already-installed handlers * and deadly SIG_DFL */ for( sig = 1; sig <= _NSIG; sig++ ) { struct k_sigaction *k; tramp_data->signal_data[sig - 1].type = OO_SIGHANGLER_USER | OO_SIGHANGLER_IGN_BIT; CI_USER_PTR_SET(tramp_data->signal_data[sig - 1].handler, NULL); /* Never, never intercept SIGKILL. You'll get deadlock since exit_group * sends SIGKILL to all other threads. */ if( sig_kernel_only(sig) ) continue; /* If this is our handler, do nothing. This is second init from the * same process. It happens in fork hooks, when second netif is * created, etc. */ spin_lock_irq(¤t->sighand->siglock); k = ¤t->sighand->action[sig - 1]; if( k->sa.sa_handler == tramp_data->handler_postpone ) { spin_unlock_irq(¤t->sighand->siglock); OO_DEBUG_SIGNAL(ci_log("%s: double init pid=%d", __func__, current->pid)); rc = copy_from_user(tramp_data->signal_data, CI_USER_PTR_GET(tramp_data->user_data), sizeof(tramp_data->signal_data)); if( rc != 0 ) ci_log("%s: ERROR: failed to copy signal data (%d)", __func__, rc); break; } spin_unlock_irq(¤t->sighand->siglock); /* Ignore any errors */ (void) efab_signal_substitute(sig, NULL, tramp_data); } tramp_data->kernel_sighand = current->sighand; }
/* Build a single command string from cfg_argv[] and submit it to the
 * Onload Filter Engine via the config ioctl, logging success or the
 * engine's last error on failure.
 *
 * Fix vs original: snprintf() returns the would-be length when it
 * truncates, so the old `str += snprintf(...)` could advance `str` past
 * the end of `command`; the next iteration then computed
 * `sizeof(command) - (str - command)` which wraps around to a huge
 * size_t, allowing a buffer overrun.  The cursor is now clamped at the
 * end of the buffer and `op.len` can never exceed sizeof(command).
 */
static void do_ofe_command(ci_netif *ni)
{
  char command[200];
  char *str = command;
  char * const end = command + sizeof(command);
  int rc;
  oo_ofe_config_t op;
  int i;

  if( ni->ofe == NULL )
    return;

  /* Join cfg_argv[] into a space-separated command string, clamping on
   * truncation (see header comment). */
  for( i = 0; i < cfg_argc; i++ ) {
    int n = snprintf(str, end - str, "%s ", cfg_argv[i]);
    if( n < 0 )
      break;
    str += n;
    if( str >= end ) {
      str = end - 1;  /* points at the terminating '\0' */
      break;
    }
  }
  op.len = str - command;
  CI_USER_PTR_SET(op.str, command);
  rc = oo_resource_op(ci_netif_get_driver_handle(ni), OO_IOC_OFE_CONFIG, &op);
  oo_resource_op(ci_netif_get_driver_handle(ni), OO_IOC_OFE_CONFIG_DONE,
                 NULL);
  if( rc == 0 ) {
    ci_log("[%s] %s: OK", ni->state->pretty_name, command);
    return;
  }

  ci_log("[%s] Onload Filter Engine fails to process command\n\t%s",
         ni->state->pretty_name, command);
  if( ofe_engine_get_last_error(ni->ofe) != NULL )
    ci_log("OFE ERROR: %s", ofe_engine_get_last_error(ni->ofe));
  else {
    char err[CI_LOG_MAX_LINE];
    oo_resource_op(ci_netif_get_driver_handle(ni),
                   OO_IOC_OFE_GET_LAST_ERROR, err);
    err[CI_LOG_MAX_LINE-1] = '\0';
    ci_log("OFE ERROR: %s", err);
  }
}
/* Substitute signal handler by our variant. */ static int efab_signal_substitute(int sig, struct sigaction *new_act, struct mm_signal_data *tramp_data) { int rc; __sighandler_t handler; struct k_sigaction *k; int type; __user struct oo_sigaction *user_data; struct oo_sigaction *signal_data = &(tramp_data->signal_data[sig - 1]); ci_int32 old_type; ci_int32 seq; user_data = &(((struct oo_sigaction *) (CI_USER_PTR_GET(tramp_data->user_data)))[sig - 1]); if( !access_ok(VERIFY_WRITE, user_data, sizeof(struct oo_sigaction) ) ) return -EFAULT; do { old_type = signal_data->type; seq = (old_type & OO_SIGHANGLER_SEQ_MASK) + (1 << OO_SIGHANGLER_SEQ_SHIFT); } while( ci_cas32_fail(&signal_data->type, old_type, OO_SIGHANGLER_BUSY | seq) ); /* We are going to change signal handler: UL should wait until we've * finished */ rc = __put_user(signal_data->type, &user_data->type); if( rc != 0 ) { signal_data->type = old_type; return -EFAULT; } spin_lock_irq(¤t->sighand->siglock); k = ¤t->sighand->action[sig - 1]; if( new_act ) k->sa = *new_act; type = efab_signal_handler_type(sig, k->sa.sa_handler); handler = type <= OO_SIGHANGLER_DFL_MAX ? 
tramp_data->handlers[type] : NULL; BUILD_BUG_ON(SIG_DFL != NULL); /* We do not handle this signal: */ if( type != OO_SIGHANGLER_USER && handler == NULL ) { spin_unlock_irq(¤t->sighand->siglock); signal_data->type = old_type | OO_SIGHANGLER_IGN_BIT | seq; ci_verify(__put_user(signal_data->type, &user_data->type) == 0); return 0; } OO_DEBUG_SIGNAL(ci_log("%s: %d change sig=%d handler %p flags %lx " "restorer %p type %d", __func__, current->pid, sig, k->sa.sa_handler, k->sa.sa_flags, k->sa.sa_restorer, type)); signal_data->flags = k->sa.sa_flags; k->sa.sa_flags |= SA_SIGINFO; if( type == OO_SIGHANGLER_USER ) CI_USER_PTR_SET(signal_data->handler, k->sa.sa_handler); else { CI_USER_PTR_SET(signal_data->handler, handler); if( tramp_data->sarestorer ) { k->sa.sa_flags |= SA_RESTORER; k->sa.sa_restorer = tramp_data->sarestorer; } } k->sa.sa_handler = tramp_data->handler_postpone; spin_unlock_irq(¤t->sighand->siglock); OO_DEBUG_SIGNAL(ci_log("%s: %d set sig=%d handler %p flags %lx restorer %p", __func__, current->pid, sig, k->sa.sa_handler, k->sa.sa_flags, k->sa.sa_restorer)); /* Copy signal_data to UL; type BUSY */ rc = __copy_to_user(user_data, signal_data, sizeof(*signal_data)); signal_data->type = type | seq; if( rc != 0 ) return -EFAULT; /* Fill in the real type */ ci_verify(__put_user(signal_data->type, &user_data->type) == 0); return 0; }