/*
 * Walk every AF_INET routing table in every vnet and run in_rtqkill()
 * over each radix tree with the "draining" flag set, forcing immediate
 * expiry decisions for matching routes.
 */
void
in_rtqdrain(void)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct radix_node_head *rnh;
	struct rtqk_arg arg;
	int fib;

	VNET_LIST_RLOCK_NOSLEEP();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET(vnet_iter);
		for (fib = 0; fib < rt_numfibs; fib++) {
			rnh = rt_tables_get_rnh(fib, AF_INET);
			/*
			 * NOTE(review): assumes rt_tables_get_rnh() never
			 * returns NULL for a valid fib — confirm against
			 * the table-initialization path.
			 */
			arg.found = 0;
			arg.killed = 0;
			arg.rnh = rnh;
			arg.nextstop = 0;
			arg.draining = 1;	/* drain mode: expire now */
			arg.updating = 0;
			RADIX_NODE_HEAD_LOCK(rnh);
			rnh->rnh_walktree(rnh, in_rtqkill, &arg);
			RADIX_NODE_HEAD_UNLOCK(rnh);
		}
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK_NOSLEEP();
}
/*
 * Kernel module event handler for the SeND protocol hook (presumably
 * Secure Neighbor Discovery, judging by IPPROTO_SEND — confirm with the
 * enclosing file).  Registers the protocol and input hook on load;
 * unloading is currently refused.
 */
static int
send_modevent(module_t mod, int type, void *unused)
{
#ifdef __notyet__
	VNET_ITERATOR_DECL(vnet_iter);
#endif
	int error;

	switch (type) {
	case MOD_LOAD:
		SEND_LOCK_INIT();
		/* Register our protosw; tear the lock down again on failure. */
		error = pf_proto_register(PF_INET6, &send_protosw);
		if (error != 0) {
			printf("%s:%d: MOD_LOAD pf_proto_register(): %d\n",
			    __func__, __LINE__, error);
			SEND_LOCK_DESTROY();
			break;
		}
		/* Publish the input hook only after registration succeeded. */
		send_sendso_input_hook = send_input;
		break;
	case MOD_UNLOAD:
		/* Do not allow unloading w/o locking. */
		return (EBUSY);
		/*
		 * The code below is intentionally unreachable: it is the
		 * not-yet-enabled unload path, kept behind __notyet__ (and
		 * behind the unconditional return above) until unloading
		 * can be done safely.
		 */
#ifdef __notyet__
		VNET_LIST_RLOCK_NOSLEEP();
		SEND_LOCK();
		/* Refuse to unload while any vnet still has an open socket. */
		VNET_FOREACH(vnet_iter) {
			CURVNET_SET(vnet_iter);
			if (V_send_so != NULL) {
				CURVNET_RESTORE();
				SEND_UNLOCK();
				VNET_LIST_RUNLOCK_NOSLEEP();
				return (EBUSY);
			}
			CURVNET_RESTORE();
		}
		SEND_UNLOCK();
		VNET_LIST_RUNLOCK_NOSLEEP();
		error = pf_proto_unregister(PF_INET6, IPPROTO_SEND, SOCK_RAW);
		if (error == 0)
			SEND_LOCK_DESTROY();
		send_sendso_input_hook = NULL;
		break;
#endif
	default:
		/* Ignore other module events (quiesce, shutdown, ...). */
		error = 0;
		break;
	}
	return (error);
}
/* * Tcp protocol timeout routine called every 500 ms. * Updates timestamps used for TCP * causes finite state machine actions if timers expire. */ void tcp_slowtimo(void) { VNET_ITERATOR_DECL(vnet_iter); VNET_LIST_RLOCK_NOSLEEP(); VNET_FOREACH(vnet_iter) { CURVNET_SET(vnet_iter); (void) tcp_tw_2msl_scan(0); CURVNET_RESTORE(); } VNET_LIST_RUNLOCK_NOSLEEP(); }
/* * Tcp protocol timeout routine called every 500 ms. * Updates timestamps used for TCP * causes finite state machine actions if timers expire. */ void tcp_slowtimo(void) { VNET_ITERATOR_DECL(vnet_iter); VNET_LIST_RLOCK_NOSLEEP(); VNET_FOREACH(vnet_iter) { CURVNET_SET(vnet_iter); tcp_maxidle = tcp_keepcnt * tcp_keepintvl; INP_INFO_WLOCK(&V_tcbinfo); (void) tcp_tw_2msl_scan(0); INP_INFO_WUNLOCK(&V_tcbinfo); CURVNET_RESTORE(); } VNET_LIST_RUNLOCK_NOSLEEP(); }
/*
 * DDB "show all ifnets" command: dump every attached interface in every
 * vnet.  Runs from the debugger, so no vnet-list or ifnet locking is
 * taken (NOTE(review): deliberate for DDB — confirm).
 */
DB_SHOW_ALL_COMMAND(ifnets, db_show_all_ifnets)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct ifnet *ifp;
	u_short i;

	VNET_FOREACH(vnet_iter) {
		CURVNET_SET_QUIET(vnet_iter);
#ifdef VIMAGE
		db_printf("vnet=%p\n", curvnet);
#endif
		/* ifindex table is 1-based; slots may be empty. */
		for (i = 1; i <= V_if_index; i++) {
			ifp = V_ifindex_table[i].ife_ifnet;
			if (ifp != NULL) {
				db_printf("%20s ifp=%p\n", ifp->if_xname, ifp);
				if (db_pager_quit)
					break;
			}
		}
		CURVNET_RESTORE();
	}
}
/*
 * The caller must make sure that the new protocol is fully set up and ready to
 * accept requests before it is registered.
 *
 * Registers @npr in the domain matching @family by copying it over the
 * first free PROTO_SPACER slot in that domain's protosw[] array, then
 * initializes it in every vnet.  Returns 0 on success or an errno:
 * EPFNOSUPPORT (bad/unknown family), EPROTOTYPE, EPROTONOSUPPORT,
 * ENXIO (incomplete protosw), EEXIST (already registered), or
 * ENOMEM (no spacer slot left).
 */
int
pf_proto_register(int family, struct protosw *npr)
{
	VNET_ITERATOR_DECL(vnet_iter);
	struct domain *dp;
	struct protosw *pr, *fpr;

	/* Sanity checks. */
	if (family == 0)
		return (EPFNOSUPPORT);
	if (npr->pr_type == 0)
		return (EPROTOTYPE);
	if (npr->pr_protocol == 0)
		return (EPROTONOSUPPORT);
	if (npr->pr_usrreqs == NULL)
		return (ENXIO);

	/* Try to find the specified domain based on the family. */
	dp = pffinddomain(family);
	if (dp == NULL)
		return (EPFNOSUPPORT);

	/* Initialize backpointer to struct domain. */
	npr->pr_domain = dp;
	fpr = NULL;

	/*
	 * Protect us against races when two protocol registrations for
	 * the same protocol happen at the same time.
	 */
	mtx_lock(&dom_mtx);

	/*
	 * The new protocol must not yet exist.
	 * (dom_protoswNPROTOSW points one past the end of the array.)
	 */
	for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++) {
		if ((pr->pr_type == npr->pr_type) &&
		    (pr->pr_protocol == npr->pr_protocol)) {
			mtx_unlock(&dom_mtx);
			return (EEXIST);	/* XXX: Check only protocol? */
		}
		/* While here, remember the first free spacer. */
		if ((fpr == NULL) && (pr->pr_protocol == PROTO_SPACER))
			fpr = pr;
	}

	/* If no free spacer is found we can't add the new protocol. */
	if (fpr == NULL) {
		mtx_unlock(&dom_mtx);
		return (ENOMEM);
	}

	/* Copy the new struct protosw over the spacer. */
	bcopy(npr, fpr, sizeof(*fpr));

	/* Job is done, no more protection required. */
	mtx_unlock(&dom_mtx);

	/*
	 * Initialize and activate the protocol in every vnet.  Done after
	 * dropping dom_mtx; the slot is already claimed, so a concurrent
	 * registration can no longer race for it.
	 */
	VNET_LIST_RLOCK();
	VNET_FOREACH(vnet_iter) {
		CURVNET_SET_QUIET(vnet_iter);
		protosw_init(fpr);
		CURVNET_RESTORE();
	}
	VNET_LIST_RUNLOCK();

	return (0);
}
static void shutdown_helper(void *arg) { struct uhi_msg *msg = arg; uint8_t signo; int lock_attempts; int have_lock; VNET_ITERATOR_DECL(vnet_iter); struct uinet_instance *uinst; int shutdown_complete = 0; if (msg) { /* * Loop to respond to multiple messages, but only shutdown * once. This allows multiple, possibly concurrent, * executions of uinet_shutdown() to result in one shutdown * and none of the calls to uinet_shutdown() to block * indefinitely. This provides nice behavior when * uinet_shutdown() is called from a signal handler in a * multi-threaded application that is not carefully policing * signal masks in all the threads. */ for (;;) { if (uhi_msg_wait(msg, &signo) == 0) { if (!shutdown_complete) { printf("\nuinet shutting down"); if (signo) printf(" from signal handler (signal %u)", signo); printf("\n"); printf("Shutting down all uinet instances...\n"); /* * We may be shutting down as a * result of a signal occurring * while another thread is holding * the vnet list lock, so attempt to * acquire the lock in a way that * will avoid getting stuck. */ lock_attempts = 0; have_lock = 0; while ((lock_attempts < 5) && !(have_lock = VNET_LIST_TRY_RLOCK())) { printf("Waiting for vnet list lock...\n"); uhi_nanosleep(UHI_NSEC_PER_SEC); lock_attempts++; } if (lock_attempts > 0 && have_lock) printf("Acquired vnet list lock\n"); if (!have_lock) printf("Proceeding without vnet list lock\n"); #ifdef VIMAGE VNET_FOREACH(vnet_iter) { uinst = vnet_iter->vnet_uinet; uinet_instance_shutdown(uinst); } #else uinet_instance_shutdown(uinet_instance_default()); #endif if (have_lock) VNET_LIST_RUNLOCK(); printf("uinet shutdown complete\n"); shutdown_complete = 1; } uhi_msg_rsp_send(msg, NULL); } else { printf("Failed to receive shutdown message\n"); } }