static int cyclic_test(SYSCTL_HANDLER_ARGS) { int error, cmd = 0; error = sysctl_wire_old_buffer(req, sizeof(int)); if (error == 0) error = sysctl_handle_int(oidp, &cmd, 0, req); if (error != 0 || req->newptr == NULL) return (error); /* Check for command validity. */ switch (cmd) { case 1: case 2: case -1: /* * Execute the tests in a kernel thread to avoid blocking * the sysctl. Look for the results in the syslog. */ error = kthread_add(cyclic_run_tests, (void *)(uintptr_t) cmd, NULL, NULL, 0, 0, "cyctest%d", cmd); break; default: printf("Usage: debug.cyclic.test=(1..9) or -1 for all tests\n"); error = EINVAL; break; } return (error); }
int kproc_kthread_add(void (*func)(void *), void *arg, struct proc **procptr, struct thread **tdptr, int flags, int pages, const char *procname, const char *fmt, ...) { int error; va_list ap; char buf[100]; struct thread *td; if (*procptr == 0) { error = kproc_create(func, arg, procptr, flags, pages, "%s", procname); if (error) return (error); td = FIRST_THREAD_IN_PROC(*procptr); if (tdptr) *tdptr = td; va_start(ap, fmt); vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap); va_end(ap); #ifdef KTR sched_clear_tdname(td); #endif return (0); } va_start(ap, fmt); vsnprintf(buf, sizeof(buf), fmt, ap); va_end(ap); error = kthread_add(func, arg, *procptr, tdptr, flags, pages, "%s", buf); return (error); }
static int test_modinit(void) { struct thread *td; int i, error, pri_range, pri_off; pri_range = PRI_MIN_TIMESHARE - PRI_MIN_REALTIME; test_epoch = epoch_alloc(EPOCH_PREEMPT); for (i = 0; i < mp_ncpus*2; i++) { etilist[i].threadid = i; error = kthread_add(testloop, &etilist[i], NULL, &testthreads[i], 0, 0, "epoch_test_%d", i); if (error) { printf("%s: kthread_add(epoch_test): error %d", __func__, error); } else { pri_off = (i*4)%pri_range; td = testthreads[i]; thread_lock(td); sched_prio(td, PRI_MIN_REALTIME + pri_off); thread_unlock(td); } } inited = 1; return (0); }
/*
 * Create and attach the ifnet for a netmap-backed interface, then start
 * the per-interface transmit and receive kernel threads.
 *
 * Returns 0 on success, 1 if either worker thread could not be created
 * (in which case the ifnet is detached and freed again).
 */
static int
if_netmap_setup_interface(struct if_netmap_softc *sc)
{
	struct ifnet *ifp;

	ifp = sc->ifp = if_alloc(IFT_ETHER);

	ifp->if_init = if_netmap_init;
	ifp->if_softc = sc;

	if_initname(ifp, sc->cfg->name, IF_DUNIT_NONE);
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = if_netmap_ioctl;
	ifp->if_start = if_netmap_start;

	/* XXX what values? */
	/* Size the software send queue from the netmap TX ring. */
	IFQ_SET_MAXLEN(&ifp->if_snd, if_netmap_txslots(sc->nm_host_ctx));
	ifp->if_snd.ifq_drv_maxlen = if_netmap_txslots(sc->nm_host_ctx);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_fib = sc->cfg->cdom;

	ether_ifattach(ifp, sc->addr);
	ifp->if_capabilities = ifp->if_capenable = IFCAP_HWSTATS;

	/* Synchronization between the start routine and the tx thread. */
	mtx_init(&sc->tx_lock, "txlk", NULL, MTX_DEF);
	cv_init(&sc->tx_cv, "txcv");

	/*
	 * NOTE(review): on the failure paths below tx_lock and tx_cv are
	 * never destroyed, and when the receive thread fails to start the
	 * already-running transmit thread still references sc/ifp while
	 * they are torn down — verify against the detach/teardown path.
	 */
	if (kthread_add(if_netmap_send, sc, NULL, &sc->tx_thread.thr, 0, 0,
	    "nm_tx: %s", ifp->if_xname)) {
		printf("Could not start transmit thread for %s (%s)\n",
		    ifp->if_xname, sc->host_ifname);
		ether_ifdetach(ifp);
		if_free(ifp);
		return (1);
	}

	if (kthread_add(if_netmap_receive, sc, NULL, &sc->rx_thread.thr, 0, 0,
	    "nm_rx: %s", ifp->if_xname)) {
		printf("Could not start receive thread for %s (%s)\n",
		    ifp->if_xname, sc->host_ifname);
		ether_ifdetach(ifp);
		if_free(ifp);
		return (1);
	}

	return (0);
}
void khttpd_log_start(void) { mtx_init(&khttpd_log_lock, "khttpd_log", NULL, MTX_DEF); kthread_add(khttpd_log_main, NULL, curproc, NULL, 0, 0, "log"); }
/*
 * Device attach for the VirtIO memory balloon: negotiate features,
 * allocate the page-frame scratch array and virtqueues, hook up the
 * interrupts and start the worker thread that services inflate/deflate
 * requests from the host.
 *
 * Returns 0 on success or an errno; any failure runs vtballoon_detach()
 * to unwind the partially initialized softc.
 */
static int
vtballoon_attach(device_t dev)
{
	struct vtballoon_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->vtballoon_dev = dev;

	VTBALLOON_LOCK_INIT(sc, device_get_nameunit(dev));
	TAILQ_INIT(&sc->vtballoon_pages);

	vtballoon_add_sysctl(sc);

	virtio_set_feature_desc(dev, vtballoon_feature_desc);
	vtballoon_negotiate_features(sc);

	/* Scratch array of page frame numbers sent to the host per request. */
	sc->vtballoon_page_frames = malloc(VTBALLOON_PAGES_PER_REQUEST *
	    sizeof(uint32_t), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->vtballoon_page_frames == NULL) {
		error = ENOMEM;
		device_printf(dev,
		    "cannot allocate page frame request array\n");
		goto fail;
	}

	error = vtballoon_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_MISC);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		goto fail;
	}

	error = kthread_add(vtballoon_thread, sc, NULL, &sc->vtballoon_td,
	    0, 0, "virtio_balloon");
	if (error) {
		device_printf(dev, "cannot create balloon kthread\n");
		goto fail;
	}

	virtqueue_enable_intr(sc->vtballoon_inflate_vq);
	virtqueue_enable_intr(sc->vtballoon_deflate_vq);

fail:
	/* The success path also falls through here with error == 0. */
	if (error)
		vtballoon_detach(dev);

	return (error);
}
void kthread_start(const void *udata) { const struct kthread_desc *kp = udata; int error; error = kthread_add((void (*)(void *))kp->func, NULL, NULL, kp->global_threadpp, 0, 0, "%s", kp->arg0); if (error) panic("kthread_start: %s: error %d", kp->arg0, error); }
/*
 * Run the NVMe namespace I/O stress test requested via ioctl: spawn
 * num_threads worker threads driving either the raw (NVME_IO_TEST) or
 * bio-based test path, wait for them to finish, and copy the per-thread
 * completion counts back to the caller's request structure.
 */
void
nvme_ns_test(struct nvme_namespace *ns, u_long cmd, caddr_t arg)
{
	struct nvme_io_test *req;
	struct nvme_io_test_internal *state;
	void (*worker)(void *);
	int t;

	req = (struct nvme_io_test *)arg;

	/* Only read and write opcodes can be exercised. */
	if ((req->opc != NVME_OPC_READ) && (req->opc != NVME_OPC_WRITE))
		return;

	/* The transfer size must be a whole number of sectors. */
	if (req->size % nvme_ns_get_sector_size(ns))
		return;

	state = malloc(sizeof(*state), M_NVME, M_WAITOK | M_ZERO);
	state->opc = req->opc;
	state->ns = ns;
	state->td_active = req->num_threads;
	state->time = req->time;
	state->size = req->size;
	state->flags = req->flags;

	worker = (cmd == NVME_IO_TEST) ? nvme_ns_io_test : nvme_ns_bio_test;

	getmicrouptime(&state->start);

	for (t = 0; t < req->num_threads; t++) {
#if __FreeBSD_version >= 800004
		kthread_add(worker, state, NULL, NULL, 0, 0,
		    "nvme_io_test[%d]", t);
#else
		kthread_create(worker, state, NULL, 0, 0,
		    "nvme_io_test[%d]", t);
#endif
	}

	/* Allow twice the nominal test duration before giving up the wait. */
	tsleep(state, 0, "nvme_test", req->time * 2 * hz);

	/* Spin until the last worker has decremented td_active. */
	while (state->td_active > 0)
		DELAY(10);

	memcpy(req->io_completed, state->io_completed,
	    sizeof(req->io_completed));

	free(state, M_NVME);
}
/*
 * Launch the background cleaner thread for a nandfs device.  The device
 * must not already have a cleaner running.  Returns 0 or the error from
 * kthread_add().
 */
int
nandfs_start_cleaner(struct nandfs_device *fsdev)
{
	int rv;

	MPASS(fsdev->nd_cleaner == NULL);

	/* Clear the exit flag the cleaner polls for shutdown. */
	fsdev->nd_cleaner_exit = 0;

	rv = kthread_add((void(*)(void *))nandfs_cleaner, fsdev, NULL,
	    &fsdev->nd_cleaner, 0, 0, "nandfs_cleaner");
	if (rv != 0)
		printf("nandfs: could not start cleaner: %d\n", rv);

	return (rv);
}
int khttpd_ktr_logging_init(void) { int error; KHTTPD_ASSERT_CURPROC_IS_KHTTPD(); khttpd_ktr_logging_shutdown = FALSE; error = kthread_add(khttpd_ktr_logging_main, NULL, curproc, &khttpd_ktr_logging_thread, 0, 0, "khttpd-ktr-flush"); if (error != 0) { log(LOG_WARNING, "khttpd: failed to fork khttpd-ktr-flush: %d", error); return (error); } return (0); }
int main(int argc, char **argv) { int error; mutex_lock(&test_mutex); error = kthread_add(thread_loop, NULL, NULL, &test_thread, 0, 0, "test"); if (error) { kprintf("failed to create kernel thread\n"); return 1; } mutex_unlock(&test_mutex); /* * Sleep a bit to wait for the thread to terminate. Unfortunately the * BSD kthread interface has no nice way to wait for thread termination. */ bsd_pause("kthread_test", 200); return 0; }
int waitForMessages(struct sockaddr_in *site_2, struct thread *td) { waiting_sockaddr = *site_2; struct sockaddr_in *site = &waiting_sockaddr; log_info("waiting for messages on %s:%d", inet_ntoa(site->sin_addr), ntohs(site->sin_port)); int error = 0; struct socket *so = NULL; error = socreate(AF_INET, &so, SOCK_STREAM, 0, td->td_ucred, td); if (error) { log_warn("error in socreate in waitForMessages"); goto bad; } error = sobind(so, (struct sockaddr *) site, td); if (error) { log_warn("error in sobind in waitForMessages"); goto bad; } error = solisten(so, 5, td); if (error) { log_warn("error in solisten in waitForMessages"); goto bad; } error = kthread_add(accept_loop, so, NULL, &accept_kthread, 0, 0, "raymond_accept_loop"); if (error) { log_warn("error creating thread: %d\n", error); goto bad; } return 0; bad: // on error if (so != NULL) soclose(so); return error; }
/*
 * Bring up the uinet userspace network stack: seed kernel tunables via
 * environment variables, size the TCP hash tables from nmbclusters,
 * bootstrap the callwheel and UMA allocators, run mi_startup(), and
 * finally start the shutdown-helper and signal-handling threads.
 *
 * Returns 0.  Thread-creation failures are only logged, not fatal.
 */
int
uinet_init(unsigned int ncpus, unsigned int nmbclusters,
    struct uinet_instance_cfg *inst_cfg)
{
	struct thread *td;
	char tmpbuf[32];
	int boot_pages;
	int num_hash_buckets;
	caddr_t v;

	/* Clamp the CPU count to [1, MAXCPU]. */
	if (ncpus > MAXCPU) {
		printf("Limiting number of CPUs to %u\n", MAXCPU);
		ncpus = MAXCPU;
	} else if (0 == ncpus) {
		printf("Setting number of CPUs to 1\n");
		ncpus = 1;
	}

	printf("uinet starting: cpus=%u, nmbclusters=%u\n",
	    ncpus, nmbclusters);

	snprintf(tmpbuf, sizeof(tmpbuf), "%u", nmbclusters);
	setenv("kern.ipc.nmbclusters", tmpbuf);

	/* The env var kern.ncallout will get read in proc0_init(), but
	 * that's after we init the callwheel below. So we set it here for
	 * consistency, but the operative setting is the direct assignment
	 * below. */
	ncallout = HZ * 3600;
	snprintf(tmpbuf, sizeof(tmpbuf), "%u", ncallout);
	setenv("kern.ncallout", tmpbuf);

	/* Assuming maxsockets will be set to nmbclusters, the following
	 * sets the TCP tcbhash size so that perfectly uniform hashing would
	 * result in a maximum bucket depth of about 16. */
	num_hash_buckets = 1;
	while (num_hash_buckets < nmbclusters / 16)
		num_hash_buckets <<= 1;
	snprintf(tmpbuf, sizeof(tmpbuf), "%u", num_hash_buckets);
	setenv("net.inet.tcp.tcbhashsize", tmpbuf);

	snprintf(tmpbuf, sizeof(tmpbuf), "%u", 2048);
	setenv("net.inet.tcp.syncache.hashsize", tmpbuf);

	boot_pages = 16; /* number of pages made available for uma to bootstrap itself */

	mp_ncpus = ncpus;
	mp_maxid = mp_ncpus - 1;

	uhi_set_num_cpus(mp_ncpus);

	/* vm_init bits */

	/* first get size required, then alloc memory, then give that memory to the second call */
	v = 0;
	v = kern_timeout_callwheel_alloc(v);
	kern_timeout_callwheel_alloc(malloc(round_page((vm_offset_t)v),
	    M_DEVBUF, M_ZERO));
	kern_timeout_callwheel_init();

	uinet_init_thread0();

	uma_startup(malloc(boot_pages*PAGE_SIZE, M_DEVBUF, M_ZERO),
	    boot_pages);
	uma_startup2();

	/* XXX any need to tune this? */
	num_hash_buckets = 8192;  /* power of 2.  32 bytes per bucket on a 64-bit system, so no need to skimp */
	uma_page_slab_hash = malloc(sizeof(struct uma_page)*num_hash_buckets,
	    M_DEVBUF, M_ZERO);
	uma_page_mask = num_hash_buckets - 1;

#if 0
	pthread_mutex_init(&init_lock, NULL);
	pthread_cond_init(&init_cond, NULL);
#endif
	mutex_init();
	/* Runs all registered SYSINITs, i.e. the bulk of stack bring-up. */
	mi_startup();
	sx_init(&proctree_lock, "proctree");
	td = curthread;

	/* XXX - would very much like to do better than this */
	/* give all configuration threads time to complete initialization
	 * before continuing */
	sleep(1);

	uinet_instance_init(&uinst0, vnet0, inst_cfg);

	if (uhi_msg_init(&shutdown_helper_msg, 1, 0) != 0)
		printf("Failed to init shutdown helper message - there will be no shutdown helper thread\n");
	else if (kthread_add(shutdown_helper, &shutdown_helper_msg, NULL,
	    &shutdown_helper_thread, 0, 0, "shutdown_helper"))
		printf("Failed to create shutdown helper thread\n");

	/*
	 * XXX This should be configurable - applications that arrange for a
	 * particular thread to process all signals will not want this.
	 */
	if (kthread_add(one_sighandling_thread, NULL, NULL,
	    &at_least_one_sighandling_thread, 0, 0, "one_sighandler"))
		printf("Failed to create at least one signal handling thread\n");
	uhi_mask_all_signals();

#if 0
	printf("maxusers=%d\n", maxusers);
	printf("maxfiles=%d\n", maxfiles);
	printf("maxsockets=%d\n", maxsockets);
	printf("nmbclusters=%d\n", nmbclusters);
#endif

	return (0);
}
static int icl_listen_add_tcp(struct icl_listen *il, int domain, int socktype, int protocol, struct sockaddr *sa, int portal_id) { struct icl_listen_sock *ils; struct socket *so; struct sockopt sopt; int error, one = 1; error = socreate(domain, &so, socktype, protocol, curthread->td_ucred, curthread); if (error != 0) { ICL_WARN("socreate failed with error %d", error); return (error); } sopt.sopt_dir = SOPT_SET; sopt.sopt_level = SOL_SOCKET; sopt.sopt_name = SO_REUSEADDR; sopt.sopt_val = &one; sopt.sopt_valsize = sizeof(one); sopt.sopt_td = NULL; error = sosetopt(so, &sopt); if (error != 0) { ICL_WARN("failed to set SO_REUSEADDR with error %d", error); soclose(so); return (error); } error = sobind(so, sa, curthread); if (error != 0) { ICL_WARN("sobind failed with error %d", error); soclose(so); return (error); } error = solisten(so, -1, curthread); if (error != 0) { ICL_WARN("solisten failed with error %d", error); soclose(so); return (error); } ils = malloc(sizeof(*ils), M_ICL_PROXY, M_ZERO | M_WAITOK); ils->ils_listen = il; ils->ils_socket = so; ils->ils_id = portal_id; error = kthread_add(icl_accept_thread, ils, NULL, NULL, 0, 0, "iclacc"); if (error != 0) { ICL_WARN("kthread_add failed with error %d", error); soclose(so); free(ils, M_ICL_PROXY); return (error); } sx_xlock(&il->il_lock); TAILQ_INSERT_TAIL(&il->il_sockets, ils, ils_next); sx_xunlock(&il->il_lock); return (0); }
/*
 * Bring up the uinet userspace network stack from a uinet_global_cfg:
 * probe CPU features, apply the (possibly defaulted) configuration to
 * the kernel tunables, bootstrap the callwheel and UMA allocators, run
 * mi_startup(), and start the shutdown-helper and signal-handling
 * threads.
 *
 * Returns 0.  Thread-creation failures are only logged, not fatal.
 */
int
uinet_init(struct uinet_global_cfg *cfg, struct uinet_instance_cfg *inst_cfg)
{
	struct thread *td;
	char tmpbuf[32];
	int boot_pages;
	caddr_t v;
	struct uinet_global_cfg default_cfg;
	unsigned int ncpus;
	unsigned int num_hash_buckets;
#if defined(__amd64__) || defined(__i386__)
	unsigned int regs[4];

	/* Probe CPU features via CPUID leaf 1. */
	do_cpuid(1, regs);
	cpu_feature = regs[3];
	cpu_feature2 = regs[2];
#endif

	uinet_hz = HZ;

	/* Fall back to the built-in "medium" configuration when none given. */
	if (cfg == NULL) {
		uinet_default_cfg(&default_cfg, UINET_GLOBAL_CFG_MEDIUM);
		cfg = &default_cfg;
	}

	epoch_number = cfg->epoch_number;

#if defined(VIMAGE_STS) || defined(VIMAGE_STS_ONLY)
	if (inst_cfg) {
		uinet_instance_init_vnet_sts(&vnet0_sts, inst_cfg);
	}
#endif

	printf("uinet starting\n");
	printf("requested configuration:\n");
	uinet_print_cfg(cfg);

	if_netmap_num_extra_bufs = cfg->netmap_extra_bufs;

	ncpus = cfg->ncpus;

	/* Clamp the CPU count to [1, MAXCPU]. */
	if (ncpus > MAXCPU) {
		printf("Limiting number of CPUs to %u\n", MAXCPU);
		ncpus = MAXCPU;
	} else if (0 == ncpus) {
		printf("Setting number of CPUs to 1\n");
		ncpus = 1;
	}

	snprintf(tmpbuf, sizeof(tmpbuf), "%u", cfg->kern.ipc.maxsockets);
	setenv("kern.ipc.maxsockets", tmpbuf);

	snprintf(tmpbuf, sizeof(tmpbuf), "%u", cfg->kern.ipc.nmbclusters);
	setenv("kern.ipc.nmbclusters", tmpbuf);

	/* The env var kern.ncallout will get read in proc0_init(), but
	 * that's after we init the callwheel below. So we set it here for
	 * consistency, but the operative setting is the direct assignment
	 * below. */
	ncallout = HZ * 3600;
	snprintf(tmpbuf, sizeof(tmpbuf), "%u", ncallout);
	setenv("kern.ncallout", tmpbuf);

	/* Hash-table sizes are rounded up to powers of two as required. */
	snprintf(tmpbuf, sizeof(tmpbuf), "%u",
	    roundup_nearest_power_of_2(cfg->net.inet.tcp.syncache.hashsize));
	setenv("net.inet.tcp.syncache.hashsize", tmpbuf);

	snprintf(tmpbuf, sizeof(tmpbuf), "%u",
	    cfg->net.inet.tcp.syncache.bucketlimit);
	setenv("net.inet.tcp.syncache.bucketlimit", tmpbuf);

	snprintf(tmpbuf, sizeof(tmpbuf), "%u",
	    cfg->net.inet.tcp.syncache.cachelimit);
	setenv("net.inet.tcp.syncache.cachelimit", tmpbuf);

	snprintf(tmpbuf, sizeof(tmpbuf), "%u",
	    roundup_nearest_power_of_2(cfg->net.inet.tcp.tcbhashsize));
	setenv("net.inet.tcp.tcbhashsize", tmpbuf);

	boot_pages = 16; /* number of pages made available for uma to bootstrap itself */

	mp_ncpus = ncpus;
	mp_maxid = mp_ncpus - 1;

	uhi_set_num_cpus(mp_ncpus);

	/* vm_init bits */

	/* first get size required, then alloc memory, then give that memory to the second call */
	v = 0;
	v = kern_timeout_callwheel_alloc(v);
	kern_timeout_callwheel_alloc(malloc(round_page((vm_offset_t)v),
	    M_DEVBUF, M_ZERO));
	kern_timeout_callwheel_init();

	uinet_thread_init();

	uinet_init_thread0();

	uma_startup(malloc(boot_pages*PAGE_SIZE, M_DEVBUF, M_ZERO),
	    boot_pages);
	uma_startup2();

	/* XXX any need to tune this? */
	num_hash_buckets = 8192;  /* power of 2.  32 bytes per bucket on a 64-bit system, so no need to skimp */
	uma_page_slab_hash = malloc(sizeof(struct uma_page)*num_hash_buckets,
	    M_DEVBUF, M_ZERO);
	uma_page_mask = num_hash_buckets - 1;

#if 0
	pthread_mutex_init(&init_lock, NULL);
	pthread_cond_init(&init_cond, NULL);
#endif
	mutex_init();
	/* Runs all registered SYSINITs, i.e. the bulk of stack bring-up. */
	mi_startup();
	sx_init(&proctree_lock, "proctree");
	td = curthread;

	/* XXX - would very much like to do better than this */
	/* give all configuration threads time to complete initialization
	 * before continuing */
	sleep(1);

	kernel_sysctlbyname(curthread, "kern.ipc.somaxconn", NULL, NULL,
	    &cfg->kern.ipc.somaxconn, sizeof(cfg->kern.ipc.somaxconn),
	    NULL, 0);

	uinet_instance_init(&uinst0, vnet0, inst_cfg);

	if (uhi_msg_init(&shutdown_helper_msg, 1, 0) != 0)
		printf("Failed to init shutdown helper message - there will be no shutdown helper thread\n");
	else if (kthread_add(shutdown_helper, &shutdown_helper_msg, NULL,
	    &shutdown_helper_thread, 0, 0, "shutdown_helper"))
		printf("Failed to create shutdown helper thread\n");

	/*
	 * XXX This should be configurable - applications that arrange for a
	 * particular thread to process all signals will not want this.
	 */
	if (kthread_add(one_sighandling_thread, NULL, NULL,
	    &at_least_one_sighandling_thread, 0, 0, "one_sighandler"))
		printf("Failed to create at least one signal handling thread\n");
	uhi_mask_all_signals();

	return (0);
}
/*
 * Attach routine for the Cypress APA trackpad: probe the device
 * capabilities, publish a PS/2 IntelliMouse-compatible identity,
 * initialize the gesture state machines, start the polling thread and
 * create the /dev/cyapa%d character device node.
 *
 * Returns 0 on success, ENXIO if the device cannot be initialized.
 */
static int
cyapa_attach(device_t dev)
{
	struct cyapa_softc *sc;
	struct cyapa_cap cap;
	int unit;
	int addr;

	sc = device_get_softc(dev);
	sc->reporting_mode = 1;

	unit = device_get_unit(dev);
	addr = smbus_get_addr(dev);

	if (init_device(dev, &cap, addr, 0))
		return (ENXIO);

	mtx_init(&sc->mutex, "cyapa", NULL, MTX_DEF);

	sc->dev = dev;
	sc->addr = addr;

	knlist_init_mtx(&sc->selinfo.si_note, &sc->mutex);

	/*
	 * Assemble 12-bit resolution/size values: the high nibble comes
	 * from the packed *_xy_high byte, the low 8 bits from the *_low
	 * byte.  NOTE(review): the shift/mask pairs (<<4 vs <<8 with the
	 * same 0x0F00 mask) pick different nibbles of the packed byte —
	 * confirm against the device register layout.
	 */
	sc->cap_resx = ((cap.max_abs_xy_high << 4) & 0x0F00) |
	    cap.max_abs_x_low;
	sc->cap_resy = ((cap.max_abs_xy_high << 8) & 0x0F00) |
	    cap.max_abs_y_low;
	sc->cap_phyx = ((cap.phy_siz_xy_high << 4) & 0x0F00) |
	    cap.phy_siz_x_low;
	sc->cap_phyy = ((cap.phy_siz_xy_high << 8) & 0x0F00) |
	    cap.phy_siz_y_low;
	sc->cap_buttons = cap.buttons;

	device_printf(dev, "%5.5s-%6.6s-%2.2s buttons=%c%c%c res=%dx%d\n",
	    cap.prod_ida, cap.prod_idb, cap.prod_idc,
	    ((cap.buttons & CYAPA_FNGR_LEFT) ? 'L' : '-'),
	    ((cap.buttons & CYAPA_FNGR_MIDDLE) ? 'M' : '-'),
	    ((cap.buttons & CYAPA_FNGR_RIGHT) ? 'R' : '-'),
	    sc->cap_resx, sc->cap_resy);

	/* Present a 5-button PS/2 IntelliMouse-model device to userland. */
	sc->hw.buttons = 5;
	sc->hw.iftype = MOUSE_IF_PS2;
	sc->hw.type = MOUSE_MOUSE;
	sc->hw.model = MOUSE_MODEL_INTELLI;
	sc->hw.hwid = addr;

	sc->mode.protocol = MOUSE_PROTO_PS2;
	sc->mode.rate = 100;
	sc->mode.resolution = 4;
	sc->mode.accelfactor = 1;
	sc->mode.level = 0;
	sc->mode.packetsize = MOUSE_PS2_PACKETSIZE;

	/* Drag/tap gesture state machines start idle. */
	sc->drag_state = D_IDLE;
	sc->draglock_ticks = -1;
	sc->dragwait_ticks = -1;
	sc->tft_state = T_IDLE;
	sc->tft_ticks = -1;
	sc->send_but = 0;

	/* Setup input event tracking */
	cyapa_set_power_mode(sc, CMD_POWER_MODE_IDLE);

	/* Start the polling thread */
	/* NOTE(review): kthread_add() return value is ignored here. */
	kthread_add(cyapa_poll_thread, sc, NULL, NULL, 0, 0, "cyapa-poll");

	sc->devnode = make_dev(&cyapa_cdevsw, unit, UID_ROOT, GID_WHEEL,
	    0600, "cyapa%d", unit);
	sc->devnode->si_drv1 = sc;

	return (0);
}