int
nfs_nfsiodnew(int set_iodwant)
{
	int error, i;
	int newiod;

	if (nfs_numasync >= nfs_iodmax)
		return (-1);
	newiod = -1;
	for (i = 0; i < nfs_iodmax; i++)
		if (nfs_asyncdaemon[i] == 0) {
			nfs_asyncdaemon[i]++;
			newiod = i;
			break;
		}
	if (newiod == -1)
		return (-1);
	if (set_iodwant > 0)
		nfs_iodwant[i] = NFSIOD_CREATED_FOR_NFS_ASYNCIO;
	mtx_unlock(&nfs_iod_mtx);
	error = kproc_create(nfssvc_iod, nfs_asyncdaemon + i, NULL,
	    RFHIGHPID, 0, "nfsiod %d", newiod);
	mtx_lock(&nfs_iod_mtx);
	if (error) {
		if (set_iodwant > 0)
			nfs_iodwant[i] = NFSIOD_NOT_AVAILABLE;
		return (-1);
	}
	nfs_numasync++;
	return (newiod);
}
static int
nfs_nfsiodnew_sync(void)
{
	int error, i;

	mtx_assert(&nfs_iod_mtx, MA_OWNED);
	for (i = 0; i < nfs_iodmax; i++) {
		if (nfs_asyncdaemon[i] == 0) {
			nfs_asyncdaemon[i] = 1;
			break;
		}
	}
	if (i == nfs_iodmax)
		return (0);
	mtx_unlock(&nfs_iod_mtx);
	error = kproc_create(nfssvc_iod, nfs_asyncdaemon + i, NULL,
	    RFHIGHPID, 0, "nfsiod %d", i);
	mtx_lock(&nfs_iod_mtx);
	if (error == 0) {
		nfs_numasync++;
		nfs_iodwant[i] = NFSIOD_AVAILABLE;
	} else
		nfs_asyncdaemon[i] = 0;
	return (error);
}
int
ncl_nfsiodnew(void)
{
	int error, i;
	int newiod;

	if (ncl_numasync >= ncl_iodmax)
		return (-1);
	newiod = -1;
	for (i = 0; i < ncl_iodmax; i++)
		if (nfs_asyncdaemon[i] == 0) {
			nfs_asyncdaemon[i]++;
			newiod = i;
			break;
		}
	if (newiod == -1)
		return (-1);
	mtx_unlock(&ncl_iod_mutex);
	error = kproc_create(nfssvc_iod, nfs_asyncdaemon + i, NULL,
	    RFHIGHPID, 0, "nfsiod %d", newiod);
	mtx_lock(&ncl_iod_mutex);
	if (error)
		return (-1);
	ncl_numasync++;
	return (newiod);
}
VCHIQ_THREAD_T
vchiq_thread_create(int (*threadfn)(void *data),
	void *data,
	const char namefmt[], ...)
{
	VCHIQ_THREAD_T newp;
	va_list ap;
	char name[MAXCOMLEN + 1];
	struct thread_data *slot;

	if (thread_data_slot >= MAX_THREAD_DATA_SLOTS) {
		printf("kthread_create: out of thread data slots\n");
		return (NULL);
	}

	slot = &thread_slots[thread_data_slot];
	slot->data = data;
	slot->threadfn = threadfn;

	va_start(ap, namefmt);
	vsnprintf(name, sizeof(name), namefmt, ap);
	va_end(ap);

	newp = NULL;
	if (kproc_create(kthread_wrapper, (void *)slot, &newp, 0, 0,
	    "%s", name) != 0) {
		/* Just to be sure */
		newp = NULL;
	} else
		thread_data_slot++;

	return newp;
}
static void
isf_disk_insert(struct isf_softc *sc, off_t mediasize)
{
	struct disk *disk;

	sc->isf_doomed = 0;
	sc->isf_erasing = 0;
	sc->isf_bstate = malloc(sizeof(*sc->isf_bstate) *
	    (mediasize / ISF_ERASE_BLOCK), M_ISF, M_ZERO | M_WAITOK);
	kproc_create(&isf_task, sc, &sc->isf_proc, 0, 0, "isf");

	disk = disk_alloc();
	disk->d_drv1 = sc;
	disk->d_name = "isf";
	disk->d_unit = sc->isf_unit;
	disk->d_strategy = isf_disk_strategy;
	disk->d_ioctl = isf_disk_ioctl;
	disk->d_sectorsize = ISF_SECTORSIZE;
	disk->d_mediasize = mediasize;
	disk->d_maxsize = ISF_SECTORSIZE;
	sc->isf_disk = disk;

	if (bootverbose)
		isf_dump_info(sc);

	disk_create(disk, DISK_VERSION);
	device_printf(sc->isf_dev, "%juM flash device\n",
	    (uintmax_t)disk->d_mediasize / (1024 * 1024));
}
static int
ssif_startup(struct ipmi_softc *sc)
{

	return (kproc_create(ssif_loop, sc, &sc->ipmi_kthread, 0, 0,
	    "%s: ssif", device_get_nameunit(sc->ipmi_dev)));
}
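/*
 * For reference, a minimal sketch of the worker body that call sites like
 * ssif_startup() above hand to kproc_create().  Everything here is
 * illustrative rather than taken from any driver in this file: the
 * example_softc structure, its field, and example_loop() are hypothetical.
 * The essential points are that the entry function receives the void *
 * argument passed to kproc_create() and must terminate via kproc_exit(),
 * never by simply returning.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/kthread.h>

struct example_softc {
	int	esc_detaching;	/* (hypothetical) set to request exit */
};

static void
example_loop(void *arg)
{
	struct example_softc *sc = arg;

	for (;;) {
		/* Sleep until work is queued or a wakeup requests exit. */
		tsleep(sc, 0, "exwait", hz);
		if (sc->esc_detaching)
			break;
		/* ... service queued requests here ... */
	}
	/* A kernel process must exit through kproc_exit(). */
	kproc_exit(0);
}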
int
kproc_kthread_add(void (*func)(void *), void *arg,
    struct proc **procptr, struct thread **tdptr,
    int flags, int pages, const char *procname, const char *fmt, ...)
{
	int error;
	va_list ap;
	char buf[100];
	struct thread *td;

	if (*procptr == 0) {
		error = kproc_create(func, arg,
		    procptr, flags, pages, "%s", procname);
		if (error)
			return (error);
		td = FIRST_THREAD_IN_PROC(*procptr);
		if (tdptr)
			*tdptr = td;
		va_start(ap, fmt);
		vsnprintf(td->td_name, sizeof(td->td_name), fmt, ap);
		va_end(ap);
#ifdef KTR
		sched_clear_tdname(td);
#endif
		return (0);
	}
	va_start(ap, fmt);
	vsnprintf(buf, sizeof(buf), fmt, ap);
	va_end(ap);
	error = kthread_add(func, arg, *procptr,
	    tdptr, flags, pages, "%s", buf);
	return (error);
}
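/*
 * A hedged usage sketch for kproc_kthread_add() as defined above: on the
 * first call *procptr is NULL, so a new process named procname is created
 * and its initial thread renamed via fmt; later calls attach additional
 * threads to that same process.  The names here (example_proc, example_td,
 * example_main, "exampled") are illustrative assumptions, not taken from
 * any real subsystem.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/proc.h>
#include <sys/kthread.h>

static void example_main(void *arg);	/* hypothetical thread body */

static struct proc *example_proc;	/* NULL until the first call */
static struct thread *example_td;

static void
example_start(int unit)
{
	int error;

	/* Creates the process on the first call, adds a thread afterward. */
	error = kproc_kthread_add(example_main, NULL, &example_proc,
	    &example_td, 0, 0, "exampled", "example_%d", unit);
	if (error != 0)
		printf("example: kproc_kthread_add failed: %d\n", error);
}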
static int
acpi_tz_cooling_thread_start(struct acpi_tz_softc *sc)
{
	int error;

	ACPI_LOCK(thermal);
	if (sc->tz_cooling_proc_running) {
		ACPI_UNLOCK(thermal);
		return (0);
	}
	sc->tz_cooling_proc_running = TRUE;
	ACPI_UNLOCK(thermal);
	error = 0;
	if (sc->tz_cooling_proc == NULL) {
		error = kproc_create(acpi_tz_cooling_thread, sc,
		    &sc->tz_cooling_proc, RFHIGHPID, 0, "acpi_cooling%d",
		    device_get_unit(sc->tz_dev));
		if (error != 0) {
			device_printf(sc->tz_dev,
			    "could not create thread - %d", error);
			ACPI_LOCK(thermal);
			sc->tz_cooling_proc_running = FALSE;
			ACPI_UNLOCK(thermal);
		}
	}
	return (error);
}
static void
bcm2835_audio_create_worker(struct bcm2835_audio_info *sc)
{
	struct proc *newp;

	if (kproc_create(bcm2835_audio_worker, (void *)sc, &newp, 0, 0,
	    "bcm2835_audio_worker") != 0) {
		printf("failed to create bcm2835_audio_worker\n");
	}
}
/*
 * Start a kernel process.  This is called after a fork() call in
 * mi_startup() in the file kern/init_main.c.
 *
 * This function is used to start "internal" daemons and intended
 * to be called from SYSINIT().
 */
void
kproc_start(const void *udata)
{
	const struct kproc_desc *kp = udata;
	int error;

	error = kproc_create((void (*)(void *))kp->func, NULL,
	    kp->global_procpp, 0, 0, "%s", kp->arg0);
	if (error)
		panic("kproc_start: %s: error %d", kp->arg0, error);
}
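/*
 * As the comment above notes, kproc_start() is customarily reached through
 * a kproc_desc handed to SYSINIT().  A minimal sketch of that registration,
 * with hypothetical names (exampled_main, exampled_proc, "exampled"):
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/kthread.h>

static void exampled_main(void);	/* hypothetical daemon entry point */
static struct proc *exampled_proc;

static struct kproc_desc exampled_kp = {
	"exampled",		/* arg0: process name */
	exampled_main,		/* func: entry point */
	&exampled_proc		/* global_procpp: receives the new proc */
};
SYSINIT(exampled, SI_SUB_KTHREAD_IDLE, SI_ORDER_ANY, kproc_start,
    &exampled_kp);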
static int
vtballoon_attach(device_t dev)
{
	struct vtballoon_softc *sc;
	int error;

	sc = device_get_softc(dev);
	sc->vtballoon_dev = dev;

	VTBALLOON_LOCK_INIT(sc, device_get_nameunit(dev));
	TAILQ_INIT(&sc->vtballoon_pages);

	vtballoon_add_sysctl(sc);

	virtio_set_feature_desc(dev, vtballoon_feature_desc);
	vtballoon_negotiate_features(sc);

	sc->vtballoon_page_frames = malloc(VTBALLOON_PAGES_PER_REQUEST *
	    sizeof(uint32_t), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->vtballoon_page_frames == NULL) {
		error = ENOMEM;
		device_printf(dev,
		    "cannot allocate page frame request array\n");
		goto fail;
	}

	error = vtballoon_alloc_virtqueues(sc);
	if (error) {
		device_printf(dev, "cannot allocate virtqueues\n");
		goto fail;
	}

	error = virtio_setup_intr(dev, INTR_TYPE_MISC);
	if (error) {
		device_printf(dev, "cannot setup virtqueue interrupts\n");
		goto fail;
	}

	error = kproc_create(vtballoon_thread, sc, &sc->vtballoon_kproc,
	    0, 0, "virtio_balloon");
	if (error) {
		device_printf(dev, "cannot create balloon kproc\n");
		goto fail;
	}

	virtqueue_enable_intr(sc->vtballoon_inflate_vq);
	virtqueue_enable_intr(sc->vtballoon_deflate_vq);

fail:
	if (error)
		vtballoon_detach(dev);

	return (error);
}
static int
opalflash_attach(device_t dev)
{
	struct opalflash_softc *sc;
	phandle_t node;
	cell_t flash_blocksize, opal_id;
	uint32_t regs[2];

	sc = device_get_softc(dev);
	sc->sc_dev = dev;

	node = ofw_bus_get_node(dev);
	OF_getencprop(node, "ibm,opal-id", &opal_id, sizeof(opal_id));
	sc->sc_opal_id = opal_id;

	if (OF_getencprop(node, "ibm,flash-block-size",
	    &flash_blocksize, sizeof(flash_blocksize)) < 0) {
		device_printf(dev, "Cannot determine flash block size.\n");
		return (ENXIO);
	}

	if (!OF_hasprop(node, "no-erase"))
		sc->sc_erase = true;

	OPALFLASH_LOCK_INIT(sc);

	if (OF_getencprop(node, "reg", regs, sizeof(regs)) < 0) {
		device_printf(dev, "Unable to get flash size.\n");
		return (ENXIO);
	}

	sc->sc_disk = disk_alloc();
	sc->sc_disk->d_name = "opalflash";
	sc->sc_disk->d_open = opalflash_open;
	sc->sc_disk->d_close = opalflash_close;
	sc->sc_disk->d_strategy = opalflash_strategy;
	sc->sc_disk->d_ioctl = opalflash_ioctl;
	sc->sc_disk->d_getattr = opalflash_getattr;
	sc->sc_disk->d_drv1 = sc;
	sc->sc_disk->d_maxsize = DFLTPHYS;
	sc->sc_disk->d_mediasize = regs[1];
	sc->sc_disk->d_unit = device_get_unit(sc->sc_dev);
	sc->sc_disk->d_sectorsize = FLASH_BLOCKSIZE;
	sc->sc_disk->d_stripesize = flash_blocksize;
	sc->sc_disk->d_dump = NULL;

	disk_create(sc->sc_disk, DISK_VERSION);
	bioq_init(&sc->sc_bio_queue);

	kproc_create(&opalflash_task, sc, &sc->sc_p, 0, 0,
	    "task: OPAL Flash");

	return (0);
}
int
pmclog_proc_create(struct thread *td, void **handlep)
{
	struct pmclog_proc_init_args *ia;
	int error;

	ia = malloc(sizeof(*ia), M_TEMP, M_WAITOK | M_ZERO);
	error = kproc_create(pmclog_loop, ia, &ia->kthr,
	    RFHIGHPID, 0, "hwpmc: proc(%d)", td->td_proc->p_pid);
	if (error == 0)
		*handlep = ia;
	return (error);
}
static int
ow_temp_attach(device_t dev)
{
	struct ow_temp_softc *sc;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->type = ow_get_family(dev);
	SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "temperature", CTLFLAG_RD | CTLTYPE_INT,
	    &sc->temp, 0, sysctl_handle_int,
	    "IK3", "Current Temperature");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "badcrc", CTLFLAG_RD,
	    &sc->bad_crc, 0,
	    "Number of Bad CRC on reading scratchpad");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "badread", CTLFLAG_RD,
	    &sc->bad_reads, 0,
	    "Number of errors on reading scratchpad");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "reading_interval", CTLFLAG_RW,
	    &sc->reading_interval, 0,
	    "ticks between reads");
	SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
	    SYSCTL_CHILDREN(device_get_sysctl_tree(dev)), OID_AUTO,
	    "parasite", CTLFLAG_RW,
	    &sc->parasite, 0,
	    "In Parasite mode");
	/*
	 * Just do this for unit 0 to avoid locking
	 * the ow bus until that code can be put
	 * into place.
	 */
	sc->temp = -1;
	sc->reading_interval = 10 * hz;
	mtx_init(&sc->temp_lock, "lock for doing temperature", NULL, MTX_DEF);
	/* Start the thread */
	if (kproc_create(ow_temp_event_thread, sc, &sc->event_thread, 0, 0,
	    "%s event thread", device_get_nameunit(dev))) {
		device_printf(dev, "unable to create event thread.\n");
		panic("cbb_create_event_thread");
	}

	return 0;
}
static int
htif_blk_attach(device_t dev)
{
	struct htif_blk_softc *sc;
	char prefix[] = " size=";
	char *str;
	long size;

	sc = device_get_softc(dev);
	sc->dev = dev;

	mtx_init(&sc->htif_io_mtx, device_get_nameunit(dev), "htif_blk",
	    MTX_DEF);
	HTIF_BLK_LOCK_INIT(sc);

	str = strstr(htif_get_id(dev), prefix);

	size = strtol((str + 6), NULL, 10);
	if (size == 0) {
		return (ENXIO);
	}

	sc->index = htif_get_index(dev);
	if (sc->index < 0)
		return (EINVAL);
	htif_setup_intr(sc->index, htif_blk_intr, sc);

	sc->disk = disk_alloc();
	sc->disk->d_drv1 = sc;

	sc->disk->d_maxsize = 4096; /* Max transfer */
	sc->disk->d_name = "htif_blk";
	sc->disk->d_open = htif_blk_open;
	sc->disk->d_close = htif_blk_close;
	sc->disk->d_strategy = htif_blk_strategy;
	sc->disk->d_unit = 0;
	sc->disk->d_sectorsize = SECTOR_SIZE;
	sc->disk->d_mediasize = size;
	disk_create(sc->disk, DISK_VERSION);

	bioq_init(&sc->bio_queue);

	sc->running = 1;

	kproc_create(&htif_blk_task, sc, &sc->p, 0, 0, "%s: transfer",
	    device_get_nameunit(dev));

	return (0);
}
static int
mambodisk_attach(device_t dev)
{
	struct mambodisk_softc *sc;
	struct disk *d;
	intmax_t mb;
	char unit;

	sc = device_get_softc(dev);
	sc->dev = dev;
	MBODISK_LOCK_INIT(sc);

	d = sc->disk = disk_alloc();
	d->d_open = mambodisk_open;
	d->d_close = mambodisk_close;
	d->d_strategy = mambodisk_strategy;
	d->d_name = "mambodisk";
	d->d_drv1 = sc;
	d->d_maxsize = MAXPHYS;		/* Maybe ask bridge? */

	d->d_sectorsize = 512;
	sc->maxblocks = mambocall(MAMBO_DISK_INFO, MAMBO_INFO_BLKSZ, d->d_unit)
	    / 512;

	d->d_unit = device_get_unit(dev);
	d->d_mediasize = mambocall(MAMBO_DISK_INFO, MAMBO_INFO_DEVSZ, d->d_unit)
	    * 1024ULL;			/* Mambo gives size in KB */

	mb = d->d_mediasize >> 20;	/* 1MiB == 1 << 20 */
	unit = 'M';
	if (mb >= 10240) {		/* 1GiB = 1024 MiB */
		unit = 'G';
		mb /= 1024;
	}
	device_printf(dev, "%ju%cB, %d byte sectors\n", mb, unit,
	    d->d_sectorsize);
	disk_create(d, DISK_VERSION);
	bioq_init(&sc->bio_queue);

	sc->running = 1;
	kproc_create(&mambodisk_task, sc, &sc->p, 0, 0, "task: mambo hd");

	return (0);
}
DECLHIDDEN(int) rtThreadNativeCreate(PRTTHREADINT pThreadInt, PRTNATIVETHREAD pNativeThread)
{
	int rc;
	struct proc *pProc;

#if __FreeBSD_version >= 800002
	rc = kproc_create(rtThreadNativeMain, pThreadInt, &pProc,
	    RFHIGHPID, 0, "%s", pThreadInt->szName);
#else
	rc = kthread_create(rtThreadNativeMain, pThreadInt, &pProc,
	    RFHIGHPID, 0, "%s", pThreadInt->szName);
#endif
	if (!rc) {
		*pNativeThread = (RTNATIVETHREAD)FIRST_THREAD_IN_PROC(pProc);
		rc = VINF_SUCCESS;
	} else
		rc = RTErrConvertFromErrno(rc);
	return rc;
}
static void
pagezero_start(void __unused *arg)
{
	int error;
	struct proc *p;
	struct thread *td;

	error = kproc_create(vm_pagezero, NULL, &p, RFSTOPPED, 0,
	    "pagezero");
	if (error)
		panic("pagezero_start: error %d\n", error);
	td = FIRST_THREAD_IN_PROC(p);
	thread_lock(td);

	/* We're an idle task, don't count us in the load. */
	td->td_flags |= TDF_NOLOAD;
	sched_class(td, PRI_IDLE);
	sched_prio(td, PRI_MAX_IDLE);
	sched_add(td, SRQ_BORING);
	thread_unlock(td);
}
static void
soaio_kproc_create(void *context, int pending)
{
	struct proc *p;
	int error, id;

	mtx_lock(&soaio_jobs_lock);
	for (;;) {
		if (soaio_num_procs < soaio_target_procs) {
			/* Must create */
		} else if (soaio_num_procs >= soaio_max_procs) {
			/*
			 * Hit the limit on kernel processes, don't
			 * create another one.
			 */
			break;
		} else if (soaio_queued <= soaio_idle + soaio_starting) {
			/*
			 * No more AIO jobs waiting for a process to be
			 * created, so stop.
			 */
			break;
		}
		soaio_starting++;
		mtx_unlock(&soaio_jobs_lock);

		id = alloc_unr(soaio_kproc_unr);
		error = kproc_create(soaio_kproc_loop, (void *)(intptr_t)id,
		    &p, 0, 0, "soaiod%d", id);
		if (error != 0) {
			free_unr(soaio_kproc_unr, id);
			mtx_lock(&soaio_jobs_lock);
			soaio_starting--;
			break;
		}

		mtx_lock(&soaio_jobs_lock);
		soaio_num_procs++;
	}
	mtx_unlock(&soaio_jobs_lock);
}
static struct g_geom *
g_uzip_taste(struct g_class *mp, struct g_provider *pp, int flags)
{
	int error;
	uint32_t i, total_offsets, offsets_read, blk;
	void *buf;
	struct cloop_header *header;
	struct g_consumer *cp;
	struct g_geom *gp;
	struct g_provider *pp2;
	struct g_uzip_softc *sc;
	enum {
		GEOM_UZIP = 1,
		GEOM_ULZMA
	} type;

	g_trace(G_T_TOPOLOGY, "%s(%s,%s)", __func__, mp->name, pp->name);
	g_topology_assert();

	/* Skip providers that are already open for writing. */
	if (pp->acw > 0)
		return (NULL);

	buf = NULL;

	/*
	 * Create geom instance.
	 */
	gp = g_new_geomf(mp, "%s.uzip", pp->name);
	cp = g_new_consumer(gp);
	error = g_attach(cp, pp);
	if (error == 0)
		error = g_access(cp, 1, 0, 0);
	if (error) {
		goto e1;
	}
	g_topology_unlock();

	/*
	 * Read cloop header, look for CLOOP magic, perform
	 * other validity checks.
	 */
	DPRINTF(GUZ_DBG_INFO, ("%s: media sectorsize %u, mediasize %jd\n",
	    gp->name, pp->sectorsize, (intmax_t)pp->mediasize));
	buf = g_read_data(cp, 0, pp->sectorsize, NULL);
	if (buf == NULL)
		goto e2;
	header = (struct cloop_header *) buf;
	if (strncmp(header->magic, CLOOP_MAGIC_START,
	    sizeof(CLOOP_MAGIC_START) - 1) != 0) {
		DPRINTF(GUZ_DBG_ERR, ("%s: no CLOOP magic\n", gp->name));
		goto e3;
	}

	switch (header->magic[CLOOP_OFS_COMPR]) {
	case CLOOP_COMP_LZMA:
	case CLOOP_COMP_LZMA_DDP:
		type = GEOM_ULZMA;
		if (header->magic[CLOOP_OFS_VERSN] < CLOOP_MINVER_LZMA) {
			DPRINTF(GUZ_DBG_ERR, ("%s: image version too old\n",
			    gp->name));
			goto e3;
		}
		DPRINTF(GUZ_DBG_INFO, ("%s: GEOM_UZIP_LZMA image found\n",
		    gp->name));
		break;
	case CLOOP_COMP_LIBZ:
	case CLOOP_COMP_LIBZ_DDP:
		type = GEOM_UZIP;
		if (header->magic[CLOOP_OFS_VERSN] < CLOOP_MINVER_ZLIB) {
			DPRINTF(GUZ_DBG_ERR, ("%s: image version too old\n",
			    gp->name));
			goto e3;
		}
		DPRINTF(GUZ_DBG_INFO, ("%s: GEOM_UZIP_ZLIB image found\n",
		    gp->name));
		break;
	default:
		DPRINTF(GUZ_DBG_ERR, ("%s: unsupported image type\n",
		    gp->name));
		goto e3;
	}

	/*
	 * Initialize softc and read offsets.
	 */
	sc = malloc(sizeof(*sc), M_GEOM_UZIP, M_WAITOK | M_ZERO);
	gp->softc = sc;
	sc->blksz = ntohl(header->blksz);
	sc->nblocks = ntohl(header->nblocks);
	if (sc->blksz % 512 != 0) {
		printf("%s: block size (%u) should be multiple of 512.\n",
		    gp->name, sc->blksz);
		goto e4;
	}
	if (sc->blksz > MAX_BLKSZ) {
		printf("%s: block size (%u) should not be larger than %d.\n",
		    gp->name, sc->blksz, MAX_BLKSZ);
	}
	total_offsets = sc->nblocks + 1;
	if (sizeof(struct cloop_header) +
	    total_offsets * sizeof(uint64_t) > pp->mediasize) {
		printf("%s: media too small for %u blocks\n",
		    gp->name, sc->nblocks);
		goto e4;
	}
	sc->toc = malloc(total_offsets * sizeof(struct g_uzip_blk),
	    M_GEOM_UZIP, M_WAITOK | M_ZERO);
	offsets_read = MIN(total_offsets,
	    (pp->sectorsize - sizeof(*header)) / sizeof(uint64_t));
	for (i = 0; i < offsets_read; i++) {
		sc->toc[i].offset = be64toh(((uint64_t *) (header + 1))[i]);
		sc->toc[i].blen = BLEN_UNDEF;
	}
	DPRINTF(GUZ_DBG_INFO, ("%s: %u offsets in the first sector\n",
	    gp->name, offsets_read));
	for (blk = 1; offsets_read < total_offsets; blk++) {
		uint32_t nread;

		free(buf, M_GEOM);
		buf = g_read_data(
		    cp, blk * pp->sectorsize, pp->sectorsize, NULL);
		if (buf == NULL)
			goto e5;
		nread = MIN(total_offsets - offsets_read,
		    pp->sectorsize / sizeof(uint64_t));
		DPRINTF(GUZ_DBG_TOC, ("%s: %u offsets read from sector %d\n",
		    gp->name, nread, blk));
		for (i = 0; i < nread; i++) {
			sc->toc[offsets_read + i].offset =
			    be64toh(((uint64_t *) buf)[i]);
			sc->toc[offsets_read + i].blen = BLEN_UNDEF;
		}
		offsets_read += nread;
	}
	free(buf, M_GEOM);
	buf = NULL;
	offsets_read -= 1;
	DPRINTF(GUZ_DBG_INFO, ("%s: done reading %u block offsets from %u "
	    "sectors\n", gp->name, offsets_read, blk));
	if (sc->nblocks != offsets_read) {
		DPRINTF(GUZ_DBG_ERR, ("%s: read %s offsets than expected "
		    "blocks\n", gp->name,
		    sc->nblocks < offsets_read ? "more" : "less"));
		goto e5;
	}
	/*
	 * "Fake" last+1 block, to make it easier for the TOC parser to
	 * iterate without making the last element a special case.
	 */
	sc->toc[sc->nblocks].offset = pp->mediasize;
	/* Massage TOC (table of contents), make sure it is sound */
	if (g_uzip_parse_toc(sc, pp, gp) != 0) {
		DPRINTF(GUZ_DBG_ERR, ("%s: TOC error\n", gp->name));
		goto e5;
	}
	mtx_init(&sc->last_mtx, "geom_uzip cache", NULL, MTX_DEF);
	mtx_init(&sc->queue_mtx, "geom_uzip wrkthread", NULL, MTX_DEF);
	bioq_init(&sc->bio_queue);
	sc->last_blk = -1;
	sc->last_buf = malloc(sc->blksz, M_GEOM_UZIP, M_WAITOK);
	sc->req_total = 0;
	sc->req_cached = 0;

	if (type == GEOM_UZIP) {
		sc->dcp = g_uzip_zlib_ctor(sc->blksz);
	} else {
		sc->dcp = g_uzip_lzma_ctor(sc->blksz);
	}
	if (sc->dcp == NULL) {
		goto e6;
	}

	sc->uzip_do = &g_uzip_do;

	error = kproc_create(g_uzip_wrkthr, sc, &sc->procp, 0, 0, "%s",
	    gp->name);
	if (error != 0) {
		goto e7;
	}

	g_topology_lock();
	pp2 = g_new_providerf(gp, "%s", gp->name);
	pp2->sectorsize = 512;
	pp2->mediasize = (off_t)sc->nblocks * sc->blksz;
	pp2->stripesize = pp->stripesize;
	pp2->stripeoffset = pp->stripeoffset;
	g_error_provider(pp2, 0);
	g_access(cp, -1, 0, 0);

	DPRINTF(GUZ_DBG_INFO, ("%s: taste ok (%d, %jd), (%d, %d), %x\n",
	    gp->name, pp2->sectorsize, (intmax_t)pp2->mediasize,
	    pp2->stripeoffset, pp2->stripesize, pp2->flags));
	DPRINTF(GUZ_DBG_INFO, ("%s: %u x %u blocks\n", gp->name, sc->nblocks,
	    sc->blksz));
	return (gp);

e7:
	sc->dcp->free(sc->dcp);
e6:
	free(sc->last_buf, M_GEOM);
	mtx_destroy(&sc->queue_mtx);
	mtx_destroy(&sc->last_mtx);
e5:
	free(sc->toc, M_GEOM);
e4:
	free(gp->softc, M_GEOM_UZIP);
e3:
	if (buf != NULL) {
		free(buf, M_GEOM);
	}
e2:
	g_topology_lock();
	g_access(cp, -1, 0, 0);
e1:
	g_detach(cp);
	g_destroy_consumer(cp);
	g_destroy_geom(gp);

	return (NULL);
}
static int
cbb_pci_attach(device_t brdev)
{
	static int curr_bus_number = 2; /* XXX EVILE BAD (see below) */
	struct cbb_softc *sc = (struct cbb_softc *)device_get_softc(brdev);
	struct sysctl_ctx_list *sctx;
	struct sysctl_oid *soid;
	int rid;
	device_t parent;
	uint32_t pribus;

	parent = device_get_parent(brdev);
	mtx_init(&sc->mtx, device_get_nameunit(brdev), "cbb", MTX_DEF);
	sc->chipset = cbb_chipset(pci_get_devid(brdev), NULL);
	sc->dev = brdev;
	sc->cbdev = NULL;
	sc->exca[0].pccarddev = NULL;
	sc->domain = pci_get_domain(brdev);
	sc->secbus = pci_read_config(brdev, PCIR_SECBUS_2, 1);
	sc->subbus = pci_read_config(brdev, PCIR_SUBBUS_2, 1);
	sc->pribus = pcib_get_bus(parent);
	SLIST_INIT(&sc->rl);
	cbb_powerstate_d0(brdev);

	rid = CBBR_SOCKBASE;
	sc->base_res = bus_alloc_resource_any(brdev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (!sc->base_res) {
		device_printf(brdev, "Could not map register memory\n");
		mtx_destroy(&sc->mtx);
		return (ENOMEM);
	} else {
		DEVPRINTF((brdev, "Found memory at %08lx\n",
		    rman_get_start(sc->base_res)));
	}

	sc->bst = rman_get_bustag(sc->base_res);
	sc->bsh = rman_get_bushandle(sc->base_res);
	exca_init(&sc->exca[0], brdev, sc->bst, sc->bsh, CBB_EXCA_OFFSET);
	sc->exca[0].flags |= EXCA_HAS_MEMREG_WIN;
	sc->exca[0].chipset = EXCA_CARDBUS;
	sc->chipinit = cbb_chipinit;
	sc->chipinit(sc);

	/* Sysctls */
	sctx = device_get_sysctl_ctx(brdev);
	soid = device_get_sysctl_tree(brdev);
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "domain",
	    CTLFLAG_RD, &sc->domain, 0, "Domain number");
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "pribus",
	    CTLFLAG_RD, &sc->pribus, 0, "Primary bus number");
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "secbus",
	    CTLFLAG_RD, &sc->secbus, 0, "Secondary bus number");
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "subbus",
	    CTLFLAG_RD, &sc->subbus, 0, "Subordinate bus number");
#if 0
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "memory",
	    CTLFLAG_RD, &sc->subbus, 0, "Memory window open");
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "premem",
	    CTLFLAG_RD, &sc->subbus, 0, "Prefetch memory window open");
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "io1",
	    CTLFLAG_RD, &sc->subbus, 0, "io range 1 open");
	SYSCTL_ADD_UINT(sctx, SYSCTL_CHILDREN(soid), OID_AUTO, "io2",
	    CTLFLAG_RD, &sc->subbus, 0, "io range 2 open");
#endif
	/*
	 * This is a gross hack.  We should be scanning the entire pci
	 * tree, assigning bus numbers in a way such that we (1) can
	 * reserve 1 extra bus just in case and (2) all sub busses
	 * are in an appropriate range.
	 */
	DEVPRINTF((brdev, "Secondary bus is %d\n", sc->secbus));
	pribus = pci_read_config(brdev, PCIR_PRIBUS_2, 1);
	if (sc->secbus == 0 || sc->pribus != pribus) {
		if (curr_bus_number <= sc->pribus)
			curr_bus_number = sc->pribus + 1;
		if (pribus != sc->pribus) {
			DEVPRINTF((brdev, "Setting primary bus to %d\n",
			    sc->pribus));
			pci_write_config(brdev, PCIR_PRIBUS_2, sc->pribus, 1);
		}
		sc->secbus = curr_bus_number++;
		sc->subbus = curr_bus_number++;
		DEVPRINTF((brdev, "Secondary bus set to %d subbus %d\n",
		    sc->secbus, sc->subbus));
		pci_write_config(brdev, PCIR_SECBUS_2, sc->secbus, 1);
		pci_write_config(brdev, PCIR_SUBBUS_2, sc->subbus, 1);
	}

	/* attach children */
	sc->cbdev = device_add_child(brdev, "cardbus", -1);
	if (sc->cbdev == NULL)
		DEVPRINTF((brdev, "WARNING: cannot add cardbus bus.\n"));
	else if (device_probe_and_attach(sc->cbdev) != 0)
		DEVPRINTF((brdev, "WARNING: cannot attach cardbus bus!\n"));

	sc->exca[0].pccarddev = device_add_child(brdev, "pccard", -1);
	if (sc->exca[0].pccarddev == NULL)
		DEVPRINTF((brdev, "WARNING: cannot add pccard bus.\n"));
	else if (device_probe_and_attach(sc->exca[0].pccarddev) != 0)
		DEVPRINTF((brdev, "WARNING: cannot attach pccard bus.\n"));

	/* Map and establish the interrupt. */
	rid = 0;
	sc->irq_res = bus_alloc_resource_any(brdev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(brdev, "Unable to map IRQ...\n");
		goto err;
	}

	if (bus_setup_intr(brdev, sc->irq_res, INTR_TYPE_AV | INTR_MPSAFE,
	    cbb_pci_filt, NULL, sc, &sc->intrhand)) {
		device_printf(brdev, "couldn't establish interrupt\n");
		goto err;
	}

	/* reset 16-bit pcmcia bus */
	exca_clrb(&sc->exca[0], EXCA_INTR, EXCA_INTR_RESET);

	/* turn off power */
	cbb_power(brdev, CARD_OFF);

	/* CSC Interrupt: Card detect interrupt on */
	cbb_setb(sc, CBB_SOCKET_MASK, CBB_SOCKET_MASK_CD);

	/* reset interrupt */
	cbb_set(sc, CBB_SOCKET_EVENT, cbb_get(sc, CBB_SOCKET_EVENT));

	if (bootverbose)
		cbb_print_config(brdev);

	/* Start the thread */
	if (kproc_create(cbb_event_thread, sc, &sc->event_thread, 0, 0,
	    "%s event thread", device_get_nameunit(brdev))) {
		device_printf(brdev, "unable to create event thread.\n");
		panic("cbb_create_event_thread");
	}
	sc->sc_root_token = root_mount_hold(device_get_nameunit(sc->dev));
	return (0);

err:
	if (sc->irq_res)
		bus_release_resource(brdev, SYS_RES_IRQ, 0, sc->irq_res);
	if (sc->base_res) {
		bus_release_resource(brdev, SYS_RES_MEMORY, CBBR_SOCKBASE,
		    sc->base_res);
	}
	mtx_destroy(&sc->mtx);
	return (ENOMEM);
}
int
pmclog_configure_log(struct pmc_mdep *md, struct pmc_owner *po, int logfd)
{
	int error;
	struct proc *p;

	/*
	 * As long as it is possible to get a LOR between pmc_sx lock and
	 * proctree/allproc sx locks used for adding a new process, assure
	 * the former is not held here.
	 */
	sx_assert(&pmc_sx, SA_UNLOCKED);
	PMCDBG(LOG,CFG,1, "config po=%p logfd=%d", po, logfd);

	p = po->po_owner;

	/* return EBUSY if a log file was already present */
	if (po->po_flags & PMC_PO_OWNS_LOGFILE)
		return (EBUSY);

	KASSERT(po->po_kthread == NULL,
	    ("[pmclog,%d] po=%p kthread (%p) already present", __LINE__, po,
		po->po_kthread));
	KASSERT(po->po_file == NULL,
	    ("[pmclog,%d] po=%p file (%p) already present", __LINE__, po,
		po->po_file));

	/* get a reference to the file state */
	error = fget_write(curthread, logfd, CAP_WRITE, &po->po_file);
	if (error)
		goto error;

	/* mark process as owning a log file */
	po->po_flags |= PMC_PO_OWNS_LOGFILE;
	error = kproc_create(pmclog_loop, po, &po->po_kthread,
	    RFHIGHPID, 0, "hwpmc: proc(%d)", p->p_pid);
	if (error)
		goto error;

	/* mark process as using HWPMCs */
	PROC_LOCK(p);
	p->p_flag |= P_HWPMC;
	PROC_UNLOCK(p);

	/* create a log initialization entry */
	PMCLOG_RESERVE_WITH_ERROR(po, INITIALIZE,
	    sizeof(struct pmclog_initialize));
	PMCLOG_EMIT32(PMC_VERSION);
	PMCLOG_EMIT32(md->pmd_cputype);
	PMCLOG_DESPATCH(po);

	return (0);

 error:
	/* shutdown the thread */
	if (po->po_kthread)
		pmclog_stop_kthread(po);

	KASSERT(po->po_kthread == NULL, ("[pmclog,%d] po=%p kthread not "
	    "stopped", __LINE__, po));

	if (po->po_file)
		(void) fdrop(po->po_file, curthread);
	po->po_file = NULL;	/* clear file and error state */
	po->po_error = 0;

	return (error);
}
static void
g_bde_create_geom(struct gctl_req *req, struct g_class *mp, struct g_provider *pp)
{
	struct g_geom *gp;
	struct g_consumer *cp;
	struct g_bde_key *kp;
	int error, i;
	u_int sectorsize;
	off_t mediasize;
	struct g_bde_softc *sc;
	void *pass;
	void *key;

	g_trace(G_T_TOPOLOGY, "g_bde_create_geom(%s, %s)", mp->name, pp->name);
	g_topology_assert();
	gp = NULL;

	gp = g_new_geomf(mp, "%s.bde", pp->name);
	cp = g_new_consumer(gp);
	g_attach(cp, pp);
	error = g_access(cp, 1, 1, 1);
	if (error) {
		g_detach(cp);
		g_destroy_consumer(cp);
		g_destroy_geom(gp);
		gctl_error(req, "could not access consumer");
		return;
	}
	pass = NULL;
	key = NULL;
	do {
		pass = gctl_get_param(req, "pass", &i);
		if (pass == NULL || i != SHA512_DIGEST_LENGTH) {
			gctl_error(req, "No usable key presented");
			break;
		}
		key = gctl_get_param(req, "key", &i);
		if (key != NULL && i != 16) {
			gctl_error(req, "Invalid key presented");
			break;
		}
		sectorsize = cp->provider->sectorsize;
		mediasize = cp->provider->mediasize;
		sc = g_malloc(sizeof(struct g_bde_softc), M_WAITOK | M_ZERO);
		gp->softc = sc;
		sc->geom = gp;
		sc->consumer = cp;

		error = g_bde_decrypt_lock(sc, pass, key,
		    mediasize, sectorsize, NULL);
		bzero(sc->sha2, sizeof sc->sha2);
		if (error)
			break;
		kp = &sc->key;

		/* Initialize helper-fields */
		kp->keys_per_sector = kp->sectorsize / G_BDE_SKEYLEN;
		kp->zone_cont = kp->keys_per_sector * kp->sectorsize;
		kp->zone_width = kp->zone_cont + kp->sectorsize;
		kp->media_width = kp->sectorN - kp->sector0 -
		    G_BDE_MAXKEYS * kp->sectorsize;

		/* Our external parameters */
		sc->zone_cont = kp->zone_cont;
		sc->mediasize = g_bde_max_sector(kp);
		sc->sectorsize = kp->sectorsize;

		TAILQ_INIT(&sc->freelist);
		TAILQ_INIT(&sc->worklist);
		mtx_init(&sc->worklist_mutex, "g_bde_worklist", NULL,
		    MTX_DEF);
		/* XXX: error check */
		kproc_create(g_bde_worker, gp, &sc->thread, 0, 0,
		    "g_bde %s", gp->name);
		pp = g_new_providerf(gp, "%s", gp->name);
		pp->stripesize = kp->zone_cont;
		pp->stripeoffset = 0;
		pp->mediasize = sc->mediasize;
		pp->sectorsize = sc->sectorsize;
		g_error_provider(pp, 0);
		break;
	} while (0);
	if (pass != NULL)
		bzero(pass, SHA512_DIGEST_LENGTH);
	if (key != NULL)
		bzero(key, 16);
	if (error == 0)
		return;
	g_access(cp, -1, -1, -1);
	g_detach(cp);
	g_destroy_consumer(cp);
	if (gp->softc != NULL)
		g_free(gp->softc);
	g_destroy_geom(gp);
	switch (error) {
	case ENOENT:
		gctl_error(req, "Lock was destroyed");
		break;
	case ESRCH:
		gctl_error(req, "Lock was nuked");
		break;
	case EINVAL:
		gctl_error(req, "Could not open lock");
		break;
	case ENOTDIR:
		gctl_error(req, "Lock not found");
		break;
	default:
		gctl_error(req, "Could not open lock (%d)", error);
		break;
	}
	return;
}
static int
acpi_tz_attach(device_t dev)
{
	struct acpi_tz_softc *sc;
	struct acpi_softc *acpi_sc;
	int error;
	char oidname[8];

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	sc = device_get_softc(dev);
	sc->tz_dev = dev;
	sc->tz_handle = acpi_get_handle(dev);
	sc->tz_requested = TZ_ACTIVE_NONE;
	sc->tz_active = TZ_ACTIVE_UNKNOWN;
	sc->tz_thflags = TZ_THFLAG_NONE;
	sc->tz_cooling_proc = NULL;
	sc->tz_cooling_proc_running = FALSE;
	sc->tz_cooling_active = FALSE;
	sc->tz_cooling_updated = FALSE;
	sc->tz_cooling_enabled = FALSE;

	/*
	 * Parse the current state of the thermal zone and build control
	 * structures.  We don't need to worry about interference with the
	 * control thread since we haven't fully attached this device yet.
	 */
	if ((error = acpi_tz_establish(sc)) != 0)
		return (error);

	/*
	 * Register for any Notify events sent to this zone.
	 */
	AcpiInstallNotifyHandler(sc->tz_handle, ACPI_DEVICE_NOTIFY,
	    acpi_tz_notify_handler, sc);

	/*
	 * Create our sysctl nodes.
	 *
	 * XXX we need a mechanism for adding nodes under ACPI.
	 */
	if (device_get_unit(dev) == 0) {
		acpi_sc = acpi_device_get_parent_softc(dev);
		sysctl_ctx_init(&acpi_tz_sysctl_ctx);
		acpi_tz_sysctl_tree = SYSCTL_ADD_NODE(&acpi_tz_sysctl_ctx,
		    SYSCTL_CHILDREN(acpi_sc->acpi_sysctl_tree),
		    OID_AUTO, "thermal", CTLFLAG_RD, 0, "");
		SYSCTL_ADD_INT(&acpi_tz_sysctl_ctx,
		    SYSCTL_CHILDREN(acpi_tz_sysctl_tree),
		    OID_AUTO, "min_runtime", CTLFLAG_RW,
		    &acpi_tz_min_runtime, 0,
		    "minimum cooling run time in sec");
		SYSCTL_ADD_INT(&acpi_tz_sysctl_ctx,
		    SYSCTL_CHILDREN(acpi_tz_sysctl_tree),
		    OID_AUTO, "polling_rate", CTLFLAG_RW,
		    &acpi_tz_polling_rate, 0,
		    "monitor polling interval in seconds");
		SYSCTL_ADD_INT(&acpi_tz_sysctl_ctx,
		    SYSCTL_CHILDREN(acpi_tz_sysctl_tree), OID_AUTO,
		    "user_override", CTLFLAG_RW, &acpi_tz_override, 0,
		    "allow override of thermal settings");
	}
	sysctl_ctx_init(&sc->tz_sysctl_ctx);
	sprintf(oidname, "tz%d", device_get_unit(dev));
	sc->tz_sysctl_tree = SYSCTL_ADD_NODE(&sc->tz_sysctl_ctx,
	    SYSCTL_CHILDREN(acpi_tz_sysctl_tree), OID_AUTO,
	    oidname, CTLFLAG_RD, 0, "");
	SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
	    OID_AUTO, "temperature", CTLTYPE_INT | CTLFLAG_RD,
	    &sc->tz_temperature, 0, sysctl_handle_int, "IK",
	    "current thermal zone temperature");
	SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
	    OID_AUTO, "active", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, acpi_tz_active_sysctl, "I", "cooling is active");
	SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
	    OID_AUTO, "passive_cooling", CTLTYPE_INT | CTLFLAG_RW,
	    sc, 0, acpi_tz_cooling_sysctl, "I",
	    "enable passive (speed reduction) cooling");
	SYSCTL_ADD_INT(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
	    OID_AUTO, "thermal_flags", CTLFLAG_RD,
	    &sc->tz_thflags, 0, "thermal zone flags");
	SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
	    OID_AUTO, "_PSV", CTLTYPE_INT | CTLFLAG_RW,
	    sc, offsetof(struct acpi_tz_softc, tz_zone.psv),
	    acpi_tz_temp_sysctl, "IK", "passive cooling temp setpoint");
	SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
	    OID_AUTO, "_HOT", CTLTYPE_INT | CTLFLAG_RW,
	    sc, offsetof(struct acpi_tz_softc, tz_zone.hot),
	    acpi_tz_temp_sysctl, "IK",
	    "too hot temp setpoint (suspend now)");
	SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
	    OID_AUTO, "_CRT", CTLTYPE_INT | CTLFLAG_RW,
	    sc, offsetof(struct acpi_tz_softc, tz_zone.crt),
	    acpi_tz_temp_sysctl, "IK",
	    "critical temp setpoint (shutdown now)");
	SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
	    OID_AUTO, "_ACx", CTLTYPE_INT | CTLFLAG_RD,
	    &sc->tz_zone.ac, sizeof(sc->tz_zone.ac),
	    sysctl_handle_opaque, "IK", "");
	SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
	    OID_AUTO, "_TC1", CTLTYPE_INT | CTLFLAG_RW,
	    sc, offsetof(struct acpi_tz_softc, tz_zone.tc1),
	    acpi_tz_passive_sysctl, "I",
	    "thermal constant 1 for passive cooling");
	SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
	    OID_AUTO, "_TC2", CTLTYPE_INT | CTLFLAG_RW,
	    sc, offsetof(struct acpi_tz_softc, tz_zone.tc2),
	    acpi_tz_passive_sysctl, "I",
	    "thermal constant 2 for passive cooling");
	SYSCTL_ADD_PROC(&sc->tz_sysctl_ctx, SYSCTL_CHILDREN(sc->tz_sysctl_tree),
	    OID_AUTO, "_TSP", CTLTYPE_INT | CTLFLAG_RW,
	    sc, offsetof(struct acpi_tz_softc, tz_zone.tsp),
	    acpi_tz_passive_sysctl, "I",
	    "thermal sampling period for passive cooling");

	/*
	 * Create thread to service all of the thermal zones.  Register
	 * our power profile event handler.
	 */
	sc->tz_event = EVENTHANDLER_REGISTER(power_profile_change,
	    acpi_tz_power_profile, sc, 0);
	if (acpi_tz_proc == NULL) {
		error = kproc_create(acpi_tz_thread, NULL, &acpi_tz_proc,
		    RFHIGHPID, 0, "acpi_thermal");
		if (error != 0) {
			device_printf(sc->tz_dev,
			    "could not create thread - %d", error);
			goto out;
		}
	}

	/*
	 * Create a thread to handle passive cooling for 1st zone which
	 * has _PSV, _TSP, _TC1 and _TC2.  Users can enable it for other
	 * zones manually for now.
	 *
	 * XXX We enable only one zone to avoid multiple zones conflict
	 * with each other since cpufreq currently sets all CPUs to the
	 * given frequency whereas it's possible for different thermal
	 * zones to specify independent settings for multiple CPUs.
	 */
	if (acpi_tz_cooling_unit < 0 && acpi_tz_cooling_is_available(sc))
		sc->tz_cooling_enabled = TRUE;
	if (sc->tz_cooling_enabled) {
		error = acpi_tz_cooling_thread_start(sc);
		if (error != 0) {
			sc->tz_cooling_enabled = FALSE;
			goto out;
		}
		acpi_tz_cooling_unit = device_get_unit(dev);
	}

	/*
	 * Flag the event handler for a manual invocation by our timeout.
	 * We defer it like this so that the rest of the subsystem has time
	 * to come up.  Don't bother evaluating/printing the temperature at
	 * this point; on many systems it'll be bogus until the EC is running.
	 */
	sc->tz_flags |= TZ_FLAG_GETPROFILE;

out:
	if (error != 0) {
		EVENTHANDLER_DEREGISTER(power_profile_change, sc->tz_event);
		AcpiRemoveNotifyHandler(sc->tz_handle, ACPI_DEVICE_NOTIFY,
		    acpi_tz_notify_handler);
		sysctl_ctx_free(&sc->tz_sysctl_ctx);
	}
	return_VALUE (error);
}
/* ARGSUSED */
void
random_yarrow_init(void)
{
	int error, i;
	struct harvest *np;
	struct sysctl_oid *random_o, *random_sys_o, *random_sys_harvest_o;
	enum esource e;

	random_o = SYSCTL_ADD_NODE(&random_clist,
	    SYSCTL_STATIC_CHILDREN(_kern),
	    OID_AUTO, "random", CTLFLAG_RW, 0,
	    "Software Random Number Generator");

	random_yarrow_init_alg(&random_clist, random_o);

	random_sys_o = SYSCTL_ADD_NODE(&random_clist,
	    SYSCTL_CHILDREN(random_o),
	    OID_AUTO, "sys", CTLFLAG_RW, 0,
	    "Entropy Device Parameters");

	SYSCTL_ADD_PROC(&random_clist,
	    SYSCTL_CHILDREN(random_sys_o),
	    OID_AUTO, "seeded", CTLTYPE_INT | CTLFLAG_RW,
	    &random_systat.seeded, 1, random_check_boolean, "I",
	    "Seeded State");

	random_sys_harvest_o = SYSCTL_ADD_NODE(&random_clist,
	    SYSCTL_CHILDREN(random_sys_o),
	    OID_AUTO, "harvest", CTLFLAG_RW, 0,
	    "Entropy Sources");

	SYSCTL_ADD_PROC(&random_clist,
	    SYSCTL_CHILDREN(random_sys_harvest_o),
	    OID_AUTO, "ethernet", CTLTYPE_INT | CTLFLAG_RW,
	    &harvest.ethernet, 1, random_check_boolean, "I",
	    "Harvest NIC entropy");
	SYSCTL_ADD_PROC(&random_clist,
	    SYSCTL_CHILDREN(random_sys_harvest_o),
	    OID_AUTO, "point_to_point", CTLTYPE_INT | CTLFLAG_RW,
	    &harvest.point_to_point, 1, random_check_boolean, "I",
	    "Harvest serial net entropy");
	SYSCTL_ADD_PROC(&random_clist,
	    SYSCTL_CHILDREN(random_sys_harvest_o),
	    OID_AUTO, "interrupt", CTLTYPE_INT | CTLFLAG_RW,
	    &harvest.interrupt, 1, random_check_boolean, "I",
	    "Harvest IRQ entropy");
	SYSCTL_ADD_PROC(&random_clist,
	    SYSCTL_CHILDREN(random_sys_harvest_o),
	    OID_AUTO, "swi", CTLTYPE_INT | CTLFLAG_RW,
	    &harvest.swi, 0, random_check_boolean, "I",
	    "Harvest SWI entropy");

	/* Initialise the harvest fifos */
	STAILQ_INIT(&emptyfifo.head);
	emptyfifo.count = 0;
	for (i = 0; i < EMPTYBUFFERS; i++) {
		np = malloc(sizeof(struct harvest), M_ENTROPY, M_WAITOK);
		STAILQ_INSERT_TAIL(&emptyfifo.head, np, next);
	}
	for (e = RANDOM_START; e < ENTROPYSOURCE; e++) {
		STAILQ_INIT(&harvestfifo[e].head);
		harvestfifo[e].count = 0;
	}

	mtx_init(&harvest_mtx, "entropy harvest mutex", NULL, MTX_SPIN);

	/* Start the hash/reseed thread */
	error = kproc_create(random_kthread, NULL,
	    &random_kthread_proc, RFHIGHPID, 0, "yarrow");
	if (error != 0)
		panic("Cannot create entropy maintenance thread.");

	/* Register the randomness harvesting routine */
	random_yarrow_init_harvester(random_harvest_internal,
	    random_yarrow_read);
}