idm_status_t
idm_conn_sm_init(idm_conn_t *ic)
{
	char taskq_name[32];

	/*
	 * Caller should have assigned a unique connection ID. Use this
	 * connection ID to create a unique connection name string.
	 */
	ASSERT(ic->ic_internal_cid != 0);
	(void) snprintf(taskq_name, sizeof (taskq_name) - 1,
	    "conn_sm%08x", ic->ic_internal_cid);

	ic->ic_state_taskq = taskq_create(taskq_name, 1, minclsyspri, 4, 16384,
	    TASKQ_PREPOPULATE);
	if (ic->ic_state_taskq == NULL) {
		return (IDM_STATUS_FAIL);
	}

	idm_sm_audit_init(&ic->ic_state_audit);
	mutex_init(&ic->ic_state_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&ic->ic_state_cv, NULL, CV_DEFAULT, NULL);

	ic->ic_state = CS_S1_FREE;
	ic->ic_last_state = CS_S1_FREE;

	return (IDM_STATUS_SUCCESS);
}
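/*
 * Hypothetical teardown counterpart to idm_conn_sm_init() above, releasing
 * the taskq, CV, and mutex in reverse order of creation. Sketch only: the
 * function name is assumed, the field names come from the init routine.
 */
void
idm_conn_sm_fini(idm_conn_t *ic)
{
	taskq_destroy(ic->ic_state_taskq);
	cv_destroy(&ic->ic_state_cv);
	mutex_destroy(&ic->ic_state_mutex);
}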
/* ARGSUSED */
static int
cnread(dev_t dev, struct uio *uio, struct cred *cred)
{
	kcondvar_t sleep_forever;
	kmutex_t sleep_forever_mutex;

	if (rconsvp == NULL) {
		/*
		 * Go to sleep forever.  This seems like the least
		 * harmful thing to do if there's no console.
		 * EOF might be better if we're ending up in
		 * single-user mode.
		 */
		cv_init(&sleep_forever, NULL, CV_DRIVER, NULL);
		mutex_init(&sleep_forever_mutex, NULL, MUTEX_DRIVER, NULL);
		mutex_enter(&sleep_forever_mutex);
		(void) cv_wait_sig(&sleep_forever, &sleep_forever_mutex);
		mutex_exit(&sleep_forever_mutex);
		return (EIO);
	}

	if (rconsvp->v_stream != NULL)
		return (strread(rconsvp, uio, cred));
	else
		return (cdev_read(rconsdev, uio, cred));
}
int
testcall(struct lwp *l, void *uap, register_t *retval)
{
	int i;

	mutex_init(&test_mutex, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&test_cv, "testcv");

	printf("test: creating threads\n");

	test_count = NTHREADS;
	test_exit = 0;

	for (i = 0; i < test_count; i++)
		kthread_create(0, KTHREAD_MPSAFE, NULL, thread1,
		    &primes[i], &test_threads[i], "thread%d", i);

	printf("test: sleeping\n");

	mutex_enter(&test_mutex);
	while (test_count != 0) {
		(void)cv_timedwait(&test_cv, &test_mutex, hz * SECONDS);
		test_exit = 1;
	}
	mutex_exit(&test_mutex);

	printf("test: finished\n");

	cv_destroy(&test_cv);
	mutex_destroy(&test_mutex);

	return 0;
}
/* ------------------------------------------------------------------------ */
int fr_loginit()
{
	int i;

	for (i = IPL_LOGMAX; i >= 0; i--) {
		iplt[i] = NULL;
		ipll[i] = NULL;
		iplh[i] = &iplt[i];
		iplused[i] = 0;
		bzero((char *)&iplcrc[i], sizeof(iplcrc[i]));
# ifdef	IPL_SELECT
		iplog_ss[i].read_waiter = 0;
		iplog_ss[i].state = 0;
# endif
# if defined(linux) && defined(_KERNEL)
		init_waitqueue_head(iplh_linux + i);
# endif
	}

# if SOLARIS && defined(_KERNEL)
	cv_init(&iplwait, "ipl condvar", CV_DRIVER, NULL);
# endif
	MUTEX_INIT(&ipl_mutex, "ipf log mutex");
	ipl_log_init = 1;

	return 0;
}
/*
 * log_event_init - Allocate and initialize log_event data structures.
 */
void
log_event_init()
{
	mutex_init(&eventq_head_mutex, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&eventq_sent_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&log_event_cv, NULL, CV_DEFAULT, NULL);

	mutex_init(&event_qfull_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&event_qfull_cv, NULL, CV_DEFAULT, NULL);

	mutex_init(&event_pause_mutex, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&event_pause_cv, NULL, CV_DEFAULT, NULL);

	mutex_init(&registered_channel_mutex, NULL, MUTEX_DEFAULT, NULL);
	sysevent_evc_init();
}
/*
 * Completion API
 */
void
init_completion(struct completion *c)
{
	cv_init(&c->cv, "VCHI completion cv");
	mtx_init(&c->lock, "VCHI completion lock", "condvar", MTX_DEF);
	c->done = 0;
}
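/*
 * Hedged sketch of the two counterparts implied by the fields initialized
 * in init_completion() above: signal under the lock, and wait on the CV
 * until done is nonzero.  These bodies assume the same struct completion
 * layout (cv, lock, done) and are not taken from the original source.
 */
void
complete(struct completion *c)
{
	mtx_lock(&c->lock);
	c->done++;
	cv_signal(&c->cv);
	mtx_unlock(&c->lock);
}

void
wait_for_completion(struct completion *c)
{
	mtx_lock(&c->lock);
	while (c->done == 0)
		cv_wait(&c->cv, &c->lock);
	c->done--;
	mtx_unlock(&c->lock);
}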
static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	txg_init(dp, txg);

	txg_list_create(&dp->dp_dirty_datasets,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks,
	    offsetof(dsl_sync_task_t, dst_node));

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);

	dp->dp_vnrele_taskq = taskq_create("zfs_vn_rele_taskq", 1, minclsyspri,
	    1, 4, 0);

	return (dp);
}
/*
 * allocate and init a scsipi_periph structure for a new device.
 */
struct scsipi_periph *
scsipi_alloc_periph(int malloc_flag)
{
	struct scsipi_periph *periph;
	u_int i;

	periph = malloc(sizeof(*periph), M_DEVBUF, malloc_flag|M_ZERO);
	if (periph == NULL)
		return NULL;

	periph->periph_dev = NULL;

	/*
	 * Start with one command opening.  The periph driver
	 * will grow this if it knows it can take advantage of it.
	 */
	periph->periph_openings = 1;
	periph->periph_active = 0;

	for (i = 0; i < PERIPH_NTAGWORDS; i++)
		periph->periph_freetags[i] = 0xffffffff;

	TAILQ_INIT(&periph->periph_xferq);
	callout_init(&periph->periph_callout, 0);
	cv_init(&periph->periph_cv, "periph");

	return periph;
}
/*
 * Get and initialize event structure corresponding to lwp event (i.e.
 * address).
 */
static afs_event_t *
afs_getevent(char *event)
{
	afs_event_t *evp, *newp = 0;
	int hashcode;

	AFS_ASSERT_GLOCK();
	hashcode = afs_evhash(event);
	evp = afs_evhasht[hashcode];
	while (evp) {
		if (evp->event == event) {
			evp->refcount++;
			return evp;
		}
		if (evp->refcount == 0)
			newp = evp;
		evp = evp->next;
	}
	if (!newp) {
		newp = osi_AllocSmallSpace(sizeof(afs_event_t));
		afs_evhashcnt++;
		newp->next = afs_evhasht[hashcode];
		afs_evhasht[hashcode] = newp;
		cv_init(&newp->cond, "event cond var", CV_DEFAULT, NULL);
		newp->seq = 0;
	}
	newp->event = event;
	newp->refcount = 1;
	return newp;
}
int
smb_t2_init(struct smb_t2rq *t2p, struct smb_connobj *source, ushort_t *setup,
    int setupcnt, struct smb_cred *scred)
{
	int i;
	int error;

	bzero(t2p, sizeof (*t2p));
	mutex_init(&t2p->t2_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&t2p->t2_cond, NULL, CV_DEFAULT, NULL);

	t2p->t2_source = source;
	t2p->t2_setupcount = (u_int16_t)setupcnt;
	t2p->t2_setupdata = t2p->t2_setup;
	for (i = 0; i < setupcnt; i++)
		t2p->t2_setup[i] = setup[i];
	t2p->t2_fid = 0xffff;
	t2p->t2_cred = scred;
	t2p->t2_share = (source->co_level == SMBL_SHARE ?
	    CPTOSS(source) : NULL);	/* for smb up/down */

	error = smb_rq_getenv(source, &t2p->t2_vc, NULL);
	if (error)
		return (error);

	return (0);
}
void
HgfsInitRequestList(HgfsSuperInfo *sip)  // IN: Pointer to superinfo structure
{
   int i;

   DEBUG(VM_DEBUG_REQUEST, "HgfsInitRequestList().\n");

   ASSERT(sip);

   mutex_init(&sip->reqMutex, NULL, MUTEX_DRIVER, NULL);

   /* Initialize free request list */
   DblLnkLst_Init(&sip->reqFreeList);
   mutex_init(&sip->reqFreeMutex, NULL, MUTEX_DRIVER, NULL);
   cv_init(&sip->reqFreeCondVar, NULL, CV_DRIVER, NULL);

   /*
    * Initialize pool of requests
    *
    * Here we are setting each request's id to its index into the requestPool
    * so this can be used as an identifier in reply packets.  Each request's
    * state is also set to UNUSED and is added to the free list.
    */
   for (i = 0; i < ARRAYSIZE(requestPool); i++) {
      requestPool[i].id = i;
      requestPool[i].state = HGFS_REQ_UNUSED;
      DblLnkLst_Init(&requestPool[i].listNode);
      DblLnkLst_LinkLast(&sip->reqFreeList, &requestPool[i].listNode);
   }

   //HgfsDebugPrintReqList(&sip->reqFreeList);

   DEBUG(VM_DEBUG_REQUEST, "HgfsInitRequestList() done.\n");
}
int
smb_rq_init(struct smb_rq *rqp, struct smb_connobj *co, uchar_t cmd,
    struct smb_cred *scred)
{
	int error;

	bzero(rqp, sizeof (*rqp));
	mutex_init(&rqp->sr_lock, NULL, MUTEX_DRIVER, NULL);
	cv_init(&rqp->sr_cond, NULL, CV_DEFAULT, NULL);

	error = smb_rq_getenv(co, &rqp->sr_vc, &rqp->sr_share);
	if (error)
		return (error);

	/*
	 * We copied a VC pointer (vcp) into rqp->sr_vc,
	 * but we do NOT do a smb_vc_hold here.  Instead,
	 * the caller is responsible for the hold on the
	 * share or the VC as needed.  For smbfs callers,
	 * the hold is on the share, via the smbfs mount.
	 * For nsmb ioctl callers, the hold is done when
	 * the driver handle gets VC or share references.
	 * This design avoids frequent hold/rele activity
	 * when creating and completing requests.
	 */

	rqp->sr_rexmit = SMBMAXRESTARTS;
	rqp->sr_cred = scred;	/* Note: ref hold done by caller. */
	rqp->sr_pid = (uint16_t)ddi_get_pid();
	error = smb_rq_new(rqp, cmd);

	return (error);
}
/*
 * Look up the supplied hostname in the rdc_link_down chain. Add a new
 * entry if it isn't found. Return a pointer to the new or found entry.
 */
static rdc_link_down_t *
rdc_lookup_host(char *host)
{
	rdc_link_down_t *p;

	mutex_enter(&rdc_ping_lock);

	if (rdc_link_down == NULL) {
		rdc_link_down = kmem_zalloc(sizeof (*rdc_link_down), KM_SLEEP);
		rdc_link_down->next = rdc_link_down;
	}

	for (p = rdc_link_down->next; p != rdc_link_down; p = p->next) {
		if (strcmp(host, p->host) == 0) {
			/* Match */
			mutex_exit(&rdc_ping_lock);
			return (p);
		}
	}

	/* No match, must create a new entry */

	p = kmem_zalloc(sizeof (*p), KM_SLEEP);
	p->link_down = 1;
	p->next = rdc_link_down->next;
	rdc_link_down->next = p;
	(void) strncpy(p->host, host, MAX_RDC_HOST_SIZE);

	mutex_init(&p->syncd_mutex, NULL, MUTEX_DRIVER, NULL);
	cv_init(&p->syncd_cv, NULL, CV_DRIVER, NULL);

	mutex_exit(&rdc_ping_lock);
	return (p);
}
/*
 * ksem object management including creation and reference counting
 * routines.
 */
static struct ksem *
ksem_alloc(struct ucred *ucred, mode_t mode, unsigned int value)
{
	struct ksem *ks;

	mtx_lock(&ksem_count_lock);
	if (nsems == p31b_getcfg(CTL_P1003_1B_SEM_NSEMS_MAX) || ksem_dead) {
		mtx_unlock(&ksem_count_lock);
		return (NULL);
	}
	nsems++;
	mtx_unlock(&ksem_count_lock);
	ks = malloc(sizeof(*ks), M_KSEM, M_WAITOK | M_ZERO);
	ks->ks_uid = ucred->cr_uid;
	ks->ks_gid = ucred->cr_gid;
	ks->ks_mode = mode;
	ks->ks_value = value;
	cv_init(&ks->ks_cv, "ksem");
	vfs_timestamp(&ks->ks_birthtime);
	ks->ks_atime = ks->ks_mtime = ks->ks_ctime = ks->ks_birthtime;
	refcount_init(&ks->ks_ref, 1);
#ifdef MAC
	mac_posixsem_init(ks);
	mac_posixsem_create(ucred, ks);
#endif
	return (ks);
}
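/*
 * Hypothetical release path matching ksem_alloc() above: drop a reference
 * and, on the last one, undo the allocation and the nsems accounting.
 * Sketch only; the real FreeBSD code may differ in details.
 */
static void
ksem_drop(struct ksem *ks)
{
	if (refcount_release(&ks->ks_ref)) {
#ifdef MAC
		mac_posixsem_destroy(ks);
#endif
		cv_destroy(&ks->ks_cv);
		free(ks, M_KSEM);
		mtx_lock(&ksem_count_lock);
		nsems--;
		mtx_unlock(&ksem_count_lock);
	}
}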
ACPI_STATUS
AcpiOsCreateSemaphore(UINT32 MaxUnits, UINT32 InitialUnits,
    ACPI_SEMAPHORE *OutHandle)
{
	struct acpi_sema *as;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	if (OutHandle == NULL || MaxUnits == 0 || InitialUnits > MaxUnits)
		return_ACPI_STATUS (AE_BAD_PARAMETER);
	if ((as = malloc(sizeof(*as), M_ACPISEM, M_NOWAIT | M_ZERO)) == NULL)
		return_ACPI_STATUS (AE_NO_MEMORY);

	snprintf(as->as_name, sizeof(as->as_name), "ACPI sema (%p)", as);
	mtx_init(&as->as_lock, as->as_name, NULL, MTX_DEF);
	cv_init(&as->as_cv, as->as_name);
	as->as_maxunits = MaxUnits;
	as->as_units = InitialUnits;

	*OutHandle = (ACPI_SEMAPHORE)as;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "created %s, max %u, initial %u\n",
	    as->as_name, MaxUnits, InitialUnits));

	return_ACPI_STATUS (AE_OK);
}
static void
lx_ptm_lh_insert(uint_t index, ldi_handle_t lh)
{
	lx_ptm_ops_t *lpo;

	ASSERT(lh != NULL);

	/* Allocate and initialize the ops structure */
	lpo = kmem_zalloc(sizeof (lx_ptm_ops_t), KM_SLEEP);
	mutex_init(&lpo->lpo_rops_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&lpo->lpo_rops_cv, NULL, CV_DEFAULT, NULL);

	rw_enter(&lps.lps_lh_rwlock, RW_WRITER);

	/* check if we need to grow the size of the layered handle array */
	if (index >= lps.lps_lh_count) {
		rw_exit(&lps.lps_lh_rwlock);
		lx_ptm_lh_grow(index);
		rw_enter(&lps.lps_lh_rwlock, RW_WRITER);
	}

	ASSERT(index < lps.lps_lh_count);
	ASSERT(lps.lps_lh_array[index].lph_handle == NULL);
	ASSERT(lps.lps_lh_array[index].lph_pktio == 0);
	ASSERT(lps.lps_lh_array[index].lph_eofed == 0);
	ASSERT(lps.lps_lh_array[index].lph_lpo == NULL);

	/* insert the new handle and return */
	lps.lps_lh_array[index].lph_handle = lh;
	lps.lps_lh_array[index].lph_pktio = 0;
	lps.lps_lh_array[index].lph_eofed = 0;
	lps.lps_lh_array[index].lph_lpo = lpo;

	rw_exit(&lps.lps_lh_rwlock);
}
void
fdcattach(struct fdc_softc *fdc)
{
	mutex_init(&fdc->sc_mtx, MUTEX_DEFAULT, IPL_BIO);
	cv_init(&fdc->sc_cv, "fdcwakeup");
	callout_init(&fdc->sc_timo_ch, 0);
	callout_init(&fdc->sc_intr_ch, 0);

	fdc->sc_state = DEVIDLE;
	TAILQ_INIT(&fdc->sc_drives);

	fdc->sc_maxiosize = isa_dmamaxsize(fdc->sc_ic, fdc->sc_drq);

	if (isa_drq_alloc(fdc->sc_ic, fdc->sc_drq) != 0) {
		aprint_normal_dev(fdc->sc_dev, "can't reserve drq %d\n",
		    fdc->sc_drq);
		return;
	}

	if (isa_dmamap_create(fdc->sc_ic, fdc->sc_drq, fdc->sc_maxiosize,
	    BUS_DMA_NOWAIT|BUS_DMA_ALLOCNOW)) {
		aprint_normal_dev(fdc->sc_dev, "can't set up ISA DMA map\n");
		return;
	}

	config_interrupts(fdc->sc_dev, fdcfinishattach);

	if (!pmf_device_register(fdc->sc_dev, fdcsuspend, fdcresume)) {
		aprint_error_dev(fdc->sc_dev,
		    "cannot set power mgmt handler\n");
	}
}
/*
 * iscsi_thread_create - Creates the needed resources to handle a thread
 */
iscsi_thread_t *
iscsi_thread_create(dev_info_t *dip, char *name, iscsi_thread_ep_t entry_point,
    void *arg)
{
	iscsi_thread_t *thread;

	thread = kmem_zalloc(sizeof (iscsi_thread_t), KM_SLEEP);
	if (thread != NULL) {
		thread->tq = ddi_taskq_create(dip, name, 1,
		    TASKQ_DEFAULTPRI, 0);
		if (thread->tq != NULL) {
			thread->signature = SIG_ISCSI_THREAD;
			thread->dip = dip;
			thread->entry_point = entry_point;
			thread->arg = arg;
			thread->state = ISCSI_THREAD_STATE_STOPPED;
			thread->sign.bitmap = 0;
			mutex_init(&thread->mgnt.mtx, NULL, MUTEX_DRIVER, NULL);
			mutex_init(&thread->sign.mtx, NULL, MUTEX_DRIVER, NULL);
			cv_init(&thread->sign.cdv, NULL, CV_DRIVER, NULL);
		} else {
			kmem_free(thread, sizeof (iscsi_thread_t));
			thread = NULL;
		}
	}
	return (thread);
}
/*------------------------------------------------------------------------*
 *	usb_dma_tag_setup - initialise USB DMA tags
 *------------------------------------------------------------------------*/
void
usb_dma_tag_setup(struct usb_dma_parent_tag *udpt,
    struct usb_dma_tag *udt, bus_dma_tag_t dmat,
    struct mtx *mtx, usb_dma_callback_t *func,
    uint8_t ndmabits, uint8_t nudt)
{
	memset(udpt, 0, sizeof(*udpt));

	/* sanity checking */
	if ((nudt == 0) ||
	    (ndmabits == 0) ||
	    (mtx == NULL)) {
		/* something is corrupt */
		return;
	}
	/* initialise condition variable */
	cv_init(udpt->cv, "USB DMA CV");

	/* store some information */
	udpt->mtx = mtx;
	udpt->func = func;
	udpt->tag = dmat;
	udpt->utag_first = udt;
	udpt->utag_max = nudt;
	udpt->dma_bits = ndmabits;

	while (nudt--) {
		memset(udt, 0, sizeof(*udt));
		udt->tag_parent = udpt;
		udt++;
	}
}
static int
bcm2835_audio_attach(device_t dev)
{
	struct bcm2835_audio_info *sc;

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);

	sc->dev = dev;
	sc->bufsz = VCHIQ_AUDIO_BUFFER_SIZE;

	mtx_init(&sc->lock, device_get_nameunit(dev),
	    "bcm_audio_lock", MTX_DEF);
	cv_init(&sc->worker_cv, "worker_cv");
	sc->vchi_handle = VCHIQ_SERVICE_HANDLE_INVALID;

	/*
	 * We need interrupts enabled for VCHI to work properly,
	 * so delay initialization until they are.
	 */
	sc->intr_hook.ich_func = bcm2835_audio_delayed_init;
	sc->intr_hook.ich_arg = sc;

	if (config_intrhook_establish(&sc->intr_hook) != 0)
		goto no;

	return 0;

no:
	return ENXIO;
}
/* Allocate a new request with request and reply buffers. */
struct ipmi_request *
ipmi_alloc_request(struct ipmi_device *dev, long msgid, uint8_t addr,
    uint8_t command, size_t requestlen, size_t replylen)
{
	struct ipmi_request *req;

	req = kmem_zalloc(sizeof (struct ipmi_request) + requestlen + replylen,
	    KM_SLEEP);
	req->ir_sz = sizeof (struct ipmi_request) + requestlen + replylen;
	req->ir_owner = dev;
	req->ir_msgid = msgid;
	req->ir_addr = addr;
	req->ir_command = command;

	if (requestlen) {
		req->ir_request = (uchar_t *)&req[1];
		req->ir_requestlen = requestlen;
	}
	if (replylen) {
		req->ir_reply = (uchar_t *)&req[1] + requestlen;
		req->ir_replybuflen = replylen;
	}

	cv_init(&req->ir_cv, NULL, CV_DEFAULT, NULL);
	req->ir_status = IRS_ALLOCATED;

	return (req);
}
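/*
 * Hypothetical counterpart that frees a request from ipmi_alloc_request()
 * above; ir_sz records the full allocation size for exactly this purpose.
 * Sketch only, not confirmed source.
 */
static void
ipmi_free_request(struct ipmi_request *req)
{
	cv_destroy(&req->ir_cv);
	kmem_free(req, req->ir_sz);
}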
void
once_init()
{
	mutex_init(&oncemtx, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&oncecv, "runonce");
}
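/*
 * Hedged sketch of how the oncemtx/oncecv pair initialized above is
 * typically consumed: run fn() exactly once, with late callers blocking
 * until the first caller finishes.  The once_t layout and the ONCE_*
 * state names are assumptions, not confirmed source.
 */
int
run_once_sketch(once_t *o, int (*fn)(void))
{
	mutex_enter(&oncemtx);
	while (o->o_status == ONCE_RUNNING)	/* someone else is in fn() */
		cv_wait(&oncecv, &oncemtx);
	if (o->o_status == ONCE_VIRGIN) {
		o->o_status = ONCE_RUNNING;
		mutex_exit(&oncemtx);
		o->o_error = fn();		/* run without the lock held */
		mutex_enter(&oncemtx);
		o->o_status = ONCE_DONE;
		cv_broadcast(&oncecv);		/* wake all waiters */
	}
	mutex_exit(&oncemtx);
	return o->o_error;
}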
void
pcppi_attach(struct pcppi_softc *sc)
{
	struct pcppi_attach_args pa;
	device_t self = sc->sc_dv;

	callout_init(&sc->sc_bell_ch, CALLOUT_MPSAFE);
	callout_setfunc(&sc->sc_bell_ch, pcppi_bell_callout, sc);
	cv_init(&sc->sc_slp, "bell");

	sc->sc_bellactive = sc->sc_bellpitch = 0;

#if NPCKBD > 0
	/* Provide a beeper for the PC Keyboard, if there isn't one already. */
	pckbd_hookup_bell(pcppi_pckbd_bell, sc);
#endif
#if NATTIMER > 0
	config_defer(sc->sc_dv, pcppi_attach_speaker);
#endif

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");

	pa.pa_cookie = sc;
	config_search_loc(pcppisearch, sc->sc_dv, "pcppi", NULL, &pa);
}
/*ARGSUSED*/
static int
ipmi_open(dev_t *devp, int flag, int otyp, cred_t *cred)
{
	minor_t minor;
	ipmi_device_t *dev;

	if (ipmi_attached == B_FALSE)
		return (ENXIO);

	if (ipmi_found == B_FALSE)
		return (ENODEV);

	/* exclusive opens are not supported */
	if (flag & FEXCL)
		return (ENOTSUP);

	if ((minor = (minor_t)id_alloc_nosleep(minor_ids)) == 0)
		return (ENODEV);

	/* Initialize the per file descriptor data. */
	dev = kmem_zalloc(sizeof (ipmi_device_t), KM_SLEEP);

	dev->ipmi_pollhead = kmem_zalloc(sizeof (pollhead_t), KM_SLEEP);

	TAILQ_INIT(&dev->ipmi_completed_requests);
	dev->ipmi_address = IPMI_BMC_SLAVE_ADDR;
	dev->ipmi_lun = IPMI_BMC_SMS_LUN;
	*devp = makedevice(getmajor(*devp), minor);
	dev->ipmi_dev = *devp;
	cv_init(&dev->ipmi_cv, NULL, CV_DEFAULT, NULL);

	list_insert_head(&dev_list, dev);

	return (0);
}
static int
workqueue_initqueue(struct workqueue *wq, struct workqueue_queue *q,
    int ipl, struct cpu_info *ci)
{
	int error, ktf;

	KASSERT(q->q_worker == NULL);

	mutex_init(&q->q_mutex, MUTEX_DEFAULT, ipl);
	cv_init(&q->q_cv, wq->wq_name);
	SIMPLEQ_INIT(&q->q_queue);
	ktf = ((wq->wq_flags & WQ_MPSAFE) != 0 ? KTHREAD_MPSAFE : 0);
	if (wq->wq_prio < PRI_KERNEL)
		ktf |= KTHREAD_TS;
	if (ci) {
		error = kthread_create(wq->wq_prio, ktf, ci, workqueue_worker,
		    wq, &q->q_worker, "%s/%u", wq->wq_name, ci->ci_index);
	} else {
		error = kthread_create(wq->wq_prio, ktf, ci, workqueue_worker,
		    wq, &q->q_worker, "%s", wq->wq_name);
	}
	if (error != 0) {
		mutex_destroy(&q->q_mutex);
		cv_destroy(&q->q_cv);
		KASSERT(q->q_worker == NULL);
	}
	return error;
}
/* ARGSUSED */
static int
dnode_cons(void *arg, void *unused, int kmflag)
{
	int i;
	dnode_t *dn = arg;
	bzero(dn, sizeof (dnode_t));

	rw_init(&dn->dn_struct_rwlock, NULL, RW_DEFAULT, NULL);
	mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL);

	refcount_create(&dn->dn_holds);
	refcount_create(&dn->dn_tx_holds);

	for (i = 0; i < TXG_SIZE; i++) {
		avl_create(&dn->dn_ranges[i], free_range_compar,
		    sizeof (free_range_t),
		    offsetof(struct free_range, fr_node));
		list_create(&dn->dn_dirty_records[i],
		    sizeof (dbuf_dirty_record_t),
		    offsetof(dbuf_dirty_record_t, dr_dirty_node));
	}

	list_create(&dn->dn_dbufs, sizeof (dmu_buf_impl_t),
	    offsetof(dmu_buf_impl_t, db_link));

	dmu_zfetch_cons(&dn->dn_zfetch);

	return (0);
}
/*
 * Called from kcf:_init()
 */
void
kcf_rnd_init()
{
	hrtime_t ts;
	time_t now;

	mutex_init(&rndpool_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&rndpool_read_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * Add bytes to the cache using
	 * . 2 unpredictable times: high resolution time since boot,
	 *   and the current time of day.
	 * This is used only to make the timeout value in the timer
	 * unpredictable.
	 */
	ts = gethrtime();
	rndc_addbytes((uint8_t *)&ts, sizeof (ts));

	(void) drv_getparm(TIME, &now);
	rndc_addbytes((uint8_t *)&now, sizeof (now));

	rnbyte_cnt = 0;
	findex = rindex = 0;
	num_waiters = 0;

	rnd_alloc_magazines();

	(void) taskq_dispatch(system_taskq, rnd_init2, NULL, TQ_SLEEP);
}
/*
 * Allocate a new, uninitialized vnode.  If 'mp' is non-NULL, this is a
 * marker vnode.
 */
vnode_t *
vnalloc(struct mount *mp)
{
	vnode_t *vp;

	vp = pool_cache_get(vnode_cache, PR_WAITOK);
	KASSERT(vp != NULL);
	memset(vp, 0, sizeof(*vp));
	uvm_obj_init(&vp->v_uobj, &uvm_vnodeops, true, 0);
	cv_init(&vp->v_cv, "vnode");
	/*
	 * Done by memset() above.
	 *	LIST_INIT(&vp->v_nclist);
	 *	LIST_INIT(&vp->v_dnclist);
	 */

	if (mp != NULL) {
		vp->v_mount = mp;
		vp->v_type = VBAD;
		vp->v_iflag = VI_MARKER;
	} else {
		rw_init(&vp->v_lock);
	}

	return vp;
}
/* open the xenevt device; this is where we clone */
int
xenevtopen(dev_t dev, int flags, int mode, struct lwp *l)
{
	struct xenevt_d *d;
	struct file *fp;
	int fd, error;

	switch (minor(dev)) {
	case DEV_EVT:
		/* falloc() will use the descriptor for us. */
		if ((error = fd_allocfile(&fp, &fd)) != 0)
			return error;

		d = malloc(sizeof(*d), M_DEVBUF, M_WAITOK | M_ZERO);
		d->ci = &cpu_info_primary;
		mutex_init(&d->lock, MUTEX_DEFAULT, IPL_HIGH);
		cv_init(&d->cv, "xenevt");
		selinit(&d->sel);
		return fd_clone(fp, fd, flags, &xenevt_fileops, d);

	case DEV_XSD:
		/* no clone for /dev/xsd_kva */
		return (0);

	default:
		break;
	}
	return ENODEV;
}
static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
	dsl_pool_t *dp;
	blkptr_t *bp = spa_get_rootblkptr(spa);

	dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
	dp->dp_spa = spa;
	dp->dp_meta_rootbp = *bp;
	rrw_init(&dp->dp_config_rwlock, B_TRUE);
	txg_init(dp, txg);

	txg_list_create(&dp->dp_dirty_datasets,
	    offsetof(dsl_dataset_t, ds_dirty_link));
	txg_list_create(&dp->dp_dirty_zilogs,
	    offsetof(zilog_t, zl_dirty_link));
	txg_list_create(&dp->dp_dirty_dirs,
	    offsetof(dsl_dir_t, dd_dirty_link));
	txg_list_create(&dp->dp_sync_tasks,
	    offsetof(dsl_sync_task_t, dst_node));

	mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);

	dp->dp_iput_taskq = taskq_create("z_iput", max_ncpus, defclsyspri,
	    max_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);

	return (dp);
}