/*
 * Drop a reference on a kobj class and free it when the last
 * reference goes away.  Serialized against kobj_class_instantiate()
 * by kobj_token plus a critical section.
 */
void
kobj_class_uninstantiate(kobj_class_t cls)
{
	lwkt_gettoken(&kobj_token);
	crit_enter();
	if (--cls->refs == 0)
		kobj_class_free(cls);
	crit_exit();
	lwkt_reltoken(&kobj_token);
}
/** *************************************************************************** * Reduce the concurrent request count after this request has completed. * When max-connections is applied, check_request_limits() inserts this * function to be called during request cleanup. * * Params: * data: ptr to data passed in to request_set_data() when the call was * scheduled for this request. Used here to pass in a ptr to the * bucket tracking this request. * */ void reqlimit_conc_done(void *data) { bucket_info * bucket = (bucket_info *)data; assert(bucket != NULL); //----- START_CRIT ------------------------------ crit_enter(reqlimit_crit); bucket->conc--; crit_exit(reqlimit_crit); //----- END_CRIT ------------------------------ }
/*
 * Gain a reference on a kobj class, compiling its ops table on
 * first use.  Serialized by kobj_token plus a critical section,
 * matching kobj_class_uninstantiate().
 */
void
kobj_class_instantiate(kobj_class_t cls)
{
	lwkt_gettoken(&kobj_token);
	crit_enter();
	if (cls->ops == NULL)
		kobj_class_compile(cls);
	++cls->refs;
	crit_exit();
	lwkt_reltoken(&kobj_token);
}
/*
 * Reset all per-context polling state to its initial values,
 * inside a critical section.
 */
static __inline void
iopoll_reset_state(struct iopoll_ctx *io_ctx)
{
	struct iopoll_ctx *ctx = io_ctx;

	crit_enter();
	ctx->poll_burst = 5;		/* initial burst size */
	ctx->pending_polls = 0;
	ctx->residual_burst = 0;
	ctx->phase = 0;
	ctx->kern_frac = 0;
	bzero(&ctx->poll_start_t, sizeof(ctx->poll_start_t));
	bzero(&ctx->prev_t, sizeof(ctx->prev_t));
	crit_exit();
}
/*
 * Start call: deliver the per-cpu if_start message to this cpu's
 * ifnet port, but only when the previous message has completed
 * (MSGF_DONE), avoiding double-queueing.
 */
static void
lgue_start_ipifunc(void *arg)
{
	struct ifnet *ifp = arg;
	struct lwkt_msg *lmsg = &ifp->if_start_nmsg[mycpuid].lmsg;

	crit_enter();
	if (lmsg->ms_flags & MSGF_DONE)
		lwkt_sendmsg(ifnet_portfn(mycpuid), lmsg);
	crit_exit();
}
/*
 * Poll function.
 *
 * Generally this function gets called heavily when interrupts might be
 * non-operational, during a halt/reboot or panic.  Run the port
 * interrupt handler by hand under the port lock.
 */
static void
ahci_xpt_poll(struct cam_sim *sim)
{
	struct ahci_port *port = cam_sim_softc(sim);

	crit_enter();
	ahci_os_lock_port(port);
	ahci_port_intr(port, 1);
	ahci_os_unlock_port(port);
	crit_exit();
}
/*
 * Periodic keyboard poll: feed the generic keyboard interrupt
 * handler and re-arm the callout (hz/40 ~= 25ms interval).
 */
static void
ukbd_timeout(void *arg)
{
	keyboard_t *kbd = (keyboard_t *)arg;
	ukbd_state_t *sc = (ukbd_state_t *)kbd->kb_data;

	crit_enter();
	kbd_intr(kbd, (void *)USBD_NORMAL_COMPLETION);
	callout_reset(&sc->ks_timeout, hz / 40, ukbd_timeout, arg);
	crit_exit();
}
/*
 * Reset all polling-context counters to their initial values,
 * inside a critical section.
 */
static __inline void
poll_reset_state(struct pollctx *pctx)
{
	struct pollctx *ctx = pctx;

	crit_enter();
	ctx->poll_burst = 5;		/* initial burst size */
	ctx->reg_frac_count = 0;
	ctx->pending_polls = 0;
	ctx->residual_burst = 0;
	ctx->phase = 0;
	bzero(&ctx->poll_start_t, sizeof(ctx->poll_start_t));
	bzero(&ctx->prev_t, sizeof(ctx->prev_t));
	crit_exit();
}
/*
 * Called from process context when the hub is gone.
 * Detach all devices on active ports, then tear down the hub
 * structures themselves.
 */
static int
uhub_detach(device_t self)
{
	struct uhub_softc *sc = device_get_softc(self);
	struct usbd_hub *hub = sc->sc_hub->hub;
	struct usbd_port *rup;
	int port, nports;

	/*
	 * BUG FIX: the original format string was "sc=%port" -- "%po" is
	 * not a conversion; a pointer must be printed with %p.
	 */
	DPRINTF(("uhub_detach: sc=%p\n", sc));

	crit_enter();
	if (hub == NULL) {		/* Must be partially working */
		crit_exit();
		return (0);
	}

	/* Stop and close the interrupt pipe before touching the ports. */
	usbd_abort_pipe(sc->sc_ipipe);
	usbd_close_pipe(sc->sc_ipipe);

	nports = hub->hubdesc.bNbrPorts;
	for (port = 0; port < nports; port++) {
		rup = &hub->ports[port];
		if (rup->device)
			usb_disconnect_port(rup, self);
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_hub, self);

	/* Transaction translator, if any, lives on port 0. */
	if (hub->ports[0].tt)
		kfree(hub->ports[0].tt, M_USBDEV);
	kfree(hub, M_USBDEV);
	sc->sc_hub->hub = NULL;
	crit_exit();

	return (0);
}
/*
 * Old-style (osigsetmask) syscall: atomically return the previous
 * low 32 bits of the lwp signal mask and replace them with the
 * caller-supplied mask.
 *
 * MPSAFE
 */
int
sys_osigsetmask(struct osigsetmask_args *uap)
{
	struct lwp *lp = curthread->td_lwp;
	sigset_t set;

	OSIG2SIG(uap->mask, set);	/* widen old 32-bit mask */
	SIG_CANTMASK(set);		/* strip unmaskable signals */
	crit_enter();
	/* Return the old mask before installing the new low bits. */
	SIG2OSIG(lp->lwp_sigmask, uap->sysmsg_iresult);
	SIGSETLO(lp->lwp_sigmask, set);
	crit_exit();
	return (0);
}
/*
 * Mark the netgraph ethernet interface up and clear the output-active
 * state on its send queue.
 */
static void
ng_eiface_init(void *xsc)
{
	priv_p priv = xsc;
	struct ifnet *ifp = priv->ifp;

	crit_enter();
	ifp->if_flags |= IFF_RUNNING;
	ifq_clr_oactive(&ifp->if_snd);
	crit_exit();
}
/*
 * Move the mouse pointer to pixel position (x, y) on the given screen,
 * updating both the current and "old" positions so no delta is pending.
 */
void
sc_mouse_move(scr_stat *scp, int x, int y)
{
	crit_enter();
	scp->mouse_xpos = scp->mouse_oldxpos = x;
	scp->mouse_ypos = scp->mouse_oldypos = y;
	if (scp->font_size <= 0)
		/* no valid font metrics; pin the character-cell position */
		scp->mouse_pos = scp->mouse_oldpos = 0;
	else
		/*
		 * Convert pixel coords to a character-cell index:
		 * row = y/font_size - yoff, col = x/8 - xoff
		 * (8 presumably the font width -- TODO confirm).
		 */
		scp->mouse_pos = scp->mouse_oldpos =
		    (y/scp->font_size - scp->yoff)*scp->xsize + x/8 -
		    scp->xoff;
	scp->status |= MOUSE_MOVED;
	crit_exit();
}
/*
 * Wait for the target thread to terminate and then destroy the
 * cothread structure, clearing the caller's pointer.
 */
void
cothread_delete(cothread_t *cotdp)
{
	cothread_t cotd = *cotdp;

	if (cotd == NULL)
		return;

	unregister_int(cotd->intr_id, 0);
	crit_enter();
	pthread_join(cotd->pthr, NULL);
	crit_exit();
	kfree(cotd, M_DEVBUF);
	*cotdp = NULL;
}
/*
 * Kick off the ADC sampling for the request at the head of the queue,
 * unless a conversion is already in flight.
 */
static void
adc_queue_start(void)
{
	crit_enter();
	/*
	 * BUG FIX: adc_busy was previously tested BEFORE crit_enter(),
	 * leaving a window in which the flag could change between the
	 * check and the critical section.  Test it under the critical
	 * section together with the queue head.
	 */
	if (!adc_busy && head) {
		adc_busy = true;
		adc_sample_prepare(head->mode);
		adc_sample_start(head->channel, adc_queue_sample_done, NULL);
	}
	crit_exit();
}
/***************************************************************************
Function: sReadAiopID
Purpose:  Read the AIOP idenfication number directly from an AIOP.
Call:	  sReadAiopID(CtlP, aiop)
	  CONTROLLER_T *CtlP; Ptr to controller structure
	  int aiop: AIOP index
Return:   int: Flag AIOPID_XXXX if a valid AIOP is found, where X
	       is replace by an identifying number.
	  Flag AIOPID_NULL if no valid AIOP is found
Warnings: No context switches are allowed while executing this function.
*/
int
sReadAiopID(CONTROLLER_T *CtlP, int aiop)
{
	Byte_t AiopID;		/* ID byte from AIOP */

	/* Critical section: the reset/read register sequence must not
	   be interleaved with other accesses to the device. */
	crit_enter();
	rp_writeaiop1(CtlP, aiop, _CMD_REG, RESET_ALL);	/* reset AIOP */
	rp_writeaiop1(CtlP, aiop, _CMD_REG, 0x0);
	/* low three bits of channel status 0 carry the AIOP ID */
	AiopID = rp_readaiop1(CtlP, aiop, _CHN_STAT0) & 0x07;
	crit_exit();
	if(AiopID == 0x06)
		return(1);
	else			/* AIOP does not exist */
		return(-1);
}
void rtc_alarm_cancel(struct rtc_alarm_ctx *ctx) { crit_enter(); struct rtc_alarm_ctx *tail = alarm_head; while (tail) { if (tail->next == ctx) { tail->next = ctx->next; break; } tail = tail->next; } crit_exit(); }
/*
 * Switch the AT keyboard driver into (on != 0) or out of (on == 0)
 * polled mode.  Always succeeds.
 */
static int
atkbd_poll(keyboard_t *kbd, int on)
{
	atkbd_state_t *state = (atkbd_state_t *)kbd->kb_data;

	crit_enter();
	state->ks_polling = (on != 0);
	crit_exit();
	return 0;
}
/*
 * rfcomm_ctloutput(request, socket, level, optname, opt)
 *
 * Handle get/set socket options for an RFCOMM socket and reply to the
 * netmsg with the result.
 */
void
rfcomm_ctloutput(netmsg_t msg)
{
	struct socket *so = msg->ctloutput.base.nm_so;
	struct sockopt *sopt = msg->ctloutput.nm_sopt;
	struct rfcomm_dlc *pcb = (struct rfcomm_dlc *) so->so_pcb;
	struct mbuf *m;
	int error = 0;

#ifdef notyet			/* XXX */
	DPRINTFN(2, "%s\n", prcorequests[sopt->sopt_dir]);
#endif

	if (pcb == NULL) {
		error = EINVAL;
		goto out;
	}

	if (sopt->sopt_level != BTPROTO_RFCOMM) {
		error = ENOPROTOOPT;
		goto out;
	}

	switch(sopt->sopt_dir) {
	case PRCO_GETOPT:
		m = m_get(M_WAITOK, MT_DATA);
		crit_enter();
		m->m_len = rfcomm_getopt(pcb, sopt->sopt_name,
		    mtod(m, void *));
		crit_exit();
		if (m->m_len == 0) {
			m_freem(m);
			m = NULL;
			error = ENOPROTOOPT;
			/*
			 * BUG FIX: the original fell through and called
			 * soopt_from_kbuf(sopt, mtod(m, ...)) with m == NULL,
			 * dereferencing a NULL mbuf.  Bail out instead.
			 */
			break;
		}
		/*
		 * NOTE(review): m does not appear to be freed after the
		 * copy-out on this path -- possible mbuf leak; verify
		 * against soopt_from_kbuf()'s ownership contract.
		 */
		soopt_from_kbuf(sopt, mtod(m, void *), m->m_len);
		break;

	case PRCO_SETOPT:
		error = rfcomm_setopt2(pcb, sopt->sopt_name, so, sopt);
		break;

	default:
		error = ENOPROTOOPT;
		break;
	}
out:
	lwkt_replymsg(&msg->ctloutput.base.lmsg, error);
}
/*
 * Schedule a control block timeout
 *
 * Place the supplied timer control block on the timer queue.  The
 * function (func) will be called in 't' timer ticks with the
 * control block address as its only argument.  There are ATM_HZ
 * timer ticks per second.  The ticks value stored in each block is
 * a delta of the number of ticks from the previous block in the queue.
 * Thus, for each tick interval, only the first block in the queue
 * needs to have its tick value decremented.
 *
 * Arguments:
 *	tip	pointer to timer control block
 *	t	number of timer ticks until expiration
 *	func	pointer to function to call at expiration
 *
 * Returns:
 *	none
 *
 */
void
atm_timeout(struct atm_time *tip, int t, void (*func)(struct atm_time *))
{
	struct atm_time *tip1, *tip2;

	/*
	 * Check for double queueing error
	 */
	if (tip->ti_flag & TIF_QUEUED)
		panic("atm_timeout: double queueing");

	/*
	 * Make sure we delay at least a little bit
	 */
	if (t <= 0)
		t = 1;

	/*
	 * Find out where we belong on the queue
	 */
	crit_enter();
	/* walk the delta queue, consuming each predecessor's delta
	   so that t becomes our delta relative to tip1 */
	for (tip1 = NULL, tip2 = atm_timeq;
	     tip2 && (tip2->ti_ticks <= t);
	     tip1 = tip2, tip2 = tip1->ti_next) {
		t -= tip2->ti_ticks;
	}

	/*
	 * Place ourselves on queue and update timer deltas
	 */
	if (tip1 == NULL)
		atm_timeq = tip;
	else
		tip1->ti_next = tip;
	tip->ti_next = tip2;

	/* the successor's delta shrinks by our delta */
	if (tip2)
		tip2->ti_ticks -= t;

	/*
	 * Setup timer block
	 */
	tip->ti_flag |= TIF_QUEUED;
	tip->ti_ticks = t;
	tip->ti_func = func;

	crit_exit();
	return;
}
/*
 * Grab a CCB info structure, preferring the free list and falling
 * back to a fresh allocation when the list is empty.
 */
static __inline struct adv_ccb_info *
adv_get_ccb_info(struct adv_softc *adv)
{
	struct adv_ccb_info *cinfo;

	crit_enter();
	cinfo = SLIST_FIRST(&adv->free_ccb_infos);
	if (cinfo != NULL)
		SLIST_REMOVE_HEAD(&adv->free_ccb_infos, links);
	else
		cinfo = adv_alloc_ccb_info(adv);
	crit_exit();

	return (cinfo);
}
/*
 * Queue an HCI command mbuf for transmission, kicking off the
 * transmitter if it is currently idle.
 */
void
ubt_xmit_cmd(struct device *self, struct mbuf *m)
{
	struct ubt_softc *sc = device_get_softc(self);

	KKASSERT(sc->sc_enabled);

	crit_enter();
	IF_ENQUEUE(&sc->sc_cmd_queue, m);
	if (!sc->sc_cmd_busy)
		ubt_xmit_cmd_start(sc);
	crit_exit();
}
/*
 * a mouse button is pressed, start cut operation
 *
 * Clears any previous cut markings and establishes a new cut anchor at
 * the current mouse position, seeding cut_buffer accordingly.
 */
static void
mouse_cut_start(scr_stat *scp)
{
	int i;
	int j;

	if (scp->status & MOUSE_VISIBLE) {
		i = scp->mouse_cut_start;
		j = scp->mouse_cut_end;
		sc_remove_all_cutmarkings(scp->sc);
		if (scp->mouse_pos == i && i == j) {
			/* clicking the same spot again: clear the cut */
			cut_buffer[0] = '\0';
		} else if (skip_spc_right(scp, scp->mouse_pos)
			   >= scp->xsize) {
			/* if the pointer is on trailing blank chars,
			   mark towards eol */
			i = skip_spc_left(scp, scp->mouse_pos) + 1;
			crit_enter();
			/* mark from the last non-blank to end of line */
			scp->mouse_cut_start =
			    (scp->mouse_pos / scp->xsize) * scp->xsize + i;
			scp->mouse_cut_end =
			    (scp->mouse_pos / scp->xsize + 1) * scp->xsize - 1;
			crit_exit();
			cut_buffer[0] = '\r';
			cut_buffer[1] = '\0';
			scp->status |= MOUSE_CUTTING;
		} else {
			crit_enter();
			/* anchor a one-character cut at the pointer */
			scp->mouse_cut_start = scp->mouse_pos;
			scp->mouse_cut_end = scp->mouse_cut_start;
			crit_exit();
			cut_buffer[0] = sc_vtb_getc(&scp->vtb,
			    scp->mouse_cut_start);
			cut_buffer[1] = '\0';
			scp->status |= MOUSE_CUTTING;
		}
		mark_all(scp);	/* this is probably overkill XXX */
	}
}
/*
 * Copy out the B-channel byte counters for (unit, chan) and reset
 * them, all under a critical section so the snapshot is consistent.
 */
static void
i4b_capi_bch_stat(int unit, int chan, bchan_statistics_t *bsp)
{
	capi_softc_t *csc = capi_sc[unit];

	crit_enter();
	bsp->outbytes = csc->sc_bchan[chan].txcount;
	csc->sc_bchan[chan].txcount = 0;
	bsp->inbytes = csc->sc_bchan[chan].rxcount;
	csc->sc_bchan[chan].rxcount = 0;
	crit_exit();
}
/* finish using this keyboard */
static int
ukbd_term(keyboard_t *kbd)
{
	ukbd_state_t *state;
	int error;

	crit_enter();
	state = (ukbd_state_t *)kbd->kb_data;
	DPRINTF(("ukbd_term: ks_ifstate=0x%x\n", state->ks_ifstate));

	callout_stop(&state->ks_timeout);

	if (state->ks_ifstate & INTRENABLED)
		ukbd_enable_intr(kbd, FALSE, NULL);
	/* re-check: if the disable above failed to clear the flag,
	   refuse to tear down the keyboard */
	if (state->ks_ifstate & INTRENABLED) {
		crit_exit();
		DPRINTF(("ukbd_term: INTRENABLED!\n"));
		return ENXIO;
	}

	error = kbd_unregister(kbd);
	DPRINTF(("ukbd_term: kbd_unregister() %d\n", error));
	if (error == 0) {
		kbd->kb_flags = 0;
		/* the default keyboard's storage is static; only a
		   dynamically allocated keyboard is freed here */
		if (kbd != &default_kbd) {
			kfree(kbd->kb_keymap, M_DEVBUF);
			kfree(kbd->kb_accentmap, M_DEVBUF);
			kfree(kbd->kb_fkeytab, M_DEVBUF);
			kfree(state, M_DEVBUF);
			kfree(kbd, M_DEVBUF);
		}
	}
	crit_exit();
	return error;
}
/*
 * Return the current access count of a cache entry.  On non-IRIX
 * platforms the read is done under the cache lock's critical section;
 * on IRIX the field is read directly.
 */
NSAPI_PUBLIC unsigned int
cache_get_use_count(cache_t *cache, cache_entry_t *entry)
{
#ifdef IRIX
	return entry->access_count;
#else
	unsigned int count;

	crit_enter(cache->lock);
	count = entry->access_count;
	crit_exit(cache->lock);

	return count;
#endif
}
/*
 * Drain the IPv6 route cache: walk the per-cpu AF_INET6 radix tree
 * in draining mode so in6_rtqkill() can reap cached entries.
 */
void
in6_rtqdrain(void)
{
	struct radix_node_head *rnh = rt_tables[mycpuid][AF_INET6];
	struct rtqk_arg arg;

	arg.found = 0;
	arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = 0;
	arg.draining = 1;	/* reap regardless of expiry */
	arg.updating = 0;

	crit_enter();
	rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
	crit_exit();
}
/*---------------------------------------------------------------------------*
 *	fill statistics struct
 *
 *	Snapshot and reset the B-channel byte counters for
 *	(unit, h_chan) under a critical section.
 *---------------------------------------------------------------------------*/
static void
isic_bchannel_stat(int unit, int h_chan, bchan_statistics_t *bsp)
{
	l1_bchan_state_t *bch = &l1_sc[unit].sc_chan[h_chan];

	crit_enter();
	bsp->outbytes = bch->txcount;
	bch->txcount = 0;
	bsp->inbytes = bch->rxcount;
	bch->rxcount = 0;
	crit_exit();
}
/***************************************************************************
Function: sReadAiopNumChan
Purpose:  Read the number of channels available in an AIOP directly from
	  an AIOP.
Call:	  sReadAiopNumChan(CtlP, aiop)
	  CONTROLLER_T *CtlP; Ptr to controller structure
	  int aiop: AIOP index
Return:   int: The number of channels available
Comments: The number of channels is determined by write/reads from identical
	  offsets within the SRAM address spaces for channels 0 and 4.
	  If the channel 4 space is mirrored to channel 0 it is a 4 channel
	  AIOP, otherwise it is an 8 channel.
Warnings: No context switches are allowed while executing this function.
*/
int
sReadAiopNumChan(CONTROLLER_T *CtlP, int aiop)
{
	Word_t x, y;

	/* Critical section: the probe write and the two reads must not
	   be interleaved with other accesses to the device. */
	crit_enter();
	/* write to chan 0 SRAM */
	rp_writeaiop4(CtlP, aiop, _INDX_ADDR,0x12340000L);
	/* read from SRAM, chan 0 */
	rp_writeaiop2(CtlP, aiop, _INDX_ADDR,0);
	x = rp_readaiop2(CtlP, aiop, _INDX_DATA);
	/* read from SRAM, chan 4 */
	rp_writeaiop2(CtlP, aiop, _INDX_ADDR,0x4000);
	y = rp_readaiop2(CtlP, aiop, _INDX_DATA);
	crit_exit();
	if(x != y)	/* if different must be 8 chan */
		return(8);
	else
		return(4);
}
/*
 * Acquire a recursive lock: wait while another thread holds it, then
 * take (or re-take) ownership and bump the recursion count.
 *
 * NOTE(review): the matching #if for the #endif below is outside this
 * chunk -- presumably an EXTRA_DEBUGGING_LOGS variant of the signature
 * that carries file/line parameters (used by WOLog below); verify
 * against the full source.
 */
void WA_lock(WA_recursiveLock _lock)
#endif
{
	NSAPIThreadRecursiveLock *lock = (NSAPIThreadRecursiveLock *)_lock;
	SYS_THREAD self = systhread_current();

#ifdef EXTRA_DEBUGGING_LOGS
	/* avoid recursing into logging while locking the log's own mutex */
	if (_lock != logMutex)
		WOLog(WO_DBG, " locking %s from %s:%d", lock->name, file, line);
#endif
	crit_enter(lock->crit);
	/* wait until the lock is free or already owned by this thread */
	while (lock->lockingThread != self && lock->lockCount != 0)
		condvar_wait(lock->condvar);
	lock->lockingThread = self;
	lock->lockCount++;
	crit_exit(lock->crit);
}
/*---------------------------------------------------------------------------*
 *	return B-channel statistics
 *
 *	Snapshot and reset the byte counters for (unit, chan_no) under
 *	a critical section.
 *---------------------------------------------------------------------------*/
static void
iwic_bchannel_stat(int unit, int chan_no, bchan_statistics_t *bsp)
{
	struct iwic_bchan *bch = &iwic_find_sc(unit)->sc_bchan[chan_no];

	crit_enter();
	bsp->outbytes = bch->txcount;
	bch->txcount = 0;
	bsp->inbytes = bch->rxcount;
	bch->rxcount = 0;
	crit_exit();
}