static int
ukbd_getc(ukbd_state_t *state, int wait)
{
	int c;

	if (state->ks_polling) {
		DPRINTFN(1, ("ukbd_getc: polling\n"));
		crit_enter();
		while (state->ks_inputs <= 0) {
			usbd_dopoll(state->ks_iface);
			if (wait == 0)
				break;
		}
		crit_exit();
	}
	crit_enter();
	if (state->ks_inputs <= 0) {
		c = -1;
	} else {
		c = state->ks_input[state->ks_inputhead];
		--state->ks_inputs;
		state->ks_inputhead = (state->ks_inputhead + 1) % INPUTBUFSIZE;
	}
	crit_exit();
	return c;
}
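/*
 * Producer-side sketch for the ring buffer consumed above.  Illustration
 * only: the helper name and the ks_inputtail field are assumed to mirror
 * ks_inputhead and are not taken from this file.  The same critical
 * section excludes the dequeue in ukbd_getc().
 */
static void
ukbd_enqueue(ukbd_state_t *state, int c)
{
	crit_enter();
	if (state->ks_inputs < INPUTBUFSIZE) {
		state->ks_input[state->ks_inputtail] = c;
		state->ks_inputtail = (state->ks_inputtail + 1) % INPUTBUFSIZE;
		++state->ks_inputs;
	}
	crit_exit();
}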
static void
cdc_tx_done(void *buf, ssize_t len, void *data)
{
	struct cdc_ctx *ctx = data;

	if (len <= 0)
		goto queue;

	crit_enter();
	ctx->out_sent += len;
	memcpy(ctx->outbuf, &ctx->outbuf[ctx->out_sent],
	       ctx->out_pos - ctx->out_sent);
	ctx->out_pos -= ctx->out_sent;
	ctx->out_sent = 0;
	ctx->out_queued = 0;
	crit_exit();

	if (ctx->data_sent_cb != NULL)
		ctx->data_sent_cb(sizeof(ctx->outbuf) - ctx->out_pos);

queue:
	crit_enter();
	if (ctx->out_pos > 0 && !ctx->out_queued) {
		ctx->out_queued = 1;
		usb_tx(ctx->tx_pipe, ctx->outbuf, ctx->out_pos,
		       CDC_TX_SIZE, cdc_tx_done, ctx);
	}
	crit_exit();
}
static int
fw_write(struct dev_write_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct uio *uio = ap->a_uio;
	int err = 0;
	struct firewire_softc *sc;
	int unit = DEV2UNIT(dev);
	int slept = 0;
	struct fw_pkt *fp;
	struct fw_xferq *it;

	if (DEV_FWMEM(dev))
		return physwrite(ap);

	sc = devclass_get_softc(firewire_devclass, unit);
	it = ((struct fw_drv1 *)dev->si_drv1)->it;
	if (it == NULL || it->buf == NULL)
		return (EIO);
isoloop:
	if (it->stproc == NULL) {
		it->stproc = STAILQ_FIRST(&it->stfree);
		if (it->stproc != NULL) {
			crit_enter();
			STAILQ_REMOVE_HEAD(&it->stfree, link);
			crit_exit();
			it->queued = 0;
		} else if (slept == 0) {
			slept = 1;
			err = sc->fc->itx_enable(sc->fc, it->dmach);
			if (err)
				return err;
			err = tsleep(it, FWPRI, "fw_write", hz);
			if (err)
				return err;
			goto isoloop;
		} else {
			err = EIO;
			return err;
		}
	}
	fp = (struct fw_pkt *)fwdma_v_addr(it->buf,
			it->stproc->poffset + it->queued);
	err = uiomove((caddr_t)fp, sizeof(struct fw_isohdr), uio);
	err = uiomove((caddr_t)fp->mode.stream.payload,
			fp->mode.stream.len, uio);
	it->queued++;
	if (it->queued >= it->bnpacket) {
		crit_enter();
		STAILQ_INSERT_TAIL(&it->stvalid, it->stproc, link);
		crit_exit();
		it->stproc = NULL;
		err = sc->fc->itx_enable(sc->fc, it->dmach);
	}
	if (uio->uio_resid >= sizeof(struct fw_isohdr)) {
		slept = 0;
		goto isoloop;
	}
	return err;
}
/*
 * Create a co-processor thread for a virtual kernel.  This thread operates
 * outside of the virtual kernel cpu abstraction and may only make direct
 * cothread and libc calls.
 */
cothread_t
cothread_create(void (*thr_func)(cothread_t cotd),
		void (*thr_intr)(cothread_t cotd),
		void *arg, const char *name)
{
	cothread_t cotd;

	cotd = kmalloc(sizeof(*cotd), M_DEVBUF, M_WAITOK | M_ZERO);
	cotd->thr_intr = thr_intr;
	cotd->thr_func = thr_func;
	cotd->arg = arg;
	crit_enter();
	pthread_mutex_init(&cotd->mutex, NULL);
	pthread_cond_init(&cotd->cond, NULL);
	crit_exit();
	cotd->pintr = pthread_self();
	cotd->intr_id = register_int(1, (void *)thr_intr, cotd, name,
				     NULL, INTR_MPSAFE, 0);

	/*
	 * The vkernel's cpu_disable_intr() masks signals.  We don't want
	 * our coprocessor thread taking any unix signals :-)
	 */
	crit_enter();
	cpu_mask_all_signals();
	pthread_create(&cotd->pthr, NULL, (void *)cothread_thread, cotd);
	cpu_unmask_all_signals();
	crit_exit();
	return(cotd);
}
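/*
 * Minimal usage sketch (not from this file): the main function of a
 * co-processor thread that blocks in libc/syscalls and then interrupts
 * the vkernel.  cothread_lock()/cothread_unlock()/cothread_intr() are
 * assumed from the usual vkernel cothread API, and the mydev_* names
 * are hypothetical.
 */
static void
mydev_cothread(cothread_t cotd)
{
	void *sc = cotd->arg;		/* hypothetical device state */

	cothread_lock(cotd, 0);
	for (;;) {
		cothread_unlock(cotd, 0);
		mydev_wait_for_data(sc);	/* blocking libc/syscall ok here */
		cothread_lock(cotd, 0);
		cothread_intr(cotd);		/* fire thr_intr in vkernel context */
	}
}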
NSAPI_PUBLIC void
cache_destroy(void *cache_ptr)
{
    cache_t *cache = (cache_t *)cache_ptr;
    cache_t *search, *last;
    cache_entry_t *ptr;

    SOLARIS_PROBE(cache_destroy_start, "cache");
#ifdef IRIX
    NS_ASSERT(!cache->fast_mode);
#endif
    NS_ASSERT(cache_crit);
    NS_ASSERT(cache_ptr);
#ifdef CACHE_DEBUG
    NS_ASSERT(cache->magic == CACHE_MAGIC);
#endif

    crit_enter(cache_crit);
    crit_enter(cache->lock);

    ptr = cache->lru_head;
    while (ptr) {
        /* Caller MUST bump the access_count before calling delete.
         * We can do this since we hold the cache lock.
         */
        cache_use_increment(cache, ptr);
        cache_delete(cache, ptr, 0);
        ptr = cache->lru_head;
    }

    PERM_FREE(cache->table);
    cache->max_size = 0;
    cache->hash_size = 0;

    for (last = NULL, search = cache_list; search;
         last = search, search = search->next)
        if (search == cache)
            break;
    if (search) {
        if (last)
            last->next = search->next;
        else
            cache_list = search->next;
    } else {
        ereport(LOG_WARN,
                XP_GetAdminStr(DBT_cacheDestroyCacheTablesAppearCor_));
    }

    crit_exit(cache_crit);
    crit_exit(cache->lock);
    crit_terminate(cache->lock);
    PERM_FREE(cache);
    SOLARIS_PROBE(cache_destroy_end, "cache");
}
/*
 * lwkt_thread_replyport() - Backend to lwkt_replymsg()
 *
 * Called with the reply port as an argument but in the context of the
 * original target port.  Completion must occur on the target port's
 * cpu.
 *
 * The critical section protects us from IPIs on this CPU.
 */
static void
lwkt_thread_replyport(lwkt_port_t port, lwkt_msg_t msg)
{
	int flags;

	KKASSERT((msg->ms_flags & (MSGF_DONE | MSGF_QUEUED | MSGF_INTRANSIT)) == 0);

	if (msg->ms_flags & MSGF_SYNC) {
		/*
		 * If a synchronous completion has been requested, just wakeup
		 * the message without bothering to queue it to the target port.
		 *
		 * Assume the target thread is non-preemptive, so no critical
		 * section is required.
		 */
		if (port->mpu_td->td_gd == mycpu) {
			crit_enter();
			flags = msg->ms_flags;
			cpu_sfence();
			msg->ms_flags |= MSGF_DONE | MSGF_REPLY;
			if (port->mp_flags & MSGPORTF_WAITING)
				_lwkt_schedule_msg(port->mpu_td, flags);
			crit_exit();
		} else {
#ifdef INVARIANTS
			atomic_set_int(&msg->ms_flags, MSGF_INTRANSIT);
#endif
			atomic_set_int(&msg->ms_flags, MSGF_REPLY);
			lwkt_send_ipiq(port->mpu_td->td_gd,
				       (ipifunc1_t)lwkt_thread_replyport_remote,
				       msg);
		}
	} else {
		/*
		 * If an asynchronous completion has been requested the message
		 * must be queued to the reply port.
		 *
		 * A critical section is required to interlock the port queue.
		 */
		if (port->mpu_td->td_gd == mycpu) {
			crit_enter();
			_lwkt_enqueue_reply(port, msg);
			if (port->mp_flags & MSGPORTF_WAITING)
				_lwkt_schedule_msg(port->mpu_td, msg->ms_flags);
			crit_exit();
		} else {
#ifdef INVARIANTS
			atomic_set_int(&msg->ms_flags, MSGF_INTRANSIT);
#endif
			atomic_set_int(&msg->ms_flags, MSGF_REPLY);
			lwkt_send_ipiq(port->mpu_td->td_gd,
				       (ipifunc1_t)lwkt_thread_replyport_remote,
				       msg);
		}
	}
}
static void
in6_rtqtimo(void *rock)
{
	struct radix_node_head *rnh = rock;
	struct rtqk_arg arg;
	struct timeval atv;
	static time_t last_adjusted_timeout = 0;

	arg.found = arg.killed = 0;
	arg.rnh = rnh;
	arg.nextstop = time_second + rtq_timeout;
	arg.draining = arg.updating = 0;
	crit_enter();
	rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
	crit_exit();

	/*
	 * Attempt to be somewhat dynamic about this:
	 * If there are ``too many'' routes sitting around taking up space,
	 * then crank down the timeout, and see if we can't make some more
	 * go away.  However, we make sure that we will never adjust more
	 * than once in rtq_timeout seconds, to keep from cranking down too
	 * hard.
	 */
	if ((arg.found - arg.killed > rtq_toomany) &&
	    (time_second - last_adjusted_timeout >= rtq_timeout) &&
	    rtq_reallyold > rtq_minreallyold) {
		rtq_reallyold = 2 * rtq_reallyold / 3;
		if (rtq_reallyold < rtq_minreallyold) {
			rtq_reallyold = rtq_minreallyold;
		}

		last_adjusted_timeout = time_second;
#ifdef DIAGNOSTIC
		log(LOG_DEBUG, "in6_rtqtimo: adjusted rtq_reallyold to %d",
		    rtq_reallyold);
#endif
		arg.found = arg.killed = 0;
		arg.updating = 1;
		crit_enter();
		rnh->rnh_walktree(rnh, in6_rtqkill, &arg);
		crit_exit();
	}

	atv.tv_usec = 0;
	atv.tv_sec = arg.nextstop;
	callout_reset(&in6_rtqtimo_ch[mycpuid], tvtohz_high(&atv),
		      in6_rtqtimo, rock);
}
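/*
 * Worked example of the decay above (default values assumed, not taken
 * from this file: rtq_reallyold = 3600s, rtq_minreallyold = 10s).  Under
 * sustained route pressure successive passes give
 *
 *	3600 -> 2400 -> 1600 -> 1066 -> ...
 *
 * until clamped at rtq_minreallyold, with at most one adjustment per
 * rtq_timeout interval.
 */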
/* mark a buffer dirty and ready for delayed writing */
void
mark_buffer_dirty(struct buf *bh)
{
	crit_enter();
	bh->b_flags |= B_DIRTY;
	crit_exit();
}
static int
lwkt_thread_putport(lwkt_port_t port, lwkt_msg_t msg)
{
	KKASSERT((msg->ms_flags & (MSGF_DONE | MSGF_REPLY)) == 0);

	msg->ms_target_port = port;
	if (port->mpu_td->td_gd == mycpu) {
		crit_enter();
		_lwkt_pushmsg(port, msg);
		if (port->mp_flags & MSGPORTF_WAITING)
			_lwkt_schedule_msg(port->mpu_td, msg->ms_flags);
		crit_exit();
	} else {
#ifdef INVARIANTS
		/*
		 * Cleanup.
		 *
		 * An atomic op is needed on ms_flags vs originator.  Also
		 * note that the originator might be using a different type
		 * of msgport.
		 */
		atomic_set_int(&msg->ms_flags, MSGF_INTRANSIT);
#endif
		lwkt_send_ipiq(port->mpu_td->td_gd,
			       (ipifunc1_t)lwkt_thread_putport_remote, msg);
	}
	return (EASYNC);
}
/*
 * Exchange the two most recent tokens on the tokref stack.  This allows
 * you to release a token out of order.
 *
 * We have to be careful about the case where the top two tokens are
 * the same token.  In this case tok->t_ref will point to the deeper
 * ref and must remain pointing to the deeper ref.  If we were to swap
 * it the first release would clear the token even though a second
 * ref is still present.
 *
 * Only exclusively held tokens contain a reference to the tokref which
 * has to be flipped along with the swap.
 */
void
lwkt_token_swap(void)
{
	lwkt_tokref_t ref1, ref2;
	lwkt_token_t tok1, tok2;
	long count1, count2;
	thread_t td = curthread;

	crit_enter();

	ref1 = td->td_toks_stop - 1;
	ref2 = td->td_toks_stop - 2;
	KKASSERT(ref1 >= &td->td_toks_base);
	KKASSERT(ref2 >= &td->td_toks_base);

	tok1 = ref1->tr_tok;
	tok2 = ref2->tr_tok;
	count1 = ref1->tr_count;
	count2 = ref2->tr_count;

	if (tok1 != tok2) {
		ref1->tr_tok = tok2;
		ref1->tr_count = count2;
		ref2->tr_tok = tok1;
		ref2->tr_count = count1;
		if (tok1->t_ref == ref1)
			tok1->t_ref = ref2;
		if (tok2->t_ref == ref2)
			tok2->t_ref = ref1;
	}

	crit_exit();
}
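/*
 * Usage sketch (illustrative, not from this file): standard
 * lwkt_gettoken()/lwkt_reltoken() pairing assumed.  The swap moves the
 * older token to the top of the tokref stack so it can be released
 * first without violating the LIFO discipline.
 */
static void
example_release_out_of_order(lwkt_token_t tokA, lwkt_token_t tokB)
{
	lwkt_gettoken(tokA);
	lwkt_gettoken(tokB);
	/* ... work that needs both tokens ... */
	lwkt_token_swap();		/* tokA is now on top of the stack */
	lwkt_reltoken(tokA);		/* release the older token first */
	/* ... work that still needs tokB ... */
	lwkt_reltoken(tokB);
}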
static void
uhub_child_detached(device_t self, device_t child)
{
	struct uhub_softc *sc = device_get_softc(self);
	usbd_device_handle devhub = sc->sc_hub;
	usbd_device_handle dev = NULL;
	int nports, port, i = 0;

	crit_enter();
	nports = devhub->hub->hubdesc.bNbrPorts;
	for (port = 0; port < nports; port++) {
		dev = devhub->hub->ports[port].device;
		if (dev && dev->subdevs) {
			for (i = 0; dev->subdevs[i]; ++i) {
				if (dev->subdevs[i] == child)
					goto found_dev;
			}
		}
	}
	crit_exit();
	return;

found_dev:
#if 0
	device_printf(dev->subdevs[i], "at %s", device_get_nameunit(self));
	if (port != 0)
		kprintf(" port %d", port);
	kprintf(" (addr %d) disconnected\n", dev->address);
#endif
	kfree(device_get_ivars(dev->subdevs[i]), M_USB);
	dev->subdevs[i] = NULL;
	crit_exit();
}
static void
InterruptWrapper(void *arg)
{
	crit_enter();
	InterruptHandler(arg);
	crit_exit();
}
static int
acpi_cpu_global_cx_lowest_sysctl(SYSCTL_HANDLER_ARGS)
{
	struct acpi_cpu_softc *sc;
	char state[8];
	int val, error, i;

	ksnprintf(state, sizeof(state), "C%d", cpu_cx_lowest + 1);
	error = sysctl_handle_string(oidp, state, sizeof(state), req);
	if (error != 0 || req->newptr == NULL)
		return (error);
	if (strlen(state) < 2 || toupper(state[0]) != 'C')
		return (EINVAL);
	val = (int)strtol(state + 1, NULL, 10) - 1;
	if (val < 0 || val > cpu_cx_count - 1)
		return (EINVAL);
	cpu_cx_lowest = val;

	/* Update the new lowest useable Cx state for all CPUs. */
	crit_enter();
	for (i = 0; i < cpu_ndevices; i++) {
		sc = device_get_softc(cpu_devices[i]);
		error = acpi_cpu_set_cx_lowest(sc, val);
		if (error) {
			KKASSERT(i == 0);
			break;
		}
	}
	crit_exit();
	return error;
}
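/*
 * Usage sketch (sysctl node name assumed from the usual ACPI cpu driver
 * layout, not confirmed by this file):
 *
 *	sysctl hw.acpi.cpu.cx_lowest=C2
 *
 * "C2" parses to val = 1 and the loop above propagates the new floor to
 * every CPU's softc.
 */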
/* copy a line under the mouse pointer */
static void
mouse_cut_line(scr_stat *scp)
{
	int i;
	int j;

	if (scp->status & MOUSE_VISIBLE) {
		/* remove the current cut mark */
		crit_enter();
		if (scp->mouse_cut_start <= scp->mouse_cut_end) {
			mark_for_update(scp, scp->mouse_cut_start);
			mark_for_update(scp, scp->mouse_cut_end);
		} else if (scp->mouse_cut_end >= 0) {
			mark_for_update(scp, scp->mouse_cut_end);
			mark_for_update(scp, scp->mouse_cut_start);
		}

		/* mark the entire line */
		scp->mouse_cut_start =
		    (scp->mouse_pos / scp->xsize) * scp->xsize;
		scp->mouse_cut_end = scp->mouse_cut_start + scp->xsize - 1;
		mark_for_update(scp, scp->mouse_cut_start);
		mark_for_update(scp, scp->mouse_cut_end);
		crit_exit();

		/* copy the line into the cut buffer */
		for (i = 0, j = scp->mouse_cut_start;
		     j <= scp->mouse_cut_end; ++j)
			cut_buffer[i++] = sc_vtb_getc(&scp->vtb, j);
		cut_buffer[i++] = '\r';
		cut_buffer[i] = '\0';
		scp->status |= MOUSE_CUTTING;
	}
}
/*
 * This is needed only if native IP is used, or (unlikely) if NCP is
 * implemented on the socket level.
 */
static int
ncp_soconnect(struct socket *so, struct sockaddr *target, struct thread *td)
{
	int error;

	error = soconnect(so, (struct sockaddr *)target, td);
	if (error)
		return error;

	/*
	 * Wait for the connection to complete.  Cribbed from the
	 * connect system call but with the wait timing out so
	 * that interruptible mounts don't hang here for a long time.
	 */
	error = EIO;
	crit_enter();
	while ((so->so_state & SS_ISCONNECTING) && so->so_error == 0) {
		tsleep((caddr_t)&so->so_timeo, 0, "ncpcon", 2 * hz);
		if ((so->so_state & SS_ISCONNECTING) &&
		    so->so_error == 0 /*&& rep &&*/) {
			soclrstate(so, SS_ISCONNECTING);
			crit_exit();
			goto bad;
		}
	}
	if (so->so_error) {
		error = so->so_error;
		so->so_error = 0;
		crit_exit();
		goto bad;
	}
	crit_exit();
	error = 0;
bad:
	return error;
}
/***************************************************************************
Function: sFlushRxFIFO
Purpose:  Flush the Rx FIFO
Call:	  sFlushRxFIFO(ChP)
	  CHANNEL_T *ChP; Ptr to channel structure
Return:   void
Comments: To prevent data from being enqueued or dequeued in the Rx FIFO
	  while it is being flushed the Rx FIFO is disabled.  After this
	  operation a 2 uS delay is done to allow the receive processor
	  to stop before the counters and pointers are cleared.  These
	  items are handled inside this function.
Warnings: No context switches are allowed while executing this function.
*/
void
sFlushRxFIFO(CHANNEL_T *ChP)
{
	int i;
	Byte_t Ch;		/* channel number within AIOP */
	int RxFIFOEnabled;	/* TRUE if Rx FIFO enabled */

	if (sGetRxCnt(ChP) == 0)	/* Rx FIFO empty */
		return;			/* don't need to flush */

	crit_enter();
	RxFIFOEnabled = FALSE;
	if (ChP->R[0x32] == 0x08) {	/* Rx FIFO is enabled */
		RxFIFOEnabled = TRUE;
		sDisRxFIFO(ChP);	/* disable it */
		for (i = 0; i < 2000/200; i++)	/* delay 2 uS to allow proc to disable FIFO */
			rp_readch1(ChP, _INT_CHAN);	/* depends on bus i/o timing */
	}
	sGetChanStatus(ChP);	/* clear any pending Rx errors in chan stat */
	Ch = (Byte_t)sGetChanNum(ChP);
	rp_writech1(ChP, _CMD_REG, Ch | RESRXFCNT);	/* apply reset Rx FIFO count */
	rp_writech1(ChP, _CMD_REG, Ch);			/* remove reset Rx FIFO count */
	rp_writech2(ChP, _INDX_ADDR, ChP->RxFIFOPtrs);	/* clear Rx out ptr */
	rp_writech2(ChP, _INDX_DATA, 0);
	rp_writech2(ChP, _INDX_ADDR, ChP->RxFIFOPtrs + 2); /* clear Rx in ptr */
	rp_writech2(ChP, _INDX_DATA, 0);
	if (RxFIFOEnabled)
		sEnRxFIFO(ChP);		/* enable Rx FIFO */
	crit_exit();
}
static int
stghw_start_selection(struct stg_softc *sc, struct slccb *cb)
{
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct targ_info *ti = cb->ti;
	u_int8_t stat;

	sc->sc_tmaxcnt = cb->ccb_tcmax * 1000 * 1000;
	sc->sc_dataout_timeout = 0;
	sc->sc_ubf_timeout = 0;
	stghw_bcr_write_1(sc, BCTL_BUSFREE);
	bus_space_write_1(iot, ioh, tmc_ictl, sc->sc_icinit);

	crit_enter();
	stat = bus_space_read_1(iot, ioh, tmc_astat);
	if ((stat & ASTAT_INT) != 0) {
		crit_exit();
		return SCSI_LOW_START_FAIL;
	}

	bus_space_write_1(iot, ioh, tmc_scsiid, sc->sc_idbit);
	bus_space_write_1(iot, ioh, tmc_fctl, sc->sc_fcRinit | FCTL_ARBIT);
	crit_exit();

	SCSI_LOW_SETUP_PHASE(ti, PH_ARBSTART);
	return SCSI_LOW_START_OK;
}
NSAPI_PUBLIC void
cache_lock(cache_t *cache)
{
    SOLARIS_PROBE(cache_lock_start, "cache");
    crit_enter(cache->lock);
    SOLARIS_PROBE(cache_lock_end, "cache");
}
/***************************************************************************
Function: sFlushTxFIFO
Purpose:  Flush the Tx FIFO
Call:	  sFlushTxFIFO(ChP)
	  CHANNEL_T *ChP; Ptr to channel structure
Return:   void
Comments: To prevent data from being enqueued or dequeued in the Tx FIFO
	  while it is being flushed the receive processor is stopped
	  and the transmitter is disabled.  After these operations a
	  4 uS delay is done before clearing the pointers to allow
	  the receive processor to stop.  These items are handled inside
	  this function.
Warnings: No context switches are allowed while executing this function.
*/
void
sFlushTxFIFO(CHANNEL_T *ChP)
{
	int i;
	Byte_t Ch;	/* channel number within AIOP */
	int TxEnabled;	/* TRUE if transmitter enabled */

	crit_enter();
	if (sGetTxCnt(ChP) == 0) {	/* Tx FIFO empty */
		crit_exit();
		return;			/* don't need to flush */
	}

	TxEnabled = FALSE;
	if (ChP->TxControl[3] & TX_ENABLE) {
		TxEnabled = TRUE;
		sDisTransmit(ChP);	/* disable transmitter */
	}
	sStopRxProcessor(ChP);		/* stop Rx processor */
	for (i = 0; i < 4000/200; i++)	/* delay 4 uS to allow proc to stop */
		rp_readch1(ChP, _INT_CHAN);	/* depends on bus i/o timing */
	Ch = (Byte_t)sGetChanNum(ChP);
	rp_writech1(ChP, _CMD_REG, Ch | RESTXFCNT);	/* apply reset Tx FIFO count */
	rp_writech1(ChP, _CMD_REG, Ch);			/* remove reset Tx FIFO count */
	rp_writech2(ChP, _INDX_ADDR, ChP->TxFIFOPtrs);	/* clear Tx in/out ptrs */
	rp_writech2(ChP, _INDX_DATA, 0);
	if (TxEnabled)
		sEnTransmit(ChP);	/* enable transmitter */
	sStartRxProcessor(ChP);		/* restart Rx processor */
	crit_exit();
}
/***************************************************************************
Function: sWriteTxPrioByte
Purpose:  Write a byte of priority transmit data to a channel
Call:	  sWriteTxPrioByte(ChP,Data)
	  CHANNEL_T *ChP; Ptr to channel structure
	  Byte_t Data; The transmit data byte
Return:   int: 1 if the byte is successfully written, otherwise 0.
Comments: The priority byte is transmitted before any data in the Tx FIFO.
Warnings: No context switches are allowed while executing this function.
*/
int
sWriteTxPrioByte(CHANNEL_T *ChP, Byte_t Data)
{
	Byte_t DWBuf[4];	/* buffer for double word writes */
	Word_t *WordPtr;	/* must be far because Win SS != DS */

	crit_enter();
	if (sGetTxCnt(ChP) > 1) {	/* write it to Tx priority buffer */
		rp_writech2(ChP, _INDX_ADDR, ChP->TxPrioCnt); /* get priority buffer status */
		if (rp_readch1(ChP, _INDX_DATA) & PRI_PEND) {	/* priority buffer busy */
			crit_exit();
			return(0);	/* nothing sent */
		}
		WordPtr = (Word_t *)(&DWBuf[0]);
		*WordPtr = ChP->TxPrioBuf;	/* data byte address */
		DWBuf[2] = Data;		/* data byte value */
		rp_writech4(ChP, _INDX_ADDR, *((DWord_t *)(&DWBuf[0]))); /* write it out */

		*WordPtr = ChP->TxPrioCnt;	/* Tx priority count address */
		DWBuf[2] = PRI_PEND + 1;	/* indicate 1 byte pending */
		DWBuf[3] = 0;			/* priority buffer pointer */
		rp_writech4(ChP, _INDX_ADDR, *((DWord_t *)(&DWBuf[0]))); /* write it out */
	} else {			/* write it to Tx FIFO */
		sWriteTxByte(ChP, sGetTxRxDataIO(ChP), Data);
	}
	crit_exit();
	return(1);	/* 1 byte sent */
}
/*
 * SPANS ARP SVC activation notification
 *
 * This function is called when a previously opened SVC has successfully
 * been connected.
 *
 * Arguments:
 *	ivp	pointer to SVC's IPVCC control block
 *
 * Returns:
 *	0	activation processing successful
 *	errno	activation failed - reason indicated
 *
 */
int
spansarp_svcactive(struct ipvcc *ivp)
{
	struct spansarp *sap;

	crit_enter();

	/*
	 * Find an entry for the destination address
	 */
	SPANSARP_LOOKUP(ivp->iv_dst.s_addr, sap);
	if (sap) {
		/*
		 * IP is finished with entry, so remove IP VCC from chain
		 */
		UNLINK(ivp, struct ipvcc, sap->sa_ivp, iv_arpnext);
		ivp->iv_arpent = NULL;

		/*
		 * This seems like a reasonable reason to refresh the entry
		 */
		sap->sa_reftime = 0;
	}

	crit_exit();
	return (0);
}
/*---------------------------------------------------------------------------*
 *	send MSG_CONNECT_ACTIVE_IND message to userland
 *---------------------------------------------------------------------------*/
void
i4b_l4_connect_active_ind(call_desc_t *cd)
{
	struct mbuf *m;

	crit_enter();
	cd->last_active_time = cd->connect_time = SECOND;

	NDBGL4(L4_TIMO, "last_active/connect_time=%ld", (long)cd->connect_time);

	i4b_link_bchandrvr(cd);

	(*cd->dlt->line_connected)(cd->driver_unit, (void *)cd);

	i4b_l4_setup_timeout(cd);
	crit_exit();

	if ((m = i4b_Dgetmbuf(sizeof(msg_connect_active_ind_t))) != NULL) {
		msg_connect_active_ind_t *mp =
		    (msg_connect_active_ind_t *)m->m_data;

		mp->header.type = MSG_CONNECT_ACTIVE_IND;
		mp->header.cdid = cd->cdid;
		mp->controller = cd->controller;
		mp->channel = cd->channelid;
		if (cd->datetime[0] != '\0')
			strcpy(mp->datetime, cd->datetime);
		else
			mp->datetime[0] = '\0';
		i4bputqueue(m);
	}
}
/* cache_service_debug()
 * To force the creation of good debugging tools, all known caches must
 * have a debugging routine.  The cache_service_debug() routine is a common
 * routine that can be inserted via NSAPI to view the status of all
 * known caches.  This is a hidden entry point which can be enabled when
 * debugging is needed.
 */
NSAPI_PUBLIC int
cache_service_debug(pblock *pb, Session *sn, Request *rq)
{
    cache_t *ptr;
    char buf[MAX_DEBUG_LINE];
    int len;

    NS_ASSERT(cache_crit);

    param_free(pblock_removekey(pb_key_content_type, rq->srvhdrs));
    pblock_nvinsert("content-type", "text/html", rq->srvhdrs);

    len = util_sprintf(buf, XP_GetClientStr(DBT_http10200OkNcontentTypeTextHtmlN_));
    net_write(sn->csd, buf, len);

    len = util_sprintf(buf, XP_GetClientStr(DBT_H2NetscapeCacheStatusReportH2N_));
    net_write(sn->csd, buf, len);

    crit_enter(cache_crit);
    if (cache_list) {
        len = util_sprintf(buf, "<HR>");
        net_write(sn->csd, buf, len);
        for (ptr = cache_list; ptr; ptr = ptr->next) {
            if (ptr->virtual_fn->debug_fn) {
                if (ptr->virtual_fn->debug_fn(pb, sn, rq) == REQ_ABORTED) {
                    /* don't leak cache_crit on the early-out path */
                    crit_exit(cache_crit);
                    return REQ_ABORTED;
                }
            }
        }
    } else {
        len = util_sprintf(buf, XP_GetClientStr(DBT_noCachesOnSystemP_));
        net_write(sn->csd, buf, len);
    }
    crit_exit(cache_crit);

    return REQ_PROCEED;
}
static int
udp6_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in6 addrs[2];
	struct inpcb *inp;
	int error;

	error = priv_check(req->td, PRIV_ROOT);
	if (error)
		return (error);

	if (req->newlen != sizeof(addrs))
		return (EINVAL);
	if (req->oldlen != sizeof(struct ucred))
		return (EINVAL);
	error = SYSCTL_IN(req, addrs, sizeof(addrs));
	if (error)
		return (error);
	crit_enter();
	inp = in6_pcblookup_hash(&udbinfo[0], &addrs[1].sin6_addr,
				 addrs[1].sin6_port, &addrs[0].sin6_addr,
				 addrs[0].sin6_port, 1, NULL);
	if (!inp || !inp->inp_socket) {
		error = ENOENT;
		goto out;
	}
	error = SYSCTL_OUT(req, inp->inp_socket->so_cred,
			   sizeof(struct ucred));
out:
	crit_exit();
	return (error);
}
/* check if data is waiting */
static int
ukbd_check(keyboard_t *kbd)
{
	ukbd_state_t *state;

	if (!KBD_IS_ACTIVE(kbd))
		return FALSE;

	state = (ukbd_state_t *)kbd->kb_data;
	if (state->ks_polling) {
		crit_enter();
		usbd_dopoll(state->ks_iface);
		crit_exit();
	}
#ifdef UKBD_EMULATE_ATSCANCODE
	if (((ukbd_state_t *)kbd->kb_data)->ks_buffered_char[0])
		return TRUE;
#endif
	if (((ukbd_state_t *)kbd->kb_data)->ks_inputs > 0)
		return TRUE;

	return FALSE;
}
static int
logo_saver(video_adapter_t *adp, int blank)
{
	int i;

	if (blank) {
		/* switch to graphics mode */
		if (blanked <= 0) {
			crit_enter();
			set_video_mode(adp, scrmode);
			load_palette(adp, logo_pal);
#if 0 /* XXX conflict */
			set_border(adp, 0);
#endif
			blanked++;
			vid = (u_char *)adp->va_window;
			banksize = adp->va_window_size;
			bpsl = adp->va_line_width;
			crit_exit();
			for (i = 0; i < bpsl * scrh; i += banksize) {
				set_origin(adp, i);
				bzero(vid, banksize);
			}
		}
		logo_update(adp);
	} else {
		blanked = 0;
	}
	return 0;
}
/*
 * Called on a per-cpu basis
 */
void
initclocks_pcpu(void)
{
	struct globaldata *gd = mycpu;

	crit_enter();
	if (gd->gd_cpuid == 0) {
		gd->gd_time_seconds = 1;
		gd->gd_cpuclock_base = sys_cputimer->count();
	} else {
		/* XXX */
		gd->gd_time_seconds = globaldata_find(0)->gd_time_seconds;
		gd->gd_cpuclock_base = globaldata_find(0)->gd_cpuclock_base;
	}

	systimer_intr_enable();

#ifdef IFPOLL_ENABLE
	ifpoll_init_pcpu(gd->gd_cpuid);
#endif

	/*
	 * Use a non-queued periodic systimer to prevent multiple ticks from
	 * building up if the sysclock jumps forward (8254 gets reset).  The
	 * sysclock will never jump backwards.  Our time sync is based on
	 * the actual sysclock, not the ticks count.
	 */
	systimer_init_periodic_nq(&gd->gd_hardclock, hardclock, NULL, hz);
	systimer_init_periodic_nq(&gd->gd_statclock, statclock, NULL, stathz);
	/* XXX correct the frequency for scheduler / estcpu tests */
	systimer_init_periodic_nq(&gd->gd_schedclock, schedclock,
				  NULL, ESTCPUFREQ);
	crit_exit();
}
void
fputc(int c, FILE *f)
{
	crit_enter();
	f->outbuf[f->outbuf_head] = c;
	f->outbuf_head = (f->outbuf_head + 1) % sizeof(f->outbuf);

	/*
	 * Number of buffered characters.  The difference must be computed
	 * in a signed type (as an unsigned size_t the wrap test below could
	 * never be true), and the wrap adjustment is the full buffer size.
	 */
	ssize_t buffered = (ssize_t)f->outbuf_head - (ssize_t)f->outbuf_tail;
	if (buffered < 0)
		buffered += sizeof(f->outbuf);

	/* flush on newline or if more than half full */
	if (f->ops && (c == '\n' || (size_t)buffered > sizeof(f->outbuf) / 2)) {
		size_t n;

		if (f->outbuf_head > f->outbuf_tail) {
			n = buffered;
		} else {
			/* write the contiguous tail..end chunk first */
			n = sizeof(f->outbuf) - f->outbuf_tail;
		}
		size_t written = f->ops->write(&f->outbuf[f->outbuf_tail], n,
		    f->ops_data);
		f->outbuf_tail = (f->outbuf_tail + written) % sizeof(f->outbuf);
	}
	crit_exit();
}
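/*
 * Usage sketch (hypothetical helper, not from this file).  Characters
 * accumulate in f->outbuf and ops->write() runs on a newline or once the
 * ring is more than half full, so a line-oriented caller gets roughly one
 * flush per line.
 */
static void
fputs_simple(const char *s, FILE *f)
{
	while (*s)
		fputc(*s++, f);	/* a trailing '\n' in s triggers the flush */
}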
static __inline void
schedpoll(struct pollctx *pctx)
{
	crit_enter();
	schedpoll_oncpu((netmsg_t)&pctx->poll_netmsg);
	crit_exit();
}
static void
kcollect_thread(void *dummy)
{
	uint32_t i;
	int n;

	for (;;) {
		lockmgr(&kcollect_lock, LK_EXCLUSIVE);
		i = kcollect_index % kcollect_samples;
		bzero(&kcollect_ary[i], sizeof(kcollect_ary[i]));
		crit_enter();
		kcollect_ary[i].ticks = ticks;
		getmicrotime(&kcollect_ary[i].realtime);
		crit_exit();
		for (n = 0; n < KCOLLECT_ENTRIES; ++n) {
			if (kcollect_callback[n]) {
				kcollect_ary[i].data[n] =
					kcollect_callback[n](n);
			}
		}
		cpu_sfence();
		++kcollect_index;
		lockmgr(&kcollect_lock, LK_RELEASE);
		tsleep(&dummy, 0, "sleep", hz * KCOLLECT_INTERVAL);
	}
}