static void
_rm_unlock_hard(struct thread *td, struct rm_priotracker *tracker)
{
    if (td->td_owepreempt) {
        td->td_critnest++;
        critical_exit();
    }

    if (!tracker->rmp_flags)
        return;

    mtx_lock_spin(&rm_spinlock);
    LIST_REMOVE(tracker, rmp_qentry);

    if (tracker->rmp_flags & RMPF_SIGNAL) {
        struct rmlock *rm;
        struct turnstile *ts;

        rm = tracker->rmp_rmlock;

        turnstile_chain_lock(&rm->lock_object);
        mtx_unlock_spin(&rm_spinlock);

        ts = turnstile_lookup(&rm->lock_object);

        turnstile_signal(ts, TS_EXCLUSIVE_QUEUE);
        turnstile_unpend(ts, TS_EXCLUSIVE_LOCK);
        turnstile_chain_unlock(&rm->lock_object);
    } else
        mtx_unlock_spin(&rm_spinlock);
}
static int
vm_page_zero_idle(void)
{
    static int free_rover;
    vm_page_t m;

    mtx_lock_spin(&vm_page_queue_free_mtx);
    zero_state = 0;
    m = vm_pageq_find(PQ_FREE, free_rover, FALSE);
    if (m != NULL && (m->flags & PG_ZERO) == 0) {
        vm_pageq_remove_nowakeup(m);
        mtx_unlock_spin(&vm_page_queue_free_mtx);
        pmap_zero_page_idle(m);
        mtx_lock_spin(&vm_page_queue_free_mtx);
        m->flags |= PG_ZERO;
        vm_pageq_enqueue(PQ_FREE + m->pc, m);
        ++vm_page_zero_count;
        ++cnt_prezero;
        if (vm_page_zero_count >= ZIDLE_HI(cnt.v_free_count))
            zero_state = 1;
    }
    free_rover = (free_rover + PQ_PRIME2) & PQ_L2_MASK;
    mtx_unlock_spin(&vm_page_queue_free_mtx);
    return (1);
}
int
mc146818_getsecs(device_t dev, int *secp)
{
    struct mc146818_softc *sc;
    int sec, timeout;

    sc = device_get_softc(dev);

    timeout = 1000000;    /* XXX how long should we wait? */

#define FROMREG(x)    ((sc->sc_flag & MC146818_BCD) ? FROMBCD(x) : (x))

    for (;;) {
        mtx_lock_spin(&sc->sc_mtx);
        if (!((*sc->sc_mcread)(dev, MC_REGA) & MC_REGA_UIP)) {
            sec = FROMREG((*sc->sc_mcread)(dev, MC_SEC));
            mtx_unlock_spin(&sc->sc_mtx);
            break;
        }
        mtx_unlock_spin(&sc->sc_mtx);
        if (--timeout == 0) {
            device_printf(dev, "%s: timeout\n", __func__);
            return (EBUSY);
        }
    }
#undef FROMREG

    *secp = sec;
    return (0);
}
static int
pic_attach_isrc(struct mips_pic_softc *sc, struct intr_irqsrc *isrc, u_int irq)
{

    /*
     * 1. The link between ISRC and controller must be set atomically.
     * 2. In the rare case where consumers of a shared interrupt arrive
     *    here at the same moment, the setup must be done only once.
     */
    mtx_lock_spin(&sc->mutex);
    if (sc->pic_irqs[irq] != NULL) {
        mtx_unlock_spin(&sc->mutex);
        return (sc->pic_irqs[irq] == isrc ? 0 : EEXIST);
    }
    sc->pic_irqs[irq] = isrc;
    isrc->isrc_data = irq;
    mtx_unlock_spin(&sc->mutex);

    if (irq < NSOFT_IRQS)
        intr_irq_set_name(isrc, "sint%u", irq);
    else if (irq < NREAL_IRQS)
        intr_irq_set_name(isrc, "int%u", irq - NSOFT_IRQS);
    else
        panic("Invalid irq %u", irq);

    return (0);
}
/*
 * Peek at the full contents of a message buffer without marking any
 * data as read. `seqp' should point to an unsigned integer that
 * msgbuf_peekbytes() can use to retain state between calls so that
 * the whole message buffer can be read in multiple short reads.
 * To initialise this variable to the start of the message buffer,
 * call msgbuf_peekbytes() with a NULL `buf' parameter.
 *
 * Returns the number of characters that were placed in `buf'.
 */
int
msgbuf_peekbytes(struct msgbuf *mbp, char *buf, int buflen, u_int *seqp)
{
    u_int len, pos, wseq;

    mtx_lock_spin(&mbp->msg_lock);

    if (buf == NULL) {
        /* Just initialise *seqp. */
        *seqp = MSGBUF_SEQNORM(mbp, mbp->msg_wseq - mbp->msg_size);
        mtx_unlock_spin(&mbp->msg_lock);
        return (0);
    }

    wseq = mbp->msg_wseq;
    len = MSGBUF_SEQSUB(mbp, wseq, *seqp);
    if (len == 0) {
        mtx_unlock_spin(&mbp->msg_lock);
        return (0);
    }
    if (len > mbp->msg_size) {
        *seqp = MSGBUF_SEQNORM(mbp, wseq - mbp->msg_size);
        len = mbp->msg_size;
    }
    pos = MSGBUF_SEQ_TO_POS(mbp, *seqp);
    len = min(len, mbp->msg_size - pos);
    len = min(len, (u_int)buflen);
    bcopy(&mbp->msg_ptr[pos], buf, len);
    *seqp = MSGBUF_SEQNORM(mbp, *seqp + len);
    mtx_unlock_spin(&mbp->msg_lock);
    return (len);
}
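As a usage illustration only (not part of the original source), a caller can walk the whole buffer non-destructively by priming the sequence cursor with a NULL buffer and then looping until no data remains; the helper name below is hypothetical.

/*
 * Hypothetical caller: dump the entire message buffer in 128-byte
 * chunks without marking anything as read.
 */
static void
dump_msgbuf_example(struct msgbuf *mbp)
{
    char chunk[128];
    u_int seq;
    int n;

    /* A NULL buffer initialises `seq' to the start of the buffer. */
    msgbuf_peekbytes(mbp, NULL, 0, &seq);
    while ((n = msgbuf_peekbytes(mbp, chunk, sizeof(chunk), &seq)) > 0)
        printf("%.*s", n, chunk);    /* read position is untouched */
}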
/*
 * Read and mark as read a number of characters from a message buffer.
 * Returns the number of characters that were placed in `buf'.
 */
int
msgbuf_getbytes(struct msgbuf *mbp, char *buf, int buflen)
{
    u_int len, pos, wseq;

    mtx_lock_spin(&mbp->msg_lock);

    wseq = mbp->msg_wseq;
    len = MSGBUF_SEQSUB(mbp, wseq, mbp->msg_rseq);
    if (len == 0) {
        mtx_unlock_spin(&mbp->msg_lock);
        return (0);
    }
    if (len > mbp->msg_size) {
        mbp->msg_rseq = MSGBUF_SEQNORM(mbp, wseq - mbp->msg_size);
        len = mbp->msg_size;
    }
    pos = MSGBUF_SEQ_TO_POS(mbp, mbp->msg_rseq);
    len = min(len, mbp->msg_size - pos);
    len = min(len, (u_int)buflen);
    bcopy(&mbp->msg_ptr[pos], buf, len);
    mbp->msg_rseq = MSGBUF_SEQNORM(mbp, mbp->msg_rseq + len);
    mtx_unlock_spin(&mbp->msg_lock);
    return (len);
}
/**
 * @brief Read without advancing the read index.
 */
int
hv_ring_buffer_peek(
    hv_vmbus_ring_buffer_info    *in_ring_info,
    void                         *buffer,
    uint32_t                     buffer_len)
{
    uint32_t bytesAvailToWrite;
    uint32_t bytesAvailToRead;
    uint32_t nextReadLocation = 0;

    mtx_lock_spin(&in_ring_info->ring_lock);

    get_ring_buffer_avail_bytes(in_ring_info, &bytesAvailToRead,
        &bytesAvailToWrite);

    /*
     * Make sure there is something to read
     */
    if (bytesAvailToRead < buffer_len) {
        mtx_unlock_spin(&in_ring_info->ring_lock);
        return (EAGAIN);
    }

    /*
     * Convert to byte offset
     */
    nextReadLocation = get_next_read_location(in_ring_info);

    nextReadLocation = copy_from_ring_buffer(in_ring_info,
        (char *)buffer, buffer_len, nextReadLocation);

    mtx_unlock_spin(&in_ring_info->ring_lock);

    return (0);
}
/*
 * NOTE:
 * We assume (dlen + skip) == sizeof(channel packet).
 */
int
vmbus_rxbr_read(struct vmbus_rxbr *rbr, void *data, int dlen, uint32_t skip)
{
    uint32_t rindex, br_dsize = rbr->rxbr_dsize;

    KASSERT(dlen + skip > 0, ("invalid dlen %d, offset %u", dlen, skip));

    mtx_lock_spin(&rbr->rxbr_lock);

    if (vmbus_rxbr_avail(rbr) < dlen + skip + sizeof(uint64_t)) {
        mtx_unlock_spin(&rbr->rxbr_lock);
        return (EAGAIN);
    }

    /*
     * Copy channel packet from RX bufring.
     */
    rindex = VMBUS_BR_IDXINC(rbr->rxbr_rindex, skip, br_dsize);
    rindex = vmbus_rxbr_copyfrom(rbr, rindex, data, dlen);

    /*
     * Discard this channel packet's 64bits offset, which is useless to us.
     */
    rindex = VMBUS_BR_IDXINC(rindex, sizeof(uint64_t), br_dsize);

    /*
     * Update the read index _after_ the channel packet is fetched.
     */
    __compiler_membar();
    rbr->rxbr_rindex = rindex;

    mtx_unlock_spin(&rbr->rxbr_lock);

    return (0);
}
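To make the NOTE above concrete, a hypothetical caller might look like the sketch below; the function name and parameters are invented for illustration, and the only point is that dlen plus skip must add up to the full packet size.

/*
 * Hypothetical caller (names invented for illustration): read the
 * payload of a channel packet whose descriptor has already been
 * examined, skipping the descriptor so that dlen + skip equals the
 * full packet size, as the NOTE above requires. The trailing 64-bit
 * offset appended after each packet is consumed by vmbus_rxbr_read()
 * itself and is never copied to the caller.
 */
static int
rxbr_read_payload_example(struct vmbus_rxbr *rbr, void *payload,
    int pktlen, uint32_t hdrlen)
{

    return (vmbus_rxbr_read(rbr, payload, pktlen - hdrlen, hdrlen));
}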
/*
 * Write scattered channel packet to TX bufring.
 *
 * The offset of this channel packet is written as a 64bits value
 * immediately after this channel packet.
 */
int
vmbus_txbr_write(struct vmbus_txbr *tbr, const struct iovec iov[], int iovlen,
    boolean_t *need_sig)
{
    uint32_t old_windex, windex, total;
    uint64_t save_windex;
    int i;

    total = 0;
    for (i = 0; i < iovlen; i++)
        total += iov[i].iov_len;
    total += sizeof(save_windex);

    mtx_lock_spin(&tbr->txbr_lock);

    /*
     * NOTE:
     * If this write is going to make br_windex same as br_rindex,
     * i.e. the available space for write is same as the write size,
     * we can't do it then, since br_windex == br_rindex means that
     * the bufring is empty.
     */
    if (vmbus_txbr_avail(tbr) <= total) {
        mtx_unlock_spin(&tbr->txbr_lock);
        return (EAGAIN);
    }

    /* Save br_windex for later use */
    old_windex = tbr->txbr_windex;

    /*
     * Copy the scattered channel packet to the TX bufring.
     */
    windex = old_windex;
    for (i = 0; i < iovlen; i++) {
        windex = vmbus_txbr_copyto(tbr, windex,
            iov[i].iov_base, iov[i].iov_len);
    }

    /*
     * Set the offset of the current channel packet.
     */
    save_windex = ((uint64_t)old_windex) << 32;
    windex = vmbus_txbr_copyto(tbr, windex, &save_windex,
        sizeof(save_windex));

    /*
     * Update the write index _after_ the channel packet
     * is copied.
     */
    __compiler_membar();
    tbr->txbr_windex = windex;

    mtx_unlock_spin(&tbr->txbr_lock);

    *need_sig = vmbus_txbr_need_signal(tbr, old_windex);

    return (0);
}
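As an illustration (not from the original source), a caller typically describes the packet descriptor and payload as separate iovec entries and lets vmbus_txbr_write() append the trailing offset; the wrapper below is hypothetical.

/*
 * Hypothetical caller sketch: send a two-part channel packet. The
 * header pointer stands in for whatever descriptor the channel
 * protocol uses; only the iovec handling is the point here.
 */
static int
txbr_write_example(struct vmbus_txbr *tbr, void *hdr, int hdrlen,
    void *payload, int paylen, boolean_t *need_sig)
{
    struct iovec iov[2];

    iov[0].iov_base = hdr;
    iov[0].iov_len = hdrlen;
    iov[1].iov_base = payload;
    iov[1].iov_len = paylen;

    /* EAGAIN if the bufring cannot hold both parts plus the offset. */
    return (vmbus_txbr_write(tbr, iov, 2, need_sig));
}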
int
inthand_add(const char *name, u_int irq, void (*handler)(void *), void *arg,
    int flags, void **cookiep)
{
    struct intr_handler *ih;
    struct ithd *ithd, *orphan;
    int error = 0;
    int created_ithd = 0;

    /*
     * Work around a race where more than one CPU may be registering
     * handlers on the same IRQ at the same time.
     */
    ih = &intr_handlers[irq];
    mtx_lock_spin(&intr_table_lock);
    ithd = ih->ih_ithd;
    mtx_unlock_spin(&intr_table_lock);
    if (ithd == NULL) {
        error = ithread_create(&ithd, irq, 0, irq_disable,
            irq_enable, "irq%d:", irq);
        if (error)
            return (error);

        mtx_lock_spin(&intr_table_lock);

        if (ih->ih_ithd == NULL) {
            ih->ih_ithd = ithd;
            created_ithd++;
            mtx_unlock_spin(&intr_table_lock);
        } else {
            orphan = ithd;
            ithd = ih->ih_ithd;
            mtx_unlock_spin(&intr_table_lock);
            ithread_destroy(orphan);
        }
    }

    error = ithread_add_handler(ithd, name, handler, arg,
        ithread_priority(flags), flags, cookiep);

    if ((flags & INTR_FAST) == 0 || error) {
        intr_setup(irq, sched_ithd, ih);
        error = 0;
    }

    if (error)
        return (error);

    if (flags & INTR_FAST)
        intr_setup(irq, handler, arg);

    intr_stray_count[irq] = 0;
    return (0);
}
static int
iodi_setup_intr(device_t dev, device_t child, struct resource *ires,
    int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg,
    void **cookiep)
{
    int level;
    xlr_reg_t *mmio = xlr_io_mmio(XLR_IO_PIC_OFFSET);
    xlr_reg_t reg;

    /* FIXME is this the right place to fiddle with PIC? */
    if (strcmp(device_get_name(child), "uart") == 0) {
        /* FIXME uart 1? */
        if (rmi_spin_mutex_safe)
            mtx_lock_spin(&xlr_pic_lock);
        level = PIC_IRQ_IS_EDGE_TRIGGERED(PIC_IRT_UART_0_INDEX);
        xlr_write_reg(mmio, PIC_IRT_0_UART_0, 0x01);
        xlr_write_reg(mmio, PIC_IRT_1_UART_0, ((1 << 31) |
            (level << 30) | (1 << 6) | (PIC_UART_0_IRQ)));
        if (rmi_spin_mutex_safe)
            mtx_unlock_spin(&xlr_pic_lock);
        cpu_establish_hardintr("uart", filt, (driver_intr_t *)intr,
            (void *)arg, PIC_UART_0_IRQ, flags, cookiep);
    } else if (strcmp(device_get_name(child), "rge") == 0) {
        int irq;

        /* This is a hack to pass in the irq */
        irq = (intptr_t)ires->__r_i;
        if (rmi_spin_mutex_safe)
            mtx_lock_spin(&xlr_pic_lock);
        reg = xlr_read_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE);
        xlr_write_reg(mmio, PIC_IRT_1_BASE + irq - PIC_IRQ_BASE,
            reg | (1 << 6) | (1 << 30) | (1 << 31));
        if (rmi_spin_mutex_safe)
            mtx_unlock_spin(&xlr_pic_lock);
        cpu_establish_hardintr("rge", filt, (driver_intr_t *)intr,
            (void *)arg, irq, flags, cookiep);
    } else if (strcmp(device_get_name(child), "ehci") == 0) {
        if (rmi_spin_mutex_safe)
            mtx_lock_spin(&xlr_pic_lock);
        reg = xlr_read_reg(mmio,
            PIC_IRT_1_BASE + PIC_USB_IRQ - PIC_IRQ_BASE);
        xlr_write_reg(mmio,
            PIC_IRT_1_BASE + PIC_USB_IRQ - PIC_IRQ_BASE,
            reg | (1 << 6) | (1 << 30) | (1 << 31));
        if (rmi_spin_mutex_safe)
            mtx_unlock_spin(&xlr_pic_lock);
        cpu_establish_hardintr("ehci", filt, (driver_intr_t *)intr,
            (void *)arg, PIC_USB_IRQ, flags, cookiep);
    }

    /*
     * This causes a panic and looks recursive to me (RRS).
     * BUS_SETUP_INTR(device_get_parent(dev), child, ires, flags, filt,
     * intr, arg, cookiep);
     */

    return (0);
}
int
pmclog_deconfigure_log(struct pmc_owner *po)
{
    int error;
    struct pmclog_buffer *lb;

    PMCDBG1(LOG,CFG,1, "de-config po=%p", po);

    if ((po->po_flags & PMC_PO_OWNS_LOGFILE) == 0)
        return (EINVAL);

    KASSERT(po->po_sscount == 0,
        ("[pmclog,%d] po=%p still owning SS PMCs", __LINE__, po));
    KASSERT(po->po_file != NULL,
        ("[pmclog,%d] po=%p no log file", __LINE__, po));

    /* stop the kthread, this will reset the 'OWNS_LOGFILE' flag */
    pmclog_stop_kthread(po);

    KASSERT(po->po_kthread == NULL,
        ("[pmclog,%d] po=%p kthread not stopped", __LINE__, po));

    /* return all queued log buffers to the global pool */
    while ((lb = TAILQ_FIRST(&po->po_logbuffers)) != NULL) {
        TAILQ_REMOVE(&po->po_logbuffers, lb, plb_next);
        PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
        mtx_lock_spin(&pmc_bufferlist_mtx);
        TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
        mtx_unlock_spin(&pmc_bufferlist_mtx);
    }

    /* return the 'current' buffer to the global pool */
    if ((lb = po->po_curbuf) != NULL) {
        PMCLOG_INIT_BUFFER_DESCRIPTOR(lb);
        mtx_lock_spin(&pmc_bufferlist_mtx);
        TAILQ_INSERT_HEAD(&pmc_bufferlist, lb, plb_next);
        mtx_unlock_spin(&pmc_bufferlist_mtx);
    }

    /* drop a reference to the fd */
    if (po->po_file != NULL) {
        error = fdrop(po->po_file, curthread);
        po->po_file = NULL;
    } else
        error = 0;
    po->po_error = 0;

    return (error);
}
/*
 * Get time of day and convert it to a struct timespec.
 * Return 0 on success, an error number otherwise.
 */
int
mc146818_gettime(device_t dev, struct timespec *ts)
{
    struct mc146818_softc *sc;
    struct clocktime ct;
    int timeout, cent, year;

    sc = device_get_softc(dev);

    timeout = 1000000;    /* XXX how long should we wait? */

    /*
     * If MC_REGA_UIP is 0 we have at least 244us before the next
     * update. If it's 1 an update is imminent.
     */
    for (;;) {
        mtx_lock_spin(&sc->sc_mtx);
        if (!((*sc->sc_mcread)(dev, MC_REGA) & MC_REGA_UIP))
            break;
        mtx_unlock_spin(&sc->sc_mtx);
        if (--timeout < 0) {
            device_printf(dev, "%s: timeout\n", __func__);
            return (EBUSY);
        }
    }

#define FROMREG(x)    ((sc->sc_flag & MC146818_BCD) ? FROMBCD(x) : (x))

    ct.nsec = 0;
    ct.sec = FROMREG((*sc->sc_mcread)(dev, MC_SEC));
    ct.min = FROMREG((*sc->sc_mcread)(dev, MC_MIN));
    ct.hour = FROMREG((*sc->sc_mcread)(dev, MC_HOUR));
    /* Map dow from 1 - 7 to 0 - 6. */
    ct.dow = FROMREG((*sc->sc_mcread)(dev, MC_DOW)) - 1;
    ct.day = FROMREG((*sc->sc_mcread)(dev, MC_DOM));
    ct.mon = FROMREG((*sc->sc_mcread)(dev, MC_MONTH));
    year = FROMREG((*sc->sc_mcread)(dev, MC_YEAR));

    year += sc->sc_year0;
    if (sc->sc_flag & MC146818_NO_CENT_ADJUST) {
        cent = (*sc->sc_getcent)(dev);
        year += cent * 100;
    } else if (year < POSIX_BASE_YEAR)
        year += 100;

    mtx_unlock_spin(&sc->sc_mtx);

    ct.year = year;

    return (clock_ct_to_ts(&ct, ts));
}
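For readers unfamiliar with the MC146818_BCD flag, the FROMBCD() conversion that FROMREG() relies on amounts to the nibble arithmetic below; this helper is illustrative only and is not the macro from the driver headers.

/*
 * Sketch of BCD-to-binary conversion: each RTC register byte holds one
 * decimal digit per nibble, e.g. 0x59 means 59 seconds. Illustrative
 * stand-in for FROMBCD(), not the definition used by the driver.
 */
static inline int
bcd_to_bin_example(uint8_t v)
{

    return (((v >> 4) & 0x0f) * 10 + (v & 0x0f));
}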
void
_rm_wlock(struct rmlock *rm)
{
    struct rm_priotracker *prio;
    struct turnstile *ts;
    cpuset_t readcpus;

    if (SCHEDULER_STOPPED())
        return;

    if (rm->lock_object.lo_flags & LO_SLEEPABLE)
        sx_xlock(&rm->rm_lock_sx);
    else
        mtx_lock(&rm->rm_lock_mtx);

    if (CPU_CMP(&rm->rm_writecpus, &all_cpus)) {
        /* Get all read tokens back */
        readcpus = all_cpus;
        CPU_NAND(&readcpus, &rm->rm_writecpus);
        rm->rm_writecpus = all_cpus;

        /*
         * Assumes rm->rm_writecpus update is visible on other CPUs
         * before rm_cleanIPI is called.
         */
#ifdef SMP
        smp_rendezvous_cpus(readcpus,
            smp_no_rendevous_barrier,
            rm_cleanIPI,
            smp_no_rendevous_barrier,
            rm);
#else
        rm_cleanIPI(rm);
#endif

        mtx_lock_spin(&rm_spinlock);
        while ((prio = LIST_FIRST(&rm->rm_activeReaders)) != NULL) {
            ts = turnstile_trywait(&rm->lock_object);
            prio->rmp_flags = RMPF_ONQUEUE | RMPF_SIGNAL;
            mtx_unlock_spin(&rm_spinlock);
            turnstile_wait(ts, prio->rmp_thread,
                TS_EXCLUSIVE_QUEUE);
            mtx_lock_spin(&rm_spinlock);
        }
        mtx_unlock_spin(&rm_spinlock);
    }
}
static void
ntb_complete_rxc(void *arg, int pending)
{
    struct ntb_transport_qp *qp = arg;
    struct ntb_queue_entry *entry;
    struct mbuf *m;
    unsigned len;

    CTR0(KTR_NTB, "RX: rx_completion_task");

    mtx_lock_spin(&qp->ntb_rx_q_lock);

    while (!STAILQ_EMPTY(&qp->rx_post_q)) {
        entry = STAILQ_FIRST(&qp->rx_post_q);
        if ((entry->flags & IF_NTB_DESC_DONE_FLAG) == 0)
            break;

        entry->x_hdr->flags = 0;
        iowrite32(entry->index, &qp->rx_info->entry);

        STAILQ_REMOVE_HEAD(&qp->rx_post_q, entry);

        len = entry->len;
        m = entry->buf;

        /*
         * Re-initialize queue_entry for reuse; rx_handler takes
         * ownership of the mbuf.
         */
        entry->buf = NULL;
        entry->len = transport_mtu;
        entry->cb_data = qp->transport->ifp;

        STAILQ_INSERT_TAIL(&qp->rx_pend_q, entry, entry);

        mtx_unlock_spin(&qp->ntb_rx_q_lock);

        CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m);
        if (qp->rx_handler != NULL && qp->client_ready)
            qp->rx_handler(qp, qp->cb_data, m, len);
        else
            m_freem(m);

        mtx_lock_spin(&qp->ntb_rx_q_lock);
    }

    mtx_unlock_spin(&qp->ntb_rx_q_lock);
}
int
mc146818_attach(device_t dev)
{
    struct mc146818_softc *sc;

    sc = device_get_softc(dev);

    if (mtx_initialized(&sc->sc_mtx) == 0) {
        device_printf(dev, "%s: mutex not initialized\n", __func__);
        return (ENXIO);
    }

    if (sc->sc_mcread == NULL)
        sc->sc_mcread = mc146818_def_read;
    if (sc->sc_mcwrite == NULL)
        sc->sc_mcwrite = mc146818_def_write;

    if (sc->sc_flag & MC146818_NO_CENT_ADJUST) {
        /*
         * Note that setting MC146818_NO_CENT_ADJUST means that
         * the century has to be stored in NVRAM somewhere.
         */
        if (sc->sc_getcent == NULL)
            sc->sc_getcent = mc146818_def_getcent;
        if (sc->sc_setcent == NULL)
            sc->sc_setcent = mc146818_def_setcent;
    }

    mtx_lock_spin(&sc->sc_mtx);
    if (((*sc->sc_mcread)(dev, MC_REGD) & MC_REGD_VRT) == 0) {
        mtx_unlock_spin(&sc->sc_mtx);
        device_printf(dev, "%s: battery low\n", __func__);
        return (ENXIO);
    }

    sc->sc_rega = MC_BASE_32_KHz;
    (*sc->sc_mcwrite)(dev, MC_REGA, sc->sc_rega);

    sc->sc_regb = 0;
    sc->sc_regb |= (sc->sc_flag & MC146818_BCD) ? 0 : MC_REGB_BINARY;
    sc->sc_regb |= (sc->sc_flag & MC146818_12HR) ? 0 : MC_REGB_24HR;
    (*sc->sc_mcwrite)(dev, MC_REGB, sc->sc_regb);
    mtx_unlock_spin(&sc->sc_mtx);

    clock_register(dev, 1000000);    /* 1 second resolution. */

    return (0);
}
static int
pmclog_get_buffer(struct pmc_owner *po)
{
    struct pmclog_buffer *plb;

    mtx_assert(&po->po_mtx, MA_OWNED);

    KASSERT(po->po_curbuf == NULL,
        ("[pmclog,%d] po=%p current buffer still valid", __LINE__, po));

    mtx_lock_spin(&pmc_bufferlist_mtx);
    if ((plb = TAILQ_FIRST(&pmc_bufferlist)) != NULL)
        TAILQ_REMOVE(&pmc_bufferlist, plb, plb_next);
    mtx_unlock_spin(&pmc_bufferlist_mtx);

    PMCDBG(LOG,GTB,1, "po=%p plb=%p", po, plb);

#ifdef DEBUG
    if (plb)
        KASSERT(plb->plb_ptr == plb->plb_base &&
            plb->plb_base < plb->plb_fence,
            ("[pmclog,%d] po=%p buffer invariants: ptr=%p "
            "base=%p fence=%p", __LINE__, po, plb->plb_ptr,
            plb->plb_base, plb->plb_fence));
#endif

    po->po_curbuf = plb;

    /* update stats */
    atomic_add_int(&pmc_stats.pm_buffer_requests, 1);
    if (plb == NULL)
        atomic_add_int(&pmc_stats.pm_buffer_requests_failed, 1);

    return (plb ? 0 : ENOMEM);
}
static void
a10dmac_free(device_t dev, void *priv)
{
    struct a10dmac_channel *ch = priv;
    struct a10dmac_softc *sc = ch->ch_sc;
    uint32_t irqen, sta, cfg;

    mtx_lock_spin(&sc->sc_mtx);

    irqen = DMA_READ(sc, AWIN_DMA_IRQ_EN_REG);
    cfg = a10dmac_read_ctl(ch);
    if (ch->ch_type == CH_NDMA) {
        sta = AWIN_DMA_IRQ_NDMA_END(ch->ch_index);
        cfg &= ~AWIN_NDMA_CTL_DMA_LOADING;
    } else {
        sta = AWIN_DMA_IRQ_DDMA_END(ch->ch_index);
        cfg &= ~AWIN_DDMA_CTL_DMA_LOADING;
    }
    irqen &= ~sta;
    a10dmac_write_ctl(ch, cfg);
    DMA_WRITE(sc, AWIN_DMA_IRQ_EN_REG, irqen);
    DMA_WRITE(sc, AWIN_DMA_IRQ_PEND_STA_REG, sta);

    ch->ch_callback = NULL;
    ch->ch_callbackarg = NULL;

    mtx_unlock_spin(&sc->sc_mtx);
}
static void
unbind_from_irq(int irq)
{
    struct evtchn_close close;
    int evtchn = evtchn_from_irq(irq);
    int cpu;

    mtx_lock_spin(&irq_mapping_update_lock);

    if ((--irq_bindcount[irq] == 0) && VALID_EVTCHN(evtchn)) {
        close.port = evtchn;
        HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);

        switch (type_from_irq(irq)) {
        case IRQT_VIRQ:
            cpu = cpu_from_evtchn(evtchn);
            pcpu_find(cpu)->pc_virq_to_irq[index_from_irq(irq)] = -1;
            break;
        case IRQT_IPI:
            cpu = cpu_from_evtchn(evtchn);
            pcpu_find(cpu)->pc_ipi_to_irq[index_from_irq(irq)] = -1;
            break;
        default:
            break;
        }

        /* Closed ports are implicitly re-bound to VCPU0. */
        bind_evtchn_to_cpu(evtchn, 0);

        evtchn_to_irq[evtchn] = -1;
        irq_info[irq] = IRQ_UNBOUND;
    }

    mtx_unlock_spin(&irq_mapping_update_lock);
}
int
bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
    struct evtchn_bind_ipi bind_ipi;
    int irq;
    int evtchn = 0;

    mtx_lock_spin(&irq_mapping_update_lock);

    if ((irq = pcpu_find(cpu)->pc_ipi_to_irq[ipi]) == -1) {
        if ((irq = find_unbound_irq()) < 0)
            goto out;

        bind_ipi.vcpu = cpu;
        HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi, &bind_ipi);
        evtchn = bind_ipi.port;

        evtchn_to_irq[evtchn] = irq;
        irq_info[irq] = mk_irq_info(IRQT_IPI, ipi, evtchn);

        pcpu_find(cpu)->pc_ipi_to_irq[ipi] = irq;

        bind_evtchn_to_cpu(evtchn, cpu);
    }
    irq_bindcount[irq]++;
    unmask_evtchn(evtchn);

out:
    mtx_unlock_spin(&irq_mapping_update_lock);

    return irq;
}
static int
bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
    struct evtchn_bind_virq bind_virq;
    int evtchn = 0, irq;

    mtx_lock_spin(&irq_mapping_update_lock);

    if ((irq = pcpu_find(cpu)->pc_virq_to_irq[virq]) == -1) {
        if ((irq = find_unbound_irq()) < 0)
            goto out;

        bind_virq.virq = virq;
        bind_virq.vcpu = cpu;
        HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq);

        evtchn = bind_virq.port;

        evtchn_to_irq[evtchn] = irq;
        irq_info[irq] = mk_irq_info(IRQT_VIRQ, virq, evtchn);

        pcpu_find(cpu)->pc_virq_to_irq[virq] = irq;

        bind_evtchn_to_cpu(evtchn, cpu);
    }
    irq_bindcount[irq]++;
    unmask_evtchn(evtchn);

out:
    mtx_unlock_spin(&irq_mapping_update_lock);

    return irq;
}
static int
bind_local_port_to_irq(unsigned int local_port)
{
    int irq;

    mtx_lock_spin(&irq_mapping_update_lock);

    KASSERT(evtchn_to_irq[local_port] == -1,
        ("evtchn_to_irq inconsistent"));

    if ((irq = find_unbound_irq()) < 0) {
        struct evtchn_close close = { .port = local_port };

        HYPERVISOR_event_channel_op(EVTCHNOP_close, &close);
        goto out;
    }

    evtchn_to_irq[local_port] = irq;
    irq_info[irq] = mk_irq_info(IRQT_LOCAL_PORT, 0, local_port);
    irq_bindcount[irq]++;
    unmask_evtchn(local_port);

out:
    mtx_unlock_spin(&irq_mapping_update_lock);

    return irq;
}
void
__ntfs_debug(const char *file, int line, const char *function,
    const char *fmt, ...)
{
    va_list args;
    const char *filename;
    int len;

    if (!ntfs_debug_messages)
        return;

    /*
     * We really want strrchr() here but that is not exported so do it by
     * hand.
     */
    filename = file;
    if (filename) {
        for (len = strlen(filename); len > 0; len--) {
            if (filename[len - 1] == '/') {
                filename += len;
                break;
            }
        }
    }

    mtx_lock_spin(&ntfs_err_buf_lock);
    va_start(args, fmt);
    vsnprintf(ntfs_err_buf, sizeof(ntfs_err_buf), fmt, args);
    va_end(args);

    printf("NTFS-fs DEBUG (%s, %d): %s(): %s\n", filename ? filename : "",
        line, function ? function : "", ntfs_err_buf);
    mtx_unlock_spin(&ntfs_err_buf_lock);
}
static int
pcireg_cfgread(int bus, int slot, int func, int reg, int bytes)
{
    int data = -1;
    int port;

    mtx_lock_spin(&pcicfg_mtx);
    port = pci_cfgenable(bus, slot, func, reg, bytes);
    if (port != 0) {
        switch (bytes) {
        case 1:
            data = inb(port);
            break;
        case 2:
            data = inw(port);
            break;
        case 4:
            data = inl(port);
            break;
        }
        pci_cfgdisable();
    }
    mtx_unlock_spin(&pcicfg_mtx);
    return (data);
}
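For symmetry, the write side would look roughly like the sketch below, reusing the same pci_cfgenable()/pci_cfgdisable() helpers under pcicfg_mtx; it is patterned on the read routine above and is not claimed to match the tree's actual config-write routine.

/*
 * Sketch of the matching config-space write, assuming the same
 * enable/disable helpers and spin lock as pcireg_cfgread() above.
 */
static void
pcireg_cfgwrite_sketch(int bus, int slot, int func, int reg, int data,
    int bytes)
{
    int port;

    mtx_lock_spin(&pcicfg_mtx);
    port = pci_cfgenable(bus, slot, func, reg, bytes);
    if (port != 0) {
        switch (bytes) {
        case 1:
            outb(port, data);
            break;
        case 2:
            outw(port, data);
            break;
        case 4:
            outl(port, data);
            break;
        }
        pci_cfgdisable();
    }
    mtx_unlock_spin(&pcicfg_mtx);
}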
u_int32_t
ath_hal_reg_read(struct ath_hal *ah, u_int32_t reg)
{
    bus_space_tag_t tag = BUSTAG(ah);
    bus_space_handle_t h = ah->ah_sh;
    u_int32_t val;

    if (ah->ah_config.ah_serialise_reg_war)
        mtx_lock_spin(&ah_regser_mtx);
    val = bus_space_read_4(tag, h, reg);
    if (ah->ah_config.ah_serialise_reg_war)
        mtx_unlock_spin(&ah_regser_mtx);

    if (ath_hal_alq) {
        struct ale *ale = ath_hal_alq_get(ah);
        if (ale) {
            struct athregrec *r = (struct athregrec *) ale->ae_data;
            r->threadid = curthread->td_tid;
            r->op = OP_READ;
            r->reg = reg;
            r->val = val;
            alq_post(ath_hal_alq, ale);
        }
    }
    return val;
}
void
AcpiOsReleaseLock(ACPI_SPINLOCK Handle, ACPI_CPU_FLAGS Flags)
{
    struct acpi_spinlock *al = (struct acpi_spinlock *)Handle;

    ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

    if (al == NULL) {
        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
            "cannot release null spinlock\n"));
        return_VOID;
    }

    ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "release %s\n", al->al_name));

    if (mtx_owned(&al->al_lock)) {
        if (al->al_nested > 0) {
            ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
                "release nested %s, depth %d\n",
                al->al_name, al->al_nested));
            al->al_nested--;
        } else
            mtx_unlock_spin(&al->al_lock);
    } else
        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
            "cannot release unowned %s\n", al->al_name));
}
static int
clkintr(struct trapframe *frame)
{

    if (timecounter->tc_get_timecount == i8254_get_timecount) {
        mtx_lock_spin(&clock_lock);
        if (i8254_ticked)
            i8254_ticked = 0;
        else {
            i8254_offset += i8254_max_count;
            i8254_lastcount = 0;
        }
        clkintr_pending = 0;
        mtx_unlock_spin(&clock_lock);
    }
    KASSERT(!using_lapic_timer,
        ("clk interrupt enabled with lapic timer"));

#ifdef KDTRACE_HOOKS
    /*
     * If the DTrace hooks are configured and a callback function
     * has been registered, then call it to process the high speed
     * timers.
     */
    int cpu = PCPU_GET(cpuid);
    if (lapic_cyclic_clock_func[cpu] != NULL)
        (*lapic_cyclic_clock_func[cpu])(frame);
#endif

#ifdef SMP
    if (smp_started)
        ipi_all_but_self(IPI_HARDCLOCK);
#endif
    hardclockintr(frame);
    return (FILTER_HANDLED);
}
DECLHIDDEN(int) rtThreadNativeSetPriority(PRTTHREADINT pThread, RTTHREADTYPE enmType)
{
    int iPriority;

    switch (enmType)
    {
        case RTTHREADTYPE_INFREQUENT_POLLER:   iPriority = PZERO + 8; break;
        case RTTHREADTYPE_EMULATION:           iPriority = PZERO + 4; break;
        case RTTHREADTYPE_DEFAULT:             iPriority = PZERO; break;
        case RTTHREADTYPE_MSG_PUMP:            iPriority = PZERO - 4; break;
        case RTTHREADTYPE_IO:                  iPriority = PRIBIO; break;
        case RTTHREADTYPE_TIMER:               iPriority = PRI_MIN_KERN; break;
        default:
            AssertMsgFailed(("enmType=%d\n", enmType));
            return VERR_INVALID_PARAMETER;
    }

#if __FreeBSD_version < 700000
    /* Do like they're doing in subr_ntoskrnl.c... */
    mtx_lock_spin(&sched_lock);
#else
    thread_lock(curthread);
#endif

    sched_prio(curthread, iPriority);

#if __FreeBSD_version < 600000
    curthread->td_base_pri = iPriority;
#endif

#if __FreeBSD_version < 700000
    mtx_unlock_spin(&sched_lock);
#else
    thread_unlock(curthread);
#endif

    return VINF_SUCCESS;
}
u_int32_t
ath_hal_reg_read(struct ath_hal *ah, u_int32_t reg)
{
    bus_space_tag_t tag = BUSTAG(ah);
    bus_space_handle_t h = ah->ah_sh;
    u_int32_t val;

#ifdef AH_DEBUG
    /* Debug - complain if we haven't fully waken things up */
    if (! ath_hal_reg_whilst_asleep(ah, reg) &&
        ah->ah_powerMode != HAL_PM_AWAKE) {
        ath_hal_printf(ah, "%s: reg=0x%08x, pm=%d\n",
            __func__, reg, ah->ah_powerMode);
    }
#endif

    if (ah->ah_config.ah_serialise_reg_war)
        mtx_lock_spin(&ah_regser_mtx);
    OS_BUS_BARRIER_REG(ah, reg, OS_BUS_BARRIER_READ);
    val = bus_space_read_4(tag, h, reg);
    if (ah->ah_config.ah_serialise_reg_war)
        mtx_unlock_spin(&ah_regser_mtx);

    if (ath_hal_alq) {
        struct ale *ale = ath_hal_alq_get(ah);
        if (ale) {
            struct athregrec *r = (struct athregrec *) ale->ae_data;
            r->threadid = curthread->td_tid;
            r->op = OP_READ;
            r->reg = reg;
            r->val = val;
            alq_post(ath_hal_alq, ale);
        }
    }
    return val;
}
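The write side follows the same serialisation pattern; the sketch below mirrors the read routine above, with OP_WRITE and OS_BUS_BARRIER_WRITE assumed as the write-side counterparts of the identifiers used there. It is not a verbatim copy of the driver's register-write routine.

/*
 * Sketch of a register write under the same serialisation workaround,
 * logging an OP_WRITE record when ALQ tracing is enabled. Patterned on
 * ath_hal_reg_read() above; details of the real routine may differ.
 */
void
ath_hal_reg_write_sketch(struct ath_hal *ah, u_int32_t reg, u_int32_t val)
{
    bus_space_tag_t tag = BUSTAG(ah);
    bus_space_handle_t h = ah->ah_sh;

    if (ah->ah_config.ah_serialise_reg_war)
        mtx_lock_spin(&ah_regser_mtx);
    OS_BUS_BARRIER_REG(ah, reg, OS_BUS_BARRIER_WRITE);
    bus_space_write_4(tag, h, reg, val);
    if (ah->ah_config.ah_serialise_reg_war)
        mtx_unlock_spin(&ah_regser_mtx);

    if (ath_hal_alq) {
        struct ale *ale = ath_hal_alq_get(ah);
        if (ale) {
            struct athregrec *r = (struct athregrec *) ale->ae_data;
            r->threadid = curthread->td_tid;
            r->op = OP_WRITE;
            r->reg = reg;
            r->val = val;
            alq_post(ath_hal_alq, ale);
        }
    }
}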
int
pmclog_close(struct pmc_owner *po)
{

    PMCDBG(LOG,CLO,1, "po=%p", po);

    mtx_lock(&pmc_kthread_mtx);

    /*
     * Schedule the current buffer.
     */
    mtx_lock_spin(&po->po_mtx);
    if (po->po_curbuf)
        pmclog_schedule_io(po);
    else
        wakeup_one(po);
    mtx_unlock_spin(&po->po_mtx);

    /*
     * Initiate shutdown: no new data queued,
     * thread will close file on last block.
     */
    po->po_flags |= PMC_PO_SHUTDOWN;

    mtx_unlock(&pmc_kthread_mtx);

    return (0);
}