/**
 * ntb_transport_tx_enqueue - Enqueue a new NTB queue entry
 * @qp: NTB transport layer queue the entry is to be enqueued on
 * @cb: per buffer pointer for callback function to use
 * @data: pointer to data buffer that will be sent
 * @len: length of the data buffer
 *
 * Enqueue a new transmit buffer onto the transport queue from which an NTB
 * payload will be transmitted.  This assumes that a lock is being held to
 * serialize access to the qp.
 *
 * RETURNS: An appropriate ERRNO error value on error, or zero for success.
 */
static int
ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
    unsigned int len)
{
	struct ntb_queue_entry *entry;
	int rc;

	if (qp == NULL || qp->qp_link != NTB_LINK_UP || len == 0) {
		CTR0(KTR_NTB, "TX: link not up");
		return (EINVAL);
	}

	entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q);
	if (entry == NULL) {
		CTR0(KTR_NTB, "TX: could not get entry from tx_free_q");
		return (ENOMEM);
	}
	CTR1(KTR_NTB, "TX: got entry %p from tx_free_q", entry);

	entry->cb_data = cb;
	entry->buf = data;
	entry->len = len;
	entry->flags = 0;

	rc = ntb_process_tx(qp, entry);
	if (rc != 0) {
		ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
		CTR1(KTR_NTB,
		    "TX: process_tx failed. Returning entry %p to tx_free_q",
		    entry);
	}
	return (rc);
}
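/*
 * All of the snippets collected here use the KTR CTRn trace macros.  A
 * minimal, hypothetical sketch of the pattern follows (not code from any
 * of the drivers in this listing), assuming a kernel built with
 * "options KTR" and a trace class such as KTR_DEV enabled in ktr_mask;
 * recorded events land in the ktr(4) ring buffer and can be inspected
 * with ktrdump(8) or the ddb(4) "show ktr" command.
 */
#include <sys/param.h>
#include <sys/ktr.h>

static void
ktr_example(void *arg, int value)
{

	/* CTRn takes a KTR class, a format string, and exactly n arguments. */
	CTR2(KTR_DEV, "ktr_example: arg=%p value=%d", arg, value);
}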
int
rwindow_load(struct thread *td, struct trapframe *tf, int n)
{
	struct rwindow rw;
	u_long usp;
	int error;
	int i;

	CTR3(KTR_TRAP, "rwindow_load: td=%p (%s) n=%d",
	    td, td->td_proc->p_comm, n);

	/*
	 * In case current window is still only on-chip, push it out;
	 * if it cannot get all the way out, we cannot continue either.
	 */
	if ((error = rwindow_save(td)) != 0)
		return (error);
	usp = tf->tf_out[6];
	for (i = 0; i < n; i++) {
		CTR1(KTR_TRAP, "rwindow_load: usp=%#lx", usp);
		usp += SPOFF;
		if ((error = (usp & 0x7)) != 0)
			break;
		error = copyin((void *)usp, &rw, sizeof rw);
		usp = rw.rw_in[6];
	}
	CTR1(KTR_TRAP, "rwindow_load: error=%d", error);
	return (error == 0 ? 0 : SIGILL);
}
static void
ntb_start(struct ifnet *ifp)
{
	struct mbuf *m_head;
	struct ntb_netdev *nt = ifp->if_softc;
	int rc;

	mtx_lock(&nt->tx_lock);
	ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
	CTR0(KTR_NTB, "TX: ntb_start");
	while (!IFQ_DRV_IS_EMPTY(&ifp->if_snd)) {
		IFQ_DRV_DEQUEUE(&ifp->if_snd, m_head);
		CTR1(KTR_NTB, "TX: start mbuf %p", m_head);
		rc = ntb_transport_tx_enqueue(nt->qp, m_head, m_head,
		    m_length(m_head, NULL));
		if (rc != 0) {
			CTR1(KTR_NTB,
			    "TX: could not tx mbuf %p. Returning to snd q",
			    m_head);
			if (rc == EAGAIN) {
				ifp->if_drv_flags |= IFF_DRV_OACTIVE;
				IFQ_DRV_PREPEND(&ifp->if_snd, m_head);
				callout_reset(&nt->qp->queue_full, hz / 1000,
				    ntb_qp_full, ifp);
			}
			break;
		}
	}
	mtx_unlock(&nt->tx_lock);
}
/*
 * Select the KSE that will be run next.  From that find the thread, and
 * remove it from the KSEGRP's run queue.  If there is thread clustering,
 * this will be what does it.
 */
struct thread *
choosethread(void)
{
	struct kse *ke;
	struct thread *td;
	struct ksegrp *kg;

#if defined(SMP) && (defined(__i386__) || defined(__amd64__))
	if (smp_active == 0 && PCPU_GET(cpuid) != 0) {
		/* Shutting down, run idlethread on AP's */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
		ke->ke_flags |= KEF_DIDRUN;
		TD_SET_RUNNING(td);
		return (td);
	}
#endif

retry:
	ke = sched_choose();
	if (ke) {
		td = ke->ke_thread;
		KASSERT((td->td_kse == ke), ("kse/thread mismatch"));
		kg = ke->ke_ksegrp;
		if (td->td_proc->p_flag & P_HADTHREADS) {
			if (kg->kg_last_assigned == td) {
				kg->kg_last_assigned = TAILQ_PREV(td,
				    threadqueue, td_runq);
			}
			TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
			kg->kg_runnable--;
		}
		CTR2(KTR_RUNQ, "choosethread: td=%p pri=%d",
		    td, td->td_priority);
	} else {
		/* Simulate runq_choose() having returned the idle thread */
		td = PCPU_GET(idlethread);
		ke = td->td_kse;
		CTR1(KTR_RUNQ, "choosethread: td=%p (idle)", td);
	}
	ke->ke_flags |= KEF_DIDRUN;

	/*
	 * If we are in panic, only allow system threads,
	 * plus the one we are running in, to be run.
	 */
	if (panicstr && ((td->td_proc->p_flag & P_SYSTEM) == 0 &&
	    (td->td_flags & TDF_INPANIC) == 0)) {
		/* note that it is no longer on the run queue */
		TD_SET_CAN_RUN(td);
		goto retry;
	}

	TD_SET_RUNNING(td);
	return (td);
}
static void
ntb_rx_completion_task(void *arg, int pending)
{
	struct ntb_transport_qp *qp = arg;
	struct mbuf *m;
	struct ntb_queue_entry *entry;

	CTR0(KTR_NTB, "RX: rx_completion_task");

	while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock,
	    &qp->rx_free_q))) {
		m = entry->buf;
		CTR2(KTR_NTB, "RX: completing entry %p, mbuf %p", entry, m);
		if (qp->rx_handler && qp->client_ready == NTB_LINK_UP)
			qp->rx_handler(qp, qp->cb_data, m, entry->len);

		entry->buf = NULL;
		entry->len = qp->transport->bufsize;

		CTR1(KTR_NTB, "RX: entry %p removed from rx_free_q "
		    "and added to rx_pend_q", entry);
		ntb_list_add(&qp->ntb_rx_pend_q_lock, entry, &qp->rx_pend_q);
		if (qp->rx_err_no_buf > qp->last_rx_no_buf) {
			qp->last_rx_no_buf = qp->rx_err_no_buf;
			CTR0(KTR_NTB, "RX: could spawn rx task");
			callout_reset(&qp->rx_full, hz / 1000,
			    ntb_rx_pendq_full, qp);
		}
	}
}
int
freebsd32_sigreturn(struct thread *td, struct freebsd32_sigreturn_args *uap)
{
	ucontext32_t uc;
	int error;

	CTR2(KTR_SIG, "sigreturn: td=%p ucp=%p", td, uap->sigcntxp);

	if (copyin(uap->sigcntxp, &uc, sizeof(uc)) != 0) {
		CTR1(KTR_SIG, "sigreturn: efault td=%p", td);
		return (EFAULT);
	}

	error = set_mcontext32(td, &uc.uc_mcontext);
	if (error != 0)
		return (error);

	kern_sigprocmask(td, SIG_SETMASK, &uc.uc_sigmask, NULL, 0);

#if 0
	CTR3(KTR_SIG, "sigreturn: return td=%p pc=%#x sp=%#x",
	    td, uc.uc_mcontext.mc_srr0, uc.uc_mcontext.mc_gpr[1]);
#endif
	return (EJUSTRETURN);
}
/*
 * When called, the executing CPU will send a suspend IPI to all the CPUs
 * in the map, requesting that they halt execution.
 *
 * Usually (but not necessarily) called with 'other_cpus' as its arg.
 *
 * - Signals all CPUs in map to suspend.
 * - Waits for each to suspend.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 *
 * XXX FIXME: this is not MP-safe, needs a lock to prevent multiple CPUs
 * from executing at the same time.
 */
int
suspend_cpus(cpumask_t map)
{
	int i;

	if (!smp_started)
		return (0);

	CTR1(KTR_SMP, "suspend_cpus(%x)", map);

	/* send the suspend IPI to all CPUs in map */
	ipi_selected(map, IPI_SUSPEND);

	i = 0;
	while ((stopped_cpus & map) != map) {
		/* spin */
		cpu_spinwait();
		i++;
#ifdef DIAGNOSTIC
		if (i == 100000) {
			printf("timeout suspending cpus\n");
			break;
		}
#endif
	}

	return (1);
}
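/*
 * A hypothetical caller of suspend_cpus(), sketched under the assumption
 * that the suspended set is released via restart_cpus(): the IPI_SUSPEND
 * handler parks CPUs in stopped_cpus, which is the mask restart_cpus()
 * waits on.  Illustrative only, not code from sys/kern/subr_smp.c.
 */
static void
quiesce_other_cpus_example(cpumask_t other_cpus)
{

	if (suspend_cpus(other_cpus) == 0)
		return;		/* SMP not started; nothing to quiesce. */
	/* ... operate while the CPUs in the map are halted ... */
	restart_cpus(other_cpus);
}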
void
cxgb_log_tcb(struct adapter *sc, unsigned int tid)
{
	char buf[TCB_SIZE];
	uint64_t *tcb = (uint64_t *)buf;
	int i, error;
	struct mc7 *mem = &sc->cm;

	error = t3_mc7_bd_read(mem, tid * TCB_SIZE / 8, TCB_SIZE / 8, tcb);
	if (error)
		printf("cxgb_log_tcb failed\n");

	CTR1(KTR_CXGB, "TCB tid=%u", tid);
	for (i = 0; i < TCB_SIZE / 32; i++) {
		CTR5(KTR_CXGB, "%1d: %08x %08x %08x %08x",
		    i, (uint32_t)tcb[1], (uint32_t)(tcb[1] >> 32),
		    (uint32_t)tcb[0], (uint32_t)(tcb[0] >> 32));
		tcb += 2;
		CTR4(KTR_CXGB, "   %08x %08x %08x %08x",
		    (uint32_t)tcb[1], (uint32_t)(tcb[1] >> 32),
		    (uint32_t)tcb[0], (uint32_t)(tcb[0] >> 32));
		tcb += 2;
	}
}
void
forward_signal(struct thread *td)
{
	int id;

	/*
	 * signotify() has already set TDF_ASTPENDING and TDF_NEEDSIGCHECK on
	 * this thread, so all we need to do is poke it if it is currently
	 * executing so that it executes ast().
	 */
	THREAD_LOCK_ASSERT(td, MA_OWNED);
	KASSERT(TD_IS_RUNNING(td),
	    ("forward_signal: thread is not TDS_RUNNING"));

	CTR1(KTR_SMP, "forward_signal(%p)", td->td_proc);

	if (!smp_started || cold || panicstr)
		return;
	if (!forward_signal_enabled)
		return;

	/* No need to IPI ourself. */
	if (td == curthread)
		return;

	id = td->td_oncpu;
	if (id == NOCPU)
		return;
	ipi_cpu(id, IPI_AST);
}
void
cxio_dump_tpt(struct cxio_rdev *rdev, uint32_t stag)
{
	struct ch_mem_range *m;
	u64 *data;
	int rc;
	int size = 32;

	m = kmalloc(sizeof(*m) + size, M_NOWAIT);
	if (!m) {
		CTR1(KTR_IW_CXGB, "%s couldn't allocate memory.",
		    __FUNCTION__);
		return;
	}
	m->mem_id = MEM_PMRX;
	m->addr = (stag >> 8) * 32 + rdev->rnic_info.tpt_base;
	m->len = size;
	CTR3(KTR_IW_CXGB, "%s TPT addr 0x%x len %d",
	    __FUNCTION__, m->addr, m->len);
	rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
	if (rc) {
		CTR2(KTR_IW_CXGB, "%s toectl returned error %d",
		    __FUNCTION__, rc);
		free(m, M_DEVBUF);
		return;
	}

	data = (u64 *)m->buf;
	while (size > 0) {
		CTR2(KTR_IW_CXGB, "TPT %08x: %016llx",
		    m->addr, (unsigned long long)*data);
		size -= 8;
		data++;
		m->addr += 8;
	}
	free(m, M_DEVBUF);
}
int
drm_poll(struct cdev *kdev, int events, struct thread *td)
{
	struct drm_file *file_priv;
	struct drm_device *dev;
	int error, revents;

	error = devfs_get_cdevpriv((void **)&file_priv);
	if (error != 0) {
		DRM_ERROR("can't find authenticator\n");
		return (EINVAL);
	}

	dev = drm_get_device_from_kdev(kdev);

	revents = 0;
	mtx_lock(&dev->event_lock);
	if ((events & (POLLIN | POLLRDNORM)) != 0) {
		if (list_empty(&file_priv->event_list)) {
			CTR0(KTR_DRM, "drm_poll empty list");
			selrecord(td, &file_priv->event_poll);
		} else {
			revents |= events & (POLLIN | POLLRDNORM);
			CTR1(KTR_DRM, "drm_poll revents %x", revents);
		}
	}
	mtx_unlock(&dev->event_lock);

	return (revents);
}
void
dumpsys_pa_init(void)
{

	CTR1(KTR_PMAP, "%s()", __func__);
	return (MMU_SCAN_INIT(mmu_obj));
}
void
cxio_dump_tcb(struct cxio_rdev *rdev, uint32_t hwtid)
{
	struct ch_mem_range *m;
	int size = TCB_SIZE;
	uint32_t *data;
	int rc;

	m = kmalloc(sizeof(*m) + size, M_NOWAIT);
	if (!m) {
		CTR1(KTR_IW_CXGB, "%s couldn't allocate memory.",
		    __FUNCTION__);
		return;
	}
	m->mem_id = MEM_CM;
	m->addr = hwtid * size;
	m->len = size;
	CTR3(KTR_IW_CXGB, "%s TCB %d len %d",
	    __FUNCTION__, m->addr, m->len);
	rc = rdev->t3cdev_p->ctl(rdev->t3cdev_p, RDMA_GET_MEM, m);
	if (rc) {
		CTR2(KTR_IW_CXGB, "%s toectl returned error %d",
		    __FUNCTION__, rc);
		free(m, M_DEVBUF);
		return;
	}

	data = (uint32_t *)m->buf;
	while (size > 0) {
		printf("%2u: %08x %08x %08x %08x %08x %08x %08x %08x\n",
		    m->addr,
		    *(data + 2), *(data + 3), *(data), *(data + 1),
		    *(data + 6), *(data + 7), *(data + 4), *(data + 5));
		size -= 32;
		data += 8;
		m->addr += 32;
	}
	free(m, M_DEVBUF);
}
void
pmap_init(void)
{

	CTR1(KTR_PMAP, "%s()", __func__);
	MMU_INIT(mmu_obj);
}
static ACPI_STATUS
EcRead(struct acpi_ec_softc *sc, UINT8 Address, UINT8 *Data)
{
	ACPI_STATUS status;
	u_int gen_count;
	int retry;

	ACPI_SERIAL_ASSERT(ec);
	CTR1(KTR_ACPI, "ec read from %#x", Address);

	for (retry = 0; retry < 2; retry++) {
		status = EcCommand(sc, EC_COMMAND_READ);
		if (ACPI_FAILURE(status))
			return (status);

		gen_count = sc->ec_gencount;
		EC_SET_DATA(sc, Address);
		status = EcWaitEvent(sc, EC_EVENT_OUTPUT_BUFFER_FULL,
		    gen_count);
		if (ACPI_FAILURE(status)) {
			if (ACPI_SUCCESS(EcCheckStatus(sc, "retr_check",
			    EC_EVENT_INPUT_BUFFER_EMPTY)))
				continue;
			else
				break;
		}
		*Data = EC_GET_DATA(sc);
		return (AE_OK);
	}
	device_printf(sc->ec_dev, "EcRead: failed waiting to get data\n");
	return (status);
}
/* Network Device Callbacks */
static void
ntb_net_tx_handler(struct ntb_transport_qp *qp, void *qp_data, void *data,
    int len)
{

	m_freem(data);
	CTR1(KTR_NTB, "TX: tx_handler freeing mbuf %p", data);
}
static ACPI_STATUS
EcCommand(struct acpi_ec_softc *sc, EC_COMMAND cmd)
{
	ACPI_STATUS status;
	EC_EVENT event;
	EC_STATUS ec_status;
	u_int gen_count;

	ACPI_SERIAL_ASSERT(ec);

	/* Don't use burst mode if user disabled it. */
	if (!ec_burst_mode && cmd == EC_COMMAND_BURST_ENABLE)
		return (AE_ERROR);

	/* Decide what to wait for based on command type. */
	switch (cmd) {
	case EC_COMMAND_READ:
	case EC_COMMAND_WRITE:
	case EC_COMMAND_BURST_DISABLE:
		event = EC_EVENT_INPUT_BUFFER_EMPTY;
		break;
	case EC_COMMAND_QUERY:
	case EC_COMMAND_BURST_ENABLE:
		event = EC_EVENT_OUTPUT_BUFFER_FULL;
		break;
	default:
		device_printf(sc->ec_dev, "EcCommand: invalid command %#x\n",
		    cmd);
		return (AE_BAD_PARAMETER);
	}

	/*
	 * Ensure empty input buffer before issuing command.
	 * Use generation count of zero to force a quick check.
	 */
	status = EcWaitEvent(sc, EC_EVENT_INPUT_BUFFER_EMPTY, 0);
	if (ACPI_FAILURE(status))
		return (status);

	/* Run the command and wait for the chosen event. */
	CTR1(KTR_ACPI, "ec running command %#x", cmd);
	gen_count = sc->ec_gencount;
	EC_SET_CSR(sc, cmd);
	status = EcWaitEvent(sc, event, gen_count);
	if (ACPI_SUCCESS(status)) {
		/* If we succeeded, burst flag should now be present. */
		if (cmd == EC_COMMAND_BURST_ENABLE) {
			ec_status = EC_GET_CSR(sc);
			if ((ec_status & EC_FLAG_BURST_MODE) == 0)
				status = AE_ERROR;
		}
	} else
		device_printf(sc->ec_dev, "EcCommand: no response to %#x\n",
		    cmd);

	return (status);
}
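/*
 * A simplified, hypothetical sketch of the complementary write path,
 * built only from the helpers shown above (EcCommand(), EC_SET_DATA(),
 * EcWaitEvent()).  It omits the burst-mode and retry handling of the
 * real EcWrite() in dev/acpica/acpi_ec.c.
 */
static ACPI_STATUS
EcWriteSketch(struct acpi_ec_softc *sc, UINT8 Address, UINT8 Data)
{
	ACPI_STATUS status;
	u_int gen_count;

	ACPI_SERIAL_ASSERT(ec);
	CTR1(KTR_ACPI, "ec write to %#x", Address);

	/* Latch the write command, then the address, then the data byte. */
	status = EcCommand(sc, EC_COMMAND_WRITE);
	if (ACPI_FAILURE(status))
		return (status);

	gen_count = sc->ec_gencount;
	EC_SET_DATA(sc, Address);
	status = EcWaitEvent(sc, EC_EVENT_INPUT_BUFFER_EMPTY, gen_count);
	if (ACPI_FAILURE(status))
		return (status);

	gen_count = sc->ec_gencount;
	EC_SET_DATA(sc, Data);
	status = EcWaitEvent(sc, EC_EVENT_INPUT_BUFFER_EMPTY, gen_count);
	if (ACPI_FAILURE(status))
		return (status);

	return (AE_OK);
}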
static void
ntb_memcpy_tx(struct ntb_transport_qp *qp, struct ntb_queue_entry *entry,
    void *offset)
{
	struct ntb_payload_header *hdr;

	/* This piece is from Linux' ntb_async_tx() */
	hdr = (struct ntb_payload_header *)((char *)offset + qp->tx_max_frame -
	    sizeof(struct ntb_payload_header));
	entry->x_hdr = hdr;
	iowrite32(entry->len, &hdr->len);
	iowrite32(qp->tx_pkts, &hdr->ver);

	/* This piece is ntb_memcpy_tx() */
	CTR2(KTR_NTB, "TX: copying %d bytes to offset %p", entry->len, offset);
	if (entry->buf != NULL) {
		m_copydata((struct mbuf *)entry->buf, 0, entry->len, offset);

		/*
		 * Ensure that the data is fully copied before setting the
		 * flags
		 */
		wmb();
	}

	/* The rest is ntb_tx_copy_callback() */
	iowrite32(entry->flags | IF_NTB_DESC_DONE_FLAG, &hdr->flags);
	CTR1(KTR_NTB, "TX: hdr %p set DESC_DONE", hdr);

	ntb_peer_db_set(qp->ntb, 1ull << qp->qp_num);

	/*
	 * The entry length can only be zero if the packet is intended to be a
	 * "link down" or similar.  Since no payload is being sent in these
	 * cases, there is nothing to add to the completion queue.
	 */
	if (entry->len > 0) {
		qp->tx_bytes += entry->len;

		if (qp->tx_handler)
			qp->tx_handler(qp, qp->cb_data, entry->buf,
			    entry->len);
		else
			m_freem(entry->buf);
		entry->buf = NULL;
	}

	CTR3(KTR_NTB,
	    "TX: entry %p sent. hdr->ver = %u, hdr->flags = 0x%x, Returning "
	    "to tx_free_q", entry, hdr->ver, hdr->flags);
	ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
}
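/*
 * ntb_memcpy_tx() above carves the payload header out of the tail of the
 * transmit frame.  One plausible layout, inferred from the three
 * iowrite32() accesses; the field order is a guess and the authoritative
 * definition lives in the if_ntb driver.
 */
struct ntb_payload_header_sketch {
	uint32_t ver;	/* producer's running packet count (qp->tx_pkts) */
	uint32_t len;	/* payload bytes; 0 marks a link-control frame */
	uint32_t flags;	/* IF_NTB_DESC_DONE_FLAG is set last, after wmb() */
};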
/*
 * _mtx_lock_spin: the tougher part of acquiring an MTX_SPIN lock.
 *
 * This is only called if we need to actually spin for the lock.  Recursion
 * is handled inline.
 */
void
_mtx_lock_spin(struct mtx *m, uintptr_t tid, int opts, const char *file,
    int line)
{
	int i = 0;
#ifdef LOCK_PROFILING
	int contested = 0;
	uint64_t waittime = 0;
#endif

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spinning", m);

	lock_profile_obtain_lock_failed(&m->lock_object, &contested,
	    &waittime);
	while (!_obtain_lock(m, tid)) {

		/* Give interrupts a chance while we spin. */
		spinlock_exit();
		while (m->mtx_lock != MTX_UNOWNED) {
			if (i++ < 10000000) {
				cpu_spinwait();
				continue;
			}
			if (i < 60000000 || kdb_active || panicstr != NULL)
				DELAY(1);
			else
				_mtx_lock_spin_failed(m);
			cpu_spinwait();
		}
		spinlock_enter();
	}

	if (LOCK_LOG_TEST(&m->lock_object, opts))
		CTR1(KTR_LOCK, "_mtx_lock_spin: %p spin done", m);

	LOCKSTAT_PROFILE_OBTAIN_LOCK_SUCCESS(LS_MTX_SPIN_LOCK_ACQUIRE, m,
	    contested, waittime, (file), (line));
	LOCKSTAT_RECORD1(LS_MTX_SPIN_LOCK_SPIN, m, i);
}
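/*
 * A minimal, hypothetical sketch of the public spin-mutex API whose
 * contended path ends up in _mtx_lock_spin() above.  mtx_lock_spin()
 * disables interrupts and busy-waits (it never sleeps), so spin mutexes
 * should only guard short critical sections.
 */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/mutex.h>

static struct mtx example_spin_mtx;

static void
spin_mtx_example(void)
{

	mtx_init(&example_spin_mtx, "example spin", NULL, MTX_SPIN);
	mtx_lock_spin(&example_spin_mtx);
	/* ... short critical section; contention lands in _mtx_lock_spin() ... */
	mtx_unlock_spin(&example_spin_mtx);
	mtx_destroy(&example_spin_mtx);
}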
int
rwindow_save(struct thread *td)
{
	struct rwindow *rw;
	struct pcb *pcb;
	u_long *ausp;
	u_long usp;
	int error;
	int i;

	pcb = td->td_pcb;
	CTR3(KTR_TRAP, "rwindow_save: td=%p (%s) nsaved=%d",
	    td, td->td_proc->p_comm, pcb->pcb_nsaved);

	flushw();
	KASSERT(pcb->pcb_nsaved < MAXWIN,
	    ("rwindow_save: pcb_nsaved > MAXWIN"));
	if ((i = pcb->pcb_nsaved) == 0)
		return (0);

	ausp = pcb->pcb_rwsp;
	rw = pcb->pcb_rw;
	error = 0;
	do {
		usp = *ausp;
		CTR1(KTR_TRAP, "rwindow_save: usp=%#lx", usp);
		usp += SPOFF;
		if ((error = (usp & 0x7)) != 0)
			break;
		error = copyout(rw, (void *)usp, sizeof *rw);
		if (error)
			break;
		ausp++;
		rw++;
	} while (--i > 0);

	CTR1(KTR_TRAP, "rwindow_save: error=%d", error);
	if (error == 0)
		pcb->pcb_nsaved = 0;
	return (error == 0 ? 0 : SIGILL);
}
static unsigned int
cxgbei_task_reserve_itt(struct icl_conn *ic, void **prv,
    struct ccb_scsiio *scmd, unsigned int *itt)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	int xferlen = scmd->dxfer_len;
	struct cxgbei_task_data *tdata = NULL;
	struct cxgbei_sgl *sge = NULL;
	struct toepcb *toep = icc->toep;
	struct adapter *sc = td_adapter(toep->td);
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
	int err = -1;

	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);

	tdata = (struct cxgbei_task_data *)*prv;
	if (xferlen == 0 || tdata == NULL)
		goto out;
	if (xferlen < DDP_THRESHOLD)
		goto out;

	if ((scmd->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
		tdata->nsge = cxgbei_map_sg(tdata->sgl, scmd);
		if (tdata->nsge == 0) {
			CTR1(KTR_CXGBE, "%s: map_sg failed", __func__);
			return 0;
		}
		sge = tdata->sgl;

		tdata->sc_ddp_tag = *itt;

		CTR3(KTR_CXGBE, "%s: *itt:0x%x sc_ddp_tag:0x%x",
		    __func__, *itt, tdata->sc_ddp_tag);
		if (cxgbei_ulp2_sw_tag_usable(&ci->tag_format,
		    tdata->sc_ddp_tag)) {
			err = t4_sk_ddp_tag_reserve(ci, icc, scmd->dxfer_len,
			    sge, tdata->nsge, &tdata->sc_ddp_tag);
		} else {
			CTR3(KTR_CXGBE,
			    "%s: itt:0x%x sc_ddp_tag:0x%x not usable",
			    __func__, *itt, tdata->sc_ddp_tag);
		}
	}
out:
	if (err < 0)
		tdata->sc_ddp_tag =
		    cxgbei_ulp2_set_non_ddp_tag(&ci->tag_format, *itt);

	return tdata->sc_ddp_tag;
}
void
t3_post_kbuf(struct toepcb *toep, int modulate, int nonblock)
{
	struct ddp_state *p = &toep->tp_ddp_state;

	t3_set_ddp_tag(toep, p->cur_buf, p->kbuf_tag[p->cur_buf] << 6);
	t3_set_ddp_buf(toep, p->cur_buf, 0, p->kbuf[p->cur_buf]->dgl_length);
	t3_repost_kbuf(toep, p->cur_buf, modulate, 1, nonblock);
#ifdef T3_TRACE
	T3_TRACE1(TIDTB(so),
	    "t3_post_kbuf: cur_buf = kbuf_idx = %u ", p->cur_buf);
#endif
	CTR1(KTR_TOM, "t3_post_kbuf: cur_buf = kbuf_idx = %u ", p->cur_buf);
}
/*
 * Handle an IPI sent to this processor.
 */
intrmask_t
smp_handle_ipi(struct trapframe *frame)
{
	cpumask_t cpumask;	/* This cpu mask */
	u_int ipi, ipi_bitmap;

	ipi_bitmap = atomic_readandclear_int(PCPU_PTR(pending_ipis));
	cpumask = PCPU_GET(cpumask);

	CTR1(KTR_SMP, "smp_handle_ipi(), ipi_bitmap=%x", ipi_bitmap);
	while (ipi_bitmap) {
		/*
		 * Find the lowest set bit.
		 */
		ipi = ipi_bitmap & ~(ipi_bitmap - 1);
		ipi_bitmap &= ~ipi;
		switch (ipi) {
		case IPI_INVLTLB:
			CTR0(KTR_SMP, "IPI_INVLTLB");
			break;

		case IPI_RENDEZVOUS:
			CTR0(KTR_SMP, "IPI_RENDEZVOUS");
			smp_rendezvous_action();
			break;

		case IPI_AST:
			CTR0(KTR_SMP, "IPI_AST");
			break;

		case IPI_STOP:
			/*
			 * IPI_STOP_HARD is mapped to IPI_STOP so it is not
			 * necessary to add it in the switch.
			 */
			CTR0(KTR_SMP, "IPI_STOP or IPI_STOP_HARD");

			atomic_set_int(&stopped_cpus, cpumask);
			while ((started_cpus & cpumask) == 0)
				;
			atomic_clear_int(&started_cpus, cpumask);
			atomic_clear_int(&stopped_cpus, cpumask);
			break;
		}
	}
	return CR_INT_IPI;
}
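/*
 * The dispatch loop above isolates the lowest set bit with
 * "ipi_bitmap & ~(ipi_bitmap - 1)": subtracting 1 clears the lowest set
 * bit and sets every bit below it, so ANDing with the complement leaves
 * only that bit.  A standalone illustration (hypothetical userland C):
 */
#include <stdio.h>

int
main(void)
{
	unsigned int bitmap = 0x16;	/* bits 1, 2 and 4 set */
	unsigned int lowest;

	while (bitmap != 0) {
		lowest = bitmap & ~(bitmap - 1);	/* isolate lowest set bit */
		printf("handling %#x\n", lowest);
		bitmap &= ~lowest;			/* clear it and continue */
	}
	return (0);
}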
/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 * - Signals all CPUs in map to restart.
 * - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(cpumask_t map)
{

	if (!smp_started)
		return 0;

	CTR1(KTR_SMP, "restart_cpus(%x)", map);

	/* signal other cpus to restart */
	atomic_store_rel_int(&started_cpus, map);

	/* wait for each to clear its bit */
	while ((stopped_cpus & map) != 0)
		cpu_spinwait();

	return 1;
}
static ACPI_STATUS
EcCheckStatus(struct acpi_ec_softc *sc, const char *msg, EC_EVENT event)
{
	ACPI_STATUS status;
	EC_STATUS ec_status;

	status = AE_NO_HARDWARE_RESPONSE;
	ec_status = EC_GET_CSR(sc);
	if (sc->ec_burstactive && !(ec_status & EC_FLAG_BURST_MODE)) {
		CTR1(KTR_ACPI, "ec burst disabled in waitevent (%s)", msg);
		sc->ec_burstactive = FALSE;
	}
	if (EVENT_READY(event, ec_status)) {
		CTR2(KTR_ACPI, "ec %s wait ready, status %#x", msg, ec_status);
		status = AE_OK;
	}
	return (status);
}
static ACPI_STATUS
EcRead(struct acpi_ec_softc *sc, UINT8 Address, UINT8 *Data)
{
	ACPI_STATUS status;
	UINT8 data;
	u_int gen_count;

	ACPI_SERIAL_ASSERT(ec);
	CTR1(KTR_ACPI, "ec read from %#x", Address);

	/* If we can't start burst mode, continue anyway. */
	status = EcCommand(sc, EC_COMMAND_BURST_ENABLE);
	if (status == AE_OK) {
		data = EC_GET_DATA(sc);
		if (data == EC_BURST_ACK) {
			CTR0(KTR_ACPI, "ec burst enabled");
			sc->ec_burstactive = TRUE;
		}
	}

	status = EcCommand(sc, EC_COMMAND_READ);
	if (ACPI_FAILURE(status))
		return (status);

	gen_count = sc->ec_gencount;
	EC_SET_DATA(sc, Address);
	status = EcWaitEvent(sc, EC_EVENT_OUTPUT_BUFFER_FULL, gen_count);
	if (ACPI_FAILURE(status)) {
		device_printf(sc->ec_dev,
		    "EcRead: failed waiting to get data\n");
		return (status);
	}
	*Data = EC_GET_DATA(sc);

	if (sc->ec_burstactive) {
		sc->ec_burstactive = FALSE;
		status = EcCommand(sc, EC_COMMAND_BURST_DISABLE);
		if (ACPI_FAILURE(status))
			return (status);
		CTR0(KTR_ACPI, "ec disabled burst ok");
	}

	return (AE_OK);
}
void
openpic_dispatch(device_t dev, struct trapframe *tf)
{
	struct openpic_softc *sc;
	u_int cpuid, vector;

	CTR1(KTR_INTR, "%s: got interrupt", __func__);

	cpuid = (dev == root_pic) ? PCPU_GET(cpuid) : 0;

	sc = device_get_softc(dev);
	while (1) {
		vector = openpic_read(sc, OPENPIC_PCPU_IACK(cpuid));
		vector &= OPENPIC_VECTOR_MASK;
		if (vector == 255)
			break;
		powerpc_dispatch_intr(vector, tf);
	}
}
/*
 * Remove a thread from its KSEGRP's run queue.
 * This in turn may remove it from a KSE if it was already assigned
 * to one, possibly causing a new thread to be assigned to the KSE
 * and the KSE getting a new priority.
 */
static void
remrunqueue(struct thread *td)
{
	struct thread *td2, *td3;
	struct ksegrp *kg;
	struct kse *ke;

	mtx_assert(&sched_lock, MA_OWNED);
	KASSERT((TD_ON_RUNQ(td)), ("remrunqueue: Bad state on run queue"));
	kg = td->td_ksegrp;
	ke = td->td_kse;
	CTR1(KTR_RUNQ, "remrunqueue: td%p", td);
	TD_SET_CAN_RUN(td);
	/*
	 * If it is not a threaded process, take the shortcut.
	 */
	if ((td->td_proc->p_flag & P_HADTHREADS) == 0) {
		/* remove from sys run queue and free up a slot */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		return;
	}
	td3 = TAILQ_PREV(td, threadqueue, td_runq);
	TAILQ_REMOVE(&kg->kg_runq, td, td_runq);
	kg->kg_runnable--;
	if (ke->ke_state == KES_ONRUNQ) {
		/*
		 * This thread has been assigned to the system run queue.
		 * We need to dissociate it and try to assign the
		 * KSE to the next available thread.  Then, we should
		 * see if we need to move the KSE in the run queues.
		 */
		sched_rem(td);
		ke->ke_state = KES_THREAD;
		td2 = kg->kg_last_assigned;
		KASSERT((td2 != NULL), ("last assigned has wrong value"));
		if (td2 == td)
			kg->kg_last_assigned = td3;
		/* slot_fill(kg); */ /* will replace it with another */
	}
}
static unsigned int
cxgbei_task_reserve_ttt(struct icl_conn *ic, void **prv, union ctl_io *io,
    unsigned int *ttt)
{
	struct icl_cxgbei_conn *icc = ic_to_icc(ic);
	struct toepcb *toep = icc->toep;
	struct adapter *sc = td_adapter(toep->td);
	struct cxgbei_data *ci = sc->iscsi_ulp_softc;
	struct cxgbei_task_data *tdata = NULL;
	int xferlen, err = -1;
	struct cxgbei_sgl *sge = NULL;

	MPASS(icc->icc_signature == CXGBEI_CONN_SIGNATURE);

	xferlen = (io->scsiio.kern_data_len - io->scsiio.ext_data_filled);
	tdata = (struct cxgbei_task_data *)*prv;
	if ((xferlen == 0) || (tdata == NULL))
		goto out;
	if (xferlen < DDP_THRESHOLD)
		goto out;
	tdata->nsge = cxgbei_map_sg_tgt(tdata->sgl, io);
	if (tdata->nsge == 0) {
		CTR1(KTR_CXGBE, "%s: map_sg failed", __func__);
		return 0;
	}
	sge = tdata->sgl;

	tdata->sc_ddp_tag = *ttt;
	if (cxgbei_ulp2_sw_tag_usable(&ci->tag_format, tdata->sc_ddp_tag)) {
		err = t4_sk_ddp_tag_reserve(ci, icc, xferlen, sge,
		    tdata->nsge, &tdata->sc_ddp_tag);
	} else {
		CTR2(KTR_CXGBE, "%s: sc_ddp_tag:0x%x not usable",
		    __func__, tdata->sc_ddp_tag);
	}
out:
	if (err < 0)
		tdata->sc_ddp_tag =
		    cxgbei_ulp2_set_non_ddp_tag(&ci->tag_format, *ttt);

	return tdata->sc_ddp_tag;
}
/*
 * Called by a CPU to restart stopped CPUs.
 *
 * Usually (but not necessarily) called with 'stopped_cpus' as its arg.
 *
 * - Signals all CPUs in map to restart.
 * - Waits for each to restart.
 *
 * Returns:
 *  -1: error
 *   0: NA
 *   1: ok
 */
int
restart_cpus(cpuset_t map)
{
#ifdef KTR
	char cpusetbuf[CPUSETBUFSIZ];
#endif

	if (!smp_started)
		return 0;

	CTR1(KTR_SMP, "restart_cpus(%s)", cpusetobj_strprint(cpusetbuf, &map));

	/* signal other cpus to restart */
	CPU_COPY_STORE_REL(&map, &started_cpus);

	/* wait for each to clear its bit */
	while (CPU_OVERLAP(&stopped_cpus, &map))
		cpu_spinwait();

	return 1;
}
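/*
 * Unlike the cpumask_t variant earlier in this listing, the cpuset_t
 * variant manipulates maps through the CPU_* macros.  A hypothetical
 * helper showing how such a map might be built, assuming the
 * mp_maxid/CPU_ABSENT() symbols from <sys/smp.h>:
 */
#include <sys/param.h>
#include <sys/cpuset.h>
#include <sys/smp.h>

static cpuset_t
other_cpus_example(u_int self)
{
	cpuset_t map;
	u_int i;

	CPU_ZERO(&map);
	for (i = 0; i <= mp_maxid; i++)
		if (i != self && !CPU_ABSENT(i))
			CPU_SET(i, &map);
	return (map);
}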