/*
 * mpcsa_usart_disable:
 *
 *	Power down the peripheral behind this USART.  For US3 (the GSM
 *	modem) the CTS interrupt is torn down, the modem pins are detached
 *	from the USART, and the modem is given a timed power-off pulse.
 *	Other USART ids need no special action.  Always returns 0.
 */
static int
mpcsa_usart_disable(struct at91usart_softc *dev)
{
	struct mpcsa_usart_softc *sc = (struct mpcsa_usart_softc *)dev;

	/* Reset the activity flags and update the comm LED. */
	if (sc->sc_tx_busy || sc->sc_rx_busy) {
		sc->sc_tx_busy = sc->sc_rx_busy = 0;
		comm_led(sc, 1);
	}
	switch (sc->sc_dev.sc_pid) {
	case PID_US3:
		/* Stop listening to modem CTS and drop the GSMON line. */
		at91pio_intr_disestablish(sc->sc_piob, PB_CTS4, sc->sc_cts_ih);
		at91pio_clear(sc->sc_pioa, PA_GSMON);
		/* ~350 ms settle delay, rounded up to whole ticks. */
		ltsleep(sc, 0, "gsmoffd", (hz * 350 + 999) / 1000, NULL);
		/* Detach TXD from the USART; float RTS/DTR as inputs. */
		at91pio_per(sc->sc_pioa, PA_TXD4, -1);
		at91pio_in(sc->sc_piob, PB_RTS4);
		at91pio_in(sc->sc_piod, PD_DTR4);
		/*
		 * Pulse GSMOFF for ~4 s to power the modem down.
		 * NOTE(review): pulse widths presumably come from the
		 * modem's datasheet — confirm before changing them.
		 */
		at91pio_set(sc->sc_pioa, PA_GSMOFF);
		ltsleep(sc, 0, "gsmoff", hz * 4, NULL);
		at91pio_clear(sc->sc_pioa, PA_GSMOFF);
		break;
	}
	conn_led(sc, 0);
	return 0;
}
/*
 * mpcsa_usart_enable:
 *
 *	Power up the peripheral behind this USART.  For US3 (the GSM
 *	modem) this runs the timed power-on sequence, attaches the modem
 *	pins to the USART, and hooks the CTS pin-change interrupt.
 *	Always returns 0.
 */
static int
mpcsa_usart_enable(struct at91usart_softc *dev)
{
	struct mpcsa_usart_softc *sc = (struct mpcsa_usart_softc *)dev;

	conn_led(sc, 1);
	switch (sc->sc_dev.sc_pid) {
	case PID_US3:
		/*
		 * turn gsm on: release GSMOFF, wait ~4 s, then pulse
		 * GSMON for ~2 s.  NOTE(review): timings presumably per
		 * the modem's power-on spec — confirm before changing.
		 */
		at91pio_clear(sc->sc_pioa, PA_GSMOFF);
		ltsleep(sc, 0, "gsmond", 4 * hz, NULL);
		at91pio_set(sc->sc_pioa, PA_GSMON);
		ltsleep(sc, 0, "gsmon", 2 * hz, NULL);
		at91pio_clear(sc->sc_pioa, PA_GSMON);
		/* then attach pins to devices etc: TXD back to the
		 * USART, RTS/DTR driven low as outputs. */
		at91pio_per(sc->sc_pioa, PA_TXD4, 1);
		at91pio_clear(sc->sc_piob, PB_RTS4);
		at91pio_clear(sc->sc_piod, PD_DTR4);
		at91pio_out(sc->sc_piob, PB_RTS4);
		at91pio_out(sc->sc_piod, PD_DTR4);
		/* catch CTS interrupt */
		sc->sc_cts_ih = at91pio_intr_establish(sc->sc_piob, PB_CTS4,
		    IPL_TTY, mpcsa_gsm_cts_intr, sc);
		break;
	}
	return 0;
}
/*
 * rf_ShutdownEngine:
 *
 *	Shutdown hook for one RAID set: ask the rf_RaidIOThread to exit,
 *	wait until it acknowledges, then tell the DAG execution engine
 *	to terminate.
 */
static void
rf_ShutdownEngine(void *arg)
{
	RF_Raid_t *raidPtr;
	/*
	 * NOTE(review): "ks" looks unused here, but the DO_LOCK/DO_UNLOCK
	 * macros below presumably expand to code that saves/restores an
	 * spl level in a local named "ks" — confirm against the macro
	 * definitions before removing it.
	 */
	int ks;

	raidPtr = (RF_Raid_t *) arg;

	/* Tell the rf_RaidIOThread to shutdown */
	simple_lock(&(raidPtr->iodone_lock));
	raidPtr->shutdown_raidio = 1;
	wakeup(&(raidPtr->iodone));

	/*
	 * ...and wait for it to tell us it has finished.  The thread
	 * clears shutdown_raidio (under iodone_lock) just before it
	 * exits and then wakes us on &shutdown_raidio.
	 */
	while (raidPtr->shutdown_raidio)
		ltsleep(&(raidPtr->shutdown_raidio), PRIBIO, "raidshutdown",
			0, &(raidPtr->iodone_lock));
	simple_unlock(&(raidPtr->iodone_lock));

	/* Now shut down the DAG execution engine. */
	DO_LOCK(raidPtr);
	raidPtr->shutdown_engine = 1;
	DO_SIGNAL(raidPtr);
	DO_UNLOCK(raidPtr);
}
/*
 * rf_RaidIOThread:
 *
 *	Per-array kernel thread.  Runs at splbio with iodone_lock held
 *	(temporarily dropped around callbacks), completing finished disk
 *	I/Os from the iodone queue, running deferred parity-map work,
 *	and kicking off pending outgoing I/O, until rf_ShutdownEngine
 *	sets shutdown_raidio.
 */
static void
rf_RaidIOThread(RF_ThreadArg_t arg)
{
	RF_Raid_t *raidPtr;
	RF_DiskQueueData_t *req;
	int s;

	raidPtr = (RF_Raid_t *) arg;

	s = splbio();
	simple_lock(&(raidPtr->iodone_lock));

	while (!raidPtr->shutdown_raidio) {
		/*
		 * if there is nothing to do, then snooze.
		 * NOTE(review): we only sleep when the iodone queue is
		 * empty AND rf_buf_queue_check() is non-zero — presumably
		 * it returns non-zero when there is no buffer-queue work
		 * either; confirm its return convention.
		 */
		if (TAILQ_EMPTY(&(raidPtr->iodone)) &&
		    rf_buf_queue_check(raidPtr->raidid)) {
			ltsleep(&(raidPtr->iodone), PRIBIO, "raidiow", 0,
				&(raidPtr->iodone_lock));
		}

		/* Check for deferred parity-map-related work. */
		if (raidPtr->parity_map != NULL) {
			/* Drop the lock around the parity-map call. */
			simple_unlock(&(raidPtr->iodone_lock));
			rf_paritymap_checkwork(raidPtr->parity_map);
			simple_lock(&(raidPtr->iodone_lock));
		}

		/* See what I/Os, if any, have arrived */
		while ((req = TAILQ_FIRST(&(raidPtr->iodone))) != NULL) {
			TAILQ_REMOVE(&(raidPtr->iodone), req, iodone_entries);
			/* Completion callbacks run unlocked. */
			simple_unlock(&(raidPtr->iodone_lock));
			rf_DiskIOComplete(req->queue, req, req->error);
			(req->CompleteFunc) (req->argument, req->error);
			simple_lock(&(raidPtr->iodone_lock));
		}

		/* process any pending outgoing IO */
		simple_unlock(&(raidPtr->iodone_lock));
		raidstart(raidPtr);
		simple_lock(&(raidPtr->iodone_lock));
	}

	/* Let rf_ShutdownEngine know that we're done... */
	raidPtr->shutdown_raidio = 0;
	wakeup(&(raidPtr->shutdown_raidio));
	simple_unlock(&(raidPtr->iodone_lock));
	splx(s);

	kthread_exit(0);
}
/*
 * spi_wait:
 *
 *	Block until the given (already submitted) transfer has completed,
 *	i.e. until SPI_F_DONE appears in st->st_flags.  The completion
 *	path is expected to set the flag and wakeup(st).
 *
 *	Fix: the original condition was "!st->st_flags & SPI_F_DONE",
 *	which parses as "(!st->st_flags) & SPI_F_DONE" because unary '!'
 *	binds tighter than binary '&'.  That loop would spin while
 *	st_flags was zero but exit as soon as ANY flag bit was set —
 *	even one other than SPI_F_DONE — so the wait could return before
 *	the transfer was actually done.  Test the DONE bit explicitly.
 */
void
spi_wait(struct spi_transfer *st)
{
	int s;

	s = splserial();
	simple_lock(&st->st_lock);
	while ((st->st_flags & SPI_F_DONE) == 0) {
		ltsleep(st, PWAIT, "spi_wait", 0, &st->st_lock);
	}
	simple_unlock(&st->st_lock);
	splx(s);
}
/*
 * rf_RemoveFromActiveReconTable:
 *
 *	Deletes an entry from the parity stripe status (pss) table after
 *	reconstruction of that stripe has completed, wakes any callers
 *	queued on the entry, and frees it.
 */
void
rf_RemoveFromActiveReconTable(RF_Raid_t *raidPtr, RF_StripeNum_t psid,
			      RF_ReconUnitNum_t which_ru)
{
	RF_PSStatusHeader_t *hdr = &(raidPtr->reconControl->pssTable[RF_HASH_PSID(raidPtr, psid)]);
	RF_ReconParityStripeStatus_t *p, *pt;
	RF_CallbackDesc_t *cb, *cb1;

	/*
	 * Acquire the per-bucket "lock" flag, sleeping while another
	 * thread holds it.  hdr->mutex protects only the flag itself.
	 */
	RF_LOCK_MUTEX(hdr->mutex);
	while(hdr->lock) {
		ltsleep(&hdr->lock, PRIBIO, "rf_racrecon", 0, &hdr->mutex);
	}
	hdr->lock = 1;
	RF_UNLOCK_MUTEX(hdr->mutex);

	/* Walk the hash chain for (psid, which_ru); pt trails as the
	 * predecessor so we can unlink. */
	for (pt = NULL, p = hdr->chain; p; pt = p, p = p->next) {
		if ((p->parityStripeID == psid) && (p->which_ru == which_ru))
			break;
	}
	/* The entry must exist; dump the table for diagnosis if not. */
	if (p == NULL) {
		rf_PrintPSStatusTable(raidPtr);
	}
	RF_ASSERT(p);		/* it must be there */

	Dprintf2("PSS: deleting pss for psid %ld ru %d\n", psid, which_ru);

	/* delete this entry from the hash chain */
	if (pt)
		pt->next = p->next;
	else
		hdr->chain = p->next;
	p->next = NULL;

	/*
	 * Release the bucket lock flag.
	 * NOTE(review): there is no wakeup(&hdr->lock) here — sleepers
	 * in the loop above are presumably woken elsewhere; confirm
	 * against the other users of hdr->lock.
	 */
	RF_LOCK_MUTEX(hdr->mutex);
	hdr->lock = 0;
	RF_UNLOCK_MUTEX(hdr->mutex);

	/* wake up anyone waiting on the parity stripe ID */
	cb = p->procWaitList;
	p->procWaitList = NULL;
	while (cb) {
		Dprintf1("Waking up access waiting on parity stripe ID %ld\n",
			 p->parityStripeID);
		cb1 = cb->next;
		(cb->callbackFunc) (cb->callbackArg);
		rf_FreeCallbackDesc(cb);
		cb = cb1;
	}

	rf_FreePSStatus(raidPtr, p);
}
/*
 * dmio_write:
 *
 *	Write file op.  Each sizeof(struct dmio_usrreq) record copied in
 *	from userspace becomes one dmover request queued on the session;
 *	completions are later collected via dmio_read.  A partial write
 *	after progress has been made returns success.
 *
 *	Locking: the main loop is entered at splsoftclock() with
 *	ds_slock held; both are dropped around blocking operations and
 *	re-acquired before the loop test and before each "break".
 */
static int
dmio_write(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrreq req;
	int error = 0, s, progress = 0;

	/* Writes must be a whole number of request records. */
	if ((uio->uio_resid % sizeof(req)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		if (ds->ds_nreqs == DMIO_NREQS_MAX) {
			/* Too many requests outstanding: fail (or report
			 * partial progress) for non-blocking descriptors,
			 * otherwise sleep until a reader frees a slot. */
			if (fp->f_flag & FNONBLOCK) {
				error = progress ? 0 : EWOULDBLOCK;
				break;
			}
			ds->ds_flags |= DMIO_STATE_WRITE_WAIT;
			error = ltsleep(&ds->ds_nreqs, PRIBIO | PCATCH,
			    "dmiowr", 0, &ds->ds_slock);
			if (error)
				break;
			continue;
		}

		/* Reserve a request slot before dropping the lock. */
		ds->ds_nreqs++;

		simple_unlock(&ds->ds_slock);
		splx(s);

		progress = 1;

		/* Copy in the next request record (may sleep). */
		error = uiomove(&req, sizeof(req), uio);
		if (error) {
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;	/* release the reserved slot */
			break;
		}

		/* XXX How should this interact with FNONBLOCK? */
		dreq = dmover_request_alloc(ds->ds_session, NULL);
		if (dreq == NULL) {
			/* XXX */
			s = splsoftclock();
			simple_lock(&ds->ds_slock);
			ds->ds_nreqs--;	/* release the reserved slot */
			error = ENOMEM;
			break;
		}
		s = splsoftclock();
		dus = pool_get(&dmio_usrreq_state_pool, PR_WAITOK);
		splx(s);

		error = dmio_usrreq_init(fp, dus, &req, dreq);
		if (error) {
			dmover_request_free(dreq);
			s = splsoftclock();
			pool_put(&dmio_usrreq_state_pool, dus);
			simple_lock(&ds->ds_slock);
			break;
		}

		dreq->dreq_callback = dmio_usrreq_done;
		dreq->dreq_cookie = dus;

		dus->dus_req = dreq;
		dus->dus_id = req.req_id;

		/* Queue the request on the pending list, then hand it
		 * to dmover with the lock dropped. */
		s = splsoftclock();
		simple_lock(&ds->ds_slock);

		TAILQ_INSERT_TAIL(&ds->ds_pending, dus, dus_q);

		simple_unlock(&ds->ds_slock);
		splx(s);

		dmover_process(dreq);

		/* Restore the loop invariant. */
		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}
/*
 * dmio_read:
 *
 *	Read file op.  Drains completed requests from ds_complete,
 *	copying one sizeof(struct dmio_usrresp) record out to userspace
 *	per completed request.  A partial read after progress has been
 *	made returns success.
 *
 *	Locking: the outer loop runs at splsoftclock() with ds_slock
 *	held; both are dropped while building and copying out the
 *	response, then re-acquired.
 */
static int
dmio_read(struct file *fp, off_t *offp, struct uio *uio,
    kauth_cred_t cred, int flags)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_request *dreq;
	struct dmio_usrresp resp;
	int s, error = 0, progress = 0;

	/* Reads must be a whole number of response records. */
	if ((uio->uio_resid % sizeof(resp)) != 0)
		return (EINVAL);

	if (ds->ds_session == NULL)
		return (ENXIO);

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	while (uio->uio_resid != 0) {

		/* Wait (lock held) until a completed request appears. */
		for (;;) {
			dus = TAILQ_FIRST(&ds->ds_complete);
			if (dus == NULL) {
				if (fp->f_flag & FNONBLOCK) {
					error = progress ? 0 : EWOULDBLOCK;
					goto out;
				}
				ds->ds_flags |= DMIO_STATE_READ_WAIT;
				error = ltsleep(&ds->ds_complete,
				    PRIBIO | PCATCH, "dmvrrd", 0,
				    &ds->ds_slock);
				if (error)
					goto out;
				continue;
			}
			/* Have a completed request. */
			TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
			ds->ds_nreqs--;
			/* Unblock a writer waiting for a free slot. */
			if (ds->ds_flags & DMIO_STATE_WRITE_WAIT) {
				ds->ds_flags &= ~DMIO_STATE_WRITE_WAIT;
				wakeup(&ds->ds_nreqs);
			}
			/* Notify poll/select waiters. */
			if (ds->ds_flags & DMIO_STATE_SEL) {
				ds->ds_flags &= ~DMIO_STATE_SEL;
				selnotify(&ds->ds_selq, POLLIN | POLLRDNORM, 0);
			}
			break;
		}

		simple_unlock(&ds->ds_slock);

		/* Build the userspace response record. */
		dreq = dus->dus_req;
		resp.resp_id = dus->dus_id;
		if (dreq->dreq_flags & DMOVER_REQ_ERROR)
			resp.resp_error = dreq->dreq_error;
		else {
			resp.resp_error = 0;
			memcpy(resp.resp_immediate, dreq->dreq_immediate,
			    sizeof(resp.resp_immediate));
		}

		dmio_usrreq_fini(ds, dus);

		splx(s);

		progress = 1;

		dmover_request_free(dreq);

		error = uiomove(&resp, sizeof(resp), uio);
		if (error)
			return (error);

		/* Restore the loop invariant. */
		s = splsoftclock();
		simple_lock(&ds->ds_slock);
	}

 out:
	simple_unlock(&ds->ds_slock);
	splx(s);

	return (error);
}
/*
 * rf_GetNextReconEvent:
 *
 *	Dequeue and return the next event from the reconstruction event
 *	queue, sleeping (on eq_mutex) until one arrives.  Also throttles
 *	the reconstruction thread: if it has executed without blocking
 *	for more than MAX_RECON_EXEC_USECS of wall time (measured with
 *	the recon_exec_timer), it is forced to sleep for RECON_DELAY_MS
 *	first.  Returns with eq_mutex released.
 */
RF_ReconEvent_t *
rf_GetNextReconEvent(RF_RaidReconDesc_t *reconDesc)
{
	RF_Raid_t *raidPtr = reconDesc->raidPtr;
	RF_ReconCtrl_t *rctrl = raidPtr->reconControl;
	RF_ReconEvent_t *event;
	int stall_count;

	RF_LOCK_MUTEX(rctrl->eq_mutex);
	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));

	/* mpsleep timeout value: secs = timo_val/hz.  'ticks' here is
	   defined as cycle-counter ticks, not softclock ticks */
#define MAX_RECON_EXEC_USECS (100 * 1000)	/* 100 ms */
#define RECON_DELAY_MS 25
#define RECON_TIMO ((RECON_DELAY_MS * hz) / 1000)

	/* we are not pre-emptible in the kernel, but we don't want to run
	 * forever.  If we run w/o blocking for more than MAX_RECON_EXEC_TICKS
	 * ticks of the cycle counter, delay for RECON_DELAY before
	 * continuing.  this may murder us with context switches, so we may
	 * need to increase both the MAX...TICKS and the RECON_DELAY_MS. */
	if (reconDesc->reconExecTimerRunning) {
		int status;

		/* Account the elapsed execution time since the timer was
		 * last (re)started, tracking the running maximum. */
		RF_ETIMER_STOP(reconDesc->recon_exec_timer);
		RF_ETIMER_EVAL(reconDesc->recon_exec_timer);
		reconDesc->reconExecTicks +=
			RF_ETIMER_VAL_US(reconDesc->recon_exec_timer);
		if (reconDesc->reconExecTicks > reconDesc->maxReconExecTicks)
			reconDesc->maxReconExecTicks =
				reconDesc->reconExecTicks;
		if (reconDesc->reconExecTicks >= MAX_RECON_EXEC_USECS) {
			/* we've been running too long.  delay for
			 * RECON_DELAY_MS */
#if RF_RECON_STATS > 0
			reconDesc->numReconExecDelays++;
#endif				/* RF_RECON_STATS > 0 */

			/* Pure delay: nothing wakes this channel, so the
			 * sleep is expected to end via timeout. */
			status = ltsleep(&reconDesc->reconExecTicks, PRIBIO,
					 "recon delay", RECON_TIMO,
					 &rctrl->eq_mutex);
			RF_ASSERT(status == EWOULDBLOCK);
			reconDesc->reconExecTicks = 0;
		}
	}

	/* Wait for an event to show up on the queue.  If we stall for
	 * too many wakeups with head-sep callbacks outstanding, kick
	 * those waiters to make forward progress. */
	stall_count = 0;
	while (!rctrl->eventQueue) {
#if RF_RECON_STATS > 0
		reconDesc->numReconEventWaits++;
#endif				/* RF_RECON_STATS > 0 */

		ltsleep(&(rctrl)->eventQueue, PRIBIO,  "raidframe eventq",
			RF_EVENTQ_WAIT, &((rctrl)->eq_mutex));

		stall_count++;

		if ((stall_count > 10) && rctrl->headSepCBList) {
			/* There is work to do on the callback list, and we've
			   waited long enough... */
			rf_WakeupHeadSepCBWaiters(raidPtr);
			stall_count = 0;
		}
		reconDesc->reconExecTicks = 0;	/* we've just waited */
	}

	/* Restart execution-time accounting for the caller. */
	reconDesc->reconExecTimerRunning = 1;
	if (RF_ETIMER_VAL_US(reconDesc->recon_exec_timer)!=0) {
		/* it moved!!  reset the timer. */
		RF_ETIMER_START(reconDesc->recon_exec_timer);
	}
	/* Pop the head event off the queue. */
	event = rctrl->eventQueue;
	rctrl->eventQueue = event->next;
	event->next = NULL;
	rctrl->eq_count--;

	/* q null and count==0 must be equivalent conditions */
	RF_ASSERT((rctrl->eventQueue == NULL) == (rctrl->eq_count == 0));
	RF_UNLOCK_MUTEX(rctrl->eq_mutex);
	return (event);
}