Example #1
int
dmawait(int chan)
{
	Ctlr *ctlr;
	u32int *r;
	int s;

	ctlr = &dma[chan];
	tsleep(&ctlr->r, dmadone, ctlr, 3000);
	ctlr->dmadone = 0;
	r = ctlr->regs;
	DBG dumpdregs("after sleep", r);
	s = r[Cs];
	if((s & (Active|End|Error)) != End){
		print("dma chan %d %s Cs %ux Debug %ux\n", chan,
			(s&End)? "error" : "timeout", s, r[Debug]);
		r[Cs] = Reset;
		r[Debug] = Clrerrors;
		return -1;
	}
	r[Cs] = Int|End;
	return 0;
}
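This Plan 9 flavor of tsleep() takes a rendezvous point, a condition callback (dmadone) with its argument, and a timeout in milliseconds; the condition is checked before sleeping and again on wakeup, and the caller still re-reads the hardware status afterwards. A rough user-space analogy of that wait-until-predicate-or-timeout shape, as a sketch only (POSIX threads; every name below is illustrative, not part of any kernel API):

/*
 * Sketch: wait until a predicate holds or a timeout in ms expires,
 * then let the caller re-check state, as dmawait() does above.
 */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <time.h>

struct waiter {
	pthread_mutex_t mu;
	pthread_cond_t cv;
	bool done;		/* plays the role of ctlr->dmadone */
};

/* Returns 0 if the predicate became true, -1 on timeout. */
static int
wait_done(struct waiter *w, int ms)
{
	struct timespec ts;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &ts);
	ts.tv_sec += ms / 1000;
	ts.tv_nsec += (long)(ms % 1000) * 1000000L;
	if (ts.tv_nsec >= 1000000000L) {
		ts.tv_sec++;
		ts.tv_nsec -= 1000000000L;
	}
	pthread_mutex_lock(&w->mu);
	while (!w->done && rc != ETIMEDOUT)
		rc = pthread_cond_timedwait(&w->cv, &w->mu, &ts);
	rc = w->done ? 0 : -1;	/* final re-check, like the Cs read above */
	pthread_mutex_unlock(&w->mu);
	return rc;
}

The completion side (the DMA interrupt's analogue) would set w->done and signal w->cv while holding w->mu.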
Example #2
static int
cuda_todr_get(todr_chip_handle_t tch, volatile struct timeval *tvp)
{
	struct cuda_softc *sc = tch->cookie;
	int cnt = 0;
	uint8_t cmd[] = { CUDA_PSEUDO, CMD_READ_RTC};

	sc->sc_tod = 0;
	cuda_send(sc, 0, 2, cmd);

	while ((sc->sc_tod == 0) && (cnt < 10)) {
		tsleep(&sc->sc_todev, 0, "todr", 10);
		cnt++;
	}

	if (sc->sc_tod == 0)
		return EIO;

	tvp->tv_sec = sc->sc_tod - DIFF19041970;
	DPRINTF("tod: %ld\n", tvp->tv_sec);
	tvp->tv_usec = 0;
	return 0;
}
Example #3
static int
ti_iic_wait(struct ti_iic_softc *sc, uint16_t mask, uint16_t val, int flags)
{
	int retry = 10;
	uint16_t v;
	DPRINTF(("ti_iic_wait mask %#x val %#x flags %#x\n", mask, val, flags));

	while (((v = I2C_READ_REG(sc, AM335X_I2C_IRQSTATUS_RAW)) & mask) != val) {
		--retry;
		if (retry == 0) {
			printf("%s: wait timeout, mask=%#x val=%#x stat=%#x\n",
			    DEVNAME(sc), mask, val, v);
			return EBUSY;
		}
		if (flags & I2C_F_POLL)
			delay(50000);
		else
			tsleep(&sc->sc_dev, PWAIT, "tiiic", 50);
	}
	DPRINTF(("ti_iic_wait done retry %#x\n", retry));

	return 0;
}
Example #4
/*
 * (Try to) put the drive online. This is done the first time the
 * drive is opened, or if it has fallen offline.
 */
int
rx_putonline(struct rx_softc *rx)
{
	struct	mscp *mp;
	struct	mscp_softc *mi = device_private(device_parent(rx->ra_dev));

	rx->ra_state = DK_CLOSED;
	mp = mscp_getcp(mi, MSCP_WAIT);
	mp->mscp_opcode = M_OP_ONLINE;
	mp->mscp_unit = rx->ra_hwunit;
	mp->mscp_cmdref = 1;
	*mp->mscp_addr |= MSCP_OWN | MSCP_INT;

	/* Poll away */
	bus_space_read_2(mi->mi_iot, mi->mi_iph, 0);
	if (tsleep(&rx->ra_state, PRIBIO, "rxonline", 100*100))
		rx->ra_state = DK_CLOSED;

	if (rx->ra_state == DK_CLOSED)
		return MSCP_FAILED;

	return MSCP_DONE;
}
Example #5
/*
 * Wait for operations on the buffer to complete.
 * When they do, extract and return the I/O's error value.
 */
int
biowait(struct buf *bp)
{
	int s;

	KASSERT(!(bp->b_flags & B_ASYNC));

	s = splbio();
	while (!ISSET(bp->b_flags, B_DONE))
		tsleep(bp, PRIBIO + 1, "biowait", 0);
	splx(s);

	/* check for interruption of I/O (e.g. via NFS), then errors. */
	if (ISSET(bp->b_flags, B_EINTR)) {
		CLR(bp->b_flags, B_EINTR);
		return (EINTR);
	}

	if (ISSET(bp->b_flags, B_ERROR))
		return (bp->b_error ? bp->b_error : EIO);
	else
		return (0);
}
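The splbio()/test/tsleep() sequence above is the classic defense against a lost wakeup: raising the interrupt priority level keeps biodone()'s wakeup from firing between the B_DONE test and the sleep, and the while loop re-tests the flag because being woken on bp proves nothing by itself. A minimal sketch of the same handshake in portable C, assuming a pthread mutex stands in for splbio()/splx() and pthread_cond_broadcast() for wakeup():

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t bio_mu = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t bio_cv = PTHREAD_COND_INITIALIZER;
static bool bio_done;

static void
bio_wait(void)				/* ~ biowait() */
{
	pthread_mutex_lock(&bio_mu);	/* ~ splbio() */
	while (!bio_done)		/* loop: wakeups may be spurious */
		pthread_cond_wait(&bio_cv, &bio_mu);
	pthread_mutex_unlock(&bio_mu);	/* ~ splx(s) */
}

static void
bio_complete(void)			/* ~ biodone() */
{
	pthread_mutex_lock(&bio_mu);
	bio_done = true;
	pthread_cond_broadcast(&bio_cv);	/* ~ wakeup(bp) */
	pthread_mutex_unlock(&bio_mu);
}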
Example #6
static char*
flushq(Ctlr *ctlr, uint qid)
{
	TXQ *q;
	int i;

	q = &ctlr->tx[qid];
	qlock(q);
	for(i = 0; i < 200 && !ctlr->broken; i++){
		if(txqempty(q)){
			qunlock(q);
			return nil;
		}
		if(islo() && !waserror()){
			tsleep(q, txqempty, q, 10);
			poperror();
		}
	}
	qunlock(q);
	if(ctlr->broken)
		return "flushq: broken";
	return "flushq: timeout";
}
Example #7
void
randomdev_deinit(void)
{
	/* Deregister the randomness harvesting routine */
	randomdev_deinit_harvester();

	/*
	 * Command the hash/reseed thread to end and wait for it to finish
	 */
	random_kthread_control = -1;
	tsleep((void *)&random_kthread_control, 0, "term", 0);

#if defined(RANDOM_YARROW)
	random_yarrow_deinit_alg();
#endif
#if defined(RANDOM_FORTUNA)
	random_fortuna_deinit_alg();
#endif

#ifndef __OSV__
	sysctl_ctx_free(&random_clist);
#endif
}
Example #8
int
ata_suspend(device_t dev)
{
    struct ata_channel *ch;

    /* check for valid device */
    if (!dev || !(ch = device_get_softc(dev)))
	return ENXIO;

    /* wait for the channel to be IDLE or detached before suspending */
    while (ch->r_irq) {
	lockmgr(&ch->state_mtx, LK_EXCLUSIVE);
	if (ch->state == ATA_IDLE) {
	    ch->state = ATA_ACTIVE;
	    lockmgr(&ch->state_mtx, LK_RELEASE);
	    break;
	}
	lockmgr(&ch->state_mtx, LK_RELEASE);
	tsleep(ch, 0, "atasusp", hz/10);
    }
    ATA_LOCKING(dev, ATA_LF_UNLOCK);
    return 0;
}
Example #9
void
hammer_lock_ex_ident(struct hammer_lock *lock, const char *ident)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;

	KKASSERT(lock->refs);
	for (;;) {
		lv = lock->lockval;

		if (lv == 0) {
			nlv = 1 | HAMMER_LOCKF_EXCLUSIVE;
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				lock->lowner = td;
				break;
			}
		} else if ((lv & HAMMER_LOCKF_EXCLUSIVE) &&
			   lock->lowner == td) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				break;
		} else {
			if (hammer_debug_locks) {
				hdkprintf("held by %p\n", lock->lowner);
			}
			nlv = lv | HAMMER_LOCKF_WANTED;
			++hammer_contention_count;
			tsleep_interlock(&lock->lockval, 0);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				tsleep(&lock->lockval, PINTERLOCKED, ident, 0);
				if (hammer_debug_locks)
					hdkprintf("try again\n");
			}
		}
	}
}
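Note the discipline in the contended branch: the sleeper never blocks without first publishing HAMMER_LOCKF_WANTED through a successful compare-and-set, and the tsleep_interlock()/PINTERLOCKED pairing closes the remaining window between that CAS and the sleep. A compressed sketch of the CAS-retry shape in C11 atomics (illustration only: the flag values are invented, the recursive-owner branch is elided, and sched_yield() stands in for the interlocked tsleep(), which has no portable equivalent):

#include <sched.h>
#include <stdatomic.h>
#include <stdint.h>

#define LOCKF_EXCLUSIVE	0x40000000u	/* invented values */
#define LOCKF_WANTED	0x80000000u

static void
lock_ex(_Atomic uint32_t *lockval)
{
	uint32_t lv, nlv;

	for (;;) {
		lv = atomic_load(lockval);
		if (lv == 0) {
			nlv = 1 | LOCKF_EXCLUSIVE;	/* free: grab it */
			if (atomic_compare_exchange_strong(lockval, &lv, nlv))
				return;
		} else {
			nlv = lv | LOCKF_WANTED;	/* publish contention first */
			if (atomic_compare_exchange_strong(lockval, &lv, nlv))
				sched_yield();	/* kernel: interlocked tsleep() */
		}
	}
}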
Example #10
File: olpt.c Project: MarginC/kame
static	int
lptclose(dev_t dev, int flags, int fmt, struct thread *td)
{
	struct lpt_softc *sc;
#ifndef PC98
	int port;
#endif

	sc = devclass_get_softc(olpt_devclass, LPTUNIT(minor(dev)));
	if(sc->sc_flags & LP_BYPASS)
		goto end_close;

#ifndef PC98
	port = sc->sc_port;
#endif
	sc->sc_state &= ~OPEN;

#ifndef PC98
	/* if the last write was interrupted, don't complete it */
	if((!(sc->sc_state  & INTERRUPTED)) && (sc->sc_irq & LP_USE_IRQ))
		while ((inb(port+lpt_status) & (LPS_SEL|LPS_OUT|LPS_NBSY|LPS_NERR)) !=
			(LPS_SEL|LPS_NBSY|LPS_NERR) || sc->sc_xfercnt)
			/* wait 1/4 second, give up if we get a signal */
			if (tsleep ((caddr_t)sc, LPPRI|PCATCH,
				"lpclose", hz) != EWOULDBLOCK)
				break;

	outb(sc->sc_port+lpt_control, LPC_NINIT);
#endif
	brelse(sc->sc_inbuf);

end_close:
	sc->sc_state = 0;
	sc->sc_xfercnt = 0;
	lprintf(("closed.\n"));
	return(0);
}
Example #11
void
sbscn_shutdown(struct sbscn_channel *ch)
{
	struct tty *tp = ch->ch_tty;
	int s;

	s = splserial();

	/* If we were asserting flow control, then deassert it. */
	SET(ch->ch_rx_flags, RX_IBUF_BLOCKED);
	sbscn_dohwiflow(ch);

	/* Clear any break condition set with TIOCSBRK. */
	sbscn_break(ch, 0);

	/*
	 * Hang up if necessary.  Wait a bit, so the other side has time to
	 * notice even if we immediately open the port again.
	 * Avoid tsleeping above splhigh().
	 */
	if (ISSET(tp->t_cflag, HUPCL)) {
		sbscn_modem(ch, 0);
		splx(s);
		/* XXX tsleep will only timeout */
		(void) tsleep(ch, TTIPRI, ttclos, hz);
		s = splserial();
	}

	/* Turn off interrupts. */
#ifdef DDB
	if (ISSET(ch->ch_hwflags, SBSCN_HW_CONSOLE))
#if 0	/* DO NOT turn on break interrupt at this time. */
		ch->ch_imr = 0x04; /* interrupt on break */
#else
		ch->ch_imr = 0x00;
#endif
	else
#endif	/* DDB */
		ch->ch_imr = 0;
	/* (the interrupt-mask write and the rest of the teardown are
	 *  truncated in the source listing) */

	splx(s);
}
Example #12
/* ARGSUSED */
int
raclose(dev_t dev, int flags, int fmt, struct lwp *l)
{
	struct ra_softc *ra = mscp_device_lookup(dev);
	int mask = (1 << DISKPART(dev));

	mutex_enter(&ra->ra_disk.dk_openlock);

	switch (fmt) {
	case S_IFCHR:
		ra->ra_disk.dk_copenmask &= ~mask;
		break;
	case S_IFBLK:
		ra->ra_disk.dk_bopenmask &= ~mask;
		break;
	}
	ra->ra_disk.dk_openmask =
	    ra->ra_disk.dk_copenmask | ra->ra_disk.dk_bopenmask;

	/*
	 * Should wait for I/O to complete on this partition even if
	 * others are open, but wait for work on blkflush().
	 */
#if notyet
	if (ra->ra_openpart == 0) {
		s = spluba();
		while (bufq_peek(udautab[unit]) != NULL)
			(void) tsleep(&udautab[unit], PZERO - 1,
			    "raclose", 0);
		splx(s);
		ra->ra_state = DK_CLOSED;
		ra->ra_wlabel = 0;
	}
#endif
	mutex_exit(&ra->ra_disk.dk_openlock);
	return (0);
}
Example #13
static void
vm_pagezero(void)
{
	struct thread *td;
	struct proc *p;
	struct rtprio rtp;
	int pages = 0;
	int pri;

	td = curthread;
	p = td->td_proc;
	rtp.prio = RTP_PRIO_MAX;
	rtp.type = RTP_PRIO_IDLE;
	mtx_lock_spin(&sched_lock);
	rtp_to_pri(&rtp, td->td_ksegrp);
	pri = td->td_priority;
	mtx_unlock_spin(&sched_lock);
	PROC_LOCK(p);
	p->p_flag |= P_NOLOAD;
	PROC_UNLOCK(p);

	for (;;) {
		if (vm_page_zero_check()) {
			pages += vm_page_zero_idle();
			if (pages > idlezero_maxrun || sched_runnable()) {
				mtx_lock_spin(&sched_lock);
				td->td_proc->p_stats->p_ru.ru_nvcsw++;
				mi_switch();
				mtx_unlock_spin(&sched_lock);
				pages = 0;
			}
		} else {
			tsleep(&zero_state, pri, "pgzero", hz * 300);
			pages = 0;
		}
	}
}
Example #14
/*
 * shutdown the pipe
 */
void
pipeclose(struct pipe *cpipe)
{
	struct pipe *ppipe;
	if (cpipe) {
		pipeselwakeup(cpipe);

		/*
		 * If the other side is blocked, wake it up saying that
		 * we want to close it down.
		 */
		cpipe->pipe_state |= PIPE_EOF;
		while (cpipe->pipe_busy) {
			wakeup(cpipe);
			cpipe->pipe_state |= PIPE_WANT;
			tsleep(cpipe, PRIBIO, "pipecl", 0);
		}

		/*
		 * Disconnect from peer
		 */
		if ((ppipe = cpipe->pipe_peer) != NULL) {
			pipeselwakeup(ppipe);

			ppipe->pipe_state |= PIPE_EOF;
			wakeup(ppipe);
			ppipe->pipe_peer = NULL;
		}

		/*
		 * free resources
		 */
		pipe_free_kmem(cpipe);
		pool_put(&pipe_pool, cpipe);
	}
}
Example #15
static void
udsir_thread(void *arg)
{
	struct udsir_softc *sc = arg;
	int error;

	DPRINTFN(20, ("%s: starting polling thread\n", __func__));

	while (!sc->sc_closing) {
		if (!sc->sc_rd_readinprogress && !UDSIR_BLOCK_RX_DATA(sc))
			udsir_periodic(sc);

		if (!sc->sc_closing) {
			error = tsleep(&sc->sc_thread, PWAIT, "udsir", hz / 10);
			if (error == EWOULDBLOCK &&
			    sc->sc_rd_expectdataticks > 0)
				/*
				 * After a timeout decrement the tick
				 * counter within which time we expect
				 * data to arrive if we are receiving
				 * data...
				 */
				sc->sc_rd_expectdataticks--;
		}
	}

	DPRINTFN(20, ("%s: exiting polling thread\n", __func__));

	sc->sc_thread = NULL;

	wakeup(&sc->sc_closing);

	if (--sc->sc_refcnt < 0)
		usb_detach_wakeupold(sc->sc_dev);

	kthread_exit(0);
}
Example #16
/* 
 * Emit tone of frequency thz for given number of centisecs 
 */
static void
tone(unsigned int thz, unsigned int centisecs)
{
	int sps, timo;

	if (thz <= 0)
		return;

#ifdef DEBUG
	(void) printf("tone: thz=%d centisecs=%d\n", thz, centisecs);
#endif /* DEBUG */

	/* set timer to generate clicks at given frequency in Hertz */
	sps = splclock();

	if (timer_spkr_acquire()) {
		/* enter list of waiting procs ??? */
		splx(sps);
		return;
	}
	splx(sps);
	disable_intr();
	timer_spkr_setfreq(thz);
	enable_intr();

	/*
	 * Set timeout to endtone function, then give up the timeslice.
	 * This is so other processes can execute while the tone is being
	 * emitted.
	 */
	timo = centisecs * hz / 100;
	if (timo > 0)
		tsleep(&endtone, SPKRPRI | PCATCH, "spkrtn", timo);
	sps = splclock();
	timer_spkr_release();
	splx(sps);
}
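The tick conversion is timo = centisecs * hz / 100. With hz = 100, for instance, a half-second tone (centisecs = 50) sleeps for 50 ticks, while hz = 1000 would give 500; and thanks to the timo > 0 guard, a duration too short to round to a whole tick skips the tsleep() and releases the timer immediately.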
Example #17
/*
 * Wait for the PFS to sync past the specified TID
 */
int
hammer_ioc_wait_pseudofs(hammer_transaction_t trans, hammer_inode_t ip,
			 struct hammer_ioc_pseudofs_rw *pfs)
{
	hammer_pseudofs_inmem_t pfsm;
	struct hammer_pseudofs_data pfsd;
	uint32_t localization;
	hammer_tid_t tid;
	void *waitp;
	int error;

	if ((error = hammer_pfs_autodetect(pfs, ip)) != 0)
		return(error);
	localization = pfs_to_lo(pfs->pfs_id);

	if ((error = copyin(pfs->ondisk, &pfsd, sizeof(pfsd))) != 0)
		return(error);

	pfsm = hammer_load_pseudofs(trans, localization, &error);
	if (error == 0) {
		if (hammer_is_pfs_slave(&pfsm->pfsd)) {
			tid = pfsm->pfsd.sync_end_tid;
			waitp = &pfsm->pfsd.sync_end_tid;
		} else {
			tid = trans->hmp->flush_tid1;
			waitp = &trans->hmp->flush_tid1;
		}
		if (tid <= pfsd.sync_end_tid)
			tsleep(waitp, PCATCH, "hmrmwt", 0);
	}
	hammer_rel_pseudofs(trans->hmp, pfsm);
	if (error == EINTR) {
		pfs->head.flags |= HAMMER_IOC_HEAD_INTR;
		error = 0;
	}
	return(error);
}
Example #18
/*
 * Used to grab the process and keep it in the kernel to service
 * memory filesystem I/O requests.
 *
 * Loop servicing I/O requests.
 * Copy the requested data into or out of the memory filesystem
 * address space.
 */
int
mfs_start(struct mount *mp, int flags, struct proc *p)
{
	struct vnode *vp = VFSTOUFS(mp)->um_devvp;
	struct mfsnode *mfsp = VTOMFS(vp);
	struct buf *bp;
	int sleepreturn = 0;

	while (1) {
		while (1) {
			bp = bufq_dequeue(&mfsp->mfs_bufq);
			if (bp == NULL || mfsp->mfs_dying) {
				break;
			}
			mfs_doio(mfsp, bp);
			wakeup((caddr_t)bp);
		}
		if (mfsp->mfs_dying)
			break;
		/*
		 * If a non-ignored signal is received, try to unmount.
		 * If that fails, clear the signal (it has been "processed"),
		 * otherwise we will loop here, as tsleep will always return
		 * EINTR/ERESTART.
		 */
		if (sleepreturn != 0) {
			if (vfs_busy(mp, VB_WRITE|VB_NOWAIT) ||
			    dounmount(mp,
			    (CURSIG(p) == SIGKILL) ? MNT_FORCE : 0, p, NULL))
				CLRSIG(p, CURSIG(p));
			sleepreturn = 0;
			continue;
		}
		sleepreturn = tsleep((caddr_t)vp, mfs_pri, "mfsidl", 0);
	}
	return (0);
}
Example #19
static usbd_status
open_out_jack(struct umidi_jack *jack, void *arg, void (*intr)(void *))
{
	struct umidi_endpoint *ep = jack->endpoint;
	umidi_packet_bufp end;
	int s;
	int err;

	if (jack->opened)
		return USBD_IN_USE;

	jack->arg = arg;
	jack->u.out.intr = intr;
	jack->midiman_ppkt = NULL;
	end = ep->buffer + ep->buffer_size / sizeof *ep->buffer;
	s = splusb();
	jack->opened = 1;
	ep->num_open++;
	/*
	 * out_solicit maintains an invariant that there will always be
	 * (num_open - num_scheduled) slots free in the buffer. as we have
	 * just incremented num_open, the buffer may be too full to satisfy
	 * the invariant until a transfer completes, for which we must wait.
	 */
	while ( end - ep->next_slot < ep->num_open - ep->num_scheduled ) {
		err = tsleep(ep, PWAIT|PCATCH, "umi op", mstohz(10));
		if ( err ) {
			ep->num_open--;
			jack->opened = 0;
			splx(s);
			return USBD_IOERROR;
		}
	}
	splx(s);

	return USBD_NORMAL_COMPLETION;
}
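To make that invariant concrete: if the buffer has exactly one free slot (end - ep->next_slot == 1) and nothing is scheduled, opening a second jack pushes num_open - num_scheduled to 2, so the new opener sleeps in the loop above until a completed transfer wakes the endpoint and restores end - ep->next_slot >= ep->num_open - ep->num_scheduled.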
Example #20
vaddr_t
uvm_km_valloc_prefer_wait(struct vm_map *map, vsize_t size, voff_t prefer)
{
	vaddr_t kva;
	UVMHIST_FUNC("uvm_km_valloc_prefer_wait"); UVMHIST_CALLED(maphist);

	UVMHIST_LOG(maphist, "(map=%p, size=0x%lx)", map, size, 0,0);
	KASSERT(vm_map_pmap(map) == pmap_kernel());

	size = round_page(size);
	if (size > vm_map_max(map) - vm_map_min(map))
		return(0);

	while (1) {
		kva = vm_map_min(map);		/* hint */

		/*
		 * allocate some virtual space.   will be demand filled
		 * by kernel_object.
		 */

		if (__predict_true(uvm_map(map, &kva, size, uvm.kernel_object,
		    prefer, 0, UVM_MAPFLAG(UVM_PROT_ALL,
		    UVM_PROT_ALL, UVM_INH_NONE, UVM_ADV_RANDOM, 0)) == 0)) {
			UVMHIST_LOG(maphist,"<- done (kva=0x%lx)", kva,0,0,0);
			return(kva);
		}

		/*
		 * failed.  sleep for a while (on map)
		 */

		UVMHIST_LOG(maphist,"<<<sleeping>>>",0,0,0,0);
		tsleep((caddr_t)map, PVM, "vallocwait", 0);
	}
	/*NOTREACHED*/
}
Example #21
/*
 * (Try to) put the drive online. This is done the first time the
 * drive is opened, or if it has fallen offline.
 */
int
mt_putonline(struct mt_softc *mt)
{
	struct	mscp *mp;
	struct	mscp_softc *mi =
	    device_private(device_parent(mt->mt_dev));

	((volatile struct mt_softc *) mt)->mt_state = MT_OFFLINE;
	mp = mscp_getcp(mi, MSCP_WAIT);
	mp->mscp_opcode = M_OP_ONLINE;
	mp->mscp_unit = mt->mt_hwunit;
	mp->mscp_cmdref = (long)&mt->mt_state;
	*mp->mscp_addr |= MSCP_OWN | MSCP_INT;

	/* Poll away */
	bus_space_read_2(mi->mi_iot, mi->mi_iph, 0);
	if (tsleep(&mt->mt_state, PRIBIO, "mtonline", 240 * hz))
		return MSCP_FAILED;

	if ((volatile int)mt->mt_state != MT_ONLINE)
		return MSCP_FAILED;

	return MSCP_DONE;
}
Example #22
static void
pccard_kthread(void *arg)
{
	struct pccard_softc *self = arg;
	struct pccard_slot *slot = &self->devs[0];

	for (;;) {
		int s = spl2();

		if (slot->flags & SLOT_NEW_CARD_EVENT) {
			slot->flags &= ~SLOT_NEW_CARD_EVENT;
			gayle.intreq = 0xc0;

			/* reset the registers */
			gayle.intreq = GAYLE_INT_IDE | GAYLE_INT_DETECT;
			gayle.pcc_status = GAYLE_CCMEM_WP | GAYLE_CCIO_SPKR;
			gayle.pcc_config = 0;
			pccard_attach_slot(&self->devs[0]);
		}
		splx(s);

		tsleep(slot, PWAIT, "pccthread", hz);
	}
}
Example #23
/*
 * cztty_shutdown:
 *
 *	Shut down a port.
 */
static void
cztty_shutdown(struct cztty_softc *sc)
{
	struct cz_softc *cz = CZTTY_CZ(sc);
	struct tty *tp = sc->sc_tty;
	int s;

	s = spltty();

	/* Clear any break condition set with TIOCSBRK. */
	cztty_break(sc, 0);

	/*
	 * Hang up if necessary.  Wait a bit, so the other side has time to
	 * notice even if we immediately open the port again.
	 */
	if (ISSET(tp->t_cflag, HUPCL)) {
		cztty_modem(sc, 0);
		(void) tsleep(tp, TTIPRI, ttclos, hz);
	}

	/* Disable the channel. */
	cz_wait_pci_doorbell(cz, "czdis");
	CZTTY_CHAN_WRITE(sc, CHNCTL_OP_MODE, C_CH_DISABLE);
	CZ_FWCTL_WRITE(cz, BRDCTL_HCMD_CHANNEL, sc->sc_channel);
	CZ_PLX_WRITE(cz, PLX_PCI_LOCAL_DOORBELL, C_CM_IOCTL);

	if ((--cz->cz_nopenchan == 0) && (cz->cz_ih == NULL)) {
#ifdef CZ_DEBUG
		printf("%s: Disabling polling\n", device_xname(cz->cz_dev));
#endif
		callout_stop(&cz->cz_callout);
	}

	splx(s);
}
Example #24
/*
 * Wait for the specified idle threads to switch once.  This ensures that even
 * preempted threads have cycled through the switch function once,
 * exiting their codepaths.  This allows us to change global pointers
 * with no other synchronization.
 */
int
quiesce_cpus(cpuset_t map, const char *wmesg, int prio)
{
	struct pcpu *pcpu;
	u_int gen[MAXCPU];
	int error;
	int cpu;

	error = 0;
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		gen[cpu] = pcpu->pc_idlethread->td_generation;
	}
	for (cpu = 0; cpu <= mp_maxid; cpu++) {
		if (!CPU_ISSET(cpu, &map) || CPU_ABSENT(cpu))
			continue;
		pcpu = pcpu_find(cpu);
		thread_lock(curthread);
		sched_bind(curthread, cpu);
		thread_unlock(curthread);
		while (gen[cpu] == pcpu->pc_idlethread->td_generation) {
			error = tsleep(quiesce_cpus, prio, wmesg, 1);
			if (error != EWOULDBLOCK)
				goto out;
			error = 0;
		}
	}
out:
	thread_lock(curthread);
	sched_unbind(curthread);
	thread_unlock(curthread);

	return (error);
}
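Concretely, gen[] snapshots each target CPU's idle-thread td_generation, a counter that advances whenever that thread passes through the scheduler. By binding itself to each CPU in turn and sleeping a tick at a time until the snapshot goes stale, the caller proves that the CPU has switched into its idle thread at least once since the snapshot, so anything preempted there has completed its pass through the old code path.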
Example #25
struct nfsrvcache *
nfsrv_lookupcache(struct nfsrv_descript *nd)
{
	struct nfsrvhash	*hash;
	struct nfsrvcache	*rp;

	hash = NFSRCHASH(nd->nd_retxid);
loop:
	LIST_FOREACH(rp, hash, rc_hash) {
		if (nd->nd_retxid == rp->rc_xid &&
		    nd->nd_procnum == rp->rc_proc &&
		    netaddr_match(NETFAMILY(rp), &rp->rc_haddr, nd->nd_nam)) {
			if ((rp->rc_flag & RC_LOCKED)) {
				rp->rc_flag |= RC_WANTED;
				tsleep(rp, PZERO - 1, "nfsrc", 0);
				goto loop;
			}
			rp->rc_flag |= RC_LOCKED;
			return (rp);
		}
	}

	return (NULL);
}
Example #26
static void
close_out_jack(struct umidi_jack *jack)
{
	struct umidi_endpoint *ep;
	int s;
	u_int16_t mask;
	int err;

	if (jack->opened) {
		ep = jack->endpoint;
		mask = 1 << (jack->cable_number);
		s = splusb();
		while ( mask & (ep->this_schedule | ep->next_schedule) ) {
			err = tsleep(ep, PWAIT|PCATCH, "umi dr", mstohz(10));
			if ( err )
				break;
		}
		jack->opened = 0;
		jack->endpoint->num_open--;
		ep->this_schedule &= ~mask;
		ep->next_schedule &= ~mask;
		splx(s);
	}
}
Example #27
/*
 * Obtain a shared lock
 *
 * We do not give pending exclusive locks priority over shared locks as
 * doing so could lead to a deadlock.
 */
void
hammer_lock_sh(struct hammer_lock *lock)
{
	thread_t td = curthread;
	u_int lv;
	u_int nlv;
	const char *ident = "hmrlck";

	KKASSERT(lock->refs);
	for (;;) {
		lv = lock->lockval;

		if ((lv & HAMMER_LOCKF_EXCLUSIVE) == 0) {
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				break;
		} else if (lock->lowner == td) {
			/*
			 * Disallowed case: the thread already holds the
			 * lock exclusively.  Drop into the kernel debugger
			 * for now; a 'cont' there continues with an
			 * exclusive lock.
			 */
			nlv = (lv + 1);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv)) {
				if (hammer_debug_critical)
					Debugger("hammer_lock_sh: holding ex");
				break;
			}
		} else {
			nlv = lv | HAMMER_LOCKF_WANTED;
			++hammer_contention_count;
			tsleep_interlock(&lock->lockval, 0);
			if (atomic_cmpset_int(&lock->lockval, lv, nlv))
				tsleep(&lock->lockval, PINTERLOCKED, ident, 0);
		}
	}
}
Example #28
/* Finish transaction. */
int
tpm_legacy_end(struct tpm_softc *sc, int flag, int rv)
{
	struct timeval tv;
	uint8_t r;
	int to;

	if (rv || flag == UIO_READ)
		bus_space_write_1(sc->sc_batm, sc->sc_bahm, 1, TPM_LEGACY_ABRT);
	else {
		tv.tv_sec = TPM_LEGACY_TMO;
		tv.tv_usec = 0;
		to = tvtohz(&tv) / TPM_LEGACY_SLEEP;
		while(((r = bus_space_read_1(sc->sc_batm, sc->sc_bahm, 1)) &
		    TPM_LEGACY_BUSY) && to--) {
			rv = tsleep(sc, PRIBIO | PCATCH, "legacy_tpm_end",
			    TPM_LEGACY_SLEEP);
			if (rv && rv != EWOULDBLOCK)
				return rv;
		}

#if defined(TPM_DEBUG) && !defined(__FreeBSD__)
		char buf[128];
		snprintb(buf, sizeof(buf), TPM_LEGACY_BITS, r);
		aprint_debug_dev(sc->sc_dev, "%s: bits %s\n",
		    device_xname(sc->sc_dev), buf);
#endif
		if (r & TPM_LEGACY_BUSY)
			return EIO;

		if (r & TPM_LEGACY_RE)
			return EIO;	/* XXX Retry the loop? */
	}

	return rv;
}
Example #29
/*
 * Get a socket structure from our zone, and initialize it.
 * 'waitok' has been implemented for eCos with a [currently] rather
 * fixed strategy: retry a fixed number of times (10), sleeping for at
 * most 2 minutes between attempts.  This seems sufficient for sockets
 * which are tied up in the TCP close process.
 */
struct socket *
soalloc(int waitok)
{
    struct socket *so = NULL;
    int maxtries = waitok ? 10 : 1;

    while (maxtries-- > 0) {
	so = zalloci(socket_zone);
	if (so) {
            /* XXX race condition for reentrant kernel */
            bzero(so, sizeof *so);
            so->so_gencnt = ++so_gencnt;
            so->so_zone = socket_zone;
            TAILQ_INIT(&so->so_aiojobq);
            return so;
	}
        if (waitok) {
            diag_printf("DEBUG: Out of sockets - waiting\n");
            tsleep(socket_zone, PVM|PCATCH, "soalloc", 120*100);
            diag_printf("DEBUG: ... retry sockets\n");
        }
    }
    return so;
}
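The bounded retry-with-timeout shape lifts cleanly out of the kernel. A sketch in portable C, assuming a caller-supplied allocator (one difference worth noting: the tsleep() above can be cut short by a wakeup on socket_zone when a socket is freed, whereas plain nanosleep() always runs to the full timeout):

#include <stddef.h>
#include <time.h>

/* try_alloc is a hypothetical allocator returning NULL when exhausted. */
static void *
alloc_retry(void *(*try_alloc)(void), int waitok)
{
	int tries = waitok ? 10 : 1;
	void *p = NULL;

	while (tries-- > 0 && (p = try_alloc()) == NULL) {
		if (waitok) {
			/* back off, like the 120*100-tick (2 minute) tsleep() */
			struct timespec ts = { .tv_sec = 120, .tv_nsec = 0 };
			nanosleep(&ts, NULL);
		}
	}
	return p;
}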
Example #30
/*
 * Flush the print buffer that our top half uses to provide data to
 * our bottom, interrupt-driven half.
 */
static int
cpi_flush(struct cpi_softc *sc)
{
	int err, s;

	err = 0;
	while (0 < sc->sc_bufbytes) {
		/* Feed the printer a char, if it's ready */
		if ( !cpi_notready(sc)) {
			if (TRACE_WRITE)
				printf("\tcpi_flush() writes %u bytes "
				    "(%lu hard, %lu bytes to port)\n",
				    sc->sc_bufbytes, sc->sc_intcount,
				    sc->sc_bytestoport);
			s = spltty();
			cpi_intr(sc);
			splx(s);
		}
		/* XXX Sure we want to wait forever for the printer? */
		err = tsleep((void *)sc, PZERO | PCATCH,
		    "cpi_flush", (60 * hz));
	}
	return err;
}