static int
fms_intr(void *arg)
{
	struct fms_softc *sc = arg;
#if NMPU > 0
	struct mpu_softc *sc_mpu = device_private(sc->sc_mpu_dev);
#endif
	uint16_t istat;

	mutex_spin_enter(&sc->sc_intr_lock);

	istat = bus_space_read_2(sc->sc_iot, sc->sc_ioh, FM_INTSTATUS);

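	/*
	 * A playback block completed: advance to the next block,
	 * wrapping at the end of the ring, and point the idle half
	 * of the double buffer at it.
	 */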
	if (istat & FM_INTSTATUS_PLAY) {
		if ((sc->sc_play_nextblk += sc->sc_play_blksize) >=
		     sc->sc_play_end)
			sc->sc_play_nextblk = sc->sc_play_start;

		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    sc->sc_play_flip++ & 1 ?
		    FM_PLAY_DMABUF2 : FM_PLAY_DMABUF1, sc->sc_play_nextblk);

		if (sc->sc_pintr)
			sc->sc_pintr(sc->sc_parg);
		else
			printf("unexpected play intr\n");
	}

	if (istat & FM_INTSTATUS_REC) {
		if ((sc->sc_rec_nextblk += sc->sc_rec_blksize) >=
		     sc->sc_rec_end)
			sc->sc_rec_nextblk = sc->sc_rec_start;

		bus_space_write_4(sc->sc_iot, sc->sc_ioh,
		    sc->sc_rec_flip++ & 1 ?
		    FM_REC_DMABUF2 : FM_REC_DMABUF1, sc->sc_rec_nextblk);

		if (sc->sc_rintr)
			sc->sc_rintr(sc->sc_rarg);
		else
			printf("unexpected rec intr\n");
	}

#if NMPU > 0
	if (istat & FM_INTSTATUS_MPU)
		mpu_intr(sc_mpu);
#endif

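	/* Acknowledge the play and record interrupts. */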
	bus_space_write_2(sc->sc_iot, sc->sc_ioh, FM_INTSTATUS,
			  istat & (FM_INTSTATUS_PLAY | FM_INTSTATUS_REC));

	mutex_spin_exit(&sc->sc_intr_lock);

	return 1;
}
Example #2
/*
 * Filter detach method for EVFILT_READ on kqueue descriptor.
 */
static void
filt_kqdetach(struct knote *kn)
{
	struct kqueue *kq;

	kq = ((file_t *)kn->kn_obj)->f_data;

	mutex_spin_enter(&kq->kq_lock);
	SLIST_REMOVE(&kq->kq_sel.sel_klist, kn, knote, kn_selnext);
	mutex_spin_exit(&kq->kq_lock);
}
Example #3
static void
pcppi_bell_callout(void *arg)
{
	struct pcppi_softc *sc = arg;

	mutex_spin_enter(&tty_lock);
	if (sc->sc_timeout != 0) {
		pcppi_bell_stop(sc);
	}
	mutex_spin_exit(&tty_lock);
}
Example #4
static void
tap_kqdetach(struct knote *kn)
{
	struct tap_softc *sc = (struct tap_softc *)kn->kn_hook;

	KERNEL_LOCK(1, NULL);
	mutex_spin_enter(&sc->sc_kqlock);
	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
	mutex_spin_exit(&sc->sc_kqlock);
	KERNEL_UNLOCK_ONE(NULL);
}
Example #5
static int
auich_open(void *addr, int flags)
{
	struct auich_softc *sc;

	sc = (struct auich_softc *)addr;
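	/*
	 * Called with sc_intr_lock held.  The codec lock may sleep,
	 * so drop the spin mutex while taking it.
	 */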
	mutex_spin_exit(&sc->sc_intr_lock);
	sc->codec_if->vtbl->lock(sc->codec_if);
	mutex_spin_enter(&sc->sc_intr_lock);
	return 0;
}
Example #6
void
callout_ack(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	c->c_flags &= ~CALLOUT_INVOKING;
	mutex_spin_exit(lock);
}
Example #7
void
rndsink_schedule(struct rndsink *rndsink)
{

	/* Optimistically check without the lock whether we're queued.  */
	if ((rndsink->rsink_state != RNDSINK_QUEUED) &&
	    (rndsink->rsink_state != RNDSINK_REQUEUED)) {
		mutex_spin_enter(&rndsinks_lock);
		rndsinks_enqueue(rndsink);
		mutex_spin_exit(&rndsinks_lock);
	}
}
Example #8
RTDECL(void) RTSpinlockRelease(RTSPINLOCK Spinlock)
{
    PRTSPINLOCKINTERNAL pThis = (PRTSPINLOCKINTERNAL)Spinlock;
    AssertPtr(pThis);
    Assert(pThis->u32Magic == RTSPINLOCK_MAGIC);

    if (pThis->fFlags & RTSPINLOCK_FLAGS_INTERRUPT_SAFE) {
        mutex_spin_exit(&pThis->pSpinLock);
    } else {
        mutex_exit(&pThis->pSpinLock);
    }
}
Example #9
/*
 * ntp_gettime() - NTP user application interface
 */
void
ntp_gettime(struct ntptimeval *ntv)
{

	mutex_spin_enter(&timecounter_lock);
	nanotime(&ntv->time);
	ntv->maxerror = time_maxerror;
	ntv->esterror = time_esterror;
	ntv->tai = time_tai;
	ntv->time_state = time_state;
	mutex_spin_exit(&timecounter_lock);
}
Example #10
static void
ingenic_rng_get(struct ingenic_rng_softc *sc)
{
	uint32_t data;

	mutex_spin_enter(&sc->sc_intr_lock);
	while (sc->sc_bytes_wanted) {
		bus_space_read_region_4(sc->sc_bst, sc->sc_bsh, 0, &data, 1);
#if 0
		device_printf(sc->sc_dev, "random output: %x\n", data);
#endif
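		/*
		 * Drop the interrupt lock while feeding the sample to
		 * the entropy pool under sc_rnd_lock.
		 */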
		mutex_spin_exit(&sc->sc_intr_lock);
		mutex_spin_enter(&sc->sc_rnd_lock);
		rnd_add_data(&sc->sc_rndsource, &data, sizeof(data),
		    sizeof(data) * NBBY);
		mutex_spin_exit(&sc->sc_rnd_lock);
		mutex_spin_enter(&sc->sc_intr_lock);
		sc->sc_bytes_wanted -= MIN(sc->sc_bytes_wanted, sizeof(data));
	}
	explicit_memset(&data, 0, sizeof(data));
	mutex_spin_exit(&sc->sc_intr_lock);
}
Example #11
static void
s3c2440_i2s_xfer_complete(dmac_xfer_t xfer, void *cookie)
{
	struct s3c2xx0_softc *sc = s3c2xx0_softc; /* Shortcut */
	s3c2440_i2s_buf_t buf = cookie;
	struct s3c2440_i2s_softc *i2s = buf->i2b_parent;

	bus_dmamap_unload(sc->sc_dmat, buf->i2b_dmamap);

	mutex_spin_enter(i2s->sc_intr_lock);
	(buf->i2b_cb)(buf->i2b_cb_cookie);
	mutex_spin_exit(i2s->sc_intr_lock);
}
Example #12
int
lwp_park(clockid_t clock_id, int flags, struct timespec *ts, const void *hint)
{
	sleepq_t *sq;
	kmutex_t *mp;
	wchan_t wchan;
	int timo, error;
	lwp_t *l;

	if (ts != NULL) {
		if ((error = ts2timo(clock_id, flags, ts, &timo, NULL)) != 0)
			return error;
		KASSERT(timo != 0);
	} else {
		timo = 0;
	}

	/* Find and lock the sleep queue. */
	l = curlwp;
	wchan = lwp_park_wchan(l->l_proc, hint);
	sq = sleeptab_lookup(&lwp_park_tab, wchan, &mp);

	/*
	 * Before going the full route and blocking, check to see if an
	 * unpark op is pending.
	 */
	lwp_lock(l);
	if ((l->l_flag & (LW_CANCELLED | LW_UNPARKED)) != 0) {
		l->l_flag &= ~(LW_CANCELLED | LW_UNPARKED);
		lwp_unlock(l);
		mutex_spin_exit(mp);
		return EALREADY;
	}
	lwp_unlock_to(l, mp);
	l->l_biglocks = 0;
	sleepq_enqueue(sq, wchan, "parked", &lwp_park_sobj);
	error = sleepq_block(timo, true);
	switch (error) {
	case EWOULDBLOCK:
		error = ETIMEDOUT;
		break;
	case ERESTART:
		error = EINTR;
		break;
	default:
		/* nothing */
		break;
	}
	return error;
}
Example #13
void
callout_setfunc(callout_t *cs, void (*func)(void *), void *arg)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;

	KASSERT(c->c_magic == CALLOUT_MAGIC);
	KASSERT(func != NULL);

	lock = callout_lock(c);
	c->c_func = func;
	c->c_arg = arg;
	mutex_spin_exit(lock);
}
Example #14
bool
rndsink_request(struct rndsink *rndsink, void *buffer, size_t bytes)
{

	KASSERT(bytes == rndsink->rsink_bytes);

	mutex_spin_enter(&rndsinks_lock);
	const bool full_entropy = rndpool_extract(buffer, bytes);
	if (!full_entropy)
		rndsinks_enqueue(rndsink);
	mutex_spin_exit(&rndsinks_lock);

	return full_entropy;
}
Example #15
bool
callout_active(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & (CALLOUT_PENDING|CALLOUT_FIRED)) != 0);
	mutex_spin_exit(lock);

	return rv;
}
Example #16
bool
callout_invoking(callout_t *cs)
{
	callout_impl_t *c = (callout_impl_t *)cs;
	kmutex_t *lock;
	bool rv;

	KASSERT(c->c_magic == CALLOUT_MAGIC);

	lock = callout_lock(c);
	rv = ((c->c_flags & CALLOUT_INVOKING) != 0);
	mutex_spin_exit(lock);

	return rv;
}
Example #17
static bool
auvia_resume(device_t dv, const pmf_qual_t *qual)
{
	struct auvia_softc *sc = device_private(dv);

	mutex_enter(&sc->sc_lock);
	mutex_spin_enter(&sc->sc_intr_lock);
	auvia_reset_codec(sc);
	DELAY(1000);
	mutex_spin_exit(&sc->sc_intr_lock);
	(sc->codec_if->vtbl->restore_ports)(sc->codec_if);
	mutex_exit(&sc->sc_lock);

	return true;
}
Example #18
static inline kmutex_t *
callout_lock(callout_impl_t *c)
{
	struct callout_cpu *cc;
	kmutex_t *lock;

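	/*
	 * The callout can migrate between CPUs, so retry until the
	 * lock we acquired still belongs to c->c_cpu.
	 */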
	for (;;) {
		cc = c->c_cpu;
		lock = cc->cc_lock;
		mutex_spin_enter(lock);
		if (__predict_true(cc == c->c_cpu))
			return lock;
		mutex_spin_exit(lock);
	}
}
Example #19
int
arch_phys_wc_add(unsigned long base, unsigned long size)
{
#if defined(MTRR)
	struct mtrr *mtrr;
	int n = 1;
	int id;
	int ret;

	mtrr = kmem_alloc(sizeof(*mtrr), KM_SLEEP);
	mtrr->base = base;
	mtrr->len = size;
	mtrr->type = MTRR_TYPE_WC;
	mtrr->flags = MTRR_VALID;

	/* XXX errno NetBSD->Linux */
	ret = -mtrr_set(mtrr, &n, NULL, MTRR_GETSET_KERNEL);
	if (ret) {
		KASSERT(n == 0);
		goto fail0;
	}
	KASSERT(n == 1);

	idr_preload(GFP_KERNEL);
	mutex_spin_enter(&linux_writecomb.lock);
	id = idr_alloc(&linux_writecomb.idr, mtrr, 0, 0, GFP_NOWAIT);
	mutex_spin_exit(&linux_writecomb.lock);
	idr_preload_end();
	if (id < 0)
		goto fail1;

	return id;

fail1:	KASSERT(id < 0);
	mtrr->type = 0;
	mtrr->flags = 0;
	/* XXX errno NetBSD->Linux */
	ret = -mtrr_set(mtrr, &n, NULL, MTRR_GETSET_KERNEL);
	KASSERT(ret == 0);
	KASSERT(n == 1);
	ret = id;
fail0:	KASSERT(ret < 0);
	kmem_free(mtrr, sizeof(*mtrr));
	return ret;
#else
	return -1;
#endif
}
Example #20
/*
 * sysctl helper routine for kern.profiling subtree.  enables/disables
 * kernel profiling and gives out copies of the profiling data.
 */
static int
sysctl_kern_profiling(SYSCTLFN_ARGS)
{
	struct gmonparam *gp = &_gmonparam;
	int error;
	struct sysctlnode node;

	node = *rnode;

	switch (node.sysctl_num) {
	case GPROF_STATE:
		node.sysctl_data = &gp->state;
		break;
	case GPROF_COUNT:
		node.sysctl_data = gp->kcount;
		node.sysctl_size = gp->kcountsize;
		break;
	case GPROF_FROMS:
		node.sysctl_data = gp->froms;
		node.sysctl_size = gp->fromssize;
		break;
	case GPROF_TOS:
		node.sysctl_data = gp->tos;
		node.sysctl_size = gp->tossize;
		break;
	case GPROF_GMONPARAM:
		node.sysctl_data = gp;
		node.sysctl_size = sizeof(*gp);
		break;
	default:
		return (EOPNOTSUPP);
	}

	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return (error);

	if (node.sysctl_num == GPROF_STATE) {
		mutex_spin_enter(&proc0.p_stmutex);
		if (gp->state == GMON_PROF_OFF)
			stopprofclock(&proc0);
		else
			startprofclock(&proc0);
		mutex_spin_exit(&proc0.p_stmutex);
	}

	return (0);
}
Example #21
static void
tickle_tc(void)
{
	if (timer0_count && 
	    timecounter->tc_get_timecount == iomd_timecounter0_get) {
		mutex_spin_enter(&tmr_lock);
		if (timer0_ticked)
			timer0_ticked    = 0;
		else {
			timer0_offset   += timer0_count;
			timer0_lastcount = 0;
		}
		mutex_spin_exit(&tmr_lock);
	}
}
Example #22
static void
ingenic_rng_get_cb(size_t bytes_wanted, void *priv)
{
	struct ingenic_rng_softc * const sc = priv;

	mutex_spin_enter(&sc->sc_intr_lock);
	if (sc->sc_bytes_wanted == 0) {
		softint_schedule(sc->sc_sih);
	}
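	/* Saturate rather than overflow the byte count. */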
	if (bytes_wanted > (UINT_MAX - sc->sc_bytes_wanted)) {
		sc->sc_bytes_wanted = UINT_MAX;
	} else {
		sc->sc_bytes_wanted += bytes_wanted;
	}
	mutex_spin_exit(&sc->sc_intr_lock);
}
Example #23
int
wss_intr(void *addr)
{
	struct ad1848_isa_softc *sc;
	int handled;

	sc = addr;

	mutex_spin_enter(&sc->sc_ad1848.sc_intr_lock);

	handled = ad1848_isa_intr(sc);

	mutex_spin_exit(&sc->sc_ad1848.sc_intr_lock);

	return handled;
}
Example #24
static void
ingenic_rng_get(size_t bytes_wanted, void *priv)
{
	struct ingenic_rng_softc * const sc = priv;
	uint32_t data;

	mutex_spin_enter(&sc->sc_lock);
	while (bytes_wanted) {
		data = bus_space_read_4(sc->sc_bst, sc->sc_bsh, 0);
		delay(1);
		rnd_add_data_sync(&sc->sc_rndsource, &data, sizeof(data),
		    sizeof(data) * NBBY);
		bytes_wanted -= MIN(bytes_wanted, sizeof(data));
	}
	explicit_memset(&data, 0, sizeof(data));
	mutex_spin_exit(&sc->sc_lock);
}
Example #25
/*ARGSUSED*/
static int
filt_kqueue(struct knote *kn, long hint)
{
	struct kqueue *kq;
	int rv;

	kq = ((file_t *)kn->kn_obj)->f_data;

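	/* A NOTE_SUBMIT hint means the caller already holds kq_lock. */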
	if (hint != NOTE_SUBMIT)
		mutex_spin_enter(&kq->kq_lock);
	kn->kn_data = kq->kq_count;
	rv = (kn->kn_data > 0);
	if (hint != NOTE_SUBMIT)
		mutex_spin_exit(&kq->kq_lock);

	return rv;
}
Example #26
/*
 * Fill the buffer with as much entropy as we can.  Return true if it
 * has full entropy and false if not.
 */
static bool
rndpool_extract(void *buffer, size_t bytes)
{
	const size_t extracted = rnd_extract_data(buffer, bytes,
	    RND_EXTRACT_GOOD);

	if (extracted < bytes) {
		(void)rnd_extract_data((uint8_t *)buffer + extracted,
		    bytes - extracted, RND_EXTRACT_ANY);
		mutex_spin_enter(&rndpool_mtx);
		rnd_getmore(bytes - extracted);
		mutex_spin_exit(&rndpool_mtx);
		return false;
	}

	return true;
}
Example #27
STATIC void
gtmpscshutdown(struct gtmpsc_softc *sc)
{
	struct tty *tp;

#ifdef KGDB
	if ((sc->sc_flags & GTMPSCF_KGDB) != 0)
		return;
#endif
	tp = sc->sc_tty;
	mutex_spin_enter(&sc->sc_lock);
	/* Fake carrier off */
	(void) (*tp->t_linesw->l_modem)(tp, 0);
	sdma_imask &= ~SDMA_INTR_RXBUF(sc->sc_unit);
	gt_sdma_imask(device_parent(sc->sc_dev), sdma_imask);
	mutex_spin_exit(&sc->sc_lock);
}
Example #28
void
audioamd_onopen(struct am7930_softc *sc)
{
	struct audioamd_softc *mdsc;

	mdsc = (struct audioamd_softc *)sc;

	/* reset pdma state */
	mutex_spin_enter(&mdsc->sc_lock);
	mdsc->sc_rintr = 0;
	mdsc->sc_rarg = 0;
	mdsc->sc_pintr = 0;
	mdsc->sc_parg = 0;
	mdsc->sc_au.au_rdata = 0;
	mdsc->sc_au.au_pdata = 0;
	mutex_spin_exit(&mdsc->sc_lock);
}
Example #29
STATIC int
gtmpscparam(struct tty *tp, struct termios *t)
{
	struct gtmpsc_softc *sc =
	    device_lookup_private(&gtmpsc_cd, GTMPSCUNIT(tp->t_dev));

	/* Check requested parameters. */
	if (compute_cdv(t->c_ospeed) < 0)
		return EINVAL;
	if (t->c_ispeed && t->c_ispeed != t->c_ospeed)
		return EINVAL;

	/*
	 * If there were no changes, don't do anything.  This avoids dropping
	 * input and improves performance when all we did was frob things like
	 * VMIN and VTIME.
	 */
	if (tp->t_ospeed == t->c_ospeed &&
	    tp->t_cflag == t->c_cflag)
		return 0;

	mutex_spin_enter(&sc->sc_lock);

	/* And copy to tty. */
	tp->t_ispeed = 0;
	tp->t_ospeed = t->c_ospeed;
	tp->t_cflag = t->c_cflag;

	sc->sc_baudrate = t->c_ospeed;

	if (!sc->sc_heldchange) {
		if (sc->sc_tx_busy) {
			sc->sc_heldtbc = sc->sc_tbc;
			sc->sc_tbc = 0;
			sc->sc_heldchange = 1;
		} else
			gtmpsc_loadchannelregs(sc);
	}

	mutex_spin_exit(&sc->sc_lock);

	/* Fake carrier on */
	(void) (*tp->t_linesw->l_modem)(tp, 1);

	return 0;
}
Example #30
static bool
yds_suspend(device_t dv, const pmf_qual_t *qual)
{
	struct yds_softc *sc = device_private(dv);
	pci_chipset_tag_t pc = sc->sc_pc;
	pcitag_t tag = sc->sc_pcitag;

	mutex_enter(&sc->sc_lock);
	mutex_spin_enter(&sc->sc_intr_lock);
	sc->sc_dsctrl = pci_conf_read(pc, tag, YDS_PCI_DSCTRL);
	sc->sc_legacy = pci_conf_read(pc, tag, YDS_PCI_LEGACY);
	sc->sc_ba[0] = pci_conf_read(pc, tag, YDS_PCI_FM_BA);
	sc->sc_ba[1] = pci_conf_read(pc, tag, YDS_PCI_MPU_BA);
	mutex_spin_exit(&sc->sc_intr_lock);
	mutex_exit(&sc->sc_lock);

	return true;
}