Example #1
static void
vstate_assert_change(vnode_t *vp, enum vnode_state from, enum vnode_state to,
    const char *func, int line)
{
	vnode_impl_t *node = VNODE_TO_VIMPL(vp);

	KASSERTMSG(mutex_owned(vp->v_interlock), "at %s:%d", func, line);
	if (from == VS_LOADING)
		KASSERTMSG(mutex_owned(&vcache.lock), "at %s:%d", func, line);

	if (from == VS_MARKER)
		vnpanic(vp, "from is %s at %s:%d",
		    vstate_name(from), func, line);
	if (to == VS_MARKER)
		vnpanic(vp, "to is %s at %s:%d",
		    vstate_name(to), func, line);
	if (node->vi_state != from)
		vnpanic(vp, "from is %s, expected %s at %s:%d\n",
		    vstate_name(node->vi_state), vstate_name(from), func, line);

	node->vi_state = to;
	if (from == VS_LOADING)
		cv_broadcast(&vcache.cv);
	if (to == VS_ACTIVE || to == VS_RECLAIMED)
		cv_broadcast(&vp->v_cv);
}
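The func and line arguments are clearly meant to record the call site; below is a minimal sketch of a wrapper macro that would supply them automatically (the macro name is an assumption, not necessarily what vfs_vnode.c actually uses):

/*
 * Hypothetical wrapper: passes the caller's location to the diagnostics
 * above, so a call site simply writes, with vp->v_interlock held:
 *
 *	VSTATE_CHANGE(vp, VS_BLOCKED, VS_RECLAIMED);
 */
#define VSTATE_CHANGE(vp, from, to) \
	vstate_assert_change((vp), (from), (to), __func__, __LINE__)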
Example #2
static void
xenevt_free(struct xenevt_d *d)
{
	int i;
	KASSERT(mutex_owned(&devevent_lock));
	KASSERT(mutex_owned(&d->lock));

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (devevent[i] == d) {
			evtchn_op_t op = { .cmd = 0 };
			int error;

			hypervisor_mask_event(i);
			xen_atomic_clear_bit(&d->ci->ci_evtmask[0], i);
			devevent[i] = NULL;

			op.cmd = EVTCHNOP_close;
			op.u.close.port = i;
			if ((error = HYPERVISOR_event_channel_op(&op))) {
				printf("xenevt_fclose: error %d from "
				    "hypervisor\n", -error);
			}
		}
	}
	mutex_exit(&d->lock);
	seldestroy(&d->sel);
	cv_destroy(&d->cv);
	mutex_destroy(&d->lock);
	free(d, M_DEVBUF);
}
Example #3
/*
 * Support functions.
 */
void
efe_init(efe_t *efep)
{
	uint32_t val;

	ASSERT(mutex_owned(&efep->efe_intrlock));
	ASSERT(mutex_owned(&efep->efe_txlock));

	efe_reset(efep);

	val = GENCTL_ONECOPY | GENCTL_RFT_128 | GENCTL_MRM;
#ifdef _BIG_ENDIAN
	val |= GENCTL_BE;
#endif	/* _BIG_ENDIAN */

	PUTCSR(efep, CSR_GENCTL, val);
	PUTCSR(efep, CSR_PBLCNT, BURSTLEN);

	efe_init_rx_ring(efep);
	efe_init_tx_ring(efep);

	efe_setaddr(efep, efep->efe_macaddr);

	if (efep->efe_promisc) {
		efe_setmchash(efep, efe_mchash_promisc);
	} else {
		efe_setmchash(efep, efep->efe_mchash);
	}
}
/*
 * Add an item to the syncer work queue.
 */
static void
vn_syncer_add1(struct vnode *vp, int delayx)
{
	synclist_t *slp;

	KASSERT(mutex_owned(&syncer_data_lock));

	if (vp->v_iflag & VI_ONWORKLST) {
		/*
		 * Remove in order to adjust the position of the vnode.
		 * Note: called from sched_sync(), which will not hold
		 * interlock, therefore we cannot modify v_iflag here.
		 */
		slp = &syncer_workitem_pending[vp->v_synclist_slot];
		TAILQ_REMOVE(slp, vp, v_synclist);
	} else {
		KASSERT(mutex_owned(vp->v_interlock));
		vp->v_iflag |= VI_ONWORKLST;
	}

	if (delayx > syncer_maxdelay - 2)
		delayx = syncer_maxdelay - 2;
	vp->v_synclist_slot = (syncer_delayno + delayx) % syncer_last;

	slp = &syncer_workitem_pending[vp->v_synclist_slot];
	TAILQ_INSERT_TAIL(slp, vp, v_synclist);
}
Example #5
static void
pcn_stopall(pcn_t *pcnp)
{
	ASSERT(mutex_owned(&pcnp->pcn_intrlock));
	ASSERT(mutex_owned(&pcnp->pcn_xmtlock));

	pcn_stop_timer(pcnp);
	PCN_CSR_SETBIT(pcnp, PCN_CSR_CSR, PCN_CSR_STOP);
}
Example #6
int
uvm_loanbreak_anon(struct vm_anon *anon, struct uvm_object *uobj)
{
	struct vm_page *pg;

	KASSERT(mutex_owned(anon->an_lock));
	KASSERT(uobj == NULL || mutex_owned(uobj->vmobjlock));

	/* get new un-owned replacement page */
	pg = uvm_pagealloc(NULL, 0, NULL, 0);
	if (pg == NULL) {
		return ENOMEM;
	}

	/* copy old -> new */
	uvm_pagecopy(anon->an_page, pg);

	/* force reload */
	pmap_page_protect(anon->an_page, VM_PROT_NONE);
	mutex_enter(&uvm_pageqlock);	  /* KILL loan */

	anon->an_page->uanon = NULL;
	/* in case we owned */
	anon->an_page->pqflags &= ~PQ_ANON;

	if (uobj) {
		/* if we were receiver of loan */
		anon->an_page->loan_count--;
	} else {
		/*
		 * we were the lender (A->K); need to remove the page from
		 * pageq's.
		 */
		uvm_pagedequeue(anon->an_page);
	}

	if (uobj) {
		mutex_exit(uobj->vmobjlock);
	}

	/* install new page in anon */
	anon->an_page = pg;
	pg->uanon = anon;
	pg->pqflags |= PQ_ANON;

	uvm_pageactivate(pg);
	mutex_exit(&uvm_pageqlock);

	pg->flags &= ~(PG_BUSY|PG_FAKE);
	UVM_PAGE_OWN(pg, NULL);

	/* done! */

	return 0;
}
Example #7
void
efe_reset(efe_t *efep)
{
	ASSERT(mutex_owned(&efep->efe_intrlock));
	ASSERT(mutex_owned(&efep->efe_txlock));

	PUTCSR(efep, CSR_GENCTL, GENCTL_RESET);
	drv_usecwait(RESET_DELAY);

	/* Assert internal clock source (AN 7.15) */
	for (int i = 0; i < RESET_TEST_CYCLES; ++i) {
		PUTCSR(efep, CSR_TEST, TEST_CLOCK);
	}
}
Example #8
/*
 * Update the disk quota in the quota file.
 */
int
lfs_dq1sync(struct vnode *vp, struct dquot *dq)
{
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;

	if (dq == NODQUOT)
		panic("dq1sync: dquot");
	KASSERT(mutex_owned(&dq->dq_interlock));
	if ((dq->dq_flags & DQ_MOD) == 0)
		return (0);
	if ((dqvp = dq->dq_ump->um_quotas[dq->dq_type]) == NULLVP)
		panic("dq1sync: file");
	KASSERT(dqvp != vp);
	vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (void *)&dq->dq_un.dq1_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(dq->dq_id * sizeof (struct dqblk));
	auio.uio_rw = UIO_WRITE;
	UIO_SETUP_SYSSPACE(&auio);
	error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
	if (auio.uio_resid && error == 0)
		error = EIO;
	dq->dq_flags &= ~DQ_MOD;
	VOP_UNLOCK(dqvp);
	return (error);
}
Example #9
void
puffs_mp_reference(struct puffs_mount *pmp)
{

	KASSERT(mutex_owned(&pmp->pmp_lock));
	pmp->pmp_refcount++;
}
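A minimal caller sketch of the contract asserted here: the reference count is only touched with pmp_lock held. puffs_mp_release() is assumed to be the matching decrement; treat that name as an assumption.

static void
example_use_mount(struct puffs_mount *pmp)
{
	/* Take a reference so the mount cannot be torn down under us. */
	mutex_enter(&pmp->pmp_lock);
	puffs_mp_reference(pmp);
	mutex_exit(&pmp->pmp_lock);

	/* ... operate on the mount ... */

	/* Drop the reference under the same lock. */
	mutex_enter(&pmp->pmp_lock);
	puffs_mp_release(pmp);		/* assumed counterpart */
	mutex_exit(&pmp->pmp_lock);
}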
static boolean_t
rge_factotum_stall_check(rge_t *rgep)
{
	uint32_t dogval;

	ASSERT(mutex_owned(rgep->genlock));

	/*
	 * Specific check for Tx stall ...
	 *
	 * The 'watchdog' counter is incremented whenever a packet
	 * is queued, reset to 1 when some (but not all) buffers
	 * are reclaimed, reset to 0 (disabled) when all buffers
	 * are reclaimed, and shifted left here.  If it exceeds the
	 * threshold value, the chip is assumed to have stalled and
	 * is put into the ERROR state.  The factotum will then reset
	 * it on the next pass.
	 *
	 * All of which should ensure that we don't get into a state
	 * where packets are left pending indefinitely!
	 */
	if (rgep->resched_needed)
		(void) ddi_intr_trigger_softint(rgep->resched_hdl, NULL);
	dogval = rge_atomic_shl32(&rgep->watchdog, 1);
	if (dogval < rge_watchdog_count)
		return (B_FALSE);

	RGE_REPORT((rgep, "Tx stall detected, watchdog code 0x%x", dogval));
	return (B_TRUE);
}
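A rough, self-contained illustration of the arithmetic described in the comment above (plain C, with a made-up threshold standing in for rge_watchdog_count): a watchdog value that stays nonzero doubles on every factotum pass, so a genuine stall crosses any sensible threshold within a handful of passes.

#include <stdint.h>
#include <stdio.h>

#define EXAMPLE_WATCHDOG_THRESHOLD	8	/* stand-in for rge_watchdog_count */

int
main(void)
{
	uint32_t watchdog = 1;	/* as if set when buffers are partially reclaimed */
	int pass = 0;

	/* Each iteration models the rge_atomic_shl32(&rgep->watchdog, 1) above. */
	while (watchdog < EXAMPLE_WATCHDOG_THRESHOLD) {
		watchdog <<= 1;
		pass++;
		printf("pass %d: watchdog = %u\n", pass, watchdog);
	}
	printf("a stall would be reported after %d passes\n", pass);
	return 0;
}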
Example #11
static struct cac_ccb *
cac_l0_completed(struct cac_softc *sc)
{
	struct cac_ccb *ccb;
	paddr_t off;

	KASSERT(mutex_owned(&sc->sc_mutex));

	if ((off = cac_inl(sc, CAC_REG_DONE_FIFO)) == 0)
		return (NULL);

	if ((off & 3) != 0)
		aprint_error_dev(sc->sc_dev, "failed command list returned: %lx\n",
		    (long)off);

	off = (off & ~3) - sc->sc_ccbs_paddr;
	ccb = (struct cac_ccb *)((char *)sc->sc_ccbs + off);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, off, sizeof(struct cac_ccb),
	    BUS_DMASYNC_POSTWRITE | BUS_DMASYNC_POSTREAD);

	if ((off & 3) != 0 && ccb->ccb_req.error == 0)
		ccb->ccb_req.error = CAC_RET_CMD_REJECTED;

	return (ccb);
}
Example #12
void
efe_init_tx_ring(efe_t *efep)
{
	efe_ring_t *rp;

	ASSERT(mutex_owned(&efep->efe_txlock));

	rp = efep->efe_tx_ring;

	for (int i = 0; i < DESCLEN(rp); ++i) {
		efe_desc_t *dp = GETDESC(rp, i);
		efe_buf_t *bp = GETBUF(rp, i);

		PUTDESC16(rp, &dp->d_status, 0);
		PUTDESC16(rp, &dp->d_len, 0);
		PUTDESC32(rp, &dp->d_bufaddr, BUFADDR(bp));
		PUTDESC16(rp, &dp->d_buflen, BUFLEN(bp));
		PUTDESC16(rp, &dp->d_control, 0);
		PUTDESC32(rp, &dp->d_next, NEXTDESCADDR(rp, i));

		SYNCDESC(rp, i, DDI_DMA_SYNC_FORDEV);
	}

	efep->efe_tx_desc = 0;
	efep->efe_tx_sent = 0;

	PUTCSR(efep, CSR_PTCDAR, DESCADDR(rp, 0));
}
Example #13
/*
 * ehci_insert_isoc_req:
 *
 * Insert an isochronous request into the Host Controller's
 * isochronous list.
 */
int
ehci_insert_isoc_req(
	ehci_state_t			*ehcip,
	ehci_pipe_private_t		*pp,
	ehci_isoc_xwrapper_t		*itw,
	usb_flags_t			usb_flags)
{
	int			error;

	USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
	    "ehci_insert_isoc_req: flags = 0x%x port status = 0x%x",
	    usb_flags, itw->itw_port_status);

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	ASSERT(itw->itw_curr_xfer_reqp != NULL);
	ASSERT(itw->itw_curr_xfer_reqp->isoc_pkt_descr != NULL);

	/*
	 * Save address of first usb isochronous packet descriptor.
	 */
	itw->itw_curr_isoc_pktp = itw->itw_curr_xfer_reqp->isoc_pkt_descr;

	if (itw->itw_port_status == USBA_HIGH_SPEED_DEV) {
		error = USB_NOT_SUPPORTED;
	} else {
		error = ehci_insert_sitd_req(ehcip, pp, itw, usb_flags);
	}

	return (error);
}
/*
 * The interrupt flavor acquires spl and lock once and releases at the end,
 * as it expects to write only one byte or message. The interface convention
 * is that if hw_if->output returns 0, it has initiated transmission and the
 * completion interrupt WILL be forthcoming; if it has not returned 0, NO
 * interrupt will be forthcoming, and if it returns EINPROGRESS it wants
 * another byte right away.
 */
static int
midi_intr_out(struct midi_softc *sc)
{
	struct midi_buffer *mb;
	int error, msglen;
	MIDI_BUF_DECLARE(idx);
	MIDI_BUF_DECLARE(buf);
	int armed = 0;

	KASSERT(mutex_owned(sc->lock));

	error = 0;
	mb = &sc->outbuf;

	MIDI_BUF_CONSUMER_INIT(mb,idx);
	MIDI_BUF_CONSUMER_INIT(mb,buf);
	
	while (idx_cur != idx_lim) {
		if (sc->hw_if_ext) {
			error = midi_msg_out(sc, &idx_cur, &idx_lim,
			    &buf_cur, &buf_lim);
			if (!error ) /* no EINPROGRESS from extended hw_if */
				armed = 1;
			break;
		}
		/* or, lacking hw_if_ext ... */
		msglen = MB_IDX_LEN(*idx_cur);
		error = sc->hw_if->output(sc->hw_hdl, *buf_cur);
		if (error &&  error != EINPROGRESS)
			break;
		++ buf_cur;
		MIDI_BUF_WRAP(buf);
		-- msglen;
		if (msglen)
			*idx_cur = PACK_MB_IDX(MB_IDX_CAT(*idx_cur),msglen);
		else {
			++ idx_cur;
			MIDI_BUF_WRAP(idx);
		}
		if (!error) {
			armed = 1;
			break;
		}
	}
	MIDI_BUF_CONSUMER_WBACK(mb,idx);
	MIDI_BUF_CONSUMER_WBACK(mb,buf);
	if (!armed) {
		sc->pbus = 0;
		callout_schedule(&sc->xmt_asense_co, MIDI_XMT_ASENSE_PERIOD);
	}
	cv_broadcast(&sc->wchan);
	selnotify(&sc->wsel, 0, NOTE_SUBMIT);
	if (sc->async) {
		softint_schedule(sc->sih);
	}
	if (error) {
		DPRINTF(("midi_intr_output error %d\n", error));
	}
	return error;
}
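To make the hw_if->output convention described in the comment before midi_intr_out() concrete, here is a hypothetical driver-side implementation. The softc and its field are invented for the sketch; only the return-value contract is taken from the comment above (EBUSY is assumed to be available via the kernel's errno definitions).

/* Hypothetical softc, invented for the sketch. */
struct example_softc {
	int	sc_tx_busy;
};

static int
example_midi_output(void *hdl, int byte)
{
	struct example_softc *sc = hdl;

	if (sc->sc_tx_busy)
		return EBUSY;	/* nothing initiated: no completion interrupt */

	/* ... write 'byte' to the transmit register here ... */
	sc->sc_tx_busy = 1;	/* transmission started */

	/*
	 * A device that could accept another byte immediately would
	 * return EINPROGRESS instead.
	 */
	return 0;		/* completion interrupt will be forthcoming */
}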
Example #15
static int
unionfs_getpages(void *v)
{
	struct vop_getpages_args /* {
		struct vnode *a_vp;
		voff_t a_offset;
		struct vm_page **a_m;
		int *a_count;
		int a_centeridx;
		vm_prot_t a_access_type;
		int a_advice;
		int a_flags;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp, *tvp;
	struct unionfs_node *unp;

	KASSERT(mutex_owned(vp->v_interlock));

	unp = VTOUNIONFS(vp);
	tvp = (unp->un_uppervp != NULLVP ? unp->un_uppervp : unp->un_lowervp);
	KASSERT(tvp->v_interlock == vp->v_interlock);

	if (ap->a_flags & PGO_LOCKED) {
		return EBUSY;
	}
	return VOP_GETPAGES(tvp, ap->a_offset, ap->a_m, ap->a_count,
	    ap->a_centeridx, ap->a_access_type, ap->a_advice, ap->a_flags);
}
Example #16
/*
 * chfs_scan_make_vnode_cache - makes a new vnode cache during scan
 * This function returns a vnode cache belonging to @vno.
 */
struct chfs_vnode_cache *
chfs_scan_make_vnode_cache(struct chfs_mount *chmp, ino_t vno)
{
	struct chfs_vnode_cache *vc;

	KASSERT(mutex_owned(&chmp->chm_lock_vnocache));

	/* vnode cache already exists */
	vc = chfs_vnode_cache_get(chmp, vno);
	if (vc) {
		return vc;
	}

	/* update max vnode number if needed */
	if (vno > chmp->chm_max_vno) {
		chmp->chm_max_vno = vno;
	}

	/* create new vnode cache */
	vc = chfs_vnode_cache_alloc(vno);

	chfs_vnode_cache_add(chmp, vc);

	if (vno == CHFS_ROOTINO) {
		vc->nlink = 2;
		vc->pvno = CHFS_ROOTINO;
		vc->state = VNO_STATE_CHECKEDABSENT;
	}

	return vc;
}
Example #17
static void
pcn_startall(pcn_t *pcnp)
{
	ASSERT(mutex_owned(&pcnp->pcn_intrlock));
	ASSERT(mutex_owned(&pcnp->pcn_xmtlock));

	(void) pcn_initialize(pcnp, B_FALSE);

	/* Start chip and enable interrupts */
	PCN_CSR_SETBIT(pcnp, PCN_CSR_CSR, PCN_CSR_START|PCN_CSR_INTEN);

	pcn_start_timer(pcnp);

	if (IS_RUNNING(pcnp))
		mac_tx_update(pcnp->pcn_mh);
}
Example #18
/*
 * amap_share_protect: change protection of anons in a shared amap
 *
 * for shared amaps, given the current data structure layout, it is
 * not possible for us to directly locate all maps referencing the
 * shared anon (to change the protection).  in order to protect data
 * in shared maps we use pmap_page_protect().  [this is useful for IPC
 * mechanisms like map entry passing that may want to write-protect
 * all mappings of a shared amap.]  we traverse am_anon or am_slots
 * depending on the current state of the amap.
 *
 * => entry's map and amap must be locked by the caller
 */
void
amap_share_protect(struct vm_map_entry *entry, vm_prot_t prot)
{
	struct vm_amap *amap = entry->aref.ar_amap;
	int slots, lcv, slot, stop;

	KASSERT(mutex_owned(&amap->am_l));

	AMAP_B2SLOT(slots, (entry->end - entry->start));
	stop = entry->aref.ar_pageoff + slots;

	if (slots < amap->am_nused) {
		/* cheaper to traverse am_anon */
		for (lcv = entry->aref.ar_pageoff ; lcv < stop ; lcv++) {
			if (amap->am_anon[lcv] == NULL)
				continue;
			if (amap->am_anon[lcv]->an_page != NULL)
				pmap_page_protect(amap->am_anon[lcv]->an_page,
						  prot);
		}
		return;
	}

	/* cheaper to traverse am_slots */
	for (lcv = 0 ; lcv < amap->am_nused ; lcv++) {
		slot = amap->am_slots[lcv];
		if (slot < entry->aref.ar_pageoff || slot >= stop)
			continue;
		if (amap->am_anon[slot]->an_page != NULL)
			pmap_page_protect(amap->am_anon[slot]->an_page, prot);
	}
}
Example #19
void
_kernel_unlock(int nlocks, int *countp)
{

	if (!mutex_owned(&rump_giantlock)) {
		KASSERT(nlocks == 0);
		if (countp)
			*countp = 0;
		return;
	}

	if (countp)
		*countp = lockcnt;
	if (nlocks == 0)
		nlocks = lockcnt;
	if (nlocks == -1) {
		KASSERT(lockcnt == 1);
		nlocks = 1;
	}
	KASSERT(nlocks <= lockcnt);
	while (nlocks--) {
		lockcnt--;
		mutex_exit(&rump_giantlock);
	}
}
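A typical usage sketch for _kernel_unlock(), assuming _kernel_lock(int) is the re-acquire counterpart in this rump build (treat that pairing as an assumption): drop every hold of the giant lock around a blocking operation, then restore the exact count afterwards.

static void
example_drop_giant_around_sleep(void)
{
	int nlocks;

	_kernel_unlock(0, &nlocks);	/* nlocks == 0 means "release all holds" */
	/* ... sleep or call out without rump_giantlock held ... */
	_kernel_lock(nlocks);		/* reacquire the same number of holds */
}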
static int
acpicpu_cstate_latency(struct acpicpu_softc *sc)
{
	static const uint32_t cs_factor = 3;
	struct acpicpu_cstate *cs;
	int i;

	KASSERT(mutex_owned(&sc->sc_mtx) != 0);

	for (i = cs_state_max; i > 0; i--) {

		cs = &sc->sc_cstate[i];

		if (__predict_false(cs->cs_method == 0))
			continue;

		/*
		 * Choose a state if we have previously slept
		 * longer than the worst case latency of the
		 * state times an arbitrary multiplier.
		 */
		if (sc->sc_cstate_sleep > cs->cs_latency * cs_factor)
			return i;
	}

	return ACPI_STATE_C1;
}
Example #21
void
semundo_clear(int semid, int semnum)
{
	struct sem_undo *suptr;
	struct sem_undo_entry *sunptr, *sunend;

	KASSERT(mutex_owned(&semlock));

	for (suptr = semu_list; suptr != NULL; suptr = suptr->un_next)
		for (sunptr = &suptr->un_ent[0],
		    sunend = sunptr + suptr->un_cnt; sunptr < sunend;) {
			if (sunptr->un_id == semid) {
				if (semnum == -1 || sunptr->un_num == semnum) {
					suptr->un_cnt--;
					sunend--;
					if (sunptr != sunend)
						*sunptr = *sunend;
					if (semnum != -1)
						break;
					else
						continue;
				}
			}
			sunptr++;
		}
}
static enum ioc_reply
rge_diag_ioctl(rge_t *rgep, int cmd, mblk_t *mp, struct iocblk *iocp)
{
	ASSERT(mutex_owned(rgep->genlock));

	switch (cmd) {
	default:
		/* NOTREACHED */
		rge_error(rgep, "rge_diag_ioctl: invalid cmd 0x%x", cmd);
		return (IOC_INVAL);

	case RGE_DIAG:
		/*
		 * Currently a no-op
		 */
		return (IOC_ACK);

	case RGE_PEEK:
	case RGE_POKE:
		return (rge_pp_ioctl(rgep, cmd, mp, iocp));

	case RGE_PHY_RESET:
		return (IOC_RESTART_ACK);

	case RGE_SOFT_RESET:
	case RGE_HARD_RESET:
		/*
		 * Reset and reinitialise the 570x hardware
		 */
		rge_restart(rgep);
		return (IOC_ACK);
	}

	/* NOTREACHED */
}
Example #23
/*
 * ehci_wait_for_isoc_completion:
 *
 * Wait for all completed transfers to be processed and their results
 * sent upstream.
 */
static void
ehci_wait_for_isoc_completion(
	ehci_state_t		*ehcip,
	ehci_pipe_private_t	*pp)
{
	clock_t			xfer_cmpl_time_wait;

	ASSERT(mutex_owned(&ehcip->ehci_int_mutex));

	if (pp->pp_itw_head == NULL) {

		return;
	}

	/* Get the number of clock ticks to wait */
	xfer_cmpl_time_wait = drv_usectohz(EHCI_XFER_CMPL_TIMEWAIT * 1000000);

	(void) cv_timedwait(&pp->pp_xfer_cmpl_cv,
	    &ehcip->ehci_int_mutex,
	    ddi_get_lbolt() + xfer_cmpl_time_wait);

	if (pp->pp_itw_head) {
		USB_DPRINTF_L2(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
		    "ehci_wait_for_isoc_completion: "
		    "No transfers completion confirmation received");
	}
}
Example #24
static int
e_devid_do_discovery(void)
{
	ASSERT(mutex_owned(&devid_discovery_mutex));

	if (i_ddi_io_initialized() == 0) {
		if (devid_discovery_boot > 0) {
			devid_discovery_boot--;
			return (1);
		}
	} else {
		if (devid_discovery_postboot_always > 0)
			return (1);
		if (devid_discovery_postboot > 0) {
			devid_discovery_postboot--;
			return (1);
		}
		if (devid_discovery_secs > 0) {
			if ((ddi_get_lbolt() - devid_last_discovery) >
			    drv_usectohz(devid_discovery_secs * MICROSEC)) {
				return (1);
			}
		}
	}

	DEVID_LOG_DISC((CE_CONT, "devid_discovery: no discovery\n"));
	return (0);
}
Example #25
File: zvol.c Project: alek-p/zfs
#ifdef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
static void
#else
static int
#endif
zvol_release(struct gendisk *disk, fmode_t mode)
{
	zvol_state_t *zv = disk->private_data;
	int drop_mutex = 0;

	ASSERT(zv && zv->zv_open_count > 0);

	if (!mutex_owned(&zvol_state_lock)) {
		mutex_enter(&zvol_state_lock);
		drop_mutex = 1;
	}

	zv->zv_open_count--;
	if (zv->zv_open_count == 0)
		zvol_last_close(zv);

	if (drop_mutex)
		mutex_exit(&zvol_state_lock);

#ifndef HAVE_BLOCK_DEVICE_OPERATIONS_RELEASE_VOID
	return (0);
#endif
}
static void
wusb_df_pm_busy_component(wusb_df_state_t *wusb_dfp)
{
	ASSERT(!mutex_owned(&wusb_dfp->wusb_df_mutex));

	mutex_enter(&wusb_dfp->wusb_df_mutex);
	if (wusb_dfp->wusb_df_pm == NULL) {
		USB_DPRINTF_L4(PRINT_MASK_PM, wusb_dfp->wusb_df_log_hdl,
		    "wusb_df_pm_busy_component: pm = NULL");
		goto done;
	}

	wusb_dfp->wusb_df_pm->wusb_df_pm_busy++;
	USB_DPRINTF_L4(PRINT_MASK_PM, wusb_dfp->wusb_df_log_hdl,
	    "wusb_df_pm_busy_component: %d",
	    wusb_dfp->wusb_df_pm->wusb_df_pm_busy);

	mutex_exit(&wusb_dfp->wusb_df_mutex);

	if (pm_busy_component(wusb_dfp->wusb_df_dip, 0) != DDI_SUCCESS) {
		mutex_enter(&wusb_dfp->wusb_df_mutex);
		wusb_dfp->wusb_df_pm->wusb_df_pm_busy--;

		USB_DPRINTF_L4(PRINT_MASK_PM, wusb_dfp->wusb_df_log_hdl,
		    "wusb_df_pm_busy_component: %d",
		    wusb_dfp->wusb_df_pm->wusb_df_pm_busy);
		mutex_exit(&wusb_dfp->wusb_df_mutex);
	}
	return;

done:
	mutex_exit(&wusb_dfp->wusb_df_mutex);
}
/*
 * cleanvnode: grab a vnode from freelist, clean and free it.
 *
 * => Releases vnode_free_list_lock.
 */
static int
cleanvnode(void)
{
	vnode_t *vp;
	vnodelst_t *listhd;

	KASSERT(mutex_owned(&vnode_free_list_lock));
retry:
	listhd = &vnode_free_list;
try_nextlist:
	TAILQ_FOREACH(vp, listhd, v_freelist) {
		/*
		 * It's safe to test v_usecount and v_iflag
		 * without holding the interlock here, since
		 * these vnodes should never appear on the
		 * lists.
		 */
		KASSERT(vp->v_usecount == 0);
		KASSERT((vp->v_iflag & VI_CLEAN) == 0);
		KASSERT(vp->v_freelisthd == listhd);

		if (!mutex_tryenter(vp->v_interlock))
			continue;
		if ((vp->v_iflag & VI_XLOCK) == 0)
			break;
		mutex_exit(vp->v_interlock);
	}
/*
 * wusb_df_release_access:
 *    Release the serial synchronization object.
 */
static void
wusb_df_release_access(wusb_df_state_t *wusb_dfp)
{
	ASSERT(mutex_owned(&wusb_dfp->wusb_df_mutex));
	wusb_dfp->wusb_df_serial_inuse = B_FALSE;
	cv_broadcast(&wusb_dfp->wusb_df_serial_cv);
}
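For symmetry with wusb_df_release_access(), a hypothetical acquire side of the same cv-and-flag serialization pattern might look like the sketch below; the real driver's function name and any signal/timeout handling are not shown.

static void
example_acquire_access(wusb_df_state_t *wusb_dfp)
{
	ASSERT(mutex_owned(&wusb_dfp->wusb_df_mutex));

	/* Wait until the current holder broadcasts wusb_df_serial_cv. */
	while (wusb_dfp->wusb_df_serial_inuse)
		cv_wait(&wusb_dfp->wusb_df_serial_cv,
		    &wusb_dfp->wusb_df_mutex);

	wusb_dfp->wusb_df_serial_inuse = B_TRUE;
}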
static int
awin_p2wi_rsb_config(struct awin_p2wi_softc *sc, uint8_t rta, i2c_addr_t da,
    int flags)
{
	uint32_t dar, ctrl;

	KASSERT(mutex_owned(&sc->sc_lock));

	P2WI_WRITE(sc, AWIN_A31_P2WI_STAT_REG,
	    P2WI_READ(sc, AWIN_A31_P2WI_STAT_REG) & AWIN_A31_P2WI_STAT_MASK);

	dar = __SHIFTIN(rta, AWIN_A80_RSB_DAR_RTA);
	dar |= __SHIFTIN(da, AWIN_A80_RSB_DAR_DA);
	P2WI_WRITE(sc, AWIN_A80_RSB_DAR_REG, dar);
	P2WI_WRITE(sc, AWIN_A80_RSB_CMD_REG, AWIN_A80_RSB_CMD_IDX_SRTA);

	/* Make sure the controller is idle */
	ctrl = P2WI_READ(sc, AWIN_A31_P2WI_CTRL_REG);
	if (ctrl & AWIN_A31_P2WI_CTRL_START_TRANS) {
		device_printf(sc->sc_dev, "device is busy\n");
		return EBUSY;
	}

	/* Start the transfer */
	P2WI_WRITE(sc, AWIN_A31_P2WI_CTRL_REG,
	    ctrl | AWIN_A31_P2WI_CTRL_START_TRANS);

	return awin_p2wi_wait(sc, flags);
}
Example #30
/*
 * Wait for the specified CCB to complete.
 */
static int
cac_ccb_poll(struct cac_softc *sc, struct cac_ccb *wantccb, int timo)
{
	struct cac_ccb *ccb;

	KASSERT(mutex_owned(&sc->sc_mutex));

	timo *= 1000;

	do {
		for (; timo != 0; timo--) {
			ccb = (*sc->sc_cl.cl_completed)(sc);
			if (ccb != NULL)
				break;
			DELAY(1);
		}

		if (timo == 0) {
			printf("%s: timeout\n", device_xname(sc->sc_dev));
			return (EBUSY);
		}
		cac_ccb_done(sc, ccb);
	} while (ccb != wantccb);

	return (0);
}