Example #1
/*
 * Unmap the specified CCB's command block and data buffer (if any) and
 * perform DMA synchronisation.
 */
void
twe_ccb_unmap(struct twe_softc *sc, struct twe_ccb *ccb)
{
	int flags, s;

	if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
		flags = BUS_DMASYNC_POSTREAD;
	else
		flags = 0;
	if ((ccb->ccb_flags & TWE_CCB_DATA_OUT) != 0)
		flags |= BUS_DMASYNC_POSTWRITE;

	bus_dmamap_sync(sc->sc_dmat, ccb->ccb_dmamap_xfer, 0,
	    ccb->ccb_datasize, flags);
	bus_dmamap_unload(sc->sc_dmat, ccb->ccb_dmamap_xfer);

	if (ccb->ccb_abuf != (vaddr_t)0) {
		if ((ccb->ccb_flags & TWE_CCB_DATA_IN) != 0)
			memcpy(ccb->ccb_data, (void *)ccb->ccb_abuf,
			    ccb->ccb_datasize);
		s = splvm();
		/* XXX */
		uvm_km_kmem_free(kmem_va_arena, ccb->ccb_abuf,
		    ccb->ccb_datasize);
		splx(s);
	}
}
Example #2
static __inline void
uvm_pageremove(struct vm_page *pg)
{
    struct pglist *buck;
    int s;
    UVMHIST_FUNC("uvm_pageremove");
    UVMHIST_CALLED(pghist);

    KASSERT(pg->pg_flags & PG_TABLED);
    buck = &uvm.page_hash[uvm_pagehash(pg->uobject, pg->offset)];
    s = splvm();
    simple_lock(&uvm.hashlock);
    TAILQ_REMOVE(buck, pg, hashq);
    simple_unlock(&uvm.hashlock);
    splx(s);

#ifdef UBC
    if (pg->uobject->pgops == &uvm_vnodeops) {
        uvm_pgcnt_vnode--;
    }
#endif

    /* object should be locked */
    TAILQ_REMOVE(&pg->uobject->memq, pg, listq);

    atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
    pg->uobject->uo_npages--;
    pg->uobject = NULL;
    pg->pg_version++;
}
Example #3
void 
_bus_dmamap_unload(bus_dma_tag_t t, bus_dmamap_t map)
{
	bus_dma_segment_t *segs;
	vaddr_t dva;
	vsize_t sgsize;
	int error, s;

#ifdef DIAGNOSTIC
	if (map->dm_nsegs != 1)
		panic("%s: invalid nsegs = %d", __func__, map->dm_nsegs);
#endif

	segs = map->dm_segs;
	dva = segs[0]._ds_va & ~PGOFSET;
	sgsize = segs[0]._ds_sgsize;

	/* Unmap the DVMA addresses. */
	pmap_remove(pmap_kernel(), dva, dva + sgsize);
	pmap_update(pmap_kernel());

	/* Free the DVMA addresses. */
	s = splvm();
	error = extent_free(dvma_extent, dva, sgsize, EX_NOWAIT);
	splx(s);
#ifdef DIAGNOSTIC
	if (error)
		panic("%s: unable to free DVMA region", __func__);
#endif

	/* Mark the mappings as invalid. */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
}
Example #4
int
aedkqfilter(dev_t dev, struct knote *kn)
{
	struct klist *klist;
	int s;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &aed_sc->sc_selinfo.sel_klist;
		kn->kn_fop = &aedread_filtops;
		break;

	case EVFILT_WRITE:
		klist = &aed_sc->sc_selinfo.sel_klist;
		kn->kn_fop = &aed_seltrue_filtops;
		break;

	default:
		return (1);
	}

	kn->kn_hook = NULL;

	s = splvm();
	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
	splx(s);

	return (0);
}
Example #5
File: phys_pager.c Project: MarginC/kame
static int
phys_pager_getpages(vm_object_t object, vm_page_t *m, int count, int reqpage)
{
	int i, s;

	s = splvm();
	/*
	 * Fill as many pages as vm_fault has allocated for us.
	 */
	for (i = 0; i < count; i++) {
		if ((m[i]->flags & PG_ZERO) == 0)
			pmap_zero_page(m[i]);
		vm_page_flag_set(m[i], PG_ZERO);
		/* Switch off pv_entries */
		vm_page_lock_queues();
		vm_page_unmanage(m[i]);
		vm_page_unlock_queues();
		m[i]->valid = VM_PAGE_BITS_ALL;
		m[i]->dirty = 0;
		/* The requested page must remain busy, the others not. */
		if (reqpage != i) {
			vm_page_flag_clear(m[i], PG_BUSY);
			m[i]->busy = 0;
		}
	}
	splx(s);

	return (VM_PAGER_OK);
}
Example #6
/*
 * Place the event in the event queue and wakeup any waiting processes.
 */
static void 
aed_enqevent(adb_event_t *event)
{
	int s;

	s = splvm();

#ifdef DIAGNOSTIC
	if (aed_sc->sc_evq_tail < 0 || aed_sc->sc_evq_tail >= AED_MAX_EVENTS)
		panic("adb: event queue tail is out of bounds");

	if (aed_sc->sc_evq_len < 0 || aed_sc->sc_evq_len > AED_MAX_EVENTS)
		panic("adb: event queue len is out of bounds");
#endif

	if (aed_sc->sc_evq_len == AED_MAX_EVENTS) {
		splx(s);
		return;		/* Oh, well... */
	}
	aed_sc->sc_evq[(aed_sc->sc_evq_len + aed_sc->sc_evq_tail) %
	    AED_MAX_EVENTS] = *event;
	aed_sc->sc_evq_len++;

	selnotify(&aed_sc->sc_selinfo, 0, 0);
	if (aed_sc->sc_ioproc)
		psignal(aed_sc->sc_ioproc, SIGIO);

	splx(s);
}
Example #7
void
xen_set_ldt(vaddr_t base, uint32_t entries)
{
	vaddr_t va;
	vaddr_t end;
	pt_entry_t *ptp;
	int s;

#ifdef __x86_64__
	end = base + (entries << 3);
#else
	end = base + entries * sizeof(union descriptor);
#endif

	for (va = base; va < end; va += PAGE_SIZE) {
		KASSERT(va >= VM_MIN_KERNEL_ADDRESS);
		ptp = kvtopte(va);
		XENPRINTF(("xen_set_ldt %#" PRIxVADDR " %d %p\n",
		    base, entries, ptp));
		pmap_pte_clearbits(ptp, PG_RW);
	}
	s = splvm();
	xpq_queue_set_ldt(base, entries);
	splx(s);
}
Example #8
struct mbuf *
m_free(struct mbuf *m)
{
	struct mbuf *n;
	int s;

	s = splvm();
	mbstat.m_mtypes[m->m_type]--;
	if (m->m_flags & M_PKTHDR)
		m_tag_delete_chain(m);
	if (m->m_flags & M_EXT) {
		if (MCLISREFERENCED(m))
			_MCLDEREFERENCE(m);
		else if (m->m_flags & M_CLUSTER)
			pool_put(&mclpool, m->m_ext.ext_buf);
		else if (m->m_ext.ext_free)
			(*(m->m_ext.ext_free))(m->m_ext.ext_buf,
			    m->m_ext.ext_size, m->m_ext.ext_arg);
		else
			free(m->m_ext.ext_buf, m->m_ext.ext_type);
		m->m_ext.ext_size = 0;
	}
	m->m_flags = 0;
	n = m->m_next;
	pool_put(&mbpool, m);
	splx(s);

	return (n);
}
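A minimal caller sketch (not taken from any listing above; the helper name is made up for illustration): because m_free() returns the successor of the mbuf it releases, a whole chain can be freed with a simple loop, which is essentially what the stock m_freem() does.

static void
mbuf_chain_free_example(struct mbuf *m)
{
	/* m_free() releases one mbuf and hands back its m_next. */
	while (m != NULL)
		m = m_free(m);
}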
Example #9
struct mbuf *
m_gethdr(int nowait, int type)
{
	struct mbuf *m;
	int s;

	s = splvm();
	m = pool_get(&mbpool, nowait == M_WAIT ? PR_WAITOK : 0);
	if (m) {
		m->m_type = type;
		mbstat.m_mtypes[type]++;

		/* keep in sync with m_inithdr */
		m->m_next = (struct mbuf *)NULL;
		m->m_nextpkt = (struct mbuf *)NULL;
		m->m_data = m->m_pktdat;
		m->m_flags = M_PKTHDR;
		m->m_pkthdr.rcvif = NULL;
		SLIST_INIT(&m->m_pkthdr.tags);
		m->m_pkthdr.csum_flags = 0;
		m->m_pkthdr.pf.hdr = NULL;
		m->m_pkthdr.pf.statekey = NULL;
		m->m_pkthdr.pf.rtableid = 0;
		m->m_pkthdr.pf.qid = 0;
		m->m_pkthdr.pf.tag = 0;
		m->m_pkthdr.pf.flags = 0;
		m->m_pkthdr.pf.routed = 0;
	}
	splx(s);
	return (m);
}
Example #10
static void
filt_aedrdetach(struct knote *kn)
{
	int s;

	s = splvm();
	SLIST_REMOVE(&aed_sc->sc_selinfo.sel_klist, kn, knote, kn_selnext);
	splx(s);
}
Example #11
static __inline void
ip6q_unlock()
{
	int s;

	s = splvm();
	ip6q_locked = 0;
	splx(s);
}
Example #12
/*
 * pmap_tlb_shootdown: invalidate a page on all CPUs using pmap 'pm'.
 */
void
pmap_tlb_shootdown(struct pmap *pm, vaddr_t va, pt_entry_t pte, tlbwhy_t why)
{
	pmap_tlb_packet_t *tp;
	int s;

#ifndef XEN
	KASSERT((pte & PG_G) == 0 || pm == pmap_kernel());
#endif

	/*
	 * If tearing down the pmap, do nothing.  We will flush later
	 * when we are ready to recycle/destroy it.
	 */
	if (__predict_false(curlwp->l_md.md_gc_pmap == pm)) {
		return;
	}

	if ((pte & PG_PS) != 0) {
		va &= PG_LGFRAME;
	}

	/*
	 * Add the shootdown operation to our pending set.
	 */
	s = splvm();
	tp = (pmap_tlb_packet_t *)curcpu()->ci_pmap_data;

	/* Whole address flush will be needed if PG_G is set. */
	CTASSERT(PG_G == (uint16_t)PG_G);
	tp->tp_pte |= (uint16_t)pte;

	if (tp->tp_count == (uint16_t)-1) {
		/*
		 * Already flushing everything.
		 */
	} else if (tp->tp_count < TP_MAXVA && va != (vaddr_t)-1LL) {
		/* Flush a single page. */
		tp->tp_va[tp->tp_count++] = va;
		KASSERT(tp->tp_count > 0);
	} else {
		/* Flush everything. */
		tp->tp_count = (uint16_t)-1;
	}

	if (pm != pmap_kernel()) {
		kcpuset_merge(tp->tp_cpumask, pm->pm_cpus);
		if (va >= VM_MAXUSER_ADDRESS) {
			kcpuset_merge(tp->tp_cpumask, pm->pm_kernel_cpus);
		}
		tp->tp_userpmap = 1;
	} else {
		kcpuset_copy(tp->tp_cpumask, kcpuset_running);
	}
	pmap_tlbstat_count(pm, va, why);
	splx(s);
}
Example #13
/*
 * Given a range of kernel virtual space, remap all the
 * pages found there into the DVMA space (dup mappings).
 * This IS safe to call at interrupt time.
 * (Typically called at SPLBIO)
 */
void *
dvma_mapin(void *kva, int len, int canwait /* ignored */)
{
	vaddr_t seg_kva, seg_dma;
	vsize_t seg_len, seg_off;
	vaddr_t v, x;
	int s, sme, error;

	/* Get seg-aligned address and length. */
	seg_kva = (vaddr_t)kva;
	seg_len = (vsize_t)len;
	seg_off = seg_kva & SEGOFSET;
	seg_kva -= seg_off;
	seg_len = m68k_round_seg(seg_len + seg_off);

	s = splvm();

	/* Allocate the DVMA segment(s) */

	error = extent_alloc(dvma_extent, seg_len, NBSG, 0,
	    EX_FAST | EX_NOWAIT | EX_MALLOCOK, &seg_dma);
	if (error) {
		splx(s);
		return NULL;
	}

#ifdef	DIAGNOSTIC
	if (seg_dma & SEGOFSET)
		panic("dvma_mapin: seg not aligned");
#endif

	/* Duplicate the mappings into DMA space. */
	v = seg_kva;
	x = seg_dma;
	while (seg_len > 0) {
		sme = get_segmap(v);
#ifdef	DIAGNOSTIC
		if (sme == SEGINV)
			panic("dvma_mapin: seg not mapped");
#endif
#ifdef	HAVECACHE
		/* flush write-back on old mappings */
		if (cache_size)
			cache_flush_segment(v);
#endif
		set_segmap_allctx(x, sme);
		v += NBSG;
		x += NBSG;
		seg_len -= NBSG;
	}
	seg_dma += seg_off;

	splx(s);
	return (void *)seg_dma;
}
Example #14
void
xpq_flush_cache(void)
{
	int s = splvm();

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_flush_cache\n"));
	asm("wbinvd":::"memory");
	splx(s); /* XXX: removeme */
}
Example #15
int 
aedclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int s;

	s = splvm();
	aed_sc->sc_open = 0;
	aed_sc->sc_ioproc = NULL;
	splx(s);

	return (0);
}
Example #16
/*
 * If we failed to allocate uba resources, put us on a queue to wait
 * until there is available resources. Resources to compete about
 * are map registers and BDPs. This is normally only a problem on
 * Unibus systems, Qbus systems have more map registers than usable.
 */
void
uba_enqueue(struct uba_unit *uu)
{
	struct uba_softc *uh;
	int s;

	uh = (void *)((struct device *)(uu->uu_softc))->dv_parent;

	s = splvm();
	SIMPLEQ_INSERT_TAIL(&uh->uh_resq, uu, uu_resq);
	splx(s);
}
Example #17
void
m_reclaim(void *arg, int flags)
{
	struct domain *dp;
	struct protosw *pr;
	int s = splvm();

	for (dp = domains; dp; dp = dp->dom_next)
		for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW; pr++)
			if (pr->pr_drain)
				(*pr->pr_drain)();
	splx(s);
	mbstat.m_drain++;
}
Example #18
static __inline int
ip6q_lock_try()
{
	int s;

	/* Use splvm() due to mbuf allocation. */
	s = splvm();
	if (ip6q_locked) {
		splx(s);
		return (0);
	}
	ip6q_locked = 1;
	splx(s);
	return (1);
}
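A hypothetical caller sketch showing how this try-lock pairs with ip6q_unlock() from Example #11; the function name and the back-off policy are illustrative assumptions, not code from the tree.

static void
ip6q_example_caller(void)
{
	if (ip6q_lock_try() == 0)
		return;		/* queue already locked; caller retries later */
	/* ... operate on the IPv6 reassembly queue here ... */
	ip6q_unlock();
}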
Example #19
File: rtc.c Project: lacombar/netbsd-alc
static uint8_t
rtc_readreg(struct rtc_softc *sc, int reg)
{
	bus_space_tag_t bst = sc->sc_bst;
	bus_space_handle_t bsh = sc->sc_bsh;
	uint8_t data;
	int s = splvm();

	data = reg;
	intio_device_writecmd(bst, bsh, RTC_SET_REG, &data, 1);
	intio_device_readcmd(bst, bsh, RTC_READ_REG, &data);

	splx(s);
	return data;
}
Example #20
static void
linux_work_lock(struct work_struct *work)
{
	struct cpu_info *ci;
	int cnt, s;

	/* XXX Copypasta of MUTEX_SPIN_SPLRAISE.  */
	s = splvm();
	ci = curcpu();
	cnt = ci->ci_mtx_count--;
	__insn_barrier();
	if (cnt == 0)
		ci->ci_mtx_oldspl = s;

	__cpu_simple_lock(&work->w_lock);
}
Example #21
File: xbd.c Project: MarginC/kame
static void
unmap_align(struct xbdreq *xr)
{
	int s;

	if (xr->xr_bp->b_flags & B_READ)
		memcpy(xr->xr_bp->b_data, (void *)xr->xr_aligned,
		    xr->xr_bp->b_bcount);
	DPRINTF(XBDB_IO, ("unmap_align(%p): bp %p addr %p align 0x%08lx "
	    "size 0x%04lx\n", xr, xr->xr_bp, xr->xr_bp->b_data,
	    xr->xr_aligned, xr->xr_bp->b_bcount));
	s = splvm();
	uvm_km_free(kmem_map, xr->xr_aligned, xr->xr_bp->b_bcount);
	splx(s);
	xr->xr_aligned = (vaddr_t)0;
}
Example #22
File: rtc.c Project: lacombar/netbsd-alc
static uint8_t
rtc_writereg(struct rtc_softc *sc, int reg, uint8_t data)
{
	bus_space_tag_t bst = sc->sc_bst;
	bus_space_handle_t bsh = sc->sc_bsh;
	uint8_t tmp;
	int s = splvm();

	tmp = (data << 4) | reg;
	intio_device_writecmd(bst, bsh, RTC_SET_REG, &tmp, 1);
	intio_device_writecmd(bst, bsh, RTC_WRITE_REG, NULL, 0);
	intio_device_readcmd(bst, bsh, RTC_READ_REG, &tmp);

	splx(s);
	return tmp;
}
Example #23
static int
xennet_xenbus_detach(device_t self, int flags)
{
	struct xennet_xenbus_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int s0, s1;
	RING_IDX i;

	DPRINTF(("%s: xennet_xenbus_detach\n", device_xname(self)));
	s0 = splnet();
	xennet_stop(ifp, 1);
	/* wait for pending TX to complete, and collect pending RX packets */
	xennet_handler(sc);
	while (sc->sc_tx_ring.sring->rsp_prod != sc->sc_tx_ring.rsp_cons) {
		tsleep(xennet_xenbus_detach, PRIBIO, "xnet_detach", hz/2);
		xennet_handler(sc);
	}
	xennet_free_rx_buffer(sc);

	s1 = splvm();
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];
		uvm_km_free(kernel_map, rxreq->rxreq_va, PAGE_SIZE,
		    UVM_KMF_WIRED);
	}
	splx(s1);
		
	ether_ifdetach(ifp);
	if_detach(ifp);
	while (xengnt_status(sc->sc_tx_ring_gntref)) {
		tsleep(xennet_xenbus_detach, PRIBIO, "xnet_txref", hz/2);
	}
	xengnt_revoke_access(sc->sc_tx_ring_gntref);
	uvm_km_free(kernel_map, (vaddr_t)sc->sc_tx_ring.sring, PAGE_SIZE,
	    UVM_KMF_WIRED);
	while (xengnt_status(sc->sc_rx_ring_gntref)) {
		tsleep(xennet_xenbus_detach, PRIBIO, "xnet_rxref", hz/2);
	}
	xengnt_revoke_access(sc->sc_rx_ring_gntref);
	uvm_km_free(kernel_map, (vaddr_t)sc->sc_rx_ring.sring, PAGE_SIZE,
	    UVM_KMF_WIRED);
	softint_disestablish(sc->sc_softintr);
	event_remove_handler(sc->sc_evtchn, &xennet_handler, sc);
	splx(s0);
	DPRINTF(("%s: xennet_xenbus_detach done\n", device_xname(self)));
	return 0;
}
Example #24
void
m_clget(struct mbuf *m, int how)
{
	int s;

	s = splvm();
	m->m_ext.ext_buf =
	    pool_get(&mclpool, how == M_WAIT ? PR_WAITOK : 0);
	splx(s);
	if (m->m_ext.ext_buf != NULL) {
		m->m_data = m->m_ext.ext_buf;
		m->m_flags |= M_EXT|M_CLUSTER;
		m->m_ext.ext_size = MCLBYTES;
		m->m_ext.ext_free = NULL;
		m->m_ext.ext_arg = NULL;
		MCLINITREFERENCE(m);
	}
}
Example #25
File: xbd.c Project: MarginC/kame
static void
map_align(struct xbdreq *xr)
{
	int s;

	s = splvm();
	xr->xr_aligned = uvm_km_kmemalloc1(kmem_map, NULL,
	    xr->xr_bqueue, XEN_BSIZE, UVM_UNKNOWN_OFFSET,
	    0/*  UVM_KMF_NOWAIT */);
	splx(s);
	DPRINTF(XBDB_IO, ("map_align(%p): bp %p addr %p align 0x%08lx "
	    "size 0x%04lx\n", xr, xr->xr_bp, xr->xr_bp->b_data,
	    xr->xr_aligned, xr->xr_bqueue));
	xr->xr_data = xr->xr_aligned;
	if ((xr->xr_bp->b_flags & B_READ) == 0)
		memcpy((void *)xr->xr_aligned, xr->xr_bp->b_data,
		    xr->xr_bqueue);
}
Example #26
int 
aedread(dev_t dev, struct uio *uio, int flag)
{
	int s, error;
	int willfit;
	int total;
	int firstmove;
	int moremove;

	if (uio->uio_resid < sizeof(adb_event_t))
		return (EMSGSIZE);	/* close enough. */

	s = splvm();
	if (aed_sc->sc_evq_len == 0) {
		splx(s);
		return (0);
	}
	willfit = howmany(uio->uio_resid, sizeof(adb_event_t));
	total = (aed_sc->sc_evq_len < willfit) ? aed_sc->sc_evq_len : willfit;

	firstmove = (aed_sc->sc_evq_tail + total > AED_MAX_EVENTS)
	    ? (AED_MAX_EVENTS - aed_sc->sc_evq_tail) : total;

	error = uiomove((void *)&aed_sc->sc_evq[aed_sc->sc_evq_tail],
	    firstmove * sizeof(adb_event_t), uio);
	if (error) {
		splx(s);
		return (error);
	}
	moremove = total - firstmove;

	if (moremove > 0) {
		error = uiomove((void *)&aed_sc->sc_evq[0],
		    moremove * sizeof(adb_event_t), uio);
		if (error) {
			splx(s);
			return (error);
		}
	}
	aed_sc->sc_evq_tail = (aed_sc->sc_evq_tail + total) % AED_MAX_EVENTS;
	aed_sc->sc_evq_len -= total;
	splx(s);
	return (0);
}
Example #27
/*
 * Space allocation routines.
 */
struct mbuf *
m_get(int nowait, int type)
{
	struct mbuf *m;
	int s;

	s = splvm();
	m = pool_get(&mbpool, nowait == M_WAIT ? PR_WAITOK : 0);
	if (m) {
		m->m_type = type;
		mbstat.m_mtypes[type]++;
		m->m_next = (struct mbuf *)NULL;
		m->m_nextpkt = (struct mbuf *)NULL;
		m->m_data = m->m_dat;
		m->m_flags = 0;
	}
	splx(s);
	return (m);
}
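A hedged usage sketch combining m_get() with m_clget() from Example #24: attach a cluster to a fresh mbuf and back out if the cluster pool is exhausted. The helper name is invented, and M_DONTWAIT/MT_DATA are assumed to be the usual mbuf.h constants.

static struct mbuf *
mbuf_with_cluster_example(void)
{
	struct mbuf *m;

	m = m_get(M_DONTWAIT, MT_DATA);	/* assumed constants from mbuf.h */
	if (m == NULL)
		return (NULL);
	m_clget(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		/* no cluster available; release the plain mbuf */
		m_free(m);
		return (NULL);
	}
	return (m);
}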
Example #28
int 
aedpoll(dev_t dev, int events, struct lwp *l)
{
	int s, revents;

	revents = events & (POLLOUT | POLLWRNORM);
	
	if ((events & (POLLIN | POLLRDNORM)) == 0)
		return (revents);

	s = splvm();
	if (aed_sc->sc_evq_len > 0)
		revents |= events & (POLLIN | POLLRDNORM);
	else
		selrecord(l, &aed_sc->sc_selinfo);
	splx(s);

	return (revents);
}
Example #29
/*
 * Free some DVMA space allocated by the above.
 * This IS safe to call at interrupt time.
 * (Typically called at SPLBIO)
 */
void 
dvma_mapout(void *dma, int len)
{
	vaddr_t seg_dma;
	vsize_t seg_len, seg_off;
	vaddr_t v, x;
	int sme;
	int s;

	/* Get seg-aligned address and length. */
	seg_dma = (vaddr_t)dma;
	seg_len = (vsize_t)len;
	seg_off = seg_dma & SEGOFSET;
	seg_dma -= seg_off;
	seg_len = m68k_round_seg(seg_len + seg_off);

	s = splvm();

	/* Flush cache and remove DVMA mappings. */
	v = seg_dma;
	x = v + seg_len;
	while (v < x) {
		sme = get_segmap(v);
#ifdef	DIAGNOSTIC
		if (sme == SEGINV)
			panic("dvma_mapout: seg not mapped");
#endif
#ifdef	HAVECACHE
		/* flush write-back on the DVMA mappings */
		if (cache_size)
			cache_flush_segment(v);
#endif
		set_segmap_allctx(v, SEGINV);
		v += NBSG;
	}

	if (extent_free(dvma_extent, seg_dma, seg_len,
	    EX_NOWAIT | EX_MALLOCOK))
		panic("dvma_mapout: unable to free 0x%lx,0x%lx",
		    seg_dma, seg_len);
	splx(s);
}
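A small sketch of how dvma_mapin() (Example #13) and dvma_mapout() pair up around a transfer; the wrapper name and the elided transfer step are placeholders, assuming the caller runs at the interrupt priority the comments above require (typically SPLBIO).

static void
dvma_window_example(void *buf, int len)
{
	void *dva;

	dva = dvma_mapin(buf, len, 0);	/* canwait is ignored */
	if (dva == NULL)
		return;			/* DVMA space exhausted */
	/* ... start and complete the DMA transfer using the dva alias ... */
	dvma_mapout(dva, len);
}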
Example #30
/*
 * Generate a reset on uba number uban.	 Then
 * call each device that asked to be called during attach,
 * giving it a chance to clean up so as to be able to continue.
 */
void
ubareset(struct uba_softc *uh)
{
	struct uba_reset *ur;
	int s;

	s = splvm();
	SIMPLEQ_INIT(&uh->uh_resq);
	printf("%s: reset", uh->uh_dev.dv_xname);
	(*uh->uh_ubainit)(uh);

	ur = SIMPLEQ_FIRST(&uh->uh_resetq);
	if (ur) do {
		printf(" %s", ur->ur_dev->dv_xname);
		(*ur->ur_reset)(ur->ur_dev);
	} while ((ur = SIMPLEQ_NEXT(ur, ur_resetq)));

	printf("\n");
	splx(s);
}