Example #1
static void mlx4_en_modify_rl_res(struct mlx4_en_priv *priv,
			   int ring_id, u8 rate_index)
{
	struct mlx4_en_tx_ring *tx_ring;
	struct mlx4_update_qp_params update_params;
	int err;

	tx_ring = priv->tx_ring[ring_id];

	/* Ring validation */
	if (!TX_RING_USER_VALID(ring_id)) {
		en_err(priv, "Failed modifying new rate, ring %d doesn't exist\n", ring_id);
		/* If the modified ring does not exist, no need to add one
		 * to the reference count of the requested rate */
		atomic_subtract_int(&priv->rate_limits[rate_index].ref, 1);
		return;
	}

	if (priv->rate_limits[tx_ring->rl_data.rate_index].rate !=
				priv->rate_limits[rate_index].rate) {
		update_params.rl_index = rate_index;
		err = mlx4_update_qp(priv->mdev->dev, tx_ring->qpn, MLX4_UPDATE_QP_RATE_LIMIT,
				     &update_params);
		if (err) {
			en_err(priv, "Failed updating ring %d with new rate %uBytes/sec, err: %d\n",
			       ring_id, (priv->rate_limits[rate_index].rate/8), err);
			atomic_subtract_int(&priv->rate_limits[rate_index].ref, 1);
			return;
		}
	}
	atomic_subtract_int(&priv->rate_limits[tx_ring->rl_data.rate_index].ref, 1);
	tx_ring->rl_data.rate_index = rate_index;
}
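Note the reference-counting invariant the function above relies on: the caller takes a reference on the requested rate before invoking it, so every early-return path must drop exactly one reference. A minimal sketch of such a caller (hypothetical name, not from the driver):

static void
mlx4_en_request_rate_change(struct mlx4_en_priv *priv, int ring_id,
    u8 rate_index)
{
	/* Take the reference that mlx4_en_modify_rl_res() either keeps
	 * (on success) or drops (on any failure path). */
	atomic_add_int(&priv->rate_limits[rate_index].ref, 1);
	mlx4_en_modify_rl_res(priv, ring_id, rate_index);
}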
Example #2
/*
 * A very short dispatch, to try and maximise assembler code use
 * between all exception types. Maybe 'true' interrupts should go
 * here, and the trap code can come in separately
 */
void
powerpc_interrupt(struct trapframe *framep)
{
	struct thread *td;
	struct clockframe ckframe;

	td = curthread;

	switch (framep->exc) {
	case EXC_EXI:
		atomic_add_int(&td->td_intr_nesting_level, 1);
		(*powerpc_extintr_handler)();
		atomic_subtract_int(&td->td_intr_nesting_level, 1);	
		break;

	case EXC_DECR:
		atomic_add_int(&td->td_intr_nesting_level, 1);
		ckframe.srr0 = framep->srr0;
		ckframe.srr1 = framep->srr1;
		decr_intr(&ckframe);
		atomic_subtract_int(&td->td_intr_nesting_level, 1);	
		break;

	default:
		/*
		 * Re-enable interrupts and call the generic trap code
		 */
#if 0
		printf("powerpc_interrupt: got trap\n");
#endif
		mtmsr(mfmsr() | PSL_EE);
		isync();
		trap(framep);
	}	        
}
Example #3
void
uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
{
	vm_page_t m;

	if (!hw_direct_map)
		pmap_remove(kernel_pmap,(vm_offset_t)mem,
		    (vm_offset_t)mem + PAGE_SIZE);

	m = PHYS_TO_VM_PAGE((vm_offset_t)mem);
	m->wire_count--;
	vm_page_free(m);
	atomic_subtract_int(&vm_cnt.v_wire_count, 1);
	atomic_subtract_int(&hw_uma_mdpages, 1);
}
Example #4
static void
bpf_track(void *arg, struct ifnet *ifp, int dlt, int attach)
{
	/* NB: identify vap's by if_init */
	if (dlt == DLT_IEEE802_11_RADIO &&
	    ifp->if_init == ieee80211_init) {
		struct ieee80211vap *vap = ifp->if_softc;
		/*
		 * Track bpf radiotap listener state.  We mark the vap
		 * to indicate if any listener is present and the com
		 * to indicate if any listener exists on any associated
		 * vap.  This flag is used by drivers to prepare radiotap
		 * state only when needed.
		 */
		if (attach) {
			ieee80211_syncflag_ext(vap, IEEE80211_FEXT_BPF);
			if (vap->iv_opmode == IEEE80211_M_MONITOR)
				atomic_add_int(&vap->iv_ic->ic_montaps, 1);
		} else if (!bpf_peers_present(vap->iv_rawbpf)) {
			ieee80211_syncflag_ext(vap, -IEEE80211_FEXT_BPF);
			if (vap->iv_opmode == IEEE80211_M_MONITOR)
				atomic_subtract_int(&vap->iv_ic->ic_montaps, 1);
		}
	}
}
Example #5
int
ieee80211_node_dectestref(struct ieee80211_node *ni)
{
	/* XXX need equivalent of atomic_dec_and_test */
	atomic_subtract_int(&ni->ni_refcnt, 1);
	return atomic_cmpset_int(&ni->ni_refcnt, 0, 1);
}
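The XXX above can be addressed with atomic_fetchadd_int(9), which returns the value held before the add: the decrement reaches zero exactly when the old value was 1. A sketch under that assumption (not the function's actual implementation):

static __inline int
node_dectestref_sketch(struct ieee80211_node *ni)
{
	/* Old value 1 means this caller performed the 1 -> 0 transition. */
	return (atomic_fetchadd_int(&ni->ni_refcnt, -1) == 1);
}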
Example #6
static int
soft_stop_pmc(int cpu, int ri)
{
	struct pmc *pm;
	struct soft_cpu *pc;
	struct pmc_soft *ps;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[soft,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < SOFT_NPMCS,
	    ("[soft,%d] illegal row-index %d", __LINE__, ri));

	pc = soft_pcpu[cpu];
	pm = pc->soft_hw[ri].phw_pmc;

	KASSERT(pm,
	    ("[soft,%d] cpu %d ri %d pmc not configured", __LINE__, cpu, ri));

	ps = pmc_soft_ev_acquire(pm->pm_event);
	/* event unregistered ? */
	if (ps != NULL) {
		atomic_subtract_int(&ps->ps_running, 1);
		pmc_soft_ev_release(ps);
	}

	return (0);
}
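For symmetry, a sketch of what the matching start path presumably looks like (hypothetical here, not quoted from the source): the running counter is bumped while the soft event is held, so the interrupt path can tell whether any soft PMC is active.

static int
soft_start_pmc_sketch(struct pmc *pm)
{
	struct pmc_soft *ps;

	ps = pmc_soft_ev_acquire(pm->pm_event);
	if (ps == NULL)		/* event unregistered? */
		return (EINVAL);
	atomic_add_int(&ps->ps_running, 1);
	pmc_soft_ev_release(ps);
	return (0);
}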
Example #7
static inline void
l2t_hold(struct l2t_data *d, struct l2t_entry *e)
{

	if (atomic_fetchadd_int(&e->refcnt, 1) == 0)  /* 0 -> 1 transition */
		atomic_subtract_int(&d->nfree, 1);
}
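A plausible counterpart to l2t_hold() (a sketch, not quoted from the driver): dropping the last reference performs the 1 -> 0 transition and returns the entry to the free pool counter.

static inline void
l2t_release_sketch(struct l2t_data *d, struct l2t_entry *e)
{
	if (atomic_fetchadd_int(&e->refcnt, -1) == 1)	/* 1 -> 0 transition */
		atomic_add_int(&d->nfree, 1);
}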
Example #8
static
int
nvtruncbuf_bp_trunc(struct buf *bp, void *data)
{
	struct truncbuf_info *info = data;

	/*
	 * Do not try to use a buffer we cannot immediately lock,
	 * but sleep anyway to prevent a livelock.  The code will
	 * loop until all buffers can be acted upon.
	 */
	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT)) {
		atomic_add_int(&bp->b_refs, 1);
		if (BUF_LOCK(bp, LK_EXCLUSIVE|LK_SLEEPFAIL) == 0)
			BUF_UNLOCK(bp);
		atomic_subtract_int(&bp->b_refs, 1);
	} else if ((info->clean && (bp->b_flags & B_DELWRI)) ||
		   (info->clean == 0 && (bp->b_flags & B_DELWRI) == 0) ||
		   bp->b_vp != info->vp ||
		   nvtruncbuf_bp_trunc_cmp(bp, data)) {
		BUF_UNLOCK(bp);
	} else {
		bremfree(bp);
		bp->b_flags |= (B_INVAL | B_RELBUF | B_NOCACHE);
		brelse(bp);
	}
	lwkt_yield();
	return(1);
}
Example #9
int
bus_dma_tag_destroy(bus_dma_tag_t dmat)
{
	if (dmat != NULL) {
		if (dmat->map_count != 0)
			return (EBUSY);

		while (dmat != NULL) {
			bus_dma_tag_t parent;

			parent = dmat->parent;
			atomic_subtract_int(&dmat->ref_count, 1);
			if (dmat->ref_count == 0) {
				free(dmat, M_DEVBUF);
				/*
				 * Last reference count, so
				 * release our reference
				 * count on our parent.
				 */
				dmat = parent;
			} else
				dmat = NULL;
		}
	}
	return (0);
}
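One caveat with the loop above: the atomic_subtract_int() and the subsequent read of dmat->ref_count are two separate operations, so concurrent releasers could both miss (or both see) the zero. A race-free sketch (an assumption, not the source tree's code) collapses the decrement and the test with atomic_fetchadd_int(9):

static int
bus_dma_tag_destroy_sketch(bus_dma_tag_t dmat)
{
	if (dmat == NULL)
		return (0);
	if (dmat->map_count != 0)
		return (EBUSY);
	while (dmat != NULL) {
		bus_dma_tag_t parent = dmat->parent;

		/* fetchadd returns the old count; 1 means last reference. */
		if (atomic_fetchadd_int(&dmat->ref_count, -1) == 1) {
			free(dmat, M_DEVBUF);
			dmat = parent;	/* drop our reference on the parent */
		} else
			dmat = NULL;
	}
	return (0);
}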
Example #10
/*
 * ap_watchdog() is called by the SMP idle loop code.  It works on the same
 * premise that the disabling of logical processors does: that if the cpu is
 * idle, then it can ignore the world from then on, as nothing will be
 * scheduled on it.  Leaving aside multi-runqueue schedulers (SCHED_ULE) and
 * explicit process migration (sched_bind()), this is not an unreasonable
 * assumption.
 */
void
ap_watchdog(u_int cpuid)
{
    char old_pcomm[MAXCOMLEN + 1];
    struct proc *p;

    if (watchdog_cpu != cpuid)
        return;

    printf("watchdog started on cpu %d\n", cpuid);
    p = curproc;
    bcopy(p->p_comm, old_pcomm, MAXCOMLEN + 1);
    snprintf(p->p_comm, MAXCOMLEN + 1, "mp_watchdog cpu %d", cpuid);
    while (1) {
        DELAY(1000000);				/* One second. */
        if (watchdog_cpu != cpuid)
            break;
        atomic_subtract_int(&watchdog_timer, 1);
        if (watchdog_timer < 4)
            printf("Watchdog timer: %d\n", watchdog_timer);
        if (watchdog_timer == 0 && watchdog_dontfire == 0) {
            printf("Watchdog firing!\n");
            watchdog_dontfire = 1;
            if (watchdog_nmi)
                watchdog_ipi_nmi();
            else
                kdb_enter(KDB_WHY_WATCHDOG, "mp_watchdog");
        }
    }
    bcopy(old_pcomm, p->p_comm, MAXCOMLEN + 1);
    printf("watchdog stopped on cpu %d\n", cpuid);
}
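The countdown uses atomic_subtract_int() because watchdog_timer can be re-armed from another CPU while this loop runs; a sketch of such a re-arm path (hypothetical name and mechanism):

static void
watchdog_rearm_sketch(int seconds)
{
	/* Racing with the decrement above is acceptable: the loop only
	 * cares whether the counter reaches zero before the next re-arm. */
	atomic_store_rel_int(&watchdog_timer, seconds);
}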
Example #11
/* Only called from the receive thread */
static uint32_t
if_netmap_sweep_trail(struct if_netmap_softc *sc)
{
	struct if_netmap_bufinfo_pool *p;
	struct if_netmap_bufinfo *bi;
	uint32_t i;
	uint32_t returned;
	unsigned int n;

	i = sc->hw_rx_rsvd_begin;
	
	p = &sc->rx_bufinfo;

	returned = p->returnable;
	for (n = 0; n < returned; n++) {
		bi = &p->pool[p->free_list[p->trail]];
		if_netmap_rxsetslot(sc->nm_host_ctx, &i, bi->nm_index);
		bi->refcnt = 0;

		p->trail++;
		if (p->trail == p->max) {
			p->trail = 0;
		}
	}
	sc->hw_rx_rsvd_begin = i;

	atomic_subtract_int(&p->returnable, returned);
	p->avail += returned;

	return (returned);
}
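The asymmetry above is deliberate: p->returnable is decremented atomically because the producer side (another thread) increments it, while p->avail and p->trail are only ever touched by the receive thread and need no atomics. A minimal sketch of that producer side (hypothetical name, free-list bookkeeping elided):

static void
if_netmap_bufinfo_returned_sketch(struct if_netmap_bufinfo_pool *p)
{
	/* Publish one more returned buffer to the receive thread. */
	atomic_add_int(&p->returnable, 1);
}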
Example #12
/*
 * Allocate an L2T entry for use by a switching rule.  Such need to be
 * explicitly freed and while busy they are not on any hash chain, so normal
 * address resolution updates do not see them.
 */
struct l2t_entry *
t4_l2t_alloc_switching(struct adapter *sc, uint16_t vlan, uint8_t port,
    uint8_t *eth_addr)
{
	struct l2t_data *d = sc->l2t;
	struct l2t_entry *e;
	int rc;

	rw_wlock(&d->lock);
	e = find_or_alloc_l2e(d, vlan, port, eth_addr);
	if (e) {
		if (atomic_load_acq_int(&e->refcnt) == 0) {
			mtx_lock(&e->lock);    /* avoid race with t4_l2t_free */
			e->wrq = &sc->sge.ctrlq[0];
			e->iqid = sc->sge.fwq.abs_id;
			e->state = L2T_STATE_SWITCHING;
			e->vlan = vlan;
			e->lport = port;
			memcpy(e->dmac, eth_addr, ETHER_ADDR_LEN);
			atomic_store_rel_int(&e->refcnt, 1);
			atomic_subtract_int(&d->nfree, 1);
			rc = t4_write_l2e(e, 0);
			mtx_unlock(&e->lock);
			if (rc != 0)
				e = NULL;
		} else {
			MPASS(e->vlan == vlan);
			MPASS(e->lport == port);
			atomic_add_int(&e->refcnt, 1);
		}
	}
	rw_wunlock(&d->lock);
	return (e);
}
Example #13
/*
 * Function release table lock and eventually wakeup all waiters.
 */
static void
dm_table_unbusy(dm_table_head_t * head)
{
	KKASSERT(head->io_cnt != 0);

	atomic_subtract_int(&head->io_cnt, 1);

	lockmgr(&head->table_mtx, LK_RELEASE);
}
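The presumable acquire side (a sketch following the naming above, not quoted from the source): take the table lock shared and account one more I/O user, which is the count dm_table_unbusy() decrements.

static void
dm_table_busy_sketch(dm_table_head_t *head)
{
	lockmgr(&head->table_mtx, LK_SHARED);
	atomic_add_int(&head->io_cnt, 1);
}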
Example #14
/*
 * The function is called after data encryption.
 *
 * g_eli_start -> g_eli_crypto_run -> G_ELI_CRYPTO_WRITE_DONE -> g_io_request -> g_eli_write_done -> g_io_deliver
 */
static int
g_eli_crypto_write_done(struct cryptop *crp)
{
	struct g_eli_softc *sc;
	struct g_geom *gp;
	struct g_consumer *cp;
	struct bio *bp, *cbp;

	if (crp->crp_etype == EAGAIN) {
		if (g_eli_crypto_rerun(crp) == 0)
			return (0);
	}
	bp = (struct bio *)crp->crp_opaque;
	bp->bio_inbed++;
	if (crp->crp_etype == 0) {
		G_ELI_DEBUG(3, "Crypto WRITE request done (%d/%d).",
		    bp->bio_inbed, bp->bio_children);
	} else {
		G_ELI_DEBUG(1, "Crypto WRITE request failed (%d/%d) error=%d.",
		    bp->bio_inbed, bp->bio_children, crp->crp_etype);
		if (bp->bio_error == 0)
			bp->bio_error = crp->crp_etype;
	}
	gp = bp->bio_to->geom;
	sc = gp->softc;
	g_eli_key_drop(sc, crp->crp_desc->crd_key);
	/*
	 * All sectors are already encrypted?
	 */
	if (bp->bio_inbed < bp->bio_children)
		return (0);
	bp->bio_inbed = 0;
	bp->bio_children = 1;
	cbp = bp->bio_driver1;
	bp->bio_driver1 = NULL;
	if (bp->bio_error != 0) {
		G_ELI_LOGREQ(0, bp, "Crypto WRITE request failed (error=%d).",
		    bp->bio_error);
		free(bp->bio_driver2, M_ELI);
		bp->bio_driver2 = NULL;
		g_destroy_bio(cbp);
		g_io_deliver(bp, bp->bio_error);
		atomic_subtract_int(&sc->sc_inflight, 1);
		return (0);
	}
	cbp->bio_data = bp->bio_driver2;
	cbp->bio_done = g_eli_write_done;
	cp = LIST_FIRST(&gp->consumer);
	cbp->bio_to = cp->provider;
	G_ELI_LOGREQ(2, cbp, "Sending request.");
	/*
	 * Send encrypted data to the provider.
	 */
	g_io_request(cbp, cp);
	return (0);
}
Example #15
inline void ATOMIC_SUB(ATOMIC_T *v, int i)
{
	#ifdef PLATFORM_LINUX
	atomic_sub(i,v);
	#elif defined(PLATFORM_WINDOWS)
	InterlockedAdd(v,-i);
	#elif defined(PLATFORM_FREEBSD)
	atomic_subtract_int(v,i);
	#endif
}
Example #16
void
uma_small_free(void *mem, int size, u_int8_t flags)
{
	vm_page_t m;

	m = PHYS_TO_VM_PAGE(IA64_RR_MASK((u_int64_t)mem));
	m->wire_count--;
	vm_page_free(m);
	atomic_subtract_int(&cnt.v_wire_count, 1);
}
Example #17
void
uma_small_free(void *mem, int size, u_int8_t flags)
{
	vm_page_t m;

	m = PHYS_TO_VM_PAGE(TLB_DIRECT_TO_PHYS((vm_offset_t)mem));
	m->wire_count--;
	vm_page_free(m);
	atomic_subtract_int(&cnt.v_wire_count, 1);
}
Example #18
/*
 * Free all mbufs in a chain, decrementing the reference count as
 * necessary.
 *
 * Functions in this file should use this instead of m_freem() when
 * they are freeing mbuf chains that may contain clusters that were
 * already included in tcp_pcap_clusters_referenced_cur.
 */
static void
tcp_pcap_m_freem(struct mbuf *mb)
{
	while (mb != NULL) {
		if (mb->m_flags & M_EXT)
			atomic_subtract_int(&tcp_pcap_clusters_referenced_cur,
			    1);
		mb = m_free(mb);
	}
}
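Sketch of the referencing side this helper pairs with (hypothetical function name): when an mbuf carrying external cluster storage is queued for capture, the global counter is bumped once per cluster, and tcp_pcap_m_freem() above undoes it cluster by cluster.

static void
tcp_pcap_cluster_ref_sketch(struct mbuf *mb)
{
	if (mb->m_flags & M_EXT)
		atomic_add_int(&tcp_pcap_clusters_referenced_cur, 1);
}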
Example #19
inline void ATOMIC_DEC(ATOMIC_T *v)
{
	#ifdef PLATFORM_LINUX
	atomic_dec(v);
	#elif defined(PLATFORM_WINDOWS)
	InterlockedDecrement(v);
	#elif defined(PLATFORM_FREEBSD)
	atomic_subtract_int(v,1);
	#endif
}
Example #20
inline int ATOMIC_DEC_RETURN(ATOMIC_T *v)
{
	#ifdef PLATFORM_LINUX
	return atomic_dec_return(v);
	#elif defined(PLATFORM_WINDOWS)
	return InterlockedDecrement(v);
	#elif defined(PLATFORM_FREEBSD)
	atomic_subtract_int(v,1);
	return atomic_load_acq_32(v);
	#endif
}
Example #21
inline int ATOMIC_SUB_RETURN(ATOMIC_T *v, int i)
{
	#ifdef PLATFORM_LINUX
	return atomic_sub_return(i,v);
	#elif defined(PLATFORM_WINDOWS)
	return InterlockedAdd(v,-i);
	#elif defined(PLATFORM_FREEBSD)
	atomic_subtract_int(v,i);
	return atomic_load_acq_32(v);
	#endif
}
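Note on the FreeBSD branches of ATOMIC_DEC_RETURN and ATOMIC_SUB_RETURN above: a subtract followed by a separate acquire load is not one atomic step, so two concurrent callers can read the same post-decrement value. A race-free sketch using atomic_fetchadd_int(9), which returns the previous value (assuming ATOMIC_T is compatible with volatile u_int on FreeBSD):

inline int ATOMIC_SUB_RETURN_sketch(ATOMIC_T *v, int i)
{
	/* The old value minus i is the value this caller produced. */
	return atomic_fetchadd_int(v, -i) - i;
}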
Example #22
void
uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
{
	vm_page_t m;
	vm_paddr_t pa;

	pa = DMAP_TO_PHYS((vm_offset_t)mem);
	m = PHYS_TO_VM_PAGE(pa);
	m->wire_count--;
	vm_page_free(m);
	atomic_subtract_int(&vm_cnt.v_wire_count, 1);
}
Example #23
/*
 * Free struct sackhole.
 */
static void
tcp_sackhole_free(struct tcpcb *tp, struct sackhole *hole)
{

    uma_zfree(V_sack_hole_zone, hole);

    tp->snd_numholes--;
    atomic_subtract_int(&V_tcp_sack_globalholes, 1);

//ScenSim-Port//    KASSERT(tp->snd_numholes >= 0, ("tp->snd_numholes >= 0"));
//ScenSim-Port//    KASSERT(V_tcp_sack_globalholes >= 0, ("tcp_sack_globalholes >= 0"));
}
Example #24
static void
msgdma_intr(void *arg)
{
	xdma_transfer_status_t status;
	struct xdma_transfer_status st;
	struct msgdma_desc *desc;
	struct msgdma_channel *chan;
	struct xdma_channel *xchan;
	struct msgdma_softc *sc;
	uint32_t tot_copied;

	sc = arg;
	chan = &sc->channels[0];
	xchan = chan->xchan;

	dprintf("%s(%d): status 0x%08x next_descr 0x%08x, control 0x%08x\n",
	    __func__, device_get_unit(sc->dev),
		READ4_DESC(sc, PF_STATUS),
		READ4_DESC(sc, PF_NEXT_LO),
		READ4_DESC(sc, PF_CONTROL));

	tot_copied = 0;

	while (chan->idx_tail != chan->idx_head) {
		dprintf("%s: idx_tail %d idx_head %d\n", __func__,
		    chan->idx_tail, chan->idx_head);
		bus_dmamap_sync(chan->dma_tag, chan->dma_map[chan->idx_tail],
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		desc = chan->descs[chan->idx_tail];
		if ((le32toh(desc->control) & CONTROL_OWN) != 0) {
			break;
		}

		tot_copied += le32toh(desc->transferred);
		st.error = 0;
		st.transferred = le32toh(desc->transferred);
		xchan_seg_done(xchan, &st);

		chan->idx_tail = msgdma_next_desc(chan, chan->idx_tail);
		atomic_subtract_int(&chan->descs_used_count, 1);
	}

	WRITE4_DESC(sc, PF_STATUS, PF_STATUS_IRQ);

	/* Finish operation */
	status.error = 0;
	status.transferred = tot_copied;
	xdma_callback(chan->xchan, &status);
}
Example #25
static void
test_callout(void *arg)
{
	struct callout_run *rn;
	int cpu;
	
	critical_enter();
	cpu = curcpu;
	critical_exit();
	rn = (struct callout_run *)arg;
	atomic_add_int(&rn->callout_waiting, 1);
	mtx_lock(&rn->lock);
	if (callout_pending(&rn->co_array[cpu]) ||
	    !callout_active(&rn->co_array[cpu])) {
		rn->co_return_npa++;
		atomic_subtract_int(&rn->callout_waiting, 1);
		mtx_unlock(&rn->lock);
		return;
	}
	callout_deactivate(&rn->co_array[cpu]);
	rn->co_completed++;
	mtx_unlock(&rn->lock);	
	atomic_subtract_int(&rn->callout_waiting, 1);
}
Example #26
void
udev_device_unref(struct udev_device *udev_device)
{
	int refcount;

	refcount = atomic_fetchadd_int(&udev_device->refs, -1);

	if (refcount == 1) {
		atomic_subtract_int(&udev_device->refs, 0x400); /* in destruction */
		if (udev_device->dict != NULL)
			prop_object_release(udev_device->dict);

		udev_unref(udev_device->udev_ctx);
		free(udev_device);
	}
}
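atomic_fetchadd_int() returns the value before the add, so refcount == 1 means this caller dropped the last reference. The matching acquire side is presumably a plain atomic increment (a sketch, not quoted from the library):

struct udev_device *
udev_device_ref_sketch(struct udev_device *udev_device)
{
	atomic_add_int(&udev_device->refs, 1);
	return (udev_device);
}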
Example #27
static void mlx4_en_destroy_rl_res(struct mlx4_en_priv *priv,
                                    int ring_id)
{
	struct mlx4_en_tx_ring *ring;
	struct mlx4_en_dev *mdev = priv->mdev;

	ring = priv->tx_ring[ring_id];

	mutex_lock(&mdev->state_lock);

	/* Index was validated, thus ring is not NULL */
	spin_lock(&ring->tx_lock);
	if (ring->rl_data.user_valid == false) {
		en_err(priv, "ring %d doesn't exist\n", ring_id);
		spin_unlock(&ring->tx_lock);
		return;
	} else {
		ring->rl_data.user_valid = false;
	}
	if (!drbr_empty(priv->dev, ring->br)) {
		struct mbuf *m;
		while ((m = buf_ring_dequeue_sc(ring->br)) != NULL) {
			m_freem(m);
		}
	}
	spin_unlock(&ring->tx_lock);
	atomic_subtract_int(&priv->rate_limits[ring->rl_data.rate_index].ref, 1);

	/* Deactivate resources */
	if (priv->port_up) {
		mlx4_en_deactivate_tx_ring(priv, ring);
		mlx4_en_deactivate_cq(priv, priv->tx_cq[ring_id]);
		msleep(10);
		mlx4_en_free_tx_buf(priv->dev, ring);
	}
	mutex_unlock(&mdev->state_lock);

	/* clear statistics */
	ring->bytes = 0;
	ring->packets = 0;

	sysctl_ctx_free(&ring->rl_data.rl_stats_ctx);

	/* Add index to re-use list */
	priv->rate_limit_tx_ring_num--;
	mlx4_en_rl_reused_index_insert(priv, ring_id);
}
Example #28
/*
 * A very short dispatch, to try and maximise assembler code use
 * between all exception types. Maybe 'true' interrupts should go
 * here, and the trap code can come in separately
 */
void
powerpc_interrupt(struct trapframe *framep)
{
	struct thread *td;
	struct trapframe *oldframe;
	register_t ee;

	td = curthread;

	CTR2(KTR_INTR, "%s: EXC=%x", __func__, framep->exc);

	switch (framep->exc) {
	case EXC_EXI:
		critical_enter();
		PIC_DISPATCH(root_pic, framep);
		critical_exit();
		break;

	case EXC_DECR:
		critical_enter();
		atomic_add_int(&td->td_intr_nesting_level, 1);
		oldframe = td->td_intr_frame;
		td->td_intr_frame = framep;
		decr_intr(framep);
		td->td_intr_frame = oldframe;
		atomic_subtract_int(&td->td_intr_nesting_level, 1);
		critical_exit();
		break;
#ifdef HWPMC_HOOKS
	case EXC_PERF:
		critical_enter();
		KASSERT(pmc_intr != NULL, ("Performance exception, but no handler!"));
		(*pmc_intr)(PCPU_GET(cpuid), framep);
		if (pmc_hook && (PCPU_GET(curthread)->td_pflags & TDP_CALLCHAIN))
			pmc_hook(PCPU_GET(curthread), PMC_FN_USER_CALLCHAIN, framep);
		critical_exit();
		break;
#endif

	default:
		/* Re-enable interrupts if applicable. */
		ee = framep->srr1 & PSL_EE;
		if (ee != 0)
			mtmsr(mfmsr() | ee);
		trap(framep);
	}	        
}
Example #29
/*
 * Decrementer interrupt routine
 */
void
powerpc_decr_interrupt(struct trapframe *framep)
{
	struct thread *td;
	struct trapframe *oldframe;

	td = curthread;
	critical_enter();
	atomic_add_int(&td->td_intr_nesting_level, 1);
	oldframe = td->td_intr_frame;
	td->td_intr_frame = framep;
	decr_intr(framep);
	td->td_intr_frame = oldframe;
	atomic_subtract_int(&td->td_intr_nesting_level, 1);
	critical_exit();
	framep->srr1 &= ~PSL_WE;
}
Example #30
void
efi_destroy_1t1_map(void)
{
	vm_page_t m;

	if (obj_1t1_pt != NULL) {
		VM_OBJECT_RLOCK(obj_1t1_pt);
		TAILQ_FOREACH(m, &obj_1t1_pt->memq, listq)
			m->wire_count = 0;
		atomic_subtract_int(&vm_cnt.v_wire_count,
		    obj_1t1_pt->resident_page_count);
		VM_OBJECT_RUNLOCK(obj_1t1_pt);
		vm_object_deallocate(obj_1t1_pt);
	}

	obj_1t1_pt = NULL;
	efi_l0 = NULL;
	efi_l0_page = NULL;
}