Example #1
int atomicExchangeAdd(volatile atomic_t& val, int add)
{
    volatile ulong_t* uvalue = reinterpret_cast<volatile ulong_t*>(&val.l);
    volatile ulong_t  un     = add;
    // atomic_add_long_nv() returns the value *after* the addition...
    volatile ulong_t  result = atomic_add_long_nv(uvalue, un);
    // ...so subtracting the addend recovers the value before it,
    // giving fetch-and-add (exchange-add) semantics.
    return result - add;
}
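Because atomic_add_long_nv() returns the new value, subtracting the addend yields the value the variable held before the update, which is exactly what an exchange-add must return. Here is a minimal sketch of the same equivalence in portable C11, where atomic_fetch_add() instead returns the old value; the stdatomic-based rewrite is an assumption for illustration, not part of the original example:

#include <stdatomic.h>
#include <stdio.h>

static atomic_long counter = 5;

int main(void)
{
	/* atomic_fetch_add() returns the old value; adding 3 back gives
	 * what atomic_add_long_nv() would have returned (the new value). */
	long nv  = atomic_fetch_add(&counter, 3) + 3;
	long old = nv - 3;	/* the exchange-add result, as in the example above */
	printf("old=%ld new=%ld\n", old, nv);	/* prints: old=5 new=8 */
	return 0;
}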
Example #2
/*
 * Change the count of processes associated with a given
 * user, returning the updated count.
 */
int
chgproccnt(uid_t uid, int diff)
{
	struct uidinfo *uip;
	long proccnt;

	uip = uid_find(uid);
	proccnt = atomic_add_long_nv(&uip->ui_proccnt, diff);
	KASSERT(proccnt >= 0);
	return proccnt;
}
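The point of the _nv ("new value") variant here is that the count is captured from the atomic operation itself: re-reading uip->ui_proccnt afterwards could observe concurrent updates from other threads, so the KASSERT would no longer be checking this call's result. A hedged sketch of the same shape in C11 (the names are hypothetical stand-ins):

#include <stdatomic.h>
#include <assert.h>

static atomic_long ui_proccnt;	/* stand-in for uip->ui_proccnt */

long chgproccnt_sketch(long diff)
{
	/* Capture the post-add value atomically; a separate load of
	 * ui_proccnt here could already include other threads' changes. */
	long proccnt = atomic_fetch_add(&ui_proccnt, diff) + diff;
	assert(proccnt >= 0);
	return proccnt;
}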
Example #3
/*
 * Change a user's socket buffer space accounting to a new
 * high-water mark, failing if an increase would exceed xmax.
 */
int
chgsbsize(struct uidinfo *uip, u_long *hiwat, u_long to, rlim_t xmax)
{
	rlim_t nsb;
	const long diff = to - *hiwat;

	nsb = (rlim_t)atomic_add_long_nv((long *)&uip->ui_sbsize, diff);
	if (diff > 0 && nsb > xmax) {
		atomic_add_long((long *)&uip->ui_sbsize, -diff);
		return 0;
	}
	*hiwat = to;
	KASSERT(nsb >= 0);
	return 1;
}
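This example applies an optimistic pattern that also appears in the two examples that follow: apply the change first, inspect the value atomic_add_long_nv() returns, and undo the change with a compensating atomic add if a limit was crossed. A generic sketch of that pattern in C11 (try_charge and its names are hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_long usage;	/* hypothetical shared usage counter */

/* Optimistically charge 'amount' against 'limit'; roll back on overflow. */
bool try_charge(long amount, long limit)
{
	long nv = atomic_fetch_add(&usage, amount) + amount;
	if (nv > limit) {
		atomic_fetch_sub(&usage, amount);	/* compensating update */
		return false;
	}
	return true;
}

Note that between the add and the rollback, other threads can see the counter transiently over the limit; the kernel examples here accept that brief overshoot in exchange for avoiding a lock.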
Example #4
/*
 * Allocate zeroed memory if tmpfs_maxkmem has not been exceeded
 * or the 'musthave' flag is set.  'musthave' allocations should
 * always be subordinate to normal allocations so that tmpfs_maxkmem
 * can't be exceeded by more than a few KB.  Example: when creating
 * a new directory, the tmpnode is a normal allocation; if that
 * succeeds, the dirents for "." and ".." are 'musthave' allocations.
 */
void *
tmp_memalloc(size_t size, int musthave)
{
	static time_t last_warning;
	time_t now;

	if (atomic_add_long_nv(&tmp_kmemspace, size) < tmpfs_maxkmem ||
	    musthave)
		return (kmem_zalloc(size, KM_SLEEP));

	atomic_add_long(&tmp_kmemspace, -size);
	now = gethrestime_sec();
	if (last_warning != now) {
		last_warning = now;
		cmn_err(CE_WARN, "tmp_memalloc: tmpfs over memory limit");
	}
	return (NULL);
}
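A side detail worth noting in this example: the last_warning/now comparison throttles the log message to at most once per wall-clock second, and tolerates a benign race on last_warning (two CPUs in the same second might both warn, which is harmless). A userland sketch of the same throttle, with warn_throttled as a hypothetical name:

#include <stdio.h>
#include <time.h>

/* Emit 'msg' at most once per second, as tmp_memalloc() does above. */
void warn_throttled(const char *msg)
{
	static time_t last_warning;
	time_t now = time(NULL);

	if (last_warning != now) {
		last_warning = now;	/* benign race: duplicate warnings at worst */
		fprintf(stderr, "%s\n", msg);
	}
}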
Example #5
int
px_fdvma_reserve(dev_info_t *dip, dev_info_t *rdip, px_t *px_p,
	ddi_dma_req_t *dmareq, ddi_dma_handle_t *handlep)
{
	fdvma_t *fdvma_p;
	px_dvma_addr_t dvma_pg;
	px_mmu_t *mmu_p = px_p->px_mmu_p;
	size_t npages;
	ddi_dma_impl_t *mp;
	ddi_dma_lim_t *lim_p = dmareq->dmar_limits;
	ulong_t hi = lim_p->dlim_addr_hi;
	ulong_t lo = lim_p->dlim_addr_lo;
	size_t counter_max = (lim_p->dlim_cntr_max + 1) & MMU_PAGE_MASK;

	if (px_disable_fdvma)
		return (DDI_FAILURE);

	DBG(DBG_DMA_CTL, dip, "DDI_DMA_RESERVE: rdip=%s%d\n",
	    ddi_driver_name(rdip), ddi_get_instance(rdip));

	/*
	 * Check the limit structure.
	 */
	if ((lo >= hi) || (hi < mmu_p->mmu_dvma_base))
		return (DDI_DMA_BADLIMITS);

	/*
	 * Allocate DVMA space from reserve.
	 */
	npages = dmareq->dmar_object.dmao_size;
	if ((long)atomic_add_long_nv(&mmu_p->mmu_dvma_reserve, -npages) < 0) {
		atomic_add_long(&mmu_p->mmu_dvma_reserve, npages);
		return (DDI_DMA_NORESOURCES);
	}

	/*
	 * Allocate the dma handle.
	 */
	mp = kmem_zalloc(sizeof (px_dma_hdl_t), KM_SLEEP);

	/*
	 * Get entries from dvma space map.
	 * (vmem_t *vmp,
	 *	size_t size, size_t align, size_t phase,
	 *	size_t nocross, void *minaddr, void *maxaddr, int vmflag)
	 */
	dvma_pg = MMU_BTOP((ulong_t)vmem_xalloc(mmu_p->mmu_dvma_map,
	    MMU_PTOB(npages), MMU_PAGE_SIZE, 0,
	    counter_max, (void *)lo, (void *)(hi + 1),
	    dmareq->dmar_fp == DDI_DMA_SLEEP ? VM_SLEEP : VM_NOSLEEP));
	if (dvma_pg == 0) {
		atomic_add_long(&mmu_p->mmu_dvma_reserve, npages);
		kmem_free(mp, sizeof (px_dma_hdl_t));
		return (DDI_DMA_NOMAPPING);
	}

	/*
	 * Create the fast dvma request structure.
	 */
	fdvma_p = kmem_alloc(sizeof (fdvma_t), KM_SLEEP);
	fdvma_p->pagecnt = kmem_alloc(npages * sizeof (uint_t), KM_SLEEP);
	fdvma_p->ops = &fdvma_ops;
	fdvma_p->softsp = (caddr_t)px_p;
	fdvma_p->sync_flag = NULL;

	/*
	 * Initialize the handle.
	 */
	mp->dmai_rdip = rdip;
	mp->dmai_rflags = DMP_BYPASSNEXUS | DDI_DMA_READ | DMP_NOSYNC;
	mp->dmai_burstsizes = dmareq->dmar_limits->dlim_burstsizes;
	mp->dmai_mapping = MMU_PTOB(dvma_pg);
	mp->dmai_ndvmapages = npages;
	mp->dmai_size = npages * MMU_PAGE_SIZE;
	mp->dmai_nwin = 0;
	mp->dmai_fdvma = (caddr_t)fdvma_p;

	/*
	 * The bdf protection value is set for the immediate child
	 * at first. It gets modified by switch/bridge drivers
	 * as the code traverses down the fabric topology.
	 *
	 * XXX No IOMMU protection for broken devices.
	 */
	ASSERT((intptr_t)ddi_get_parent_data(rdip) >> 1 == 0);
	mp->dmai_bdf = ((intptr_t)ddi_get_parent_data(rdip) == 1) ?
	    PCIE_INVALID_BDF : pcie_get_bdf_for_dma_xfer(dip, rdip);

	DBG(DBG_DMA_CTL, dip,
	    "DDI_DMA_RESERVE: mp=%p dvma=%x npages=%x private=%p\n",
	    mp, mp->dmai_mapping, npages, fdvma_p);
	*handlep = (ddi_dma_handle_t)mp;
	return (DDI_SUCCESS);
}
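The reservation step above subtracts npages from an unsigned counter and casts the result to long: on two's-complement machines, underflow shows up as a negative signed value, which triggers the compensating add. A self-contained sketch of that take-from-reserve idiom in C11 (reserve_take and dvma_reserve are hypothetical stand-ins):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_ulong dvma_reserve = 64;	/* hypothetical page reserve */

bool reserve_take(unsigned long npages)
{
	/* Subtract first; if the new value is negative when viewed as
	 * signed, the reserve was too small, so put the pages back. */
	unsigned long nv = atomic_fetch_sub(&dvma_reserve, npages) - npages;
	if ((long)nv < 0) {
		atomic_fetch_add(&dvma_reserve, npages);
		return false;
	}
	return true;
}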