Example #1
void
vmxnet3s_txcache_release(vmxnet3s_softc_t *dp)
{
	int		i;
	int		rc;
	vmxnet3s_txcache_t *cache = &dp->txcache;

	/* Unmap pages */
	hat_unload(kas.a_hat, cache->window, ptob(cache->num_pages),
	    HAT_UNLOAD_UNLOCK);
	vmem_free(heap_arena, cache->window, ptob(cache->num_pages));

	/* Free pages */
	for (i = 0; i < cache->num_pages; i++) {
		rc = page_tryupgrade(cache->pages[i]);
		if (!rc) {
			/*
			 * Could not upgrade our shared lock (another thread
			 * also holds one); drop it and retry until we get
			 * the page exclusively, then free it.
			 */
			page_unlock(cache->pages[i]);
			while (!page_lock(cache->pages[i], SE_EXCL, NULL,
			    P_RECLAIM))
				;
		}
		page_free(cache->pages[i], 0);
	}
	page_unresv(cache->num_pages);

	kmem_free(cache->pages, cache->num_pages * sizeof (page_t *));
	kmem_free(cache->page_maps, cache->num_pages * sizeof (page_t *));
	kmem_free(cache->nodes,
	    cache->num_nodes * sizeof (vmxnet3s_txcache_node_t));
}
Example #2
/*
 * We only support unmapping the whole segment, and we automatically unlock
 * what we previously soft-locked.
 */
static int
segmf_unmap(struct seg *seg, caddr_t addr, size_t len)
{
	struct segmf_data *data = seg->s_data;
	offset_t off;

	if (addr < seg->s_base || addr + len > seg->s_base + seg->s_size ||
	    (len & PAGEOFFSET) || ((uintptr_t)addr & PAGEOFFSET))
		panic("segmf_unmap");

	if (addr != seg->s_base || len != seg->s_size)
		return (ENOTSUP);

	hat_unload(seg->s_as->a_hat, addr, len,
	    HAT_UNLOAD_UNMAP | HAT_UNLOAD_UNLOCK);

	off = (offset_t)seg_page(seg, addr);

	ASSERT(data->vp != NULL);

	(void) VOP_DELMAP(VTOCVP(data->vp), off, seg->s_as, addr, len,
	    data->prot, data->maxprot, MAP_SHARED, CRED(), NULL);

	seg_free(seg);
	return (0);
}
Example #3
static void
pci_cfgacc_unmap(void)
{
	if (khat_running)
		hat_unload(kas.a_hat, pci_cfgacc_virt_base, MMU_PAGESIZE,
		    HAT_UNLOAD_UNLOCK);
}
Example #4
int
segmf_create(struct seg *seg, void *args)
{
	struct segmf_crargs *a = args;
	struct segmf_data *data;
	struct as *as = seg->s_as;
	pgcnt_t i, npages = seg_pages(seg);
	int error;

	hat_map(as->a_hat, seg->s_base, seg->s_size, HAT_MAP);

	data = segmf_data_zalloc(seg);
	data->vp = specfind(a->dev, VCHR);
	data->prot = a->prot;
	data->maxprot = a->maxprot;

	data->map = kmem_alloc(npages * sizeof (segmf_map_t), KM_SLEEP);
	for (i = 0; i < npages; i++) {
		data->map[i].t_type = SEGMF_MAP_EMPTY;
	}

	error = VOP_ADDMAP(VTOCVP(data->vp), 0, as, seg->s_base, seg->s_size,
	    data->prot, data->maxprot, MAP_SHARED, CRED(), NULL);

	if (error != 0)
		hat_unload(as->a_hat,
		    seg->s_base, seg->s_size, HAT_UNLOAD_UNMAP);
	return (error);
}
Example #5
/*
 * Legacy entry points from here to end of file.
 */
void
segkmem_mapin(struct seg *seg, void *addr, size_t size, uint_t vprot,
    pfn_t pfn, uint_t flags)
{
	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
	hat_devload(seg->s_as->a_hat, addr, size, pfn, vprot,
	    flags | HAT_LOAD_LOCK);
}
Example #6
void
mach_cpucontext_fini(void)
{
	if (warm_reset_vector)
		psm_unmap_phys((caddr_t)warm_reset_vector,
		    sizeof (warm_reset_vector));
	hat_unload(kas.a_hat, (caddr_t)(uintptr_t)rm_platter_pa, MMU_PAGESIZE,
	    HAT_UNLOAD);
}
Example #7
/*
 * Any changes to this routine must also be carried over to
 * devmap_free_pages() in the seg_dev driver. This is because
 * we currently don't have a special kernel segment for non-paged
 * kernel memory that is exported by drivers to user space.
 */
static void
segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
    void (*func)(page_t *))
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
	ASSERT(vp != NULL);

	if (kvseg.s_base == NULL) {
		segkmem_gc_list_t *gc = inaddr;
		gc->gc_arena = vmp;
		gc->gc_size = size;
		gc->gc_next = segkmem_gc_list;
		segkmem_gc_list = gc;
		return;
	}

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
#if defined(__x86)
		pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
		if (pp == NULL)
			panic("segkmem_free: page not found");
		if (!page_tryupgrade(pp)) {
			/*
			 * Some other thread has a sharelock. Wait for
			 * it to drop the lock so we can free this page.
			 */
			page_unlock(pp);
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_EXCL);
		}
#else
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
#endif
		if (pp == NULL)
			panic("segkmem_free: page not found");
		/* Clear p_lckcnt so page_destroy() doesn't update availrmem */
		pp->p_lckcnt = 0;
		if (func)
			func(pp);
		else
			page_destroy(pp, 0);
	}
	if (func == NULL)
		page_unresv(npages);

	if (vmp != NULL)
		vmem_free(vmp, inaddr, size);
}
Example #8
static int
mmio(struct uio *uio, enum uio_rw rw, pfn_t pfn, off_t pageoff, int allowio,
    page_t *pp)
{
	int error = 0;
	int devload = 0;
	int is_memory = pf_is_memory(pfn);
	size_t nbytes = MIN((size_t)(PAGESIZE - pageoff),
	    (size_t)uio->uio_iov->iov_len);
	caddr_t va = NULL;

	mutex_enter(&mm_lock);

	if (is_memory && kpm_enable) {
		if (pp)
			va = hat_kpm_mapin(pp, NULL);
		else
			va = hat_kpm_mapin_pfn(pfn);
	}

	if (va == NULL) {
		hat_devload(kas.a_hat, mm_map, PAGESIZE, pfn,
		    (uint_t)(rw == UIO_READ ? PROT_READ : PROT_READ|PROT_WRITE),
		    HAT_LOAD_NOCONSIST|HAT_LOAD_LOCK);
		va = mm_map;
		devload = 1;
	}

	if (!is_memory) {
		if (allowio) {
			size_t c = uio->uio_iov->iov_len;

			if (ddi_peekpokeio(NULL, uio, rw,
			    (caddr_t)(uintptr_t)uio->uio_loffset, c,
			    sizeof (int32_t)) != DDI_SUCCESS)
				error = EFAULT;
		} else
			error = EIO;
	} else
		error = uiomove(va + pageoff, nbytes, rw, uio);

	if (devload)
		hat_unload(kas.a_hat, mm_map, PAGESIZE, HAT_UNLOAD_UNLOCK);
	else if (pp)
		hat_kpm_mapout(pp, NULL, va);
	else
		hat_kpm_mapout_pfn(pfn);

	mutex_exit(&mm_lock);
	return (error);
}
Example #9
void
psm_unmap(caddr_t addr, size_t len)
{
	uint_t pgoffset;
	caddr_t base;
	pgcnt_t npages;

	if (len == 0)
		return;

	pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET;
	base = addr - pgoffset;
	npages = mmu_btopr(len + pgoffset);
	hat_unload(kas.a_hat, base, ptob(npages), HAT_UNLOAD_UNLOCK);
	device_arena_free(base, ptob(npages));
}
Example #10
/*ARGSUSED*/
void
hat_kpm_delmem_mseg_update(struct memseg *msp, struct memseg **mspp)
{
	pfn_t base, end;

	/*
	 * The meta (page_t) pages for dynamically added memory are allocated
	 * either from the incoming memory itself or from existing memory.
	 * In the former case the base of the incoming pages will be different
	 * than the base of the dynamic segment so call memseg_get_start() to
	 * get the actual base of the incoming memory for each case.
	 */

	base = memseg_get_start(msp);
	end = msp->pages_end;

	hat_unload(kas.a_hat, kpm_vbase + mmu_ptob(base), mmu_ptob(end - base),
	    HAT_UNLOAD | HAT_UNLOAD_UNLOCK | HAT_UNLOAD_UNMAP);
}
Example #11
static int
segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_setprot: bad args");

	/*
	 * If it is one of segkp pages, call segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_SETPROT(segkp, addr, size, prot));

	if (prot == 0)
		hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
	else
		hat_chgprot(kas.a_hat, addr, size, prot);
	return (0);
}
Example #12
/*ARGSUSED*/
static void
xpvtap_segmf_unregister(struct as *as, void *arg, uint_t event)
{
	xpvtap_state_t *state;
	caddr_t uaddr;
	uint_t pgcnt;
	int i;

	state = (xpvtap_state_t *)arg;
	if (!state->bt_map.um_registered) {
		/* remove the callback (which is this routine) */
		(void) as_delete_callback(as, arg);
		return;
	}

	pgcnt = btopr(state->bt_map.um_guest_size);
	uaddr = state->bt_map.um_guest_pages;

	/* unmap any outstanding req's grefs */
	xpvtap_rs_flush(state->bt_map.um_rs, xpvtap_user_request_unmap, state);

	/* Unlock the gref pages */
	for (i = 0; i < pgcnt; i++) {
		AS_LOCK_ENTER(as, RW_WRITER);
		hat_prepare_mapping(as->a_hat, uaddr, NULL);
		hat_unload(as->a_hat, uaddr, PAGESIZE, HAT_UNLOAD_UNLOCK);
		hat_release_mapping(as->a_hat, uaddr);
		AS_LOCK_EXIT(as);
		uaddr += PAGESIZE;
	}

	/* remove the callback (which is this routine) */
	(void) as_delete_callback(as, arg);

	state->bt_map.um_registered = B_FALSE;
}
Example #13
static void
segkmem_free_one_lp(caddr_t addr, size_t size)
{
	page_t		*pp, *rootpp = NULL;
	pgcnt_t 	pgs_left = btopr(size);

	ASSERT(size == segkmem_lpsize);

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (; pgs_left > 0; addr += PAGESIZE, pgs_left--) {
		pp = page_lookup(&kvp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
		if (pp == NULL)
			panic("segkmem_free_one_lp: page not found");
		ASSERT(PAGE_EXCL(pp));
		pp->p_lckcnt = 0;
		if (rootpp == NULL)
			rootpp = pp;
	}
	ASSERT(rootpp != NULL);
	page_destroy_pages(rootpp);

	/* page_unresv() is done by the caller */
}
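The comment at the end of segkmem_free_one_lp() defers page_unresv() to the caller. A minimal caller sketch under that contract follows; it is an illustration only, example_free_lp_range() is a hypothetical name and not a routine from the source.

/*
 * Hypothetical caller of segkmem_free_one_lp() (illustration only):
 * tear down each large-page chunk, then do the deferred page_unresv()
 * once for the entire range.
 */
static void
example_free_lp_range(caddr_t addr, size_t size)
{
	caddr_t a;

	ASSERT(IS_P2ALIGNED(size, segkmem_lpsize));

	for (a = addr; a < addr + size; a += segkmem_lpsize)
		segkmem_free_one_lp(a, segkmem_lpsize);

	page_unresv(btopr(size));
}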
Example #14
/*
 * balloon_free_pages()
 *    free page_cnt pages, using any combination of mfns, pfns, and kva as long
 *    as they refer to the same mapping.  If an array of mfns is passed in, we
 *    assume they were already cleared.  Otherwise, we need to zero the pages
 *    before giving them back to the hypervisor. kva space is not free'd up in
 *    case the caller wants to re-use it.
 */
long
balloon_free_pages(uint_t page_cnt, mfn_t *mfns, caddr_t kva, pfn_t *pfns)
{
	xen_memory_reservation_t memdec;
	mfn_t mfn;
	pfn_t pfn;
	uint_t i;
	long e;

#if DEBUG
	/* make sure kva is page aligned and maps to first pfn */
	if (kva != NULL) {
		ASSERT(((uintptr_t)kva & PAGEOFFSET) == 0);
		if (pfns != NULL) {
			ASSERT(hat_getpfnum(kas.a_hat, kva) == pfns[0]);
		}
	}
#endif

	/* if we have a kva, we can clean all pages with just one bzero */
	if ((kva != NULL) && balloon_zero_memory) {
		bzero(kva, (page_cnt * PAGESIZE));
	}

	/* if we were given a kva and/or a pfn */
	if ((kva != NULL) || (pfns != NULL)) {

		/*
		 * All the current callers only pass 1 page when using kva or
		 * pfns, and use mfns when passing multiple pages.  If that
		 * assumption is changed, the following code will need some
		 * work.  The following ASSERT() guarantees we're respecting
		 * the io locking quota.
		 */
		ASSERT(page_cnt < bln_contig_list_quota);

		/* go through all the pages */
		for (i = 0; i < page_cnt; i++) {

			/* get the next pfn */
			if (pfns == NULL) {
				pfn = hat_getpfnum(kas.a_hat,
				    (kva + (PAGESIZE * i)));
			} else {
				pfn = pfns[i];
			}

			/*
			 * if we didn't already zero this page, do it now. we
			 * need to do this *before* we give back the MFN
			 */
			if ((kva == NULL) && (balloon_zero_memory)) {
				pfnzero(pfn, 0, PAGESIZE);
			}

			/*
			 * unmap the pfn. We don't free up the kva vmem space
			 * so the caller can re-use it. The page must be
			 * unmapped before it is given back to the hypervisor.
			 */
			if (kva != NULL) {
				hat_unload(kas.a_hat, (kva + (PAGESIZE * i)),
				    PAGESIZE, HAT_UNLOAD_UNMAP);
			}

			/* grab the mfn before the pfn is marked as invalid */
			mfn = pfn_to_mfn(pfn);

			/* mark the pfn as invalid */
			reassign_pfn(pfn, MFN_INVALID);

			/*
			 * if we weren't given an array of MFNs, we need to
			 * free them up one at a time. Otherwise, we'll wait
			 * until later and do it in one hypercall
			 */
			if (mfns == NULL) {
				bzero(&memdec, sizeof (memdec));
				/*LINTED: constant in conditional context*/
				set_xen_guest_handle(memdec.extent_start, &mfn);
				memdec.domid = DOMID_SELF;
				memdec.nr_extents = 1;
				e = HYPERVISOR_memory_op(
				    XENMEM_decrease_reservation, &memdec);
				if (e != 1) {
					cmn_err(CE_PANIC, "balloon: unable to "
					    "give a page back to the "
					    "hypervisor.\n");
				}
			}
		}
	}

	/*
	 * if we were passed in MFNs, we haven't free'd them up yet. We can
	 * do it with one call.
	 */
	if (mfns != NULL) {
		bzero(&memdec, sizeof (memdec));
		/*LINTED: constant in conditional context*/
		set_xen_guest_handle(memdec.extent_start, mfns);
		memdec.domid = DOMID_SELF;
		memdec.nr_extents = page_cnt;
		e = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &memdec);
		if (e != page_cnt) {
			cmn_err(CE_PANIC, "balloon: unable to give pages back "
			    "to the hypervisor.\n");
		}
	}

	atomic_add_long((ulong_t *)&bln_stats.bln_hv_pages, page_cnt);
	return (page_cnt);
}
Example #15
/*ARGSUSED*/
static int
segmf_faultpage(struct hat *hat, struct seg *seg, caddr_t addr,
    enum fault_type type, uint_t prot)
{
	struct segmf_data *data = seg->s_data;
	uint_t hat_flags = HAT_LOAD_NOCONSIST;
	mfn_t mfn;
	x86pte_t pte;
	segmf_map_t *map;
	uint_t idx;

	idx = seg_page(seg, addr);
	map = &data->map[idx];
	ASSERT(map->t_type == SEGMF_MAP_MFN);

	mfn = map->u.m.m_mfn;

	if (type == F_SOFTLOCK) {
		mutex_enter(&freemem_lock);
		data->softlockcnt++;
		mutex_exit(&freemem_lock);
		hat_flags |= HAT_LOAD_LOCK;
	} else
		hat_flags |= HAT_LOAD;

	if (segmf_faultpage_debug > 0) {
		uprintf("segmf_faultpage: addr %p domid %x mfn %lx prot %x\n",
		    (void *)addr, data->domid, mfn, prot);
		segmf_faultpage_debug--;
	}

	/*
	 * Ask the HAT to load a throwaway mapping to page zero, then
	 * overwrite it with our foreign domain mapping. It gets removed
	 * later via hat_unload()
	 */
	hat_devload(hat, addr, MMU_PAGESIZE, (pfn_t)0,
	    PROT_READ | HAT_UNORDERED_OK, hat_flags);

	pte = mmu_ptob((x86pte_t)mfn) | PT_VALID | PT_USER | PT_FOREIGN;
	if (prot & PROT_WRITE)
		pte |= PT_WRITABLE;

	if (HYPERVISOR_update_va_mapping_otherdomain((uintptr_t)addr, pte,
	    UVMF_INVLPG | UVMF_ALL, data->domid) != 0) {
		hat_flags = HAT_UNLOAD_UNMAP;

		if (type == F_SOFTLOCK) {
			hat_flags |= HAT_UNLOAD_UNLOCK;
			mutex_enter(&freemem_lock);
			data->softlockcnt--;
			mutex_exit(&freemem_lock);
		}

		hat_unload(hat, addr, MMU_PAGESIZE, hat_flags);
		return (FC_MAKE_ERR(EFAULT));
	}

	return (0);
}
Example #16
void
segkmem_mapout(struct seg *seg, void *addr, size_t size)
{
	hat_unload(seg->s_as->a_hat, addr, size, HAT_UNLOAD_UNLOCK);
}
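Examples #5 and #16 are the two halves of the same legacy segkmem interface. Below is a minimal usage sketch, assuming the caller maps into kvseg and already knows the pfn it wants to touch; example_touch_pfn() is a hypothetical name, and allocating the virtual range from heap_arena is just one possible way to obtain it.

/*
 * Hypothetical usage of segkmem_mapin()/segkmem_mapout() (illustration
 * only): temporarily map a known physical page into the kernel heap,
 * access it, then tear the mapping down and release the virtual range.
 */
static void
example_touch_pfn(pfn_t pfn)
{
	caddr_t va;

	va = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);
	segkmem_mapin(&kvseg, va, PAGESIZE, PROT_READ | PROT_WRITE, pfn, 0);

	/* ... read or write the page through va ... */

	segkmem_mapout(&kvseg, va, PAGESIZE);
	vmem_free(heap_arena, va, PAGESIZE);
}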