Example #1
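/*
 * Copy one page worth of data between the uio and physical page "pfn".
 * Real memory is mapped (via kpm when possible, otherwise a locked
 * hat_devload() of mm_map) and copied with uiomove(); IO-space pfns
 * are transferred with ddi_peekpokeio() when "allowio" is set, and
 * rejected with EIO otherwise.
 */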
static int
mmio(struct uio *uio, enum uio_rw rw, pfn_t pfn, off_t pageoff, int allowio,
    page_t *pp)
{
	int error = 0;
	int devload = 0;
	int is_memory = pf_is_memory(pfn);
	size_t nbytes = MIN((size_t)(PAGESIZE - pageoff),
	    (size_t)uio->uio_iov->iov_len);
	caddr_t va = NULL;

	mutex_enter(&mm_lock);

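	/* Prefer a kpm mapping for real memory when kpm is enabled. */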
	if (is_memory && kpm_enable) {
		if (pp)
			va = hat_kpm_mapin(pp, NULL);
		else
			va = hat_kpm_mapin_pfn(pfn);
	}

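	/* No kpm mapping; fall back to a locked hat_devload() of mm_map. */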
	if (va == NULL) {
		hat_devload(kas.a_hat, mm_map, PAGESIZE, pfn,
		    (uint_t)(rw == UIO_READ ? PROT_READ : PROT_READ|PROT_WRITE),
		    HAT_LOAD_NOCONSIST|HAT_LOAD_LOCK);
		va = mm_map;
		devload = 1;
	}

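	/*
	 * IO space is accessed with ddi_peekpokeio() (when allowed);
	 * real memory is copied directly with uiomove().
	 */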
	if (!is_memory) {
		if (allowio) {
			size_t c = uio->uio_iov->iov_len;

			if (ddi_peekpokeio(NULL, uio, rw,
			    (caddr_t)(uintptr_t)uio->uio_loffset, c,
			    sizeof (int32_t)) != DDI_SUCCESS)
				error = EFAULT;
		} else
			error = EIO;
	} else
		error = uiomove(va + pageoff, nbytes, rw, uio);

	if (devload)
		hat_unload(kas.a_hat, mm_map, PAGESIZE, HAT_UNLOAD_UNLOCK);
	else if (pp)
		hat_kpm_mapout(pp, NULL, va);
	else
		hat_kpm_mapout_pfn(pfn);

	mutex_exit(&mm_lock);
	return (error);
}
Example #2
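/*
 * Add every page in the range [start, start + size) whose pfn is
 * valid, no higher than physmax, and backed by real memory
 * (pf_is_memory()) to the crash dump for the address space in "arg".
 */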
static void
segkmem_xdump_range(void *arg, void *start, size_t size)
{
	struct as *as = arg;
	caddr_t addr = start;
	caddr_t addr_end = addr + size;

	while (addr < addr_end) {
		pfn_t pfn = hat_getpfnum(kas.a_hat, addr);
		if (pfn != PFN_INVALID && pfn <= physmax && pf_is_memory(pfn))
			dump_addpage(as, addr, pfn);
		addr += PAGESIZE;
		dump_timeleft = dump_timeout;
	}
}
Example #3
/*
 * This routine should only be called using a pfn that is known to reside
 * in IO space.  The function pf_is_memory() can be used to determine this.
 */
int
pf_is_dmacapable(pfn_t pfn)
{
	int i, j;

	/* If the caller passed in a memory pfn, return true. */
	if (pf_is_memory(pfn))
		return (1);

	for (i = upa_dma_pfn_ndx, j = 0; j < i; j++)
		if (pfn <= upa_dma_pfn_array[j].hipfn &&
		    pfn >= upa_dma_pfn_array[j].lopfn)
			return (1);

	return (0);
}
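The block comment above asks callers to screen pfns with pf_is_memory() before consulting pf_is_dmacapable() (which also guards against memory pfns itself). A minimal caller sketch of that pattern follows; the helper name check_pfn_dma_ok() is hypothetical and not part of the source tree.

/*
 * Hypothetical helper (assumed, not from the source tree): treat
 * ordinary memory pfns as DMA-capable and only ask pf_is_dmacapable()
 * about pfns that actually reside in IO space.
 */
static int
check_pfn_dma_ok(pfn_t pfn)
{
	if (pf_is_memory(pfn))
		return (1);

	return (pf_is_dmacapable(pfn));
}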
Example #4
/*
 * Map address "addr" in address space "as" into a kernel virtual address.
 * The memory is guaranteed to be resident and locked down.
 */
static caddr_t
mapin(struct as *as, caddr_t addr, int writing)
{
	page_t *pp;
	caddr_t kaddr;
	pfn_t pfnum;

	/*
	 * NB: Because of past mistakes, we have bits being returned
	 * by getpfnum that are actually the page type bits of the pte.
	 * When the object we are trying to map is a memory page with
	 * a page structure everything is ok and we can use the optimal
	 * method, ppmapin.  Otherwise, we have to do something special.
	 */
	pfnum = hat_getpfnum(as->a_hat, addr);
	if (pf_is_memory(pfnum)) {
		pp = page_numtopp_nolock(pfnum);
		if (pp != NULL) {
			ASSERT(PAGE_LOCKED(pp));
			kaddr = ppmapin(pp, writing ?
				(PROT_READ | PROT_WRITE) : PROT_READ,
				(caddr_t)-1);
			return (kaddr + ((uintptr_t)addr & PAGEOFFSET));
		}
	}

	/*
	 * Oh well, we didn't have a page struct for the object we were
	 * trying to map in; ppmapin doesn't handle devices, but allocating a
 * heap address allows ppmapout to free virtual space when done.
	 */
	kaddr = vmem_alloc(heap_arena, PAGESIZE, VM_SLEEP);

	hat_devload(kas.a_hat, kaddr, PAGESIZE, pfnum,
		writing ? (PROT_READ | PROT_WRITE) : PROT_READ, HAT_LOAD_LOCK);

	return (kaddr + ((uintptr_t)addr & PAGEOFFSET));
}
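mapin() needs a matching unmap step that takes the same pf_is_memory() branch: ppmapout() for memory backed by a page_t, and hat_unload() plus vmem_free() for the device case mapped through heap_arena. The sketch below is an assumption about what that counterpart could look like, not the actual routine from the source tree.

/*
 * Hedged sketch (assumed, not the actual source): undo whichever
 * mapping path mapin() took for "addr".
 */
static void
mapout_sketch(struct as *as, caddr_t addr, caddr_t vaddr)
{
	pfn_t pfnum = hat_getpfnum(as->a_hat, addr);

	/* Strip the page offset that mapin() folded into its return value. */
	vaddr = (caddr_t)((uintptr_t)vaddr & PAGEMASK);

	if (pf_is_memory(pfnum) && page_numtopp_nolock(pfnum) != NULL) {
		/* Memory with a page_t was mapped with ppmapin(). */
		ppmapout(vaddr);
	} else {
		/* The device case was mapped with hat_devload(HAT_LOAD_LOCK). */
		hat_unload(kas.a_hat, vaddr, PAGESIZE, HAT_UNLOAD_UNLOCK);
		vmem_free(heap_arena, vaddr, PAGESIZE);
	}
}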
Example #5
/*
 * This function takes care of pages which are not in kas or need to be
 * taken care of in a special way.  For example, panicbuf pages are not
 * in kas and their pages are allocated via prom_retain().
 */
pgcnt_t
i_cpr_count_special_kpages(int mapflag, bitfunc_t bitfunc)
{
	struct cpr_map_info *pri, *tail;
	pgcnt_t pages, total = 0;
	pfn_t pfn;

	/*
	 * Save information about prom retained panicbuf pages
	 */
	if (bitfunc == cpr_setbit) {
		pri = &cpr_prom_retain[CPR_PANICBUF];
		pri->virt = (cpr_ptr)panicbuf;
		pri->phys = va_to_pa(panicbuf);
		pri->size = sizeof (panicbuf);
	}

	/*
	 * Go through the prom_retain array to tag those pages.
	 */
	tail = &cpr_prom_retain[CPR_PROM_RETAIN_CNT];
	for (pri = cpr_prom_retain; pri < tail; pri++) {
		pages = mmu_btopr(pri->size);
		for (pfn = ADDR_TO_PN(pri->phys); pages--; pfn++) {
			if (pf_is_memory(pfn)) {
				if (bitfunc == cpr_setbit) {
					if ((*bitfunc)(pfn, mapflag) == 0)
						total++;
				} else
					total++;
			}
		}
	}

	return (total);
}