/*
 * Default lgroup platform callback: return the number of pages of
 * free, available, or installed physical memory.  The lgroup handle
 * is unused in the default (non-NUMA) configuration.
 */
/* ARGSUSED */
static pgcnt_t
lgrp_plat_mem_size_default(lgrp_handle_t lgrphand, lgrp_mem_query_t query)
{
	extern struct memlist *phys_install;
	extern struct memlist *phys_avail;
	struct memlist *mlist;
	pgcnt_t npgs = 0;

	switch (query) {
	case LGRP_MEM_SIZE_FREE:
		return ((pgcnt_t)freemem);
	case LGRP_MEM_SIZE_AVAIL:
		memlist_read_lock();
		for (mlist = phys_avail; mlist; mlist = mlist->next)
			npgs += btop(mlist->size);
		memlist_read_unlock();
		return (npgs);
	case LGRP_MEM_SIZE_INSTALL:
		memlist_read_lock();
		for (mlist = phys_install; mlist; mlist = mlist->next)
			npgs += btop(mlist->size);
		memlist_read_unlock();
		return (npgs);
	default:
		return ((pgcnt_t)0);
	}
}
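
All three queries follow the same shape: take the memlist reader lock,
walk the list, release the lock.  A minimal sketch of that pattern,
assuming the older next/size memlist field names used above (the helper
name memlist_count_pages is hypothetical):

static pgcnt_t
memlist_count_pages(struct memlist *ml)
{
	pgcnt_t npgs = 0;

	/* the reader lock keeps DR from editing the list mid-walk */
	memlist_read_lock();
	for (; ml != NULL; ml = ml->next)
		npgs += btop(ml->size);		/* bytes to pages */
	memlist_read_unlock();

	return (npgs);
}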
Example #2
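/*
 * Snapshot callback for a raw physical-memory kstat: copy one
 * (address, size) pair per phys_install chunk into the caller's
 * buffer, stopping before the ks_data_size bound is exceeded.
 */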
static int
mm_kstat_snapshot(kstat_t *ksp, void *buf, int rw)
{
	struct memlist *pmem;
	struct memunit {
		uint64_t address;
		uint64_t size;
	} *kspmem;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	ksp->ks_snaptime = gethrtime();

	kspmem = (struct memunit *)buf;
	memlist_read_lock();
	for (pmem = phys_install; pmem != NULL; pmem = pmem->next, kspmem++) {
		if ((caddr_t)kspmem >= (caddr_t)buf + ksp->ks_data_size)
			break;
		kspmem->address = pmem->address;
		kspmem->size = pmem->size;
	}
	memlist_read_unlock();

	return (0);
}
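
A hedged userland sketch of consuming this snapshot through libkstat.
The module/instance/name triple ("mm", 0, "phys_installed") is an
assumption about how the kstat is registered (see the registration
sketch after Example #6), not something the code above confirms:

#include <kstat.h>
#include <stdio.h>

int
main(void)
{
	kstat_ctl_t *kc;
	kstat_t *ksp;
	struct memunit { uint64_t address, size; } *mu;
	uint_t i;

	if ((kc = kstat_open()) == NULL)
		return (1);
	/* the module/name pair below is assumed, not confirmed above */
	if ((ksp = kstat_lookup(kc, "mm", 0, "phys_installed")) == NULL ||
	    kstat_read(kc, ksp, NULL) == -1) {
		(void) kstat_close(kc);
		return (1);
	}
	/* kstat_read() left the snapshot in ks_data, sized by ks_ndata */
	mu = ksp->ks_data;
	for (i = 0; i < ksp->ks_ndata; i++)
		(void) printf("0x%llx 0x%llx\n",
		    (u_longlong_t)mu[i].address, (u_longlong_t)mu[i].size);
	(void) kstat_close(kc);
	return (0);
}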
Example #3
/*
 * Find the intersection between a memnode and a memlist
 * and return the number of pages that overlap.
 *
 * Assumes the list is protected from DR operations by
 * the memlist lock.
 */
pgcnt_t
mem_node_memlist_pages(int mnode, struct memlist *mlist)
{
	pfn_t		base, end;
	pfn_t		cur_base, cur_end;
	pgcnt_t		npgs;
	struct memlist	*pmem;

	base = mem_node_config[mnode].physbase;
	end = mem_node_config[mnode].physmax;
	npgs = 0;

	memlist_read_lock();

	for (pmem = mlist; pmem; pmem = pmem->ml_next) {
		cur_base = btop(pmem->ml_address);
		cur_end = cur_base + btop(pmem->ml_size) - 1;
		if (end < cur_base || base > cur_end)
			continue;
		npgs = npgs + (MIN(cur_end, end) -
		    MAX(cur_base, base)) + 1;
	}

	memlist_read_unlock();

	return (npgs);
}
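
For example, with a memnode spanning pfns 0x100-0x1ff and a memlist
chunk covering pfns 0x180-0x2ff, the loop adds MIN(0x2ff, 0x1ff) -
MAX(0x180, 0x100) + 1 = 0x1ff - 0x180 + 1 = 0x80 pages; the + 1 is
needed because physmax is an inclusive bound.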
Example #4
/*
 * A "regular" and "volatile" bitmap are created for each range of
 * physical memory.  The volatile maps are used to count and track pages
 * susceptible to heap corruption, caused by drivers that allocate memory
 * during VOP_DUMP(); the regular maps are used for all the other non-
 * susceptible pages.  Before writing the bitmaps to the statefile,
 * each bitmap pair gets merged to simplify handling within cprboot.
 */
int
i_cpr_alloc_bitmaps(void)
{
	int err;

	memlist_read_lock();
	err = i_cpr_bitmap_setup();
	memlist_read_unlock();
	if (err)
		i_cpr_bitmap_cleanup();
	return (err);
}
Example #5
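/*
 * mmap entry point for the mm driver: for M_MEM, translate the byte
 * offset into a page frame number, but only when that frame lies in
 * installed physical memory.
 */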
/*ARGSUSED2*/
static int
mmmmap(dev_t dev, off_t off, int prot)
{
	pfn_t pf;
	struct memlist *pmem;
	minor_t minor = getminor(dev);

	switch (minor) {
	case M_MEM:
		pf = btop(off);
		memlist_read_lock();
		for (pmem = phys_install; pmem != NULL; pmem = pmem->next) {
			if (pf >= BTOP(pmem->address) &&
			    pf < BTOP(pmem->address + pmem->size)) {
				memlist_read_unlock();
				return (impl_obmem_pfnum(pf));
			}
		}
		memlist_read_unlock();
		break;

	case M_KMEM:
	case M_ALLKMEM:
		/* no longer supported with KPR */
		return (-1);

	case M_ZERO:
		/*
		 * We shouldn't be mmap'ing to /dev/zero here as
		 * mmsegmap() should have already converted
		 * a mapping request for this device to a mapping
		 * using seg_vn for anonymous memory.
		 */
		break;

	}
	return (-1);
}
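
The range check in the M_MEM case is an open-coded form of the
address_in_memlist() test that Example #7 uses; a sketch of the same
case written with that helper, assuming the (memlist, byte address,
length) signature shown there:

	case M_MEM:
		pf = btop(off);
		memlist_read_lock();
		if (address_in_memlist(phys_install, (uint64_t)off, 1)) {
			memlist_read_unlock();
			return (impl_obmem_pfnum(pf));
		}
		memlist_read_unlock();
		break;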
Example #6
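/*
 * Update callback for the raw physical-memory kstat: count the
 * phys_install chunks and publish the snapshot size (two uint64_t
 * values per chunk) through ks_ndata and ks_data_size.
 */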
static int
mm_kstat_update(kstat_t *ksp, int rw)
{
	struct memlist *pmem;
	uint_t count;

	if (rw == KSTAT_WRITE)
		return (EACCES);

	count = 0;
	memlist_read_lock();
	for (pmem = phys_install; pmem != NULL; pmem = pmem->next) {
		count++;
	}
	memlist_read_unlock();

	ksp->ks_ndata = count;
	ksp->ks_data_size = count * 2 * sizeof (uint64_t);

	return (0);
}
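
Examples #2 and #6 are the two halves of a variable-size raw kstat:
mm_kstat_update() publishes the buffer size, mm_kstat_snapshot() fills
the buffer.  A minimal registration sketch wiring them together; the
module/name strings and the flag choice are assumptions, not taken
from the code above:

	kstat_t *ksp;

	ksp = kstat_create("mm", 0, "phys_installed", "misc",
	    KSTAT_TYPE_RAW, 0, KSTAT_FLAG_VAR_SIZE | KSTAT_FLAG_VIRTUAL);
	if (ksp != NULL) {
		ksp->ks_update = mm_kstat_update;
		ksp->ks_snapshot = mm_kstat_snapshot;
		kstat_install(ksp);
	}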
Example #7
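/*
 * read/write entry point for the mm driver: validate the target for
 * each minor device, then hand physical and kernel memory transfers
 * to mmio().
 */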
/*ARGSUSED3*/
static int
mmrw(dev_t dev, struct uio *uio, enum uio_rw rw, cred_t *cred)
{
	pfn_t v;
	struct iovec *iov;
	int error = 0;
	size_t c;
	ssize_t oresid = uio->uio_resid;
	minor_t minor = getminor(dev);

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("mmrw");
			continue;
		}
		switch (minor) {

		case M_MEM:
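			/*
			 * The memlist lock covers only the containment
			 * check; it is dropped before mmio() moves the
			 * data.
			 */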
			memlist_read_lock();
			if (!address_in_memlist(phys_install,
			    (uint64_t)uio->uio_loffset, 1)) {
				memlist_read_unlock();
				error = EFAULT;
				break;
			}
			memlist_read_unlock();

			v = BTOP((u_offset_t)uio->uio_loffset);
			error = mmio(uio, rw, v,
			    uio->uio_loffset & PAGEOFFSET, 0, NULL);
			break;

		case M_KMEM:
		case M_ALLKMEM:
			{
			page_t **ppp = NULL;
			caddr_t vaddr = (caddr_t)uio->uio_offset;
			int try_lock = NEED_LOCK_KVADDR(vaddr);
			int locked = 0;

			if ((error = plat_mem_do_mmio(uio, rw)) != ENOTSUP)
				break;

			/*
			 * If vaddr does not map a valid page, as_pagelock()
			 * will return failure. Hence we can't check the
			 * return value and return EFAULT here as we'd like.
			 * seg_kp and seg_kpm do not properly support
			 * as_pagelock() for this context so we avoid it
			 * using the try_lock set check above.  Some day when
			 * the kernel page locking gets redesigned all this
			 * muck can be cleaned up.
			 */
			if (try_lock)
				locked = (as_pagelock(&kas, &ppp, vaddr,
				    PAGESIZE, S_WRITE) == 0);

			v = hat_getpfnum(kas.a_hat,
			    (caddr_t)(uintptr_t)uio->uio_loffset);
			if (v == PFN_INVALID) {
				if (locked)
					as_pageunlock(&kas, ppp, vaddr,
					    PAGESIZE, S_WRITE);
				error = EFAULT;
				break;
			}

			error = mmio(uio, rw, v, uio->uio_loffset & PAGEOFFSET,
			    minor == M_ALLKMEM || mm_kmem_io_access,
			    (locked && ppp) ? *ppp : NULL);
			if (locked)
				as_pageunlock(&kas, ppp, vaddr, PAGESIZE,
				    S_WRITE);
			}

			break;

		case M_ZERO:
			if (rw == UIO_READ) {
				label_t ljb;

				if (on_fault(&ljb)) {
					no_fault();
					error = EFAULT;
					break;
				}
				uzero(iov->iov_base, iov->iov_len);
				no_fault();
				uio->uio_resid -= iov->iov_len;
				uio->uio_loffset += iov->iov_len;
				break;
			}
			/* else it's a write, fall through to NULL case */
			/*FALLTHROUGH*/

		case M_NULL:
			if (rw == UIO_READ)
				return (0);
			c = iov->iov_len;
			iov->iov_base += c;
			iov->iov_len -= c;
			uio->uio_loffset += c;
			uio->uio_resid -= c;
			break;

		}
	}
	return (uio->uio_resid == oresid ? error : 0);
}
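
Note the return convention on the last line: following the usual
short-transfer rule, an error is reported only when no bytes moved at
all; once uio_resid has changed, the partial transfer is returned as
success and the caller will see the error on its next attempt.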
Example #8
/*
 * find prom phys pages and alloc space for a tmp copy
 */
static int
i_cpr_find_ppages(void)
{
	struct page *pp;
	struct memlist *pmem;
	pgcnt_t npages, pcnt, scnt, vcnt;
	pfn_t ppn, plast, *dst;
	int mapflag;

	cpr_clear_bitmaps();
	mapflag = REGULAR_BITMAP;

	/*
	 * there should be a page_t for each phys page used by the kernel;
	 * set a bit for each phys page not tracked by a page_t
	 */
	pcnt = 0;
	memlist_read_lock();
	for (pmem = phys_install; pmem; pmem = pmem->ml_next) {
		npages = mmu_btop(pmem->ml_size);
		ppn = mmu_btop(pmem->ml_address);
		for (plast = ppn + npages; ppn < plast; ppn++) {
			if (page_numtopp_nolock(ppn))
				continue;
			(void) cpr_setbit(ppn, mapflag);
			pcnt++;
		}
	}
	memlist_read_unlock();

	/*
	 * clear bits for phys pages in each segment
	 */
	scnt = cpr_count_seg_pages(mapflag, cpr_clrbit);

	/*
	 * set bits for phys pages referenced by the promvp vnode;
	 * these pages mostly consist of forthdebug words
	 */
	vcnt = 0;
	for (pp = promvp.v_pages; pp; ) {
		if (cpr_setbit(pp->p_offset, mapflag) == 0)
			vcnt++;
		pp = pp->p_vpnext;
		if (pp == promvp.v_pages)
			break;
	}

	/*
	 * the total number of prom pages is:
	 * (non-page_t pages - seg pages + vnode pages)
	 */
	ppage_count = pcnt - scnt + vcnt;
	CPR_DEBUG(CPR_DEBUG1,
	    "find_ppages: pcnt %ld - scnt %ld + vcnt %ld = %ld\n",
	    pcnt, scnt, vcnt, ppage_count);

	/*
	 * alloc array of pfn_t to store phys page list
	 */
	pphys_list_size = ppage_count * sizeof (pfn_t);
	pphys_list = kmem_alloc(pphys_list_size, KM_NOSLEEP);
	if (pphys_list == NULL) {
		cpr_err(CE_WARN, "cannot alloc pphys_list");
		return (ENOMEM);
	}

	/*
	 * phys pages referenced in the bitmap should be
	 * those used by the prom; scan bitmap and save
	 * a list of prom phys page numbers
	 */
	dst = pphys_list;
	memlist_read_lock();
	for (pmem = phys_install; pmem; pmem = pmem->ml_next) {
		npages = mmu_btop(pmem->ml_size);
		ppn = mmu_btop(pmem->ml_address);
		for (plast = ppn + npages; ppn < plast; ppn++) {
			if (cpr_isset(ppn, mapflag)) {
				ASSERT(dst < (pphys_list + ppage_count));
				*dst++ = ppn;
			}
		}
	}
	memlist_read_unlock();

	/*
	 * allocate space to store prom pages
	 */
	ppage_buf = kmem_alloc(mmu_ptob(ppage_count), KM_NOSLEEP);
	if (ppage_buf == NULL) {
		kmem_free(pphys_list, pphys_list_size);
		pphys_list = NULL;
		cpr_err(CE_WARN, "cannot alloc ppage_buf");
		return (ENOMEM);
	}

	return (0);
}
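
On success the caller owns two KM_NOSLEEP allocations, pphys_list
(pphys_list_size bytes) and ppage_buf (mmu_ptob(ppage_count) bytes),
and must eventually kmem_free() both; the ENOMEM path above shows the
matching release for pphys_list.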