Example 1
/* ARGSUSED */
static pgcnt_t
lgrp_plat_mem_size_default(lgrp_handle_t lgrphand, lgrp_mem_query_t query)
{
	extern struct memlist *phys_install;
	extern struct memlist *phys_avail;
	struct memlist *mlist;
	pgcnt_t npgs = 0;

	switch (query) {
	case LGRP_MEM_SIZE_FREE:
		return ((pgcnt_t)freemem);
	case LGRP_MEM_SIZE_AVAIL:
		memlist_read_lock();
		for (mlist = phys_avail; mlist; mlist = mlist->next)
			npgs += btop(mlist->size);
		memlist_read_unlock();
		return (npgs);
	case LGRP_MEM_SIZE_INSTALL:
		memlist_read_lock();
		for (mlist = phys_install; mlist; mlist = mlist->next)
			npgs += btop(mlist->size);
		memlist_read_unlock();
		return (npgs);
	default:
		return ((pgcnt_t)0);
	}
}
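Every example in this list leans on the same byte/page conversion helpers. A minimal sketch of how they are conventionally defined on illumos-style kernels, assuming 4 KB pages purely for illustration (the real definitions live in the platform's sys/param.h and PAGESHIFT varies by port):

/* Illustrative definitions only; assumes PAGESHIFT == 12 (4 KB pages). */
#define	PAGESHIFT	12
#define	PAGESIZE	(1UL << PAGESHIFT)
#define	PAGEOFFSET	(PAGESIZE - 1)

#define	btop(x)		((unsigned long)(x) >> PAGESHIFT)	/* bytes -> pages, truncating */
#define	btopr(x)	(((unsigned long)(x) + PAGEOFFSET) >> PAGESHIFT) /* bytes -> pages, rounding up */
#define	ptob(x)		((unsigned long)(x) << PAGESHIFT)	/* pages -> bytes */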
Example 2
/*
 * munmap()
 *	Old BSD implementation (body compiled out under "#ifdef notdef"):
 *	validate the page-aligned range, then free each page's backing
 *	memory and reinstall a zero-fill-on-demand PTE in its place.
 */
munmap()
{
#ifdef notdef
	register struct a {
		caddr_t	addr;
		int	len;
	} *uap = (struct a *)u.u_ap;
	int off;
	int fv, lv;
	register struct pte *pte;

	if (((int)uap->addr & CLOFSET) || (uap->len & CLOFSET)) {
		u.u_error = EINVAL;
		return;
	}
	fv = btop(uap->addr);
	lv = btop(uap->addr + uap->len - 1);
	if (lv < fv || !isadsv(u.u_procp, fv) || !isadsv(u.u_procp, lv)) {
		u.u_error = EINVAL;
		return;
	}
	for (off = 0; off < uap->len; off += NBPG) {
		pte = vtopte(u.u_procp, fv);
		u.u_procp->p_rssize -= vmemfree(pte, 1);
		*(int *)pte = (PG_UW|PG_FOD);
		((struct fpte *)pte)->pg_fileno = PG_FZERO;
		fv++;
	}
	u.u_procp->p_flag |= SPTECHG;
#endif
}
Example 3
/*
 * Find the intersection between a memnode and a memlist
 * and return the number of pages that overlap.
 *
 * Assumes the list is protected from DR operations by
 * the memlist lock.
 */
pgcnt_t
mem_node_memlist_pages(int mnode, struct memlist *mlist)
{
	pfn_t		base, end;
	pfn_t		cur_base, cur_end;
	pgcnt_t		npgs;
	struct memlist	*pmem;

	base = mem_node_config[mnode].physbase;
	end = mem_node_config[mnode].physmax;
	npgs = 0;

	memlist_read_lock();

	for (pmem = mlist; pmem; pmem = pmem->ml_next) {
		cur_base = btop(pmem->ml_address);
		cur_end = cur_base + btop(pmem->ml_size) - 1;
		if (end < cur_base || base > cur_end)
			continue;
		npgs = npgs + (MIN(cur_end, end) -
		    MAX(cur_base, base)) + 1;
	}

	memlist_read_unlock();

	return (npgs);
}
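The page count above is the standard closed-interval intersection: for inclusive PFN ranges [base, end] and [cur_base, cur_end] that touch, the shared pages are MIN(cur_end, end) - MAX(cur_base, base) + 1. A small standalone illustration with made-up PFN values:

#include <stdio.h>

#define	MIN(a, b)	((a) < (b) ? (a) : (b))
#define	MAX(a, b)	((a) > (b) ? (a) : (b))

int
main(void)
{
	unsigned long base = 0x100, end = 0x1ff;		/* memnode: PFNs 0x100..0x1ff */
	unsigned long cur_base = 0x180, cur_end = 0x27f;	/* memlist chunk: 0x180..0x27f */
	unsigned long npgs = 0;

	if (!(end < cur_base || base > cur_end))
		npgs = MIN(cur_end, end) - MAX(cur_base, base) + 1;

	printf("%lu pages overlap\n", npgs);	/* 0x1ff - 0x180 + 1 = 128 */
	return (0);
}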
Example 4
/*
 * Remove a PFN range from a memnode.  On some platforms,
 * the memnode will be created with physbase at the first
 * allocatable PFN, but later deleted with the MC slice
 * base address converted to a PFN, in which case we need
 * to assume physbase and up.
 */
void
mem_node_del_slice(pfn_t start, pfn_t end)
{
	int mnode;
	pgcnt_t delta_pgcnt, node_size;
	mnodeset_t omask, nmask;

	if (mem_node_physalign) {
		start &= ~(btop(mem_node_physalign) - 1);
		end = roundup(end, btop(mem_node_physalign)) - 1;
	}
	mnode = PFN_2_MEM_NODE(start);

	ASSERT(mnode < max_mem_nodes);
	ASSERT(mem_node_config[mnode].exists == 1);

	delta_pgcnt = end - start;
	node_size = mem_node_config[mnode].physmax -
	    mem_node_config[mnode].physbase;

	if (node_size > delta_pgcnt) {
		/*
		 * Subtract the slice from the memnode.
		 */
		if (start <= mem_node_config[mnode].physbase)
			mem_node_config[mnode].physbase = end + 1;
		ASSERT(end <= mem_node_config[mnode].physmax);
		if (end == mem_node_config[mnode].physmax)
			mem_node_config[mnode].physmax = start - 1;
	} else {

		/*
		 * Let the common lgrp framework know the mnode is
		 * leaving
		 */
		lgrp_config(LGRP_CONFIG_MEM_DEL, mnode,
		    MEM_NODE_2_LGRPHAND(mnode));

		/*
		 * Delete the whole node.
		 */
		ASSERT(MNODE_PGCNT(mnode) == 0);
		do {
			omask = memnodes_mask;
			nmask = omask & ~(1ull << mnode);
		} while (atomic_cas_64(&memnodes_mask, omask, nmask) != omask);
		atomic_dec_16(&num_memnodes);
		mem_node_config[mnode].exists = 0;
	}
}
Example 5
int
_kvm_kvatop(kvm_t *kd, u_long va, off_t *pa)
{
    struct tte tte;
    off_t tte_off;
    u_long vpn;
    off_t pa_off;
    u_long pg_off;
    int rest;

    pg_off = va & PAGE_MASK;
    if (va >= VM_MIN_DIRECT_ADDRESS)
        pa_off = TLB_DIRECT_TO_PHYS(va) & ~PAGE_MASK;
    else {
        vpn = btop(va);
        tte_off = kd->vmst->vm_tsb_off +
                  ((vpn & kd->vmst->vm_tsb_mask) << TTE_SHIFT);
        if (!_kvm_read_phys(kd, tte_off, &tte, sizeof(tte)))
            goto invalid;
        if (!tte_match(&tte, va))
            goto invalid;
        pa_off = TTE_GET_PA(&tte);
    }
    rest = PAGE_SIZE - pg_off;
    pa_off = _kvm_find_off(kd->vmst, pa_off, rest);
    if (pa_off == KVM_OFF_NOTFOUND)
        goto invalid;
    *pa = pa_off + pg_off;
    return (rest);

invalid:
    _kvm_err(kd, 0, "invalid address (%lx)", va);
    return (0);
}
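The return value is the number of bytes still valid in the translated page starting at *pa, so a caller covering an arbitrary range walks it page by page using that count as the step. A hedged sketch of such a caller (walk_kernel_va and chunk_cb are hypothetical helpers, not part of libkvm):

#include <sys/types.h>
#include <kvm.h>

int _kvm_kvatop(kvm_t *kd, u_long va, off_t *pa);	/* routine shown above */

static int
walk_kernel_va(kvm_t *kd, u_long va, size_t len,
    int (*chunk_cb)(off_t pa, size_t nbytes))
{
	while (len > 0) {
		off_t pa;
		int rest = _kvm_kvatop(kd, va, &pa);	/* bytes valid in this page */

		if (rest <= 0)
			return (-1);			/* translation failed */
		if ((size_t)rest > len)
			rest = (int)len;
		if (chunk_cb(pa, (size_t)rest) != 0)	/* consume one chunk */
			return (-1);
		va += rest;
		len -= rest;
	}
	return (0);
}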
Example 6
/*
 * make_seg()
 *	Given a vas and a byte range, return a segment describing it.
 *
 * The byte range must occupy a single pset.  On error, 0 is returned.
 */
struct seg *
make_seg(struct vas *vas, void *buf, uint buflen)
{
	struct pview *pv;
	struct seg *s;
	int x;

	/*
	 * Invalid buf/buflen?
	 */
	if ((buf == 0) || (buflen == 0)) {
		return(0);
	}

	/*
	 * Get new segment
	 */
	s = MALLOC(sizeof(struct seg), MT_SEG);

	/*
	 * Find pview holding the starting address
	 */
	pv = find_pview(vas, buf);
	if (!pv) {
		FREE(s, MT_SEG);
		return(0);
	}

	/*
	 * Make sure end lies within pview
	 */
	if (((char *)buf + buflen) > ((char *)pv->p_vaddr + ptob(pv->p_len))) {
		v_lock(&pv->p_set->p_lock, SPL0);
		FREE(s, MT_SEG);
		return(0);
	}

	/*
	 * Duplicate view, record byte offset
	 */
	s->s_pview = *pv;
	ref_pset(pv->p_set);
	s->s_off = (char *)buf - (char *)pv->p_vaddr;
	s->s_len = buflen;

	/*
	 * Trim off leading and trailing pages from the view
	 */
	pv = &s->s_pview;
	x = btop(s->s_off);
	pv->p_off += x;
	s->s_off -= ptob(x);
	pv->p_len = btorp(s->s_off + buflen);

	/*
	 * Done with set
	 */
	v_lock(&pv->p_set->p_lock, SPL0);
	return(s);
}
Example 7
/*
 * Initialize the buffer I/O system by freeing
 * all buffers and setting all device hash buffer lists to empty.
 */
void
binit(void)
{
	struct buf *bp;
	unsigned int i, pct;
	ulong_t	bio_max_hwm, bio_default_hwm;

	/*
	 * Maximum/Default values for bufhwm are set to the smallest of:
	 *	- BIO_MAX_PERCENT resp. BIO_BUF_PERCENT of real memory
	 *	- 1/4 of kernel virtual memory
	 *	- INT32_MAX to prevent overflows of v.v_bufhwm (which is int).
	 * Additionally, in order to allow simple tuning by percentage of
	 * physical memory, bufhwm_pct is used to calculate the default if
	 * the value of this tunable is between 0 and BIO_MAX_PERCENT.
	 *
	 * Since the unit for v.v_bufhwm is kilobytes, this allows for
	 * a maximum of 1024 * 2GB == 2TB memory usage by buffer headers.
	 */
	bio_max_hwm = MIN(physmem / BIO_MAX_PERCENT,
	    btop(vmem_size(heap_arena, VMEM_FREE)) / 4) * (PAGESIZE / 1024);
	bio_max_hwm = MIN(INT32_MAX, bio_max_hwm);

	pct = BIO_BUF_PERCENT;
	if (bufhwm_pct != 0 &&
	    ((pct = 100 / bufhwm_pct) < BIO_MAX_PERCENT)) {
		pct = BIO_BUF_PERCENT;
		/*
		 * Invalid user specified value, emit a warning.
		 */
		cmn_err(CE_WARN, "binit: bufhwm_pct(%d) out of \
		    range(1..%d). Using %d as default.",
		    bufhwm_pct,
		    100 / BIO_MAX_PERCENT, 100 / BIO_BUF_PERCENT);
	}
Example 8
/*ARGSUSED*/
static int
segkmem_pagelock(struct seg *seg, caddr_t addr, size_t len,
	page_t ***ppp, enum lock_type type, enum seg_rw rw)
{
	page_t **pplist, *pp;
	pgcnt_t npages;
	spgcnt_t pg;
	size_t nb;
	struct vnode *vp = seg->s_data;

	ASSERT(ppp != NULL);

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_PAGELOCK(segkp, addr, len, ppp, type, rw));

	npages = btopr(len);
	nb = sizeof (page_t *) * npages;

	if (type == L_PAGEUNLOCK) {
		pplist = *ppp;
		ASSERT(pplist != NULL);

		for (pg = 0; pg < npages; pg++) {
			pp = pplist[pg];
			page_unlock(pp);
		}
		kmem_free(pplist, nb);
		return (0);
	}

	ASSERT(type == L_PAGELOCK);

	pplist = kmem_alloc(nb, KM_NOSLEEP);
	if (pplist == NULL) {
		*ppp = NULL;
		return (ENOTSUP);	/* take the slow path */
	}

	for (pg = 0; pg < npages; pg++) {
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_SHARED);
		if (pp == NULL) {
			while (--pg >= 0)
				page_unlock(pplist[pg]);
			kmem_free(pplist, nb);
			*ppp = NULL;
			return (ENOTSUP);
		}
		pplist[pg] = pp;
		addr += PAGESIZE;
	}

	*ppp = pplist;
	return (0);
}
Example 9
/*
 * Adjust the memnode config after a DR operation.
 *
 * It is rather tricky to do these updates since we can't
 * protect the memnode structures with locks, so we must
 * be mindful of the order in which updates and reads to
 * these values can occur.
 */
void
mem_node_add_slice(pfn_t start, pfn_t end)
{
	int mnode;
	mnodeset_t newmask, oldmask;

	/*
	 * DR will pass us the first pfn that is allocatable.
	 * We need to round down to get the real start of
	 * the slice.
	 */
	if (mem_node_physalign) {
		start &= ~(btop(mem_node_physalign) - 1);
		end = roundup(end, btop(mem_node_physalign)) - 1;
	}

	mnode = PFN_2_MEM_NODE(start);
	ASSERT(mnode < max_mem_nodes);

	if (atomic_cas_32((uint32_t *)&mem_node_config[mnode].exists, 0, 1)) {
		/*
		 * Add slice to existing node.
		 */
		if (start < mem_node_config[mnode].physbase)
			mem_node_config[mnode].physbase = start;
		if (end > mem_node_config[mnode].physmax)
			mem_node_config[mnode].physmax = end;
	} else {
		mem_node_config[mnode].physbase = start;
		mem_node_config[mnode].physmax = end;
		atomic_inc_16(&num_memnodes);
		do {
			oldmask = memnodes_mask;
			newmask = memnodes_mask | (1ull << mnode);
		} while (atomic_cas_64(&memnodes_mask, oldmask, newmask) !=
			 oldmask);
	}
	/*
	 * Let the common lgrp framework know about the new memory
	 */
	lgrp_config(LGRP_CONFIG_MEM_ADD, mnode, MEM_NODE_2_LGRPHAND(mnode));
}
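The memnodes_mask update is the usual lock-free read-modify-write loop: take a snapshot, compute the new mask from it, and retry until the compare-and-swap observes the snapshot unchanged. A minimal sketch of the same pattern written with C11 atomics instead of the kernel's atomic_cas_64() (which returns the old value rather than a boolean, but the shape is identical); memnodes_mask_sketch is a hypothetical stand-in:

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t memnodes_mask_sketch;

/* Set bit `mnode` in the shared mask without taking a lock. */
static void
mask_set_bit(int mnode)
{
	uint64_t omask, nmask;

	omask = atomic_load(&memnodes_mask_sketch);
	do {
		nmask = omask | (1ull << mnode);
		/* on failure, omask is reloaded with the current value and we retry */
	} while (!atomic_compare_exchange_weak(&memnodes_mask_sketch,
	    &omask, nmask));
}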
Example 10
/*
 * Allocate the segment specific private data struct and fill it in
 * with the per kp segment mutex, anon ptr. array and hash table.
 */
int
segkp_create(struct seg *seg)
{
	struct segkp_segdata *kpsd;
	size_t	np;

	ASSERT(seg != NULL && seg->s_as == &kas);
	ASSERT(RW_WRITE_HELD(&seg->s_as->a_lock));

	if (seg->s_size & PAGEOFFSET) {
		panic("Bad segkp size");
		/*NOTREACHED*/
	}

	kpsd = kmem_zalloc(sizeof (struct segkp_segdata), KM_SLEEP);

	/*
	 * Allocate the virtual memory for segkp and initialize it
	 */
	if (segkp_fromheap) {
		np = btop(kvseg.s_size);
		segkp_bitmap = kmem_zalloc(BT_SIZEOFMAP(np), KM_SLEEP);
		kpsd->kpsd_arena = vmem_create("segkp", NULL, 0, PAGESIZE,
		    vmem_alloc, vmem_free, heap_arena, 5 * PAGESIZE, VM_SLEEP);
	} else {
		segkp_bitmap = NULL;
		np = btop(seg->s_size);
		kpsd->kpsd_arena = vmem_create("segkp", seg->s_base,
		    seg->s_size, PAGESIZE, NULL, NULL, NULL, 5 * PAGESIZE,
		    VM_SLEEP);
	}

	kpsd->kpsd_anon = anon_create(np, ANON_SLEEP | ANON_ALLOC_FORCE);

	kpsd->kpsd_hash = kmem_zalloc(SEGKP_HASHSZ * sizeof (struct segkp *),
	    KM_SLEEP);
	seg->s_data = (void *)kpsd;
	seg->s_ops = &segkp_ops;
	segkpinit_mem_config(seg);
	return (0);
}
Example 11
pgcnt_t
size_virtalloc(prom_memlist_t *avail, size_t nelems)
{

	u_longlong_t	start, end;
	pgcnt_t		allocpages = 0;
	uint_t		hole_allocated = 0;
	uint_t		i;

	for (i = 0; i < nelems - 1; i++) {

		start = avail[i].addr + avail[i].size;
		end = avail[i + 1].addr;

		/*
		 * Notes:
		 *
		 * (1) OBP on platforms with US I/II pre-allocates the hole
		 * represented by [spec_hole_start, spec_hole_end);
		 * pre-allocation is done to make this range unavailable
		 * for any allocation.
		 *
		 * (2) OBP on starcat always pre-allocates the hole similar to
		 * platforms with US I/II.
		 *
		 * (3) OBP on serengeti does _not_ pre-allocate the hole.
		 *
		 * (4) OBP ignores Spitfire Errata #21; i.e. it does _not_
		 * fill up or pre-allocate an additional 4GB on both sides
		 * of the hole.
		 *
		 * (5) kernel virtual range [spec_hole_start, spec_hole_end)
		 * is _not_ used on any platform including those with
		 * UltraSPARC III where there is no hole.
		 *
		 * Algorithm:
		 *
		 * Check if range [spec_hole_start, spec_hole_end) is
		 * pre-allocated by OBP; if so, subtract that range from
		 * allocpages.
		 */
		if (end >= spec_hole_end && start <= spec_hole_start)
			hole_allocated = 1;

		allocpages += btopr(end - start);
	}

	if (hole_allocated)
		allocpages -= btop(spec_hole_end - spec_hole_start);

	return (allocpages);
}
Example 12
/*
 * Return the page for the kpm virtual address vaddr.
 * Caller is responsible for the kpm mapping and lock
 * state of the page.
 */
page_t *
hat_kpm_vaddr2page(caddr_t vaddr)
{
	uintptr_t	paddr;
	pfn_t		pfn;

	ASSERT(IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);

	return (page_numtopp_nolock(pfn));
}
Example 13
ka820_init()
{
	register int csr;

	/* map in the various devices */
	*(int *)&Ka820map[0] = PG_V|PG_KW|btop(KA820_PORTADDR);
	*(int *)&RX50map[0] = PG_V|PG_KW|btop(KA820_RX50ADDR);
	*(int *)&Clockmap[0] = PG_V|PG_KW|btop(KA820_CLOCKADDR);
#ifdef notyet
	ioaccess(bootram, BRAMmap, KA820_BRPAGES * NBPG);
	ioaccess(eeprom, EEPROMmap, KA820_EEPAGES * NBPG);
#else
	mtpr(TBIA, 0);
#endif

	/* reset the console and enable the RX50 */
	csr = ka820port.csr;
	csr &= ~KA820PORT_RSTHALT;	/* ??? */
	csr |= KA820PORT_CONSCLR | KA820PORT_CRDCLR | KA820PORT_CONSEN |
		KA820PORT_RXIE;
	ka820port.csr = csr;
}
Example 14
/*
 * kern_mem()
 *	Create a segment which views a range of kernel memory
 *
 * Mostly used to provide a view of a new client's capabilities.
 */
struct seg *
kern_mem(void *vaddr, uint len)
{
	struct seg *s;
	ulong pgstart, pgend;
	struct pview *pv;
	int x;
	struct pset *ps;
	extern struct pset *physmem_pset();

	/*
	 * Allocate a new segment, fill in some fields.  Use the
	 * pset layer to create a physmem-type page set on which
	 * we build our view.
	 */
	s = MALLOC(sizeof(struct seg), MT_SEG);
	s->s_off = (ulong)vaddr & (NBPG-1);
	s->s_len = len;
	pv = &s->s_pview;
	pgstart = btop(vaddr);
	pgend = btop((char *)vaddr + len - 1);
	pv->p_len = pgend-pgstart+1;
	pv->p_off = 0;
	ps = pv->p_set = physmem_pset(0, pv->p_len);
	ref_pset(ps);
	pv->p_prot = PROT_RO;

	/*
	 * Fill in the slots of the physmem pset with the actual
	 * page numbers for each page in the vaddr range.
	 */
	for (x = 0; x < pv->p_len; ++x) {
		struct perpage *pp;

		pp = find_pp(ps, x);
		pp->pp_pfn = btop(vtop((char *)vaddr + ptob(x)));
	}
	return(s);
}
Example 15
void
startup_build_mem_nodes(prom_memlist_t *list, size_t nelems)
{
	size_t	elem;
	pfn_t	basepfn;
	pgcnt_t	npgs;

	/* LINTED: ASSERT will always be true or false */
	ASSERT(NBBY * sizeof (mnodeset_t) >= max_mem_nodes);

	if (&plat_build_mem_nodes != NULL) {
		plat_build_mem_nodes(list, nelems);
	} else {
		/*
		 * Boot install lists are arranged <addr, len>, ...
		 */
		for (elem = 0; elem < nelems; list++, elem++) {
			basepfn = btop(list->addr);
			npgs = btop(list->size);
			mem_node_add_range(basepfn, basepfn + npgs - 1);
		}
	}
}
Example 16
/*ARGSUSED*/
static int
bootfs_getapage(vnode_t *vp, u_offset_t off, size_t len, uint_t *protp,
    page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr, enum seg_rw rw,
    cred_t *cr)
{
	bootfs_node_t *bnp = vp->v_data;
	page_t *pp, *fpp;
	pfn_t pfn;

	for (;;) {
		/* Easy case where the page exists */
		pp = page_lookup(vp, off, rw == S_CREATE ? SE_EXCL : SE_SHARED);
		if (pp != NULL) {
			if (pl != NULL) {
				pl[0] = pp;
				pl[1] = NULL;
			} else {
				page_unlock(pp);
			}
			return (0);
		}

		pp = page_create_va(vp, off, PAGESIZE, PG_EXCL | PG_WAIT, seg,
		    addr);

		/*
		 * If we didn't get the page, that means someone else beat us to
		 * creating this so we need to try again.
		 */
		if (pp != NULL)
			break;
	}

	pfn = btop((bnp->bvn_addr + off) & PAGEMASK);
	fpp = page_numtopp_nolock(pfn);

	if (ppcopy(fpp, pp) == 0) {
		pvn_read_done(pp, B_ERROR);
		return (EIO);
	}

	if (pl != NULL) {
		pvn_plist_init(pp, pl, plsz, off, PAGESIZE, rw);
	} else {
		pvn_io_done(pp);
	}

	return (0);
}
Example 17
MainWindow::MainWindow(QWidget *parent) :
    QMainWindow(parent),
    ui(new Ui::MainWindow)
{
    ui->setupUi(this);
    setFixedSize(ui->graphicsView->width()+10,ui->graphicsView->height()+25);
    m_file = menuBar()->addMenu("Game");
    m_help = menuBar()->addMenu("Help");
    m_newGame = new QAction(tr("&New Game"),this);
    //m_settings = new QAction(tr("&Settings"),this);
    m_exit = new QAction(tr("E&xit"),this);
    m_about = new QAction(tr("About arkanoid"),this);
    m_newGame->setShortcut(QKeySequence::New);
    m_exit->setShortcut(QKeySequence::Close);

    QPixmap bgPix(":/img/hexagon_pattern.png");
    bgPix = bgPix.scaled(800,600, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
    m_scene = new QGraphicsScene(0,0,ui->graphicsView->width(),ui->graphicsView->height());
    ui->graphicsView->setScene(m_scene);

    QPixmap blft(":/img/border_left.png");
    QPixmap btop(":/img/border_top.png");
    QPixmap brght(":/img/border_right.png");
    blft = blft.scaled(800,25, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
    btop = btop.scaled(25,600, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);
    brght = brght.scaled(800,25, Qt::IgnoreAspectRatio, Qt::SmoothTransformation);

    ui->graphicsView->setViewportUpdateMode(QGraphicsView::BoundingRectViewportUpdate);
    ui->graphicsView->setBackgroundBrush(bgPix);
    ui->graphicsView->setCacheMode(QGraphicsView::CacheBackground);
    ui->graphicsView->setRenderHints(QPainter::Antialiasing | QPainter::SmoothPixmapTransform);

    QIcon ico(tr(":/icon.ico"));
    setWindowIcon(ico);

    connect(m_newGame,SIGNAL(triggered()),this,SLOT(startNewGame()));
    //connect(m_settings,SIGNAL(triggered()),this,SLOT(openSettings()));
    connect(m_exit,SIGNAL(triggered()),this,SLOT(close()));
    connect(m_about,SIGNAL(triggered()),this,SLOT(openAbout()));

    m_file->addAction(m_newGame);
    m_file->addSeparator();
    //m_file->addAction(m_settings);
    //m_file->addSeparator();
    m_file->addAction(m_exit);
    m_help->addAction(m_about);

    setCentralWidget(ui->graphicsView);
}
Example 18
void
boot_mapin(caddr_t addr, size_t size)
{
	caddr_t	 eaddr;
	page_t	*pp;
	pfn_t	 pfnum;

	if (page_resv(btop(size), KM_NOSLEEP) == 0)
		panic("boot_mapin: page_resv failed");

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
		pfnum = va_to_pfn(addr);
		if (pfnum == PFN_INVALID)
			continue;
		if ((pp = page_numtopp_nolock(pfnum)) == NULL)
			panic("boot_mapin(): No pp for pfnum = %lx", pfnum);

		/*
		 * must break up any large pages that may have constituent
		 * pages being utilized for BOP_ALLOC()'s before calling
		 * page_numtopp(). The locking code (i.e. page_reclaim())
		 * can't handle them
		 */
		if (pp->p_szc != 0)
			page_boot_demote(pp);

		pp = page_numtopp(pfnum, SE_EXCL);
		if (pp == NULL || PP_ISFREE(pp))
			panic("boot_alloc: pp is NULL or free");

		/*
		 * If the cage is on but doesn't yet contain this page,
		 * mark it as non-relocatable.
		 */
		if (kcage_on && !PP_ISNORELOC(pp)) {
			PP_SETNORELOC(pp);
			PLCNT_XFER_NORELOC(pp);
		}

		(void) page_hashin(pp, &kvp, (u_offset_t)(uintptr_t)addr, NULL);
		pp->p_lckcnt = 1;
#if defined(__x86)
		page_downgrade(pp);
#else
		page_unlock(pp);
#endif
	}
}
Example 19
/*
 * Return the page frame number if a valid segkpm mapping exists
 * for vaddr, otherwise return PFN_INVALID. No locks are grabbed.
 * Should only be used by other sfmmu routines.
 */
pfn_t
sfmmu_kpm_vatopfn(caddr_t vaddr)
{
	uintptr_t	paddr;
	pfn_t		pfn;
	page_t	*pp;

	ASSERT(kpm_enable && IS_KPM_ADDR(vaddr));

	SFMMU_KPM_VTOP(vaddr, paddr);
	pfn = (pfn_t)btop(paddr);
	pp = page_numtopp_nolock(pfn);
	if (pp)
		return (pfn);
	else
		return ((pfn_t)PFN_INVALID);
}
Example 20
File: rw.c Project: JamesLinus/vsta
/*
 * swap_rw()
 *	Look up underlying swap device, forward operation
 */
void
swap_rw(struct msg *m, struct file *f, uint bytes)
{
	struct swapmap *s;
	uint blk, op = m->m_op & MSG_MASK;

	/*
	 * Check for permission, page alignment
	 */
	if (((op == FS_WRITE) || (op == FS_ABSWRITE)) &&
			!(f->f_perms & ACC_WRITE)) {
		msg_err(m->m_sender, EPERM);
		return;
	}
	if ((m->m_nseg != 1) || (bytes & (NBPG-1)) ||
			(f->f_pos & (NBPG-1))) {
		msg_err(m->m_sender, EINVAL);
		return;
	}
	blk = btop(f->f_pos);

	/*
	 * Find entry for next part of I/O
	 */
	if ((s = swapent(blk)) == 0) {
		msg_err(m->m_sender, EINVAL);
		return;
	}

	/*
	 * Convert offset relative to beginning of this chunk of
	 * swap space.
	 */
	m->m_arg1 = ptob(blk - s->s_block + s->s_off);

	/*
	 * Send off the I/O
	 */
	if (msg_send(s->s_port, m) < 0) {
		msg_err(m->m_sender, strerror());
		return;
	}
	m->m_buflen = m->m_arg = m->m_arg1 = m->m_nseg = 0;
	msg_reply(m->m_sender, m);
}
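The rebasing of m_arg1 is plain page arithmetic: the absolute file position becomes a swap block number via btop(), is shifted into the target device's own block space, and goes back to a byte offset via ptob(). A worked example with assumed numbers (NBPG of 4096, a swapmap entry starting at block 4 with a device offset of 100 blocks):

#include <stdio.h>

#define	NBPG	4096u			/* assumed page size */
#define	btop(x)	((x) / NBPG)
#define	ptob(x)	((x) * NBPG)

int
main(void)
{
	unsigned int pos = 0x5000;		/* f->f_pos */
	unsigned int blk = btop(pos);		/* page/block 5 */
	unsigned int s_block = 4, s_off = 100;	/* assumed swapmap entry */

	/* 5 - 4 + 100 = block 101 on the device -> byte offset 0x65000 */
	printf("device offset = 0x%x\n", ptob(blk - s_block + s_off));
	return (0);
}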
Example 21
/* ARGSUSED */
static int
segkmem_checkprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_CHECKPROT(segkp, addr, size, prot));

	segkmem_badop();
	return (0);
}
Example 22
/* ARGSUSED */
static int
segkmem_getmemid(struct seg *seg, caddr_t addr, memid_t *memidp)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_GETMEMID(segkp, addr, memidp));

	segkmem_badop();
	return (0);
}
Example 23
/* ARGSUSED */
static int
segkmem_kluster(struct seg *seg, caddr_t addr, ssize_t delta)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas)
		segkmem_badop();

	/*
	 * If it is one of segkp pages, call into segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_KLUSTER(segkp, addr, delta));

	segkmem_badop();
	return (0);
}
Example 24
static int
segkmem_setprot(struct seg *seg, caddr_t addr, size_t size, uint_t prot)
{
	ASSERT(RW_LOCK_HELD(&seg->s_as->a_lock));

	if (seg->s_as != &kas || size > seg->s_size ||
	    addr < seg->s_base || addr + size > seg->s_base + seg->s_size)
		panic("segkmem_setprot: bad args");

	/*
	 * If it is one of segkp pages, call segkp.
	 */
	if (segkp_bitmap && seg == &kvseg &&
	    BT_TEST(segkp_bitmap, btop((uintptr_t)(addr - seg->s_base))))
		return (SEGOP_SETPROT(segkp, addr, size, prot));

	if (prot == 0)
		hat_unload(kas.a_hat, addr, size, HAT_UNLOAD);
	else
		hat_chgprot(kas.a_hat, addr, size, prot);
	return (0);
}
Example 25
/*ARGSUSED2*/
static int
mmmmap(dev_t dev, off_t off, int prot)
{
	pfn_t pf;
	struct memlist *pmem;
	minor_t minor = getminor(dev);

	switch (minor) {
	case M_MEM:
		pf = btop(off);
		memlist_read_lock();
		for (pmem = phys_install; pmem != NULL; pmem = pmem->next) {
			if (pf >= BTOP(pmem->address) &&
			    pf < BTOP(pmem->address + pmem->size)) {
				memlist_read_unlock();
				return (impl_obmem_pfnum(pf));
			}
		}
		memlist_read_unlock();
		break;

	case M_KMEM:
	case M_ALLKMEM:
		/* no longer supported with KPR */
		return (-1);

	case M_ZERO:
		/*
		 * We shouldn't be mmap'ing to /dev/zero here as
		 * mmsegmap() should have already converted
		 * a mapping request for this device to a mapping
		 * using seg_vn for anonymous memory.
		 */
		break;

	}
	return (-1);
}
Example 26
/*
 * Process the physical installed list for boot.
 * Finds:
 * 1) the pfn of the highest installed physical page,
 * 2) the number of pages installed
 * 3) the number of distinct contiguous regions these pages fall into.
 */
void
installed_top_size(
	struct memlist *list,	/* pointer to start of installed list */
	pfn_t *high_pfn,	/* return ptr for top value */
	pgcnt_t *pgcnt,		/* return ptr for sum of installed pages */
	uint_t	*ranges)	/* return ptr for the count of contig. ranges */
{
	pfn_t top = 0;
	pgcnt_t sumpages = 0;
	pfn_t highp;		/* high page in a chunk */
	uint_t cnt = 0;

	for (; list; list = list->next) {
		++cnt;
		highp = (list->address + list->size - 1) >> PAGESHIFT;
		if (top < highp)
			top = highp;
		sumpages += btop(list->size);
	}

	*high_pfn = top;
	*pgcnt = sumpages;
	*ranges = cnt;
}
Example 27
/*
 * rdxmem does the real work of read requests for xmemfs.
 */
static int
rdxmem(
	struct xmount *xm,
	struct xmemnode *xp,
	struct uio *uio,
	struct caller_context *ct)
{
	ulong_t blockoffset;	/* offset within the current block */
	caddr_t base;
	ssize_t bytes;		/* bytes to uiomove */
	struct vnode *vp;
	int error;
	uint_t	blocknumber;
	long oresid = uio->uio_resid;
	size_t	bsize = xm->xm_bsize;
	offset_t	offset;

	vp = XNTOV(xp);

	XMEMPRINTF(1, ("rdxmem: vp %p\n", (void *)vp));

	ASSERT(RW_LOCK_HELD(&xp->xn_contents));

	if (MANDLOCK(vp, xp->xn_mode)) {
		rw_exit(&xp->xn_contents);
		/*
		 * xmem_getattr ends up being called by chklock
		 */
		error = chklock(vp, FREAD,
			uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct);
		rw_enter(&xp->xn_contents, RW_READER);
		if (error != 0) {
			XMEMPRINTF(1,
			    ("rdxmem: vp %p error %x\n", (void *)vp, error));
			return (error);
		}
	}
	ASSERT(xp->xn_type == VREG);

	if ((offset = uio->uio_loffset) >= MAXOFF_T) {
		XMEMPRINTF(1, ("rdxmem: vp %p bad offset %llx\n",
		    (void *)vp, uio->uio_loffset));
		return (0);
	}
	if (offset < 0)
		return (EINVAL);

	if (uio->uio_resid == 0) {
		XMEMPRINTF(1, ("rdxmem: vp %p resid 0\n", (void *)vp));
		return (0);
	}

	blocknumber = offset >> xm->xm_bshift;
	do {
		offset_t diff, pagestart, pageend;
		uint_t	pageinblock;

		blockoffset = offset & (bsize - 1);
		/*
		 * A maximum of xm->xm_bsize bytes of data is transferred
		 * each pass through this loop
		 */
		bytes = MIN(bsize - blockoffset, uio->uio_resid);

		diff = xp->xn_size - offset;

		if (diff <= 0) {
			error = 0;
			goto out;
		}
		if (diff < bytes)
			bytes = diff;

		if (!xp->xn_ppa[blocknumber])
			if (error = xmem_fillpages(xp, vp, offset, bytes, 1)) {
				return (error);
			}
		/*
		 * We have to drop the contents lock to prevent the VM
		 * system from trying to reacquire it in xmem_getpage()
		 * should the uiomove cause a pagefault.
		 */
		rw_exit(&xp->xn_contents);

#ifdef LOCKNEST
		xmem_getpage();
#endif

		/* 2/10 panic in hat_memload_array - len & MMU_OFFSET */

		pagestart = offset & ~(offset_t)(PAGESIZE - 1);
		pageend = (offset + bytes - 1) & ~(offset_t)(PAGESIZE - 1);
		if (xm->xm_ppb == 1)
			base = segxmem_getmap(xm->xm_map, vp,
			    pagestart, pageend - pagestart + PAGESIZE,
			    (page_t **)&xp->xn_ppa[blocknumber], S_READ);
		else {
			pageinblock = btop(blockoffset);
			base = segxmem_getmap(xm->xm_map, vp,
			    pagestart, pageend - pagestart + PAGESIZE,
			    &xp->xn_ppa[blocknumber][pageinblock], S_READ);

		}
		error = uiomove(base + (blockoffset & (PAGESIZE - 1)),
			bytes, UIO_READ, uio);

		segxmem_release(xm->xm_map, base,
			pageend - pagestart + PAGESIZE);
		/*
		 * Re-acquire contents lock.
		 */
		rw_enter(&xp->xn_contents, RW_READER);

		offset = uio->uio_loffset;
		blocknumber++;
	} while (error == 0 && uio->uio_resid > 0);

out:
	gethrestime(&xp->xn_atime);

	/*
	 * If we've already done a partial read, terminate
	 * the read but return no error.
	 */
	if (oresid != uio->uio_resid)
		error = 0;

	return (error);
}
Example 28
/*
 * wrxmem does the real work of write requests for xmemfs.
 */
static int
wrxmem(struct xmount *xm, struct xmemnode *xp, struct uio *uio,
	struct cred *cr, struct caller_context *ct)
{
	uint_t		blockoffset;	/* offset in the block */
	uint_t		blkwr;		/* offset in blocks into xmem file */
	uint_t		blkcnt;
	caddr_t		base;
	ssize_t		bytes;		/* bytes to uiomove */
	struct vnode	*vp;
	int		error = 0;
	size_t		bsize = xm->xm_bsize;
	rlim64_t	limit = uio->uio_llimit;
	long		oresid = uio->uio_resid;
	timestruc_t 	now;
	offset_t	offset;

	/*
	 * xp->xn_size is incremented before the uiomove
	 * is done on a write.  If the move fails (bad user
	 * address) reset xp->xn_size.
	 * The better way would be to increment xp->xn_size
	 * only if the uiomove succeeds.
	 */
	long		xn_size_changed = 0;
	offset_t	old_xn_size;

	vp = XNTOV(xp);
	ASSERT(vp->v_type == VREG);

	XMEMPRINTF(1, ("wrxmem: vp %p resid %lx off %llx\n",
	    (void *)vp, uio->uio_resid, uio->uio_loffset));

	ASSERT(RW_WRITE_HELD(&xp->xn_contents));
	ASSERT(RW_WRITE_HELD(&xp->xn_rwlock));

	if (MANDLOCK(vp, xp->xn_mode)) {
		rw_exit(&xp->xn_contents);
		/*
		 * xmem_getattr ends up being called by chklock
		 */
		error = chklock(vp, FWRITE,
			uio->uio_loffset, uio->uio_resid, uio->uio_fmode, ct);

		rw_enter(&xp->xn_contents, RW_WRITER);
		if (error != 0) {
			XMEMPRINTF(8, ("wrxmem: vp %p error %x\n",
			    (void *)vp, error));
			return (error);
		}
	}

	if ((offset = uio->uio_loffset) < 0)
		return (EINVAL);

	if (offset >= limit) {
		proc_t *p = ttoproc(curthread);

		mutex_enter(&p->p_lock);
		(void) rctl_action(rctlproc_legacy[RLIMIT_FSIZE], p->p_rctls,
		    p, RCA_UNSAFE_SIGINFO);
		mutex_exit(&p->p_lock);
		return (EFBIG);
	}

	if (uio->uio_resid == 0) {
		XMEMPRINTF(8, ("wrxmem: vp %p resid %lx\n",
			(void *)vp, uio->uio_resid));
		return (0);
	}

	/*
	 * Get the highest blocknumber and allocate page array if needed.
	 * Note that if xm_bsize != PAGESIZE, each ppa[] is pointer to
	 * a page array rather than just a page.
	 */
	blkcnt = howmany((offset + uio->uio_resid), bsize);
	blkwr = offset >> xm->xm_bshift;	/* write begins here */

	XMEMPRINTF(1, ("wrxmem: vp %p blkcnt %x blkwr %x xn_ppasz %lx\n",
	    (void *)vp, blkcnt, blkwr, xp->xn_ppasz));

	/* file size increase */
	if (xp->xn_ppasz < blkcnt) {

		page_t		***ppa;
		int		ppasz;
		uint_t		blksinfile = howmany(xp->xn_size, bsize);

		/*
		 * check if sufficient blocks are available for the given offset.
		 */
		if (blkcnt - blksinfile > xm->xm_max - xm->xm_mem)
			return (ENOSPC);

		/*
		 * to prevent reallocating every time the file grows by a
		 * single block, double the size of the array.
		 */
		if (blkcnt < xp->xn_ppasz * 2)
			ppasz = xp->xn_ppasz * 2;
		else
			ppasz = blkcnt;


		ppa = kmem_zalloc(ppasz * sizeof (page_t **), KM_SLEEP);

		ASSERT(ppa);

		if (xp->xn_ppasz) {
			bcopy(xp->xn_ppa, ppa, blksinfile * sizeof (*ppa));
			kmem_free(xp->xn_ppa, xp->xn_ppasz * sizeof (*ppa));
		}
		xp->xn_ppa = ppa;
		xp->xn_ppasz = ppasz;

		/*
		 * fill in the 'hole' if write offset beyond file size. This
		 * helps in creating large files quickly; an application can
		 * lseek to a large offset and perform a single write
		 * operation to create the large file.
		 */

		if (blksinfile < blkwr) {

			old_xn_size = xp->xn_size;
			xp->xn_size = (offset_t)blkwr * bsize;

			XMEMPRINTF(4, ("wrxmem: fill vp %p blks %x to %x\n",
			    (void *)vp, blksinfile, blkcnt - 1));
			error = xmem_fillpages(xp, vp,
				(offset_t)blksinfile * bsize,
				(offset_t)(blkcnt - blksinfile) * bsize, 1);
			if (error) {
				/* truncate file back to original size */
				(void) xmemnode_trunc(xm, xp, old_xn_size);
				return (error);
			}
			/*
			 * if error on blkwr, this allows truncation of the
			 * filled hole.
			 */
			xp->xn_size = old_xn_size;
		}
	}

	do {
		offset_t	pagestart, pageend;
		page_t		**ppp;

		blockoffset = (uint_t)offset & (bsize - 1);
		/*
		 * A maximum of xm->xm_bsize bytes of data is transferred
		 * each pass through this loop
		 */
		bytes = MIN(bsize - blockoffset, uio->uio_resid);

		ASSERT(bytes);

		if (offset + bytes >= limit) {
			if (offset >= limit) {
				error = EFBIG;
				goto out;
			}
			bytes = limit - offset;
		}


		if (!xp->xn_ppa[blkwr]) {
			/* zero fill new pages - simplify partial updates */
			error = xmem_fillpages(xp, vp, offset, bytes, 1);
			if (error)
				return (error);
		}

		/* grow the file to the new length */
		if (offset + bytes > xp->xn_size) {
			xn_size_changed = 1;
			old_xn_size = xp->xn_size;
			xp->xn_size = offset + bytes;
		}

#ifdef LOCKNEST
		xmem_getpage();
#endif

		/* xn_ppa[] is a page_t * if ppb == 1 */
		if (xm->xm_ppb == 1)
			ppp = (page_t **)&xp->xn_ppa[blkwr];
		else
			ppp = &xp->xn_ppa[blkwr][btop(blockoffset)];

		pagestart = offset & ~(offset_t)(PAGESIZE - 1);
		/*
		 * subtract 1 in case (offset + bytes) is a multiple of PAGESIZE
		 * so that pageend is the actual index of the last page.
		 */
		pageend = (offset + bytes - 1) & ~(offset_t)(PAGESIZE - 1);

		base = segxmem_getmap(xm->xm_map, vp,
			pagestart, pageend - pagestart + PAGESIZE,
			ppp, S_WRITE);

		rw_exit(&xp->xn_contents);

		error = uiomove(base + (offset - pagestart), bytes,
							UIO_WRITE, uio);
		segxmem_release(xm->xm_map, base,
				pageend - pagestart + PAGESIZE);

		/*
		 * Re-acquire contents lock.
		 */
		rw_enter(&xp->xn_contents, RW_WRITER);
		/*
		 * If the uiomove failed, fix up xn_size.
		 */
		if (error) {
			if (xn_size_changed) {
				/*
				 * The uiomove failed, and we
				 * allocated blocks, so get rid
				 * of them.
				 */
				(void) xmemnode_trunc(xm, xp, old_xn_size);
			}
		} else {
			if ((xp->xn_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) &&
			    (xp->xn_mode & (S_ISUID | S_ISGID)) &&
			    secpolicy_vnode_setid_retain(cr,
			    (xp->xn_mode & S_ISUID) != 0 && xp->xn_uid == 0)
				!= 0) {

				/*
				 * Clear Set-UID & Set-GID bits on
				 * successful write if not privileged
				 * and at least one of the execute bits
				 * is set.  If we always clear Set-GID,
				 * mandatory file and record locking is
				 * unusable.
				 */
				xp->xn_mode &= ~(S_ISUID | S_ISGID);
			}
			gethrestime(&now);
			xp->xn_mtime = now;
			xp->xn_ctime = now;
		}
		offset = uio->uio_loffset;	/* uiomove sets uio_loffset */
		blkwr++;
	} while (error == 0 && uio->uio_resid > 0 && bytes != 0);

out:
	/*
	 * If we've already done a partial-write, terminate
	 * the write but return no error.
	 */
	if (oresid != uio->uio_resid)
		error = 0;
	return (error);
}
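The xn_ppa resize near the top of the function is the classic amortized-doubling growth strategy: when the file grows past the current array, allocate at least twice the old size so that repeated single-block appends do not reallocate every time. A minimal user-level sketch of the same idea (grow_ptr_array is a hypothetical helper, not part of xmemfs):

#include <stdlib.h>
#include <string.h>

/* Return a zeroed array holding at least `need` slots, doubling to amortize. */
static void **
grow_ptr_array(void **old, size_t oldsz, size_t need, size_t *newszp)
{
	size_t newsz = (need < oldsz * 2) ? oldsz * 2 : need;
	void **new = calloc(newsz, sizeof (void *));

	if (new == NULL)
		return (NULL);
	if (old != NULL) {
		memcpy(new, old, oldsz * sizeof (void *));
		free(old);
	}
	*newszp = newsz;
	return (new);
}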
Example 29
/*
 * vas_fault()
 *	Process a fault within the given address space
 *
 * Returns 0 if the fault could be resolved, 1 if the process needs to
 * receive an event.  The HAT layer is expected to reliably hold
 * a translation added via hat_addtrans() until hat_deletetrans().
 * A lost translation would cause the atl to hold multiple entries.
 */
vas_fault(void *vas, void *vaddr, int write)
{
	struct pview *pv;
	struct pset *ps;
	struct perpage *pp;
	uint idx, pvidx;
	int error = 0;
	int wasvalid;

	/*
	 * Easiest--no view matches address
	 */
	if ((pv = find_pview(vas, vaddr)) == 0) {
		return(1);
	}
	ASSERT_DEBUG(pv->p_valid, "vas_fault: pview !p_valid");
	ps = pv->p_set;

	/*
	 * Next easiest--trying to write to read-only view
	 */
	if (write && (pv->p_prot & PROT_RO)) {
		v_lock(&ps->p_lock, SPL0_SAME);
		return(1);
	}

	/*
	 * Transfer from pset lock to page slot lock
	 */
	pvidx = btop((char *)vaddr - (char *)pv->p_vaddr);
	idx = pvidx + pv->p_off;
	pp = find_pp(ps, idx);
	lock_slot(ps, pp);

	/*
	 * If the slot is bad, can't fill
	 */
	if (pp->pp_flags & PP_BAD) {
		error = 1;
		goto out;
	}

	/*
	 * If slot is invalid, request it be filled.  Otherwise just
	 * add a reference.
	 */
	if (!(pp->pp_flags & PP_V)) {
		wasvalid = 0;
		if ((*(ps->p_ops->psop_fillslot))(ps, pp, idx)) {
			error = 1;
			goto out;
		}
		ASSERT(pp->pp_flags & PP_V, "vm_fault: lost the page");
	} else {
		wasvalid = 1;
		ref_slot(ps, pp, idx);
	}

	/*
	 * Break COW association when we write it
	 */
	if ((pp->pp_flags & PP_COW) && write) {
		/*
		 * May or may not be there.  If it is, remove
		 * its reference from the per-page struct.
		 */
		if (wasvalid) {
			if (pv->p_valid[pvidx]) {
				ASSERT(delete_atl(pp, pv, pvidx) == 0,
					"vas_fault: p_valid no atl");
				pv->p_valid[pvidx] = 0;
			}
			deref_slot(ps, pp, idx);
		}
		cow_write(ps, pp, idx);
		ASSERT(pp->pp_flags & PP_V, "vm_fault: lost the page 2");

	/*
	 * If not writing to a COW association, then inhibit adding
	 * the translation if it's already present (another thread
	 * ran and brought it in for us, probably)
	 */
	} else if (pv->p_valid[pvidx]) {
		deref_slot(ps, pp, idx);
		goto out;
	}

	/*
	 * With a valid slot, add a hat translation and tabulate
	 * the entry with an atl.
	 */
	add_atl(pp, pv, pvidx, 0);
	hat_addtrans(pv, vaddr, pp->pp_pfn, pv->p_prot |
		((pp->pp_flags & PP_COW) ? PROT_RO : 0));
	ASSERT_DEBUG(pv->p_valid[pvidx] == 0, "vas_fault: p_valid went on");
	pv->p_valid[pvidx] = 1;

	/*
	 * Free the various things we hold and return
	 */
out:
	unlock_slot(ps, pp);
	return(error);
}
Example 30
long
lx_sysinfo(struct lx_sysinfo *sip)
{
	struct lx_sysinfo si;
	zone_t *zone = curthread->t_procp->p_zone;
	uint64_t zphysmem, zfreemem, ztotswap, zfreeswap;

	si.si_uptime = gethrestime_sec() - zone->zone_boot_time;

	/*
	 * We scale down the load in avenrun to allow larger load averages
	 * to fit in 32 bits.  Linux doesn't, so we remove the scaling
	 * here.
	 */
	si.si_loads[0] = zone->zone_avenrun[0] << FSHIFT;
	si.si_loads[1] = zone->zone_avenrun[1] << FSHIFT;
	si.si_loads[2] = zone->zone_avenrun[2] << FSHIFT;

	/*
	 * In linux each thread looks like a process, so we conflate the
	 * two in this stat as well.
	 */
	si.si_procs = (int32_t)zone->zone_nlwps;

	/*
	 * If memory or swap limits are set on the zone, use those, otherwise
	 * use the system values. physmem and freemem are in pages, but the
	 * zone values are in bytes. Likewise, ani_max and ani_free are in
	 * pages.
	 */
	if (zone->zone_phys_mem_ctl == UINT64_MAX) {
		zphysmem = physmem;
		zfreemem = freemem;
	} else {
		zphysmem = btop(zone->zone_phys_mem_ctl);
		zfreemem = btop(zone->zone_phys_mem_ctl - zone->zone_phys_mem);
	}

	if (zone->zone_max_swap_ctl == UINT64_MAX) {
		ztotswap = k_anoninfo.ani_max;
		zfreeswap = k_anoninfo.ani_free;
	} else {
		/*
		 * See the comment in swapctl for a description of how free is
		 * calculated within a zone.
		 */
		rctl_qty_t used;
		spgcnt_t avail;
		uint64_t max;

		avail = MAX((spgcnt_t)(availrmem - swapfs_minfree), 0);
		max = k_anoninfo.ani_max + k_anoninfo.ani_mem_resv + avail;

		mutex_enter(&zone->zone_mem_lock);
		ztotswap = btop(zone->zone_max_swap_ctl);
		used = btop(zone->zone_max_swap);
		mutex_exit(&zone->zone_mem_lock);

		zfreeswap = MIN(ztotswap, max) - used;
	}

	/*
	 * If the maximum memory stat is less than 2^20 pages (i.e. 4GB),
	 * then we report the result in bytes.  Otherwise we use pages.
	 * Once we start supporting >1TB systems/zones, we'll need a third
	 * option.
	 */
	if (MAX(zphysmem, ztotswap) < 1024 * 1024) {
		si.si_totalram = ptob(zphysmem);
		si.si_freeram = ptob(zfreemem);
		si.si_totalswap = ptob(ztotswap);
		si.si_freeswap = ptob(zfreeswap);
		si.si_mem_unit = 1;
	} else {
		si.si_totalram = zphysmem;
		si.si_freeram = zfreemem;
		si.si_totalswap = ztotswap;
		si.si_freeswap = zfreeswap;
		si.si_mem_unit = PAGESIZE;
	}
	si.si_bufferram = 0;
	si.si_sharedram = 0;

	/*
	 * These two stats refer to high physical memory.  If an
	 * application running in a Linux zone cares about this, then
	 * either it or we are broken.
	 */
	si.si_totalhigh = 0;
	si.si_freehigh = 0;

	if (copyout(&si, sip, sizeof (si)) != 0)
		return (set_errno(EFAULT));
	return (0);
}