Example #1
/*
 * Hunt down any pages in the system that have not yet been retired, invoking
 * the provided callback function on each of them.
 */
void
page_retire_hunt(void (*callback)(page_t *))
{
	page_t *pp;
	page_t *first;
	uint64_t tbr, found;
	int i;

	PR_DEBUG(prd_hunt);

	if (PR_KSTAT_PENDING == 0) {
		return;
	}

	PR_DEBUG(prd_dohunt);

	found = 0;
	mutex_enter(&pr_q_mutex);

	tbr = PR_KSTAT_PENDING;

	for (i = 0; i < PR_PENDING_QMAX; i++) {
		if ((pp = pr_pending_q[i]) != NULL) {
			mutex_exit(&pr_q_mutex);
			callback(pp);
			mutex_enter(&pr_q_mutex);
			found++;
		}
	}

	if (PR_KSTAT_EQFAIL == PR_KSTAT_DQFAIL && found == tbr) {
		mutex_exit(&pr_q_mutex);
		PR_DEBUG(prd_earlyhunt);
		return;
	}
	mutex_exit(&pr_q_mutex);

	PR_DEBUG(prd_latehunt);

	/*
	 * We've lost track of a page somewhere. Hunt it down.
	 */
	memsegs_lock(0);
	pp = first = page_first();
	do {
		if (PP_PR_REQ(pp)) {
			callback(pp);
			if (++found == tbr) {
				break;	/* got 'em all */
			}
		}
	} while ((pp = page_next(pp)) != first);
	memsegs_unlock(0);
}
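
The hunt above is two-phase: a cheap pass over the small pending queue, then, only if the kstat accounting says requests are still outstanding, a full walk of every page in the system. Below is a minimal user-level sketch of that shape in plain C; the page_t, pending_q, and all_pages names here are stand-ins invented for illustration, and the kernel's locking and kstat bookkeeping are left out.

#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the kernel's page_t and pending queue (illustration only). */
typedef struct page { int id; int retire_requested; } page_t;

#define	QMAX	8
static page_t *pending_q[QMAX];		/* fast path: small pending queue */
static page_t all_pages[32];		/* slow path: every page in the system */

/*
 * Two-phase hunt: service whatever sits on the pending queue first, and
 * only fall back to scanning every page if some requests remain
 * unaccounted for.
 */
static void
hunt(void (*callback)(page_t *), size_t to_be_retired)
{
	size_t found = 0;
	size_t i;

	for (i = 0; i < QMAX; i++) {
		if (pending_q[i] != NULL) {
			callback(pending_q[i]);
			found++;
		}
	}

	if (found == to_be_retired)
		return;			/* the fast path got them all */

	for (i = 0; i < sizeof (all_pages) / sizeof (all_pages[0]); i++) {
		if (all_pages[i].retire_requested) {
			callback(&all_pages[i]);
			if (++found == to_be_retired)
				break;	/* got 'em all */
		}
	}
}

static void
retire_one(page_t *pp)
{
	(void) printf("retiring page %d\n", pp->id);
}

int
main(void)
{
	all_pages[5].id = 5;
	all_pages[5].retire_requested = 1;
	hunt(retire_one, 1);
	return (0);
}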
Example #2
/*
 * Page retire self-test. For now, it always returns 0.
 */
int
page_retire_test(void)
{
	page_t *first, *pp, *cpp, *cpp2, *lpp;

	/*
	 * Tests the corner case where a large page can't be retired
	 * because one of the constituent pages is locked. We mark
	 * one page to be retired and try to retire it, and mark the
	 * other page to be retired but don't try to retire it, so
	 * that page_unlock() in the failure path will recurse and try
	 * to retire THAT page. This is the worst possible situation
	 * we can get ourselves into.
	 */
	memsegs_lock(0);
	pp = first = page_first();
	do {
		if (pp->p_szc && PP_PAGEROOT(pp) == pp) {
			cpp = pp + 1;
			lpp = PP_ISFREE(pp)? pp : pp + 2;
			cpp2 = pp + 3;
			if (!page_trylock(lpp, pp == lpp? SE_EXCL : SE_SHARED))
				continue;
			if (!page_trylock(cpp, SE_EXCL)) {
				page_unlock(lpp);
				continue;
			}
			page_settoxic(cpp, PR_FMA | PR_BUSY);
			page_settoxic(cpp2, PR_FMA);
			page_tryretire(cpp);	/* will fail */
			page_unlock(lpp);
			(void) page_retire(cpp->p_pagenum, PR_FMA);
			(void) page_retire(cpp2->p_pagenum, PR_FMA);
		}
	} while ((pp = page_next(pp)) != first);
	memsegs_unlock(0);

	return (0);
}
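
The noteworthy pattern in the loop above is the trylock back-out: take the first lock with a trylock, attempt the second, and if that fails, drop the first and move on rather than block. Here is a small self-contained sketch of the same idea using POSIX mutexes; it only illustrates the pattern and does not reproduce the kernel's page_trylock()/page_unlock() interfaces.

#include <pthread.h>
#include <stdio.h>

/*
 * Trylock back-out: acquire a pair of locks with trylock only, and if the
 * second cannot be taken, release the first and report failure instead of
 * blocking while holding a lock.
 */
static int
trylock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
	if (pthread_mutex_trylock(a) != 0)
		return (0);
	if (pthread_mutex_trylock(b) != 0) {
		(void) pthread_mutex_unlock(a);
		return (0);
	}
	return (1);		/* caller now holds both locks */
}

int
main(void)
{
	pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
	pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

	if (trylock_pair(&m1, &m2)) {
		(void) printf("got both locks\n");
		(void) pthread_mutex_unlock(&m2);
		(void) pthread_mutex_unlock(&m1);
	}
	return (0);
}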
Example #3
/*
 * We want to add memory, but have no spare page_t structures.  Use some of
 * our new memory for the page_t structures.
 *
 * Somewhat similar to kphysm_add_memory_dynamic(), but simpler.
 */
static int
balloon_init_new_pages(mfn_t framelist[], pgcnt_t count)
{
	pgcnt_t	metapgs, totalpgs, num_pages;
	paddr_t	metasz;
	pfn_t	meta_start;
	page_t	*page_array;
	caddr_t	va;
	int	i, rv, locked;
	mem_structs_t *mem;
	struct memseg *segp;

	/* Calculate the number of pages we're going to add */
	totalpgs = bln_stats.bln_new_target - bln_stats.bln_current_pages;

	/*
	 * The following calculates the number of "meta" pages -- the pages
	 * that will be required to hold page_t structures for all new pages.
	 * Proof of this calculation is left up to the reader.
	 */
	metapgs = totalpgs - (((uint64_t)(totalpgs) << PAGESHIFT) /
	    (PAGESIZE + sizeof (page_t)));

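	/*
	 * For reference, a sketch of that proof: if d of the totalpgs
	 * pages stay data pages and m become meta pages, then
	 * d + m == totalpgs, and the meta pages must have room for the
	 * data pages' page_t structures: m * PAGESIZE >= d * sizeof (page_t).
	 * Solving at equality gives
	 * d = totalpgs * PAGESIZE / (PAGESIZE + sizeof (page_t)),
	 * so m = totalpgs - d, which is the expression above.
	 */
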
	/*
	 * Given the number of page_t structures we need, is there also
	 * room in our meta pages for a memseg and memlist struct?
	 * If not, we'll need one more meta page.
	 */
	if ((metapgs << PAGESHIFT) < (totalpgs * sizeof (page_t) +
	    MEM_STRUCT_SIZE))
		metapgs++;

	/*
	 * metapgs is calculated from totalpgs, which may be much larger than
	 * count.  If we don't have enough pages, all of the pages in this
	 * batch will be made meta pages, and a future trip through
	 * balloon_inc_reservation() will add the rest of the meta pages.
	 */
	if (metapgs > count)
		metapgs = count;

	/*
	 * Figure out the number of page_t structures that can fit in metapgs
	 *
	 * This will cause us to initialize more page_t structures than we
	 * need - these may be used in future memory increases.
	 */
	metasz = pfn_to_pa(metapgs);
	num_pages = (metasz - MEM_STRUCT_SIZE) / sizeof (page_t);

	DTRACE_PROBE3(balloon__alloc__stats, pgcnt_t, totalpgs, pgcnt_t,
	    num_pages, pgcnt_t, metapgs);

	/*
	 * We only increment mfn_count by count, not num_pages, to keep the
	 * space of all valid pfns contiguous.  This means we create page_t
	 * structures with invalid pagenums -- we deal with this situation
	 * in balloon_page_sub.
	 */
	mfn_count += count;

	/*
	 * Get a VA for the pages that will hold page_t and other structures.
	 * The memseg and memlist structures will go at the beginning, with
	 * the page_t structures following.
	 */
	va = (caddr_t)vmem_alloc(heap_arena, metasz, VM_SLEEP);
	/* LINTED: improper alignment */
	mem = (mem_structs_t *)va;
	page_array = mem->pages;

	meta_start = bln_stats.bln_max_pages;

	/*
	 * Set the mfn to pfn mapping for the meta pages.
	 */
	locked = balloon_lock_contig_pfnlist(metapgs);
	for (i = 0; i < metapgs; i++) {
		reassign_pfn(bln_stats.bln_max_pages + i, framelist[i]);
	}
	if (locked)
		unlock_contig_pfnlist();

	/*
	 * For our meta pages, map them in and zero the page.
	 * This will be the first time touching the new pages.
	 */
	hat_devload(kas.a_hat, va, metasz, bln_stats.bln_max_pages,
	    PROT_READ | PROT_WRITE,
	    HAT_LOAD | HAT_LOAD_LOCK | HAT_LOAD_NOCONSIST);
	bzero(va, metasz);

	/*
	 * Initialize the page array for the new pages.
	 */
	for (i = 0; i < metapgs; i++) {
		page_array[i].p_pagenum = bln_stats.bln_max_pages++;
		page_array[i].p_offset = (u_offset_t)-1;
		page_iolock_init(&page_array[i]);
		rv = page_lock(&page_array[i], SE_EXCL, NULL, P_NO_RECLAIM);
		ASSERT(rv == 1);
	}

	/*
	 * For the rest of the pages, initialize the page_t struct and
	 * add them to the free list
	 */
	for (i = metapgs; i < num_pages; i++) {
		page_array[i].p_pagenum = bln_stats.bln_max_pages++;
		page_array[i].p_offset = (u_offset_t)-1;
		page_iolock_init(&page_array[i]);
		rv = page_lock(&page_array[i], SE_EXCL, NULL, P_NO_RECLAIM);
		ASSERT(rv == 1);
		balloon_page_add(&page_array[i]);
	}

	/*
	 * Remember where I said that we don't call this function?  The missing
	 * code right here is why.  We need to set up kpm mappings for any new
	 * pages coming in.  However, if someone starts up a domain with small
	 * memory, then greatly increases it, we could get in some horrible
	 * deadlock situations as we steal page tables for kpm use, and
	 * userland applications take them right back before we can use them
	 * to set up our new memory.  Once a way around that is found, and a
	 * few other changes are made, we'll be able to enable this code.
	 */

	/*
	 * Update kernel structures, part 1: memsegs list
	 */
	mem->memseg.pages_base = meta_start;
	mem->memseg.pages_end = bln_stats.bln_max_pages - 1;
	mem->memseg.pages = &page_array[0];
	mem->memseg.epages = &page_array[num_pages - 1];
	mem->memseg.next = NULL;
	memsegs_lock(1);
	for (segp = memsegs; segp->next != NULL; segp = segp->next)
		;
	segp->next = &mem->memseg;
	memsegs_unlock(1);

	/*
	 * Update kernel structures, part 2: mem_node array
	 */
	mem_node_add_slice(meta_start, bln_stats.bln_max_pages);

	/*
	 * Update kernel structures, part 3: phys_install array
	 * (*sigh* how many of these things do we need?)
	 */
	memlist_write_lock();
	memlist_add(pfn_to_pa(meta_start), num_pages, &mem->memlist,
	    &phys_install);
	memlist_write_unlock();

	build_pfn_hash();

	return (metapgs);
}
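
The sizing arithmetic in balloon_init_new_pages() is easy to check on its own. The sketch below reruns the metapgs and num_pages formulas with made-up sizes (4K pages, a 120-byte page_t, a 512-byte structure header); all of the constants are illustrative assumptions, not the kernel's real values.

#include <stdio.h>
#include <stdint.h>

/* Illustrative values only; the real sizes come from the kernel build. */
#define	PAGESHIFT	12
#define	PAGESIZE	(1ULL << PAGESHIFT)
#define	PAGE_T_SIZE	120ULL		/* hypothetical sizeof (page_t) */
#define	STRUCT_SIZE	512ULL		/* hypothetical MEM_STRUCT_SIZE */

int
main(void)
{
	uint64_t totalpgs = 100000;	/* pages we want to add */
	uint64_t metapgs, metasz, num_pages;

	/* Meta pages needed to hold page_t structures for the new pages. */
	metapgs = totalpgs - ((totalpgs << PAGESHIFT) /
	    (PAGESIZE + PAGE_T_SIZE));

	/* One more meta page if the memseg/memlist header doesn't fit. */
	if ((metapgs << PAGESHIFT) < (totalpgs * PAGE_T_SIZE + STRUCT_SIZE))
		metapgs++;

	/* How many page_t structures actually fit in those meta pages. */
	metasz = metapgs << PAGESHIFT;
	num_pages = (metasz - STRUCT_SIZE) / PAGE_T_SIZE;

	(void) printf("totalpgs=%llu metapgs=%llu num_pages=%llu\n",
	    (unsigned long long)totalpgs, (unsigned long long)metapgs,
	    (unsigned long long)num_pages);
	return (0);
}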