Example #1
/*
 * Take a retired page off the retired_pages vnode and clear the toxic flags.
 * If "free" is nonzero, lock the page and put it back on the freelist. If
 * "free" is zero, the caller already holds the SE_EXCL lock, so we simply
 * unretire the page and don't do anything else with it.
 *
 * Any unretire messages are printed from this routine.
 *
 * Returns 0 if page pp was unretired; else an error code.
 */
int
page_unretire_pp(page_t *pp, int free)
{
	/*
	 * To be retired, a page has to be hashed onto the retired_pages vnode
	 * and have PR_RETIRED set in p_toxic.
	 */
	if (free == 0 || page_try_reclaim_lock(pp, SE_EXCL, SE_RETIRED)) {
		ASSERT(PAGE_EXCL(pp));
		PR_DEBUG(prd_ulocked);
		if (!PP_RETIRED(pp)) {
			PR_DEBUG(prd_unotretired);
			page_unlock(pp);
			return (page_retire_done(pp, PRD_UNR_NOT));
		}

		PR_MESSAGE(CE_NOTE, 1, "unretiring retired"
		    " page 0x%08x.%08x", mmu_ptob((uint64_t)pp->p_pagenum));
		if (pp->p_toxic & PR_FMA) {
			PR_DECR_KSTAT(pr_fma);
		} else if (pp->p_toxic & PR_UE) {
			PR_DECR_KSTAT(pr_ue);
		} else {
			PR_DECR_KSTAT(pr_mce);
		}
		page_clrtoxic(pp, PR_ALLFLAGS);

		if (free) {
			PR_DEBUG(prd_udestroy);
			page_destroy(pp, 0);
		} else {
			PR_DEBUG(prd_uhashout);
			page_hashout(pp, NULL);
		}

		mutex_enter(&freemem_lock);
		availrmem++;
		mutex_exit(&freemem_lock);

		PR_DEBUG(prd_uunretired);
		PR_DECR_KSTAT(pr_retired);
		PR_INCR_KSTAT(pr_unretired);
		return (page_retire_done(pp, PRD_UNR_SUCCESS));
	}
	PR_DEBUG(prd_unotlocked);
	return (page_retire_done(pp, PRD_UNR_CANTLOCK));
}
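
A minimal usage sketch (not from the original source): a hypothetical wrapper
that looks up the page_t for a physical page number and asks for the page back
on the freelist. page_numtopp_nolock() is the standard pfn-to-page_t lookup;
unretire_pfn() itself and its error handling are illustrative assumptions.

/*
 * Hypothetical caller sketch, not part of the original file: unretire
 * the page backing `pfn' and return it to the freelist (free != 0).
 */
static int
unretire_pfn(pfn_t pfn)
{
	page_t *pp = page_numtopp_nolock(pfn);	/* pfn -> page_t lookup */

	if (pp == NULL)
		return (EINVAL);

	/* free == 1: reclaim the SE_EXCL lock and free the page on success */
	return (page_unretire_pp(pp, 1));
}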
Example #2
/*
 * page_retire_pp() decides what to do with a failing page.
 *
 * When we get a free page (e.g. from the scrubber or in the free path) life
 * is nice because the page is clean and marked free -- those always retire
 * nicely. From there we go by order of difficulty. If the page has data,
 * we attempt to relocate its contents to a suitable replacement page. If
 * that does not succeed, we look to see if it is clean. If after all of
 * this we have a clean, unmapped page (which we usually do!), we retire it.
 * If the page is not clean, we still retire it on a UE; for CEs or FMA
 * requests, we fail and leave the page in service. The page will eventually
 * be tried again later. We always return with the page unlocked since we
 * are called from page_unlock().
 *
 * We don't call panic or do anything fancy down in here. Our boss the DE
 * (the FMA Diagnosis Engine) gets paid handsomely to do his job of figuring
 * out what to do when errors occur. We just do what he tells us to do.
 */
static int
page_retire_pp(page_t *pp)
{
	int		toxic;

	ASSERT(PAGE_EXCL(pp));
	ASSERT(pp->p_iolock_state == 0);
	ASSERT(pp->p_szc == 0);

	PR_DEBUG(prd_top);
	PR_TYPES(pp);

	toxic = pp->p_toxic;
	ASSERT(toxic & PR_REASONS);

	if ((toxic & (PR_FMA | PR_MCE)) && !(toxic & PR_UE) &&
	    page_retire_limit()) {
		page_clrtoxic(pp, PR_FMA | PR_MCE | PR_MSG | PR_BUSY);
		page_retire_dequeue(pp);
		page_unlock(pp);
		return (page_retire_done(pp, PRD_LIMIT));
	}

	if (PP_ISFREE(pp)) {
		int dbgnoreclaim = MTBF(recl_calls, recl_mtbf) == 0;

		PR_DEBUG(prd_free);

		if (dbgnoreclaim || !page_reclaim(pp, NULL)) {
			PR_DEBUG(prd_noreclaim);
			PR_INCR_KSTAT(pr_failed);
			/*
			 * page_reclaim() returns with `pp' unlocked when
			 * it fails.
			 */
			if (dbgnoreclaim)
				page_unlock(pp);
			return (page_retire_done(pp, PRD_FAILED));
		}
	}
	ASSERT(!PP_ISFREE(pp));

	if ((toxic & PR_UE) == 0 && pp->p_vnode && !PP_ISNORELOCKERNEL(pp) &&
	    MTBF(reloc_calls, reloc_mtbf)) {
		page_t *newpp;
		spgcnt_t count;

		/*
		 * If we can relocate the page, great! newpp will go
		 * on without us, and everything is fine.  Regardless
		 * of whether the relocation succeeds, we are still
		 * going to take `pp' around back and shoot it.
		 */
		newpp = NULL;
		if (page_relocate(&pp, &newpp, 0, 0, &count, NULL) == 0) {
			PR_DEBUG(prd_reloc);
			page_unlock(newpp);
			ASSERT(hat_page_getattr(pp, P_MOD) == 0);
		} else {
			PR_DEBUG(prd_relocfail);
		}
	}

	if (hat_ismod(pp)) {
		PR_DEBUG(prd_mod);
		PR_INCR_KSTAT(pr_failed);
		page_unlock(pp);
		return (page_retire_done(pp, PRD_FAILED));
	}

	if (PP_ISKVP(pp)) {
		PR_DEBUG(prd_kern);
		PR_INCR_KSTAT(pr_failed_kernel);
		page_unlock(pp);
		return (page_retire_done(pp, PRD_FAILED));
	}

	if (pp->p_lckcnt || pp->p_cowcnt) {
		PR_DEBUG(prd_locked);
		PR_INCR_KSTAT(pr_failed);
		page_unlock(pp);
		return (page_retire_done(pp, PRD_FAILED));
	}

	(void) hat_pageunload(pp, HAT_FORCE_PGUNLOAD);
	ASSERT(!hat_page_is_mapped(pp));

	/*
	 * If the page is modified and was not relocated, we can't
	 * retire it without dropping data on the floor. We have to
	 * recheck after the unload since the dirty bit could have been
	 * set since we last checked.
	 */
	if (hat_ismod(pp)) {
		PR_DEBUG(prd_mod_late);
		PR_INCR_KSTAT(pr_failed);
		page_unlock(pp);
		return (page_retire_done(pp, PRD_FAILED));
	}

	if (pp->p_vnode) {
		PR_DEBUG(prd_hashout);
		page_hashout(pp, NULL);
	}
	ASSERT(!pp->p_vnode);

	/*
	 * The problem page is locked, demoted, unmapped, not free,
	 * hashed out, and not COW or mlocked (whew!).
	 *
	 * Now we select our ammunition, take it around back, and shoot it.
	 */
	if (toxic & PR_UE) {
		if (page_retire_transient_ue(pp)) {
			PR_DEBUG(prd_uescrubbed);
			return (page_retire_done(pp, PRD_UE_SCRUBBED));
		} else {
			PR_DEBUG(prd_uenotscrubbed);
			page_retire_destroy(pp);
			return (page_retire_done(pp, PRD_SUCCESS));
		}
	} else if (toxic & PR_FMA) {
		PR_DEBUG(prd_fma);
		page_retire_destroy(pp);
		return (page_retire_done(pp, PRD_SUCCESS));
	} else if (toxic & PR_MCE) {
		PR_DEBUG(prd_mce);
		page_retire_destroy(pp);
		return (page_retire_done(pp, PRD_SUCCESS));
	}
	panic("page_retire_pp: bad toxic flags %d", toxic);
	/*NOTREACHED*/
}
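
page_retire_pp() gates page_reclaim() and page_relocate() behind MTBF(), a
fault-injection macro defined elsewhere in page_retire.c and not shown in
this listing. A sketch of the idea, under the assumption of a simple
counter-and-mask scheme (the name is real; the definition below is not the
verbatim macro):

/*
 * Assumed fault-injection sketch: bump a per-site call counter and
 * report "proceed" (nonzero) except once every (mask + 1) calls, when
 * the counter's low bits all line up with the mask. A mask of zero
 * disables injection entirely.
 */
#define	MTBF(cnt, mask)	\
	((mask) == 0 || (((++(cnt)) & (mask)) != (mask)))

On a non-DEBUG build one would expect the macro to collapse to 1, so the
reclaim and relocate paths always run in production kernels.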
Example #3
int
vmxnet3s_txcache_init(vmxnet3s_softc_t *dp, vmxnet3s_txq_t *txq)
{
	int		i;
	int		ndescrs;
	int		node;
	page_t		*page;
	struct seg	kseg;
	vmxnet3s_txcache_t *cache = &dp->txcache;
	dev_info_t	*dip = dp->dip;

	/* Round up: enough whole pages to hold one copy buffer per tx desc. */
	cache->num_pages = ((txq->cmdring.size * VMXNET3_HDR_COPY_SIZE) +
	    (PAGESIZE - 1)) / PAGESIZE;

	/* Allocate pages */
	if (!page_resv(cache->num_pages, KM_SLEEP)) {
		dev_err(dip, CE_WARN, "failed to reserve %d pages",
		    cache->num_pages);
		goto out;
	}

	if (!page_create_wait(cache->num_pages, 0)) {
		dev_err(dip, CE_WARN, "failed to create %d pages",
		    cache->num_pages);
		goto unresv_pages;
	}

	cache->pages = kmem_zalloc(cache->num_pages * sizeof (page_t *),
	    KM_SLEEP);

	cache->page_maps = kmem_zalloc(cache->num_pages * sizeof (caddr_t),
	    KM_SLEEP);

	kseg.s_as = &kas;
	for (i = 0; i < cache->num_pages; i++) {
		page = page_get_freelist(&kvp, 0, &kseg, (caddr_t)(i*PAGESIZE),
		    PAGESIZE, 0, NULL);
		if (page == NULL) {
			page = page_get_cachelist(&kvp, 0, &kseg,
			    (caddr_t)(i * PAGESIZE), 0, NULL);
			if (page == NULL)
				goto free_pages;
			if (!PP_ISAGED(page))
				page_hashout(page, NULL);
		}
		PP_CLRFREE(page);
		PP_CLRAGED(page);
		cache->pages[i] = page;
	}

	/* Drop the exclusive locks to shared for the life of the cache. */
	for (i = 0; i < cache->num_pages; i++)
		page_downgrade(cache->pages[i]);

	/* Allocate virtual address range for mapping pages */
	cache->window = vmem_alloc(heap_arena, ptob(cache->num_pages),
	    VM_SLEEP);
	ASSERT(cache->window);

	cache->num_nodes = txq->cmdring.size;

	/* Map pages */
	for (i = 0; i < cache->num_pages; i++) {
		cache->page_maps[i] = cache->window + ptob(i);
		hat_devload(kas.a_hat, cache->page_maps[i], ptob(1),
		    cache->pages[i]->p_pagenum,
		    PROT_READ | PROT_WRITE | HAT_STRICTORDER,
		    HAT_LOAD_LOCK);
	}

	/* Now setup cache items */
	cache->nodes = kmem_zalloc(txq->cmdring.size *
	    sizeof (vmxnet3s_txcache_node_t), KM_SLEEP);

	ndescrs = txq->cmdring.size;
	node = 0;
	for (i = 0; i < cache->num_pages; i++) {
		caddr_t		va;
		int		j;
		int		lim;
		uint64_t	pa;

		lim = (ndescrs <= VMXNET3_TX_CACHE_ITEMS_PER_PAGE) ? ndescrs :
		    VMXNET3_TX_CACHE_ITEMS_PER_PAGE;
		va = cache->page_maps[i];
		pa = cache->pages[i]->p_pagenum << PAGESHIFT;

		for (j = 0; j < lim; j++) {
			cache->nodes[node].pa = pa;
			cache->nodes[node].va = va;

			pa += VMXNET3_HDR_COPY_SIZE;
			va += VMXNET3_HDR_COPY_SIZE;
			node++;
		}
		ndescrs -= lim;
	}
	return (DDI_SUCCESS);

free_pages:
	page_create_putback(cache->num_pages - i);
	while (--i >= 0) {
		if (!page_tryupgrade(cache->pages[i])) {
			page_unlock(cache->pages[i]);
			while (!page_lock(cache->pages[i], SE_EXCL, NULL,
			    P_RECLAIM))
				;
		}
		page_free(cache->pages[i], 0);
	}
	kmem_free(cache->page_maps, cache->num_pages * sizeof (caddr_t));
	kmem_free(cache->pages, cache->num_pages * sizeof (page_t *));
unresv_pages:
	page_unresv(cache->num_pages);
out:
	cache->num_pages = cache->num_nodes = 0;

	return (DDI_FAILURE);
}
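
For symmetry, a sketch of the matching teardown. The real driver's release
routine is not shown in this listing; the function name vmxnet3s_txcache_fini()
and the exact ordering are assumptions, but each call (hat_unload,
page_tryupgrade/page_lock, page_free, page_unresv, vmem_free, kmem_free) is
the standard counterpart of a setup step above.

/*
 * Hypothetical teardown sketch for the cache built above. Assumes every
 * page is still held with the shared lock taken by page_downgrade().
 */
static void
vmxnet3s_txcache_fini(vmxnet3s_softc_t *dp)
{
	vmxnet3s_txcache_t *cache = &dp->txcache;
	int	i;

	/* Tear down the locked kernel mappings established by hat_devload. */
	hat_unload(kas.a_hat, cache->window, ptob(cache->num_pages),
	    HAT_UNLOAD_UNLOCK);

	/* Reacquire SE_EXCL on each page and return it to the system. */
	for (i = 0; i < cache->num_pages; i++) {
		if (!page_tryupgrade(cache->pages[i])) {
			page_unlock(cache->pages[i]);
			while (!page_lock(cache->pages[i], SE_EXCL, NULL,
			    P_RECLAIM))
				;
		}
		page_free(cache->pages[i], 0);
	}
	page_unresv(cache->num_pages);

	/* Release the VA window and the bookkeeping arrays. */
	vmem_free(heap_arena, cache->window, ptob(cache->num_pages));
	kmem_free(cache->nodes,
	    cache->num_nodes * sizeof (vmxnet3s_txcache_node_t));
	kmem_free(cache->page_maps, cache->num_pages * sizeof (caddr_t));
	kmem_free(cache->pages, cache->num_pages * sizeof (page_t *));
	cache->num_pages = cache->num_nodes = 0;
}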