/**
 * Destroys an allocated page.
 *
 * @param pPage         Pointer to the page to be destroyed.
 * @remarks This function expects the page in @c pPage to be shared-locked (SE_SHARED).
 */
static void rtR0MemObjSolPageDestroy(page_t *pPage)
{
    /*
     * We need an exclusive lock on the page before freeing it. If upgrading
     * the shared lock to exclusive fails, drop the page lock and look the
     * page up again in the hash. Record the page offset before dropping the
     * lock, as we must not touch any page_t members once it is dropped.
     */
    AssertPtr(pPage);
    Assert(PAGE_LOCKED_SE(pPage, SE_SHARED));

    u_offset_t offPage = pPage->p_offset;
    int rc = page_tryupgrade(pPage);
    if (!rc)
    {
        page_unlock(pPage);
        page_t *pFoundPage = page_lookup(&g_PageVnode, offPage, SE_EXCL);

        /*
         * Since we allocated the page as PG_NORELOC, the lookup must always
         * return exactly the same page.
         */
        AssertReleaseMsg(pFoundPage == pPage, ("Page lookup failed %p:%llx returned %p, expected %p\n",
                                               &g_PageVnode, offPage, pFoundPage, pPage));
    }
    Assert(PAGE_LOCKED_SE(pPage, SE_EXCL));
    page_pp_unlock(pPage, 0 /* COW */, 1 /* Kernel */);
    page_destroy(pPage, 0 /* move it to the free list */);
}
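
The upgrade-or-relookup dance above is the standard way to move from a shared to an exclusive page lock without blocking other sharers indefinitely. Below is a minimal sketch of the pattern in isolation, assuming the page is hashed on the given vnode; the helper name and parameter layout are illustrative, not part of the original source:

static page_t *
pageSharedToExcl(vnode_t *pVnode, page_t *pPage)
{
    /* Fast path: no other sharers, the lock upgrades in place. */
    if (page_tryupgrade(pPage))
        return pPage;

    /* Record the offset first; pPage may not be touched once unlocked. */
    u_offset_t offPage = pPage->p_offset;
    page_unlock(pPage);

    /* May block until the remaining sharers drop their locks. */
    return page_lookup(pVnode, offPage, SE_EXCL);
}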
Example #2
/*
 * Any changes to this routine must also be carried over to
 * devmap_free_pages() in the seg_dev driver. This is because
 * we currently don't have a special kernel segment for non-paged
 * kernel memory that is exported by drivers to user space.
 */
static void
segkmem_free_vn(vmem_t *vmp, void *inaddr, size_t size, struct vnode *vp,
    void (*func)(page_t *))
{
	page_t *pp;
	caddr_t addr = inaddr;
	caddr_t eaddr;
	pgcnt_t npages = btopr(size);

	ASSERT(((uintptr_t)addr & PAGEOFFSET) == 0);
	ASSERT(vp != NULL);

	if (kvseg.s_base == NULL) {
		segkmem_gc_list_t *gc = inaddr;
		gc->gc_arena = vmp;
		gc->gc_size = size;
		gc->gc_next = segkmem_gc_list;
		segkmem_gc_list = gc;
		return;
	}

	hat_unload(kas.a_hat, addr, size, HAT_UNLOAD_UNLOCK);

	for (eaddr = addr + size; addr < eaddr; addr += PAGESIZE) {
#if defined(__x86)
		pp = page_find(vp, (u_offset_t)(uintptr_t)addr);
		if (pp == NULL)
			panic("segkmem_free: page not found");
		if (!page_tryupgrade(pp)) {
			/*
			 * Some other thread has a sharelock. Wait for
			 * it to drop the lock so we can free this page.
			 */
			page_unlock(pp);
			pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr,
			    SE_EXCL);
		}
#else
		pp = page_lookup(vp, (u_offset_t)(uintptr_t)addr, SE_EXCL);
#endif
		if (pp == NULL)
			panic("segkmem_free: page not found");
		/* Clear p_lckcnt so page_destroy() doesn't update availrmem */
		pp->p_lckcnt = 0;
		if (func)
			func(pp);
		else
			page_destroy(pp, 0);
	}
	if (func == NULL)
		page_unresv(npages);

	if (vmp != NULL)
		vmem_free(vmp, inaddr, size);
}
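
When kvseg is not yet set up (very early boot), the routine cannot free the pages, so it queues the range on segkmem_gc_list; that list is drained once the kernel segment exists. Here is a hedged sketch of the drain loop, modeled on the segkmem_gc() counterpart in seg_kmem.c and reconstructed from memory rather than copied from this listing:

void
segkmem_gc(void)
{
	ASSERT(kvseg.s_base != NULL);
	while (segkmem_gc_list != NULL) {
		segkmem_gc_list_t *gc = segkmem_gc_list;
		/* The header lives inside the range being freed, so unlink
		   it before handing the memory back to the arena. */
		segkmem_gc_list = gc->gc_next;
		segkmem_free(gc->gc_arena, gc, gc->gc_size);
	}
}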
Example #3
static Evas_Object *_animated_unpack_item(Evas_Object *scroller, Evas_Object *page, unsigned int pos)
{
	Evas_Object *out = NULL;
	Evas_Object *item;
	Evas_Object *next_page;

	char buf[32];
	unsigned int page_max_app;
	unsigned int page_no;
	page_scroller_sort_type_e sort_type;

	out = page_unpack_item_at(page, pos);
	retv_if(NULL == out, NULL);

	page_no = page_scroller_get_page_no(scroller, page);
	page_max_app = (unsigned int)(uintptr_t)evas_object_data_get(scroller, "page_max_app");
	sort_type = (page_scroller_sort_type_e)(uintptr_t)evas_object_data_get(scroller, "sort_type");

	pos++;
	while (page && page_no < MAX_PAGE_NO) {
		if (0 == page_count_item(page)) {
			page_destroy(scroller, page);
			break;
		}

		for (; pos < page_max_app; pos++) {
			item = page_unpack_item_at(page, pos);
			if (NULL == item) continue;

			page_pack_item(page, pos - 1, item);
			snprintf(buf, sizeof(buf), "menu%d", pos - 1);
			edje_object_signal_emit(_EDJ(page), STR_MOVE_NEXT, buf);
			edje_object_signal_emit(_EDJ(page), STR_ANI_RETURN, buf);
		}

		if (sort_type == PAGE_SCROLLER_SORT_MAX) {
			return NULL;
		}

		page_no++;
		next_page = page_scroller_get_page_at(scroller, page_no);
		if (next_page) {
			item = page_unpack_item_at(next_page, 0);
			if (NULL == item) continue;

			page_pack_item(page, page_max_app - 1, item);
		} else break;

		pos = 1;
		page = next_page;
	}

	return out;
}
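
A hypothetical caller, to illustrate the contract: the return value is the unpacked item, still alive, and repacking or deleting it is the caller's responsibility. Everything below other than the functions already used above is an assumption:

/* Pull the item out of slot 3; a NULL return means the unpack failed.
 * Re-pack the survivor into the first free slot of the same page. */
Evas_Object *item = _animated_unpack_item(scroller, page, 3);
if (item)
	page_pack_item(page, page_count_item(page), item);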
Example #4
/* ARGSUSED */
void
fs_dispose(struct vnode *vp, page_t *pp, int fl, int dn, struct cred *cr,
    caller_context_t *ct)
{

	ASSERT(fl == B_FREE || fl == B_INVAL);

	if (fl == B_FREE)
		page_free(pp, dn);
	else
		page_destroy(pp, dn);
}
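
fs_dispose() is the generic VOP_DISPOSE fallback: B_FREE puts the page back on the freelist, while B_INVAL destroys its identity outright. A hedged usage sketch, assuming the caller holds the exclusive page lock that page_free() and page_destroy() both require:

/* Invalidate the page entirely; dn == 0 asks for the default freelist
 * placement, and kcred is the kernel credential. */
fs_dispose(vp, pp, B_INVAL, 0, kcred, NULL);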
Example #5
/*
 * Take a retired page off the retired-pages vnode and clear the toxic flags.
 * If "free" is nonzero, lock it and put it back on the freelist. If "free"
 * is zero, the caller already holds SE_EXCL lock so we simply unretire it
 * and don't do anything else with it.
 *
 * Any unretire messages are printed from this routine.
 *
 * Returns 0 if page pp was unretired; else an error code.
 */
int
page_unretire_pp(page_t *pp, int free)
{
	/*
	 * To be retired, a page has to be hashed onto the retired_pages vnode
	 * and have PR_RETIRED set in p_toxic.
	 */
	if (free == 0 || page_try_reclaim_lock(pp, SE_EXCL, SE_RETIRED)) {
		ASSERT(PAGE_EXCL(pp));
		PR_DEBUG(prd_ulocked);
		if (!PP_RETIRED(pp)) {
			PR_DEBUG(prd_unotretired);
			page_unlock(pp);
			return (page_retire_done(pp, PRD_UNR_NOT));
		}

		PR_MESSAGE(CE_NOTE, 1, "unretiring retired"
		    " page 0x%08x.%08x", mmu_ptob((uint64_t)pp->p_pagenum));
		if (pp->p_toxic & PR_FMA) {
			PR_DECR_KSTAT(pr_fma);
		} else if (pp->p_toxic & PR_UE) {
			PR_DECR_KSTAT(pr_ue);
		} else {
			PR_DECR_KSTAT(pr_mce);
		}
		page_clrtoxic(pp, PR_ALLFLAGS);

		if (free) {
			PR_DEBUG(prd_udestroy);
			page_destroy(pp, 0);
		} else {
			PR_DEBUG(prd_uhashout);
			page_hashout(pp, NULL);
		}

		mutex_enter(&freemem_lock);
		availrmem++;
		mutex_exit(&freemem_lock);

		PR_DEBUG(prd_uunretired);
		PR_DECR_KSTAT(pr_retired);
		PR_INCR_KSTAT(pr_unretired);
		return (page_retire_done(pp, PRD_UNR_SUCCESS));
	}
	PR_DEBUG(prd_unotlocked);
	return (page_retire_done(pp, PRD_UNR_CANTLOCK));
}
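
A hedged sketch of how this routine is typically driven, modeled on the page_unretire() entry point: translate a physical address into its page_t and ask for the page back on the freelist. The wrapper name is illustrative:

int
unretire_by_pa(uint64_t pa)
{
	page_t *pp = page_numtopp_nolock(mmu_btop(pa));

	if (pp == NULL)
		return (EINVAL);

	return (page_unretire_pp(pp, 1 /* free: lock it and refree it */));
}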