/*
 * Replacement for devmap_devmem_setup() which will map a machine address
 * instead of a register set/offset.
 */
void
gfxp_map_devmem(devmap_cookie_t dhc, gfx_maddr_t maddr, size_t length,
    ddi_device_acc_attr_t *attrp)
{
	devmap_handle_t *dhp = (devmap_handle_t *)dhc;
	pfn_t pfn;

#ifdef __xpv
	ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));
	pfn = xen_assign_pfn(mmu_btop(maddr));
#else
	pfn = mmu_btop(maddr);
#endif

	dhp->dh_pfn = pfn;
	dhp->dh_len = mmu_ptob(mmu_btopr(length));
	dhp->dh_roff = 0;

#ifndef DEVMAP_DEVMEM_COOKIE
#define	DEVMAP_DEVMEM_COOKIE	((ddi_umem_cookie_t)0x1) /* XXPV */
#endif /* DEVMAP_DEVMEM_COOKIE */
	dhp->dh_cookie = DEVMAP_DEVMEM_COOKIE;
	/*LINTED: E_EXPR_NULL_EFFECT*/
	dhp->dh_flags |= DEVMAP_DEFAULTS;
	dhp->dh_maxprot = PROT_ALL & dhp->dh_orig_maxprot;

	/* no callbacks needed */
	bzero(&dhp->dh_callbackops, sizeof (struct devmap_callback_ctl));

	switch (attrp->devacc_attr_dataorder) {
	case DDI_UNORDERED_OK_ACC:
		dhp->dh_hat_attr = HAT_UNORDERED_OK;
		break;
	case DDI_MERGING_OK_ACC:
		dhp->dh_hat_attr = HAT_MERGING_OK;
		break;
	case DDI_LOADCACHING_OK_ACC:
		dhp->dh_hat_attr = HAT_LOADCACHING_OK;
		break;
	case DDI_STORECACHING_OK_ACC:
		dhp->dh_hat_attr = HAT_STORECACHING_OK;
		break;
	case DDI_STRICTORDER_ACC:
	default:
		dhp->dh_hat_attr = HAT_STRICTORDER;
	}

	/* don't use large pages */
	dhp->dh_mmulevel = 0;
	dhp->dh_flags &= ~DEVMAP_FLAG_LARGE;

	dhp->dh_flags |= DEVMAP_SETUP_DONE;
}
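
For context, a minimal sketch of how a driver's devmap(9E) entry point might use this; xx_devmap and xx_fb_maddr are hypothetical names, and the access attributes shown are just one plausible choice, not taken from any real driver:

static int
xx_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
    size_t *maplen, uint_t model)
{
	ddi_device_acc_attr_t attr;

	attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC; /* -> HAT_STRICTORDER */

	/* map a machine address (frame buffer base + offset), not a regset */
	gfxp_map_devmem(dhp, xx_fb_maddr + off, len, &attr);

	*maplen = ptob(btopr(len));
	return (0);
}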
Example #2
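/*
 * Unload the kernel (kas) mappings covering the page-aligned range that
 * contains [addr, addr + len) and return that virtual range to the
 * device arena it was allocated from.
 */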
void
psm_unmap(caddr_t addr, size_t len)
{
	uint_t pgoffset;
	caddr_t base;
	pgcnt_t npages;

	if (len == 0)
		return;

	pgoffset = (uintptr_t)addr & MMU_PAGEOFFSET;
	base = addr - pgoffset;
	npages = mmu_btopr(len + pgoffset);
	hat_unload(kas.a_hat, base, ptob(npages), HAT_UNLOAD_UNLOCK);
	device_arena_free(base, ptob(npages));
}
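
To make the rounding concrete (assuming 4 KB pages, i.e. MMU_PAGESIZE of 0x1000): unmapping len = 0x2000 bytes at addr = 0xfe001010 gives pgoffset = 0x10, base = 0xfe001000 and npages = mmu_btopr(0x2010) = 3, so a full three pages (0x3000 bytes) are unloaded and handed back to the device arena even though the last page was only partially covered.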
Example #3
/*
 * Estimate how much memory we will need to save
 * the sensitive pages with compression.
 */
static caddr_t
i_cpr_storage_data_alloc(pgcnt_t pages, pgcnt_t *alloc_pages, int retry_cnt)
{
	pgcnt_t alloc_pcnt, last_pcnt;
	caddr_t addr;
	char *str;

	str = "i_cpr_storage_data_alloc:";
	if (retry_cnt == 0) {
		/*
		 * common compression ratio is about 3:1
		 * initial storage allocation is estimated at 40%
		 * to cover the majority of cases
		 */
		alloc_pcnt = INITIAL_ALLOC_PCNT;
		*alloc_pages = (pages * alloc_pcnt) / INTEGRAL;
		CPR_DEBUG(CPR_DEBUG7, "%s sensitive pages: %ld\n", str, pages);
		CPR_DEBUG(CPR_DEBUG7,
		    "%s initial est pages: %ld, alloc %ld%%\n",
		    str, *alloc_pages, alloc_pcnt);
	} else {
		/*
		 * calculate the prior compression percentage (x100)
		 * from the last attempt to save sensitive pages
		 */
		ASSERT(sensitive_pages_saved != 0);
		last_pcnt = (mmu_btopr(sensitive_size_saved) * INTEGRAL) /
		    sensitive_pages_saved;
		CPR_DEBUG(CPR_DEBUG7, "%s last ratio %ld%%\n", str, last_pcnt);

		/*
		 * new estimated storage size is based on
		 * the larger ratio + 5% for each retry:
		 * pages * (last + [5%, 10%])
		 */
		alloc_pcnt = MAX(last_pcnt, INITIAL_ALLOC_PCNT) +
		    (retry_cnt * 5);
		*alloc_pages = (pages * alloc_pcnt) / INTEGRAL;
		CPR_DEBUG(CPR_DEBUG7, "%s Retry est pages: %ld, alloc %ld%%\n",
		    str, *alloc_pages, alloc_pcnt);
	}

	addr = kmem_alloc(mmu_ptob(*alloc_pages), KM_NOSLEEP);
	CPR_DEBUG(CPR_DEBUG7, "%s alloc %ld pages\n", str, *alloc_pages);
	return (addr);
}
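
To put numbers on the estimate (assuming INITIAL_ALLOC_PCNT is 40 and INTEGRAL is 100, as the comments suggest): for 10,000 sensitive pages the first pass asks for 10000 * 40 / 100 = 4,000 pages. If that attempt ends up compressing the 10,000 pages into 3,500 pages' worth of data, last_pcnt is (3500 * 100) / 10000 = 35, so the first retry uses MAX(35, 40) + 5 = 45% and allocates 4,500 pages.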
Example #4
/*
 * Derived from cpr_write_statefile().
 * Allocate (or reallocate after exhausting the supply) descriptors for each
 * chunk of contiguous sensitive kpages.
 */
static int
i_cpr_storage_desc_alloc(csd_t **basepp, pgcnt_t *pgsp, csd_t **endpp,
    int retry)
{
	pgcnt_t npages;
	int chunks;
	csd_t	*descp, *end;
	size_t	len;
	char *str = "i_cpr_storage_desc_alloc:";

	/*
	 * On initial allocation, add some extra to cover overhead caused
	 * by the allocation for the storage area later.
	 */
	if (retry == 0) {
		chunks = cpr_contig_pages(NULL, STORAGE_DESC_ALLOC) +
		    EXTRA_DESCS;
		npages = mmu_btopr(sizeof (**basepp) * (pgcnt_t)chunks);
		CPR_DEBUG(CPR_DEBUG7, "%s chunks %d, ", str, chunks);
	} else {
		CPR_DEBUG(CPR_DEBUG7, "%s retry %d: ", str, retry);
		npages = *pgsp + 1;
	}
	/* Free old descriptors, if any */
	if (*basepp)
		kmem_free((caddr_t)*basepp, mmu_ptob(*pgsp));

	descp = *basepp = kmem_alloc(mmu_ptob(npages), KM_NOSLEEP);
	if (descp == NULL) {
		CPR_DEBUG(CPR_DEBUG7, "%s no space for descriptors!\n", str);
		return (ENOMEM);
	}

	*pgsp = npages;
	len = mmu_ptob(npages);
	end = *endpp = descp + (len / (sizeof (**basepp)));
	CPR_DEBUG(CPR_DEBUG7, "npages 0x%lx, len 0x%lx, items 0x%lx\n\t*basepp "
	    "%p, *endpp %p\n", npages, len, (len / (sizeof (**basepp))),
	    (void *)*basepp, (void *)*endpp);
	i_cpr_storage_desc_init(descp, npages, end);
	return (0);
}
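
For a sense of scale, with purely illustrative values (sizeof (csd_t) of 48 bytes, EXTRA_DESCS of 10, 8 KB pages): if cpr_contig_pages() reports 300 chunks, the initial call needs mmu_btopr(48 * 310) = mmu_btopr(14880) = 2 pages, which holds 16384 / 48 = 341 descriptors; each retry then simply grows the buffer by one more page.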
Example #5
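/*
 * Map the physical range covering [addr, addr + len) into kernel virtual
 * address space carved from the device arena and return the virtual
 * address that corresponds to addr; returns NULL (0) when len is zero or
 * the arena allocation fails.
 */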
caddr_t
psm_map_phys_new(paddr_t addr, size_t len, int prot)
{
	uint_t pgoffset;
	paddr_t base;
	pgcnt_t npages;
	caddr_t cvaddr;

	if (len == 0)
		return (0);

	pgoffset = addr & MMU_PAGEOFFSET;
	base = addr - pgoffset;
	npages = mmu_btopr(len + pgoffset);
	cvaddr = device_arena_alloc(ptob(npages), VM_NOSLEEP);
	if (cvaddr == NULL)
		return (0);
	hat_devload(kas.a_hat, cvaddr, mmu_ptob(npages), mmu_btop(base),
	    prot, HAT_LOAD_LOCK);
	return (cvaddr + pgoffset);
}
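
Since Example #2 above showed the unmap side, here is a minimal sketch pairing the two; xx_touch_regs and the physical address/length are placeholders, not taken from any real driver:

static int
xx_touch_regs(void)
{
	caddr_t regs;

	/* placeholder physical address and length, for illustration only */
	regs = psm_map_phys_new((paddr_t)0xfee00000, 0x20,
	    PROT_READ | PROT_WRITE);
	if (regs == NULL)
		return (-1);

	/* ... device register accesses through regs go here ... */

	/* the same addr/len pair lets psm_unmap rebuild base and npages */
	psm_unmap(regs, 0x20);
	return (0);
}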
Example #6
/*
 * This function takes care of pages which are not in kas or need to be
 * taken care of in a special way.  For example, panicbuf pages are not
 * in kas and their pages are allocated via prom_retain().
 */
pgcnt_t
i_cpr_count_special_kpages(int mapflag, bitfunc_t bitfunc)
{
	struct cpr_map_info *pri, *tail;
	pgcnt_t pages, total = 0;
	pfn_t pfn;

	/*
	 * Save information about prom retained panicbuf pages
	 */
	if (bitfunc == cpr_setbit) {
		pri = &cpr_prom_retain[CPR_PANICBUF];
		pri->virt = (cpr_ptr)panicbuf;
		pri->phys = va_to_pa(panicbuf);
		pri->size = sizeof (panicbuf);
	}

	/*
	 * Go through the prom_retain array to tag those pages.
	 */
	tail = &cpr_prom_retain[CPR_PROM_RETAIN_CNT];
	for (pri = cpr_prom_retain; pri < tail; pri++) {
		pages = mmu_btopr(pri->size);
		for (pfn = ADDR_TO_PN(pri->phys); pages--; pfn++) {
			if (pf_is_memory(pfn)) {
				if (bitfunc == cpr_setbit) {
					if ((*bitfunc)(pfn, mapflag) == 0)
						total++;
				} else
					total++;
			}
		}
	}

	return (total);
}
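
When bitfunc is cpr_setbit, the loop above tags each prom-retained page in the bitmap and counts only the pages that were not already tagged; with any other bit function it simply counts the memory pages. A minimal sketch of a tagging pass, assuming REGULAR_BITMAP is the usual map flag from the CPR headers:

	pgcnt_t newly_tagged;

	/* tag the prom-retained (e.g. panicbuf) pages, count the new ones */
	newly_tagged = i_cpr_count_special_kpages(REGULAR_BITMAP, cpr_setbit);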