Example #1
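/*
 * Walk the page-table hierarchy of the EFI 1:1 map for va, allocating
 * any missing intermediate page-table pages with efi_1t1_page(), and
 * return a pointer to the final-level PTE, which is asserted to be
 * empty.  (amd64: PML4 -> PDP -> PD -> PT.)
 */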
static pt_entry_t *
efi_1t1_pte(vm_offset_t va)
{
	pml4_entry_t *pml4e;
	pdp_entry_t *pdpe;
	pd_entry_t *pde;
	pt_entry_t *pte;
	vm_page_t m;
	vm_pindex_t pml4_idx, pdp_idx, pd_idx;
	vm_paddr_t mphys;

	pml4_idx = pmap_pml4e_index(va);
	pml4e = &efi_pml4[pml4_idx];
	if (*pml4e == 0) {
		m = efi_1t1_page(1 + pml4_idx);
		mphys = VM_PAGE_TO_PHYS(m);
		*pml4e = mphys | X86_PG_RW | X86_PG_V;
	} else {
		mphys = *pml4e & ~PAGE_MASK;
	}

	pdpe = (pdp_entry_t *)PHYS_TO_DMAP(mphys);
	pdp_idx = pmap_pdpe_index(va);
	pdpe += pdp_idx;
	if (*pdpe == 0) {
		m = efi_1t1_page(1 + NPML4EPG + (pml4_idx + 1) * (pdp_idx + 1));
		mphys = VM_PAGE_TO_PHYS(m);
		*pdpe = mphys | X86_PG_RW | X86_PG_V;
	} else {
		mphys = *pdpe & ~PAGE_MASK;
	}

	pde = (pd_entry_t *)PHYS_TO_DMAP(mphys);
	pd_idx = pmap_pde_index(va);
	pde += pd_idx;
	if (*pde == 0) {
		m = efi_1t1_page(1 + NPML4EPG + NPML4EPG * NPDPEPG +
		    (pml4_idx + 1) * (pdp_idx + 1) * (pd_idx + 1));
		mphys = VM_PAGE_TO_PHYS(m);
		*pde = mphys | X86_PG_RW | X86_PG_V;
	} else {
		mphys = *pde & ~PAGE_MASK;
	}

	pte = (pt_entry_t *)PHYS_TO_DMAP(mphys);
	pte += pmap_pte_index(va);
	KASSERT(*pte == 0, ("va %#jx *pt %#jx", va, *pte));

	return (pte);
}
Example #2
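/*
 * arm64 counterpart of the walk above: descend the L0-L3 tables for va,
 * allocating missing table pages with efi_1t1_page(), and return a
 * pointer to the L3 (final-level) entry, which must still be empty.
 */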
static pt_entry_t *
efi_1t1_l3(vm_offset_t va)
{
	pd_entry_t *l0, *l1, *l2;
	pt_entry_t *l3;
	vm_pindex_t l0_idx, l1_idx, l2_idx;
	vm_page_t m;
	vm_paddr_t mphys;

	l0_idx = pmap_l0_index(va);
	l0 = &efi_l0[l0_idx];
	if (*l0 == 0) {
		m = efi_1t1_page(1 + l0_idx);
		mphys = VM_PAGE_TO_PHYS(m);
		*l0 = mphys | L0_TABLE;
	} else {
		mphys = *l0 & ~ATTR_MASK;
	}

	l1 = (pd_entry_t *)PHYS_TO_DMAP(mphys);
	l1_idx = pmap_l1_index(va);
	l1 += l1_idx;
	if (*l1 == 0) {
		m = efi_1t1_page(1 + L0_ENTRIES + (l0_idx + 1) * (l1_idx + 1));
		mphys = VM_PAGE_TO_PHYS(m);
		*l1 = mphys | L1_TABLE;
	} else {
		mphys = *l1 & ~ATTR_MASK;
	}

	l2 = (pd_entry_t *)PHYS_TO_DMAP(mphys);
	l2_idx = pmap_l2_index(va);
	l2 += l2_idx;
	if (*l2 == 0) {
		m = efi_1t1_page(1 + L0_ENTRIES + L0_ENTRIES * Ln_ENTRIES +
		    (l0_idx + 1) * (l1_idx + 1) * (l2_idx + 1));
		mphys = VM_PAGE_TO_PHYS(m);
		*l2 = mphys | L2_TABLE;
	} else {
		mphys = *l2 & ~ATTR_MASK;
	}

	l3 = (pt_entry_t *)PHYS_TO_DMAP(mphys);
	l3 += pmap_l3_index(va);
	KASSERT(*l3 == 0, ("%s: Already mapped: va %#jx *pt %#jx", __func__,
	    va, *l3));

	return (l3);
}
Example #3
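/*
 * Back a UMA small-object slab with one wired page and return its
 * direct-map address, retrying (unless M_NOWAIT was passed) until a
 * page becomes available.
 */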
void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	static vm_pindex_t colour;
	vm_page_t m;
	vm_paddr_t pa;
	void *va;
	int pflags;

	*flags = UMA_SLAB_PRIV;
	if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT | VM_ALLOC_WIRED;
	else
		pflags = VM_ALLOC_SYSTEM | VM_ALLOC_WIRED;
	if (wait & M_ZERO)
		pflags |= VM_ALLOC_ZERO;
	for (;;) {
		m = vm_page_alloc(NULL, colour++, pflags | VM_ALLOC_NOOBJ);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			else
				VM_WAIT;
		} else
			break;
	}
	pa = m->phys_addr;
	dump_add_page(pa);
	va = (void *)PHYS_TO_DMAP(pa);
	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		pagezero(va);
	return (va);
}
Example #4
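/*
 * Recursively dump an EPT page-table hierarchy: print every non-zero
 * entry at each level and descend into non-superpage entries through
 * the direct map.
 */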
static void
ept_dump(uint64_t *ptp, int nlevels)
{
	int i, t, tabs;
	uint64_t *ptpnext, ptpval;

	if (--nlevels < 0)
		return;

	tabs = 3 - nlevels;
	for (t = 0; t < tabs; t++)
		printf("\t");
	printf("PTP = %p\n", ptp);

	for (i = 0; i < 512; i++) {
		ptpval = ptp[i];

		if (ptpval == 0)
			continue;

		for (t = 0; t < tabs; t++)
			printf("\t");
		printf("%3d 0x%016lx\n", i, ptpval);

		if (nlevels != 0 && (ptpval & EPT_PG_SUPERPAGE) == 0) {
			ptpnext = (uint64_t *)
				  PHYS_TO_DMAP(ptpval & EPT_ADDR_MASK);
			ept_dump(ptpnext, nlevels);
		}
	}
}
Example #5
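/*
 * Re-enter early-boot bus-space mappings into the kernel pmap with the
 * memory attributes that were requested at map time, skipping ranges
 * already covered 1:1 by the direct map.
 */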
void
bs_remap_earlyboot(void)
{
	vm_paddr_t pa, spa;
	vm_offset_t va;
	int i;
	vm_memattr_t ma;

	for (i = 0; i < earlyboot_map_idx; i++) {
		spa = earlyboot_mappings[i].addr;

		if (hw_direct_map &&
		    PHYS_TO_DMAP(spa) == earlyboot_mappings[i].virt &&
		    pmap_dev_direct_mapped(spa, earlyboot_mappings[i].size) == 0)
			continue;

		ma = VM_MEMATTR_DEFAULT;
		switch (earlyboot_mappings[i].flags) {
		case BUS_SPACE_MAP_CACHEABLE:
			ma = VM_MEMATTR_CACHEABLE;
			break;
		case BUS_SPACE_MAP_PREFETCHABLE:
			ma = VM_MEMATTR_PREFETCHABLE;
			break;
		}

		pa = trunc_page(spa);
		va = trunc_page(earlyboot_mappings[i].virt);
		while (pa < spa + earlyboot_mappings[i].size) {
			pmap_kenter_attr(va, pa, ma);
			va += PAGE_SIZE;
			pa += PAGE_SIZE;
		}
	}
}
Example #6
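/*
 * Variant of uma_small_alloc() above: malloc(9) flags are translated
 * with malloc2vm_flags(), and M_NODUMP pages are excluded from kernel
 * dumps.
 */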
void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	vm_page_t m;
	vm_paddr_t pa;
	void *va;
	int pflags;

	*flags = UMA_SLAB_PRIV;
	pflags = malloc2vm_flags(wait) | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED;
	for (;;) {
		m = vm_page_alloc(NULL, 0, pflags);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			else
				VM_WAIT;
		} else
			break;
	}
	pa = m->phys_addr;
	if ((wait & M_NODUMP) == 0)
		dump_add_page(pa);
	va = (void *)PHYS_TO_DMAP(pa);
	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		pagezero(va);
	return (va);
}
Example #7
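/*
 * With a direct map available there is nothing to allocate: the
 * lightweight buffer just records the page and its direct-map KVA.
 */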
struct lwbuf *
lwbuf_alloc(vm_page_t m, struct lwbuf *lwb_cache)
{
    struct lwbuf *lwb = lwb_cache;

    lwb->m = m;
    lwb->kva = PHYS_TO_DMAP(VM_PAGE_TO_PHYS(lwb->m));

    return (lwb);
}
Example #8
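/*
 * Translate a physical address for the NetCommSW code: try the CCSR
 * window and the per-CPU BMAN/QMAN portal windows first, then any
 * existing pmap mapping of the page, and finally fall back to the
 * direct map when one is available.
 */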
void *
XX_PhysToVirt(physAddress_t addr)
{
	struct pv_entry *pv;
	vm_page_t page;
	int cpu;

	/* Check CCSR */
	if (addr >= ccsrbar_pa && addr < ccsrbar_pa + ccsrbar_size)
		return ((void *)((vm_offset_t)(addr - ccsrbar_pa) +
		    ccsrbar_va));

	cpu = PCPU_GET(cpuid);

	/* Handle BMAN mappings */
	if ((addr >= XX_PInfo.portal_ce_pa[BM_PORTAL][cpu]) &&
	    (addr < XX_PInfo.portal_ce_pa[BM_PORTAL][cpu] +
	    XX_PInfo.portal_ce_size[BM_PORTAL][cpu]))
		return ((void *)(XX_PInfo.portal_ce_va[BM_PORTAL] +
		    (vm_offset_t)(addr - XX_PInfo.portal_ce_pa[BM_PORTAL][cpu])));

	if ((addr >= XX_PInfo.portal_ci_pa[BM_PORTAL][cpu]) &&
	    (addr < XX_PInfo.portal_ci_pa[BM_PORTAL][cpu] +
	    XX_PInfo.portal_ci_size[BM_PORTAL][cpu]))
		return ((void *)(XX_PInfo.portal_ci_va[BM_PORTAL] +
		    (vm_offset_t)(addr - XX_PInfo.portal_ci_pa[BM_PORTAL][cpu])));

	/* Handle QMAN mappings */
	if ((addr >= XX_PInfo.portal_ce_pa[QM_PORTAL][cpu]) &&
	    (addr < XX_PInfo.portal_ce_pa[QM_PORTAL][cpu] +
	    XX_PInfo.portal_ce_size[QM_PORTAL][cpu]))
		return ((void *)(XX_PInfo.portal_ce_va[QM_PORTAL] +
		    (vm_offset_t)(addr - XX_PInfo.portal_ce_pa[QM_PORTAL][cpu])));

	if ((addr >= XX_PInfo.portal_ci_pa[QM_PORTAL][cpu]) &&
	    (addr < XX_PInfo.portal_ci_pa[QM_PORTAL][cpu] +
	    XX_PInfo.portal_ci_size[QM_PORTAL][cpu]))
		return ((void *)(XX_PInfo.portal_ci_va[QM_PORTAL] +
		    (vm_offset_t)(addr - XX_PInfo.portal_ci_pa[QM_PORTAL][cpu])));

	page = PHYS_TO_VM_PAGE(addr);
	pv = TAILQ_FIRST(&page->md.pv_list);

	if (pv != NULL)
		return ((void *)(pv->pv_va + ((vm_offset_t)addr & PAGE_MASK)));

	if (PMAP_HAS_DMAP)
		return ((void *)(uintptr_t)PHYS_TO_DMAP(addr));

	printf("NetCommSW: "
	    "Unable to translate physical address 0x%09jx!\n", (uintmax_t)addr);

	return (NULL);
}
Example #9
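/*
 * Wire the guest page backing gpa and return a host pointer to it
 * through the direct map; the held page is passed back via *cookie so
 * the caller can release it.  The requested range must not cross a
 * page boundary.
 */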
void *
vm_gpa_hold(struct vm *vm, vm_paddr_t gpa, size_t len, int reqprot,
	    void **cookie)
{
	int count, pageoff;
	vm_page_t m;

	pageoff = gpa & PAGE_MASK;
	if (len > PAGE_SIZE - pageoff)
		panic("vm_gpa_hold: invalid gpa/len: 0x%016lx/%lu", gpa, len);

	count = vm_fault_quick_hold_pages(&vm->vmspace->vm_map,
	    trunc_page(gpa), PAGE_SIZE, reqprot, &m, 1);

	if (count == 1) {
		*cookie = m;
		return ((void *)(PHYS_TO_DMAP(VM_PAGE_TO_PHYS(m)) + pageoff));
	} else {
		*cookie = NULL;
		return (NULL);
	}
}
Example #10
/*
 * Create the 1:1 virtual to physical map for EFI
 */
bool
efi_create_1t1_map(struct efi_md *map, int ndesc, int descsz)
{
	struct efi_md *p;
	pt_entry_t *l3;
	vm_offset_t va;
	uint64_t idx;
	int i, mode;

	obj_1t1_pt = vm_pager_allocate(OBJT_PHYS, NULL, L0_ENTRIES +
	    L0_ENTRIES * Ln_ENTRIES + L0_ENTRIES * Ln_ENTRIES * Ln_ENTRIES +
	    L0_ENTRIES * Ln_ENTRIES * Ln_ENTRIES * Ln_ENTRIES,
	    VM_PROT_ALL, 0, NULL);
	VM_OBJECT_WLOCK(obj_1t1_pt);
	efi_l0_page = efi_1t1_page(0);
	VM_OBJECT_WUNLOCK(obj_1t1_pt);
	efi_l0 = (pd_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(efi_l0_page));
	bzero(efi_l0, L0_ENTRIES * sizeof(*efi_l0));

	for (i = 0, p = map; i < ndesc; i++, p = efi_next_descriptor(p,
	    descsz)) {
		if ((p->md_attr & EFI_MD_ATTR_RT) == 0)
			continue;
		if (p->md_virt != NULL) {
			if (bootverbose)
				printf("EFI Runtime entry %d is mapped\n", i);
			goto fail;
		}
		if ((p->md_phys & EFI_PAGE_MASK) != 0) {
			if (bootverbose)
				printf("EFI Runtime entry %d is not aligned\n",
				    i);
			goto fail;
		}
		if (p->md_phys + p->md_pages * EFI_PAGE_SIZE < p->md_phys ||
		    p->md_phys + p->md_pages * EFI_PAGE_SIZE >=
		    VM_MAXUSER_ADDRESS) {
			printf("EFI Runtime entry %d is not in mappable for RT:"
			    "base %#016jx %#jx pages\n",
			    i, (uintmax_t)p->md_phys,
			    (uintmax_t)p->md_pages);
			goto fail;
		}
		if ((p->md_attr & EFI_MD_ATTR_WB) != 0)
			mode = VM_MEMATTR_WRITE_BACK;
		else if ((p->md_attr & EFI_MD_ATTR_WT) != 0)
			mode = VM_MEMATTR_WRITE_THROUGH;
		else if ((p->md_attr & EFI_MD_ATTR_WC) != 0)
			mode = VM_MEMATTR_WRITE_COMBINING;
		else if ((p->md_attr & EFI_MD_ATTR_UC) != 0)
			mode = VM_MEMATTR_UNCACHEABLE;
		else {
			if (bootverbose)
				printf("EFI Runtime entry %d mapping "
				    "attributes unsupported\n", i);
			mode = VM_MEMATTR_UNCACHEABLE;
		}

		printf("MAP %lx mode %x pages %lu\n", p->md_phys, mode, p->md_pages);
		VM_OBJECT_WLOCK(obj_1t1_pt);
		for (va = p->md_phys, idx = 0; idx < p->md_pages; idx++,
		    va += PAGE_SIZE) {
			l3 = efi_1t1_l3(va);
			*l3 = va | ATTR_DEFAULT | ATTR_IDX(mode) |
			    ATTR_AP(ATTR_AP_RW) | L3_PAGE;
		}
		VM_OBJECT_WUNLOCK(obj_1t1_pt);
	}

	return (true);
fail:
	efi_destroy_1t1_map();
	return (false);
}
Example #11
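/*
 * /dev/mem and /dev/kmem handler.  Physical addresses below dmaplimit
 * are copied through the direct map; higher addresses get a temporary
 * private mapping via pmap_mapdev().
 */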
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	void *p;
	ssize_t orig_resid;
	u_long v, vd;
	u_int c;
	int error;

	error = 0;
	orig_resid = uio->uio_resid;
	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		v = uio->uio_offset;
		c = ulmin(iov->iov_len, PAGE_SIZE - (u_int)(v & PAGE_MASK));

		switch (dev2unit(dev)) {
		case CDEV_MINOR_KMEM:
			/*
			 * Since c is clamped to be less or equal than
			 * PAGE_SIZE, the uiomove() call does not
			 * access past the end of the direct map.
			 */
			if (v >= DMAP_MIN_ADDRESS &&
			    v < DMAP_MIN_ADDRESS + dmaplimit) {
				error = uiomove((void *)v, c, uio);
				break;
			}

			if (!kernacc((void *)v, c, uio->uio_rw == UIO_READ ?
			    VM_PROT_READ : VM_PROT_WRITE)) {
				error = EFAULT;
				break;
			}

			/*
			 * If the extracted address is not accessible
			 * through the direct map, then we make a
			 * private (uncached) mapping because we can't
			 * depend on the existing kernel mapping
			 * remaining valid until the completion of
			 * uiomove().
			 *
			 * XXX We cannot provide access to the
			 * physical page 0 mapped into KVA.
			 */
			v = pmap_extract(kernel_pmap, v);
			if (v == 0) {
				error = EFAULT;
				break;
			}
			/* FALLTHROUGH */
		case CDEV_MINOR_MEM:
			if (v < dmaplimit) {
				vd = PHYS_TO_DMAP(v);
				error = uiomove((void *)vd, c, uio);
				break;
			}
			if (v > cpu_getmaxphyaddr()) {
				error = EFAULT;
				break;
			}
			p = pmap_mapdev(v, PAGE_SIZE);
			error = uiomove(p, c, uio);
			pmap_unmapdev((vm_offset_t)p, PAGE_SIZE);
			break;
		}
	}
	/*
	 * Don't return error if any byte was written.  Read and write
	 * can return error only if no i/o was performed.
	 */
	if (uio->uio_resid != orig_resid)
		error = 0;
	return (error);
}
Example #12
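/*
 * amd64 construction of the EFI 1:1 map: validate each runtime
 * descriptor, translate its EFI cache attributes into PAT bits, and
 * enter one PTE per page through efi_1t1_pte().
 */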
bool
efi_create_1t1_map(struct efi_md *map, int ndesc, int descsz)
{
	struct efi_md *p;
	pt_entry_t *pte;
	vm_offset_t va;
	uint64_t idx;
	int bits, i, mode;

	obj_1t1_pt = vm_pager_allocate(OBJT_PHYS, NULL, ptoa(1 +
	    NPML4EPG + NPML4EPG * NPDPEPG + NPML4EPG * NPDPEPG * NPDEPG),
	    VM_PROT_ALL, 0, NULL);
	efi_1t1_idx = 0;
	VM_OBJECT_WLOCK(obj_1t1_pt);
	efi_pml4_page = efi_1t1_page();
	VM_OBJECT_WUNLOCK(obj_1t1_pt);
	efi_pml4 = (pml4_entry_t *)PHYS_TO_DMAP(VM_PAGE_TO_PHYS(efi_pml4_page));
	pmap_pinit_pml4(efi_pml4_page);

	for (i = 0, p = map; i < ndesc; i++, p = efi_next_descriptor(p,
	    descsz)) {
		if ((p->md_attr & EFI_MD_ATTR_RT) == 0)
			continue;
		if (p->md_virt != NULL && (uint64_t)p->md_virt != p->md_phys) {
			if (bootverbose)
				printf("EFI Runtime entry %d is mapped\n", i);
			goto fail;
		}
		if ((p->md_phys & EFI_PAGE_MASK) != 0) {
			if (bootverbose)
				printf("EFI Runtime entry %d is not aligned\n",
				    i);
			goto fail;
		}
		if (p->md_phys + p->md_pages * EFI_PAGE_SIZE < p->md_phys ||
		    p->md_phys + p->md_pages * EFI_PAGE_SIZE >=
		    VM_MAXUSER_ADDRESS) {
			printf("EFI Runtime entry %d is not in mappable for RT:"
			    "base %#016jx %#jx pages\n",
			    i, (uintmax_t)p->md_phys,
			    (uintmax_t)p->md_pages);
			goto fail;
		}
		if ((p->md_attr & EFI_MD_ATTR_WB) != 0)
			mode = VM_MEMATTR_WRITE_BACK;
		else if ((p->md_attr & EFI_MD_ATTR_WT) != 0)
			mode = VM_MEMATTR_WRITE_THROUGH;
		else if ((p->md_attr & EFI_MD_ATTR_WC) != 0)
			mode = VM_MEMATTR_WRITE_COMBINING;
		else if ((p->md_attr & EFI_MD_ATTR_WP) != 0)
			mode = VM_MEMATTR_WRITE_PROTECTED;
		else if ((p->md_attr & EFI_MD_ATTR_UC) != 0)
			mode = VM_MEMATTR_UNCACHEABLE;
		else {
			if (bootverbose)
				printf("EFI Runtime entry %d mapping "
				    "attributes unsupported\n", i);
			mode = VM_MEMATTR_UNCACHEABLE;
		}
		bits = pmap_cache_bits(kernel_pmap, mode, FALSE) | X86_PG_RW |
		    X86_PG_V;
		VM_OBJECT_WLOCK(obj_1t1_pt);
		for (va = p->md_phys, idx = 0; idx < p->md_pages; idx++,
		    va += PAGE_SIZE) {
			pte = efi_1t1_pte(va);
			pte_store(pte, va | bits);
		}
		VM_OBJECT_WUNLOCK(obj_1t1_pt);
	}

	return (true);

fail:
	efi_destroy_1t1_map();
	return (false);
}
Example #13
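/*
 * Probe the EFI framebuffer handed over by the loader and fill in the
 * vt(4) fb_info, using the direct map for the framebuffer base until
 * pmap is up and the buffer can be remapped properly.
 */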
static int
vt_efifb_init(struct vt_device *vd)
{
	int		depth, d;
	struct fb_info	*info;
	struct efi_fb	*efifb;
	caddr_t		kmdp;

	info = vd->vd_softc;
	if (info == NULL)
		info = vd->vd_softc = (void *)&local_info;

	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");
	efifb = (struct efi_fb *)preload_search_info(kmdp,
	    MODINFO_METADATA | MODINFOMD_EFI_FB);
	if (efifb == NULL)
		return (CN_DEAD);

	info->fb_height = efifb->fb_height;
	info->fb_width = efifb->fb_width;

	depth = fls(efifb->fb_mask_red);
	d = fls(efifb->fb_mask_green);
	depth = d > depth ? d : depth;
	d = fls(efifb->fb_mask_blue);
	depth = d > depth ? d : depth;
	d = fls(efifb->fb_mask_reserved);
	depth = d > depth ? d : depth;
	info->fb_depth = depth;

	info->fb_stride = efifb->fb_stride * (depth / 8);

	vt_generate_cons_palette(info->fb_cmap, COLOR_FORMAT_RGB,
	    efifb->fb_mask_red, ffs(efifb->fb_mask_red) - 1,
	    efifb->fb_mask_green, ffs(efifb->fb_mask_green) - 1,
	    efifb->fb_mask_blue, ffs(efifb->fb_mask_blue) - 1);

	info->fb_size = info->fb_height * info->fb_stride;
	info->fb_pbase = efifb->fb_addr;
	/*
	 * Use the direct map as a crutch until pmap is available. Once pmap
	 * is online, the framebuffer will be remapped by vt_efifb_remap()
	 * using pmap_mapdev_attr().
	 */
	info->fb_vbase = PHYS_TO_DMAP(efifb->fb_addr);

	/* Get pixel storage size. */
	info->fb_bpp = info->fb_stride / info->fb_width * 8;

	/*
	 * Early FB drivers work with a static window buffer, so reduce to
	 * the minimal size: the buffer or the screen, whichever is smaller.
	 */
	info->fb_width = MIN(info->fb_width, VT_FB_DEFAULT_WIDTH);
	info->fb_height = MIN(info->fb_height, VT_FB_DEFAULT_HEIGHT);

	vt_fb_init(vd);

	return (CN_INTERNAL);
}
Example #14
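/*
 * /dev/mem and /dev/kmem handler for a platform with an optional direct
 * map: physical ranges go through PHYS_TO_DMAP() when hw_direct_map
 * permits, otherwise through uiomove_fromphys() on a temporary vm_page.
 */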
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	int error = 0;
	vm_offset_t va, eva, off, v;
	vm_prot_t prot;
	struct vm_page m;
	vm_page_t marr;
	vm_size_t cnt;

	cnt = 0;
	error = 0;

	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			v = uio->uio_offset;

kmem_direct_mapped:	off = v & PAGE_MASK;
			cnt = PAGE_SIZE - ((vm_offset_t)iov->iov_base &
			    PAGE_MASK);
			cnt = min(cnt, PAGE_SIZE - off);
			cnt = min(cnt, iov->iov_len);

			if (mem_valid(v, cnt)) {
				error = EFAULT;
				break;
			}

			if (hw_direct_map && !pmap_dev_direct_mapped(v, cnt)) {
				error = uiomove((void *)PHYS_TO_DMAP(v), cnt,
				    uio);
			} else {
				m.phys_addr = trunc_page(v);
				marr = &m;
				error = uiomove_fromphys(&marr, off, cnt, uio);
			}
		} else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			va = uio->uio_offset;

			if ((va < VM_MIN_KERNEL_ADDRESS) || (va > virtual_end)) {
				v = DMAP_TO_PHYS(va);
				goto kmem_direct_mapped;
			}

			va = trunc_page(uio->uio_offset);
			eva = round_page(uio->uio_offset + iov->iov_len);

			/* 
			 * Make sure that all the pages are currently resident
			 * so that we don't create any zero-fill pages.
			 */

			for (; va < eva; va += PAGE_SIZE)
				if (pmap_extract(kernel_pmap, va) == 0)
					return (EFAULT);

			prot = (uio->uio_rw == UIO_READ)
			    ? VM_PROT_READ : VM_PROT_WRITE;

			va = uio->uio_offset;
			if (!kernacc((void *)va, iov->iov_len, prot))
				return (EFAULT);

			error = uiomove((void *)va, iov->iov_len, uio);

			continue;
		}
	}

	return (error);
}
Example #15
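/*
 * Early EFI framebuffer attach, similar to vt_efifb_init() above, but
 * honoring the hw.syscons.disable tunable and blanking the framebuffer
 * through its direct-map address before use.
 */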
static int
vt_efb_init(struct vt_device *vd)
{
	int		depth, d, disable, i, len;
	struct fb_info	*info;
	struct efi_fb	*efifb;
	caddr_t		kmdp;

	info = vd->vd_softc;

	disable = 0;
	TUNABLE_INT_FETCH("hw.syscons.disable", &disable);
	if (disable != 0)
		return (CN_DEAD);

	kmdp = preload_search_by_type("elf kernel");
	if (kmdp == NULL)
		kmdp = preload_search_by_type("elf64 kernel");
        efifb = (struct efi_fb *)preload_search_info(kmdp,
            MODINFO_METADATA | MODINFOMD_EFI_FB);
	if (efifb == NULL)
		return (CN_DEAD);

	info->fb_height = efifb->fb_height;
	info->fb_width = efifb->fb_width;

	depth = fls(efifb->fb_mask_red);
	d = fls(efifb->fb_mask_green);
	depth = d > depth ? d : depth;
	d = fls(efifb->fb_mask_blue);
	depth = d > depth ? d : depth;
	d = fls(efifb->fb_mask_reserved);
	depth = d > depth ? d : depth;
	info->fb_depth = depth;

	info->fb_stride = efifb->fb_stride * (depth / 8);

	vt_generate_vga_palette(info->fb_cmap, COLOR_FORMAT_RGB,
	    efifb->fb_mask_red, ffs(efifb->fb_mask_red) - 1,
	    efifb->fb_mask_green, ffs(efifb->fb_mask_green) - 1,
	    efifb->fb_mask_blue, ffs(efifb->fb_mask_blue) - 1);

	info->fb_size = info->fb_height * info->fb_stride;
	info->fb_pbase = efifb->fb_addr;
	/*
	 * We could use pmap_mapdev here except that the kernel pmap
	 * hasn't been created yet and hence any attempt to lock it will
	 * fail.
	 */
	info->fb_vbase = PHYS_TO_DMAP(efifb->fb_addr);

	/* blank full size */
	len = info->fb_size / 4;
	for (i = 0; i < len; i++) {
		((uint32_t *)info->fb_vbase)[i] = 0;
	}

	/* Get pixel storage size. */
	info->fb_bpp = info->fb_stride / info->fb_width * 8;

	/*
	 * Early FB drivers work with a static window buffer, so reduce to
	 * the minimal size: the buffer or the screen, whichever is smaller.
	 */
	info->fb_width = MIN(info->fb_width, VT_FB_DEFAULT_WIDTH);
	info->fb_height = MIN(info->fb_height, VT_FB_DEFAULT_HEIGHT);

	fb_probe(info);
	vt_fb_init(vd);

	return (CN_INTERNAL);
}
Example #16
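/*
 * memrw() variant that runs under Giant and sets TDP_DEVMEMIO around
 * device-memory access; /dev/kmem offsets that fall inside the DMAP
 * window are redirected to the physical path.
 */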
/* ARGSUSED */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	u_long c, v;
	int error, o, sflags;
	vm_offset_t addr, eaddr;

	GIANT_REQUIRED;

	error = 0;
	c = 0;
	sflags = curthread_pflags_set(TDP_DEVMEMIO);
	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}
		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			v = uio->uio_offset;
kmemphys:
			o = v & PAGE_MASK;
			c = min(uio->uio_resid, (u_int)(PAGE_SIZE - o));
			v = PHYS_TO_DMAP(v);
			if (v < DMAP_MIN_ADDRESS ||
			    (v > DMAP_MIN_ADDRESS + dmaplimit &&
			    v <= DMAP_MAX_ADDRESS) ||
			    pmap_kextract(v) == 0) {
				error = EFAULT;
				goto ret;
			}
			error = uiomove((void *)v, (int)c, uio);
			continue;
		} else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			v = uio->uio_offset;

			if (v >= DMAP_MIN_ADDRESS && v < DMAP_MAX_ADDRESS) {
				v = DMAP_TO_PHYS(v);
				goto kmemphys;
			}

			c = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			addr = trunc_page(v);
			eaddr = round_page(v + c);

			if (addr < VM_MIN_KERNEL_ADDRESS) {
				error = EFAULT;
				goto ret;
			}
			for (; addr < eaddr; addr += PAGE_SIZE) {
				if (pmap_extract(kernel_pmap, addr) == 0) {
					error = EFAULT;
					goto ret;
				}
			}
			if (!kernacc((caddr_t)(long)v, c,
			    uio->uio_rw == UIO_READ ?
			    VM_PROT_READ : VM_PROT_WRITE)) {
				error = EFAULT;
				goto ret;
			}

			error = uiomove((caddr_t)(long)v, (int)c, uio);
			continue;
		}
		/* else panic! */
	}
ret:
	curthread_pflags_restore(sflags);
	return (error);
}
Example #17
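/*
 * memrw() for a platform where both kernel virtual and physical
 * addresses may lie inside the direct map (VIRT_IN_DMAP/PHYS_IN_DMAP);
 * anything outside is copied with uiomove_fromphys().
 */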
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	struct vm_page m;
	vm_page_t marr;
	vm_offset_t off, v;
	u_int cnt;
	int error;

	error = 0;

	while (uio->uio_resid > 0 && error == 0) {
		iov = uio->uio_iov;
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}

		v = uio->uio_offset;
		off = v & PAGE_MASK;
		cnt = ulmin(iov->iov_len, PAGE_SIZE - (u_int)off);
		if (cnt == 0)
			continue;

		switch (dev2unit(dev)) {
		case CDEV_MINOR_KMEM:
			/* If the address is in the DMAP just copy it */
			if (VIRT_IN_DMAP(v)) {
				error = uiomove((void *)v, cnt, uio);
				break;
			}

			if (!kernacc((void *)v, cnt, uio->uio_rw == UIO_READ ?
			    VM_PROT_READ : VM_PROT_WRITE)) {
				error = EFAULT;
				break;
			}

			/* Get the physical address to read */
			v = pmap_extract(kernel_pmap, v);
			if (v == 0) {
				error = EFAULT;
				break;
			}

			/* FALLTHROUGH */
		case CDEV_MINOR_MEM:
			/* If within the DMAP use this to copy from */
			if (PHYS_IN_DMAP(v)) {
				v = PHYS_TO_DMAP(v);
				error = uiomove((void *)v, cnt, uio);
				break;
			}

			/* Have uiomove_fromphys handle the data */
			m.phys_addr = trunc_page(v);
			marr = &m;
			error = uiomove_fromphys(&marr, off, cnt, uio);
			break;
		}
	}

	return (error);
}