Example 1
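/*
 * Load a buffer given by virtual address into a DMA map, one page at a
 * time.  Kernel addresses in the directly-mapped P1/P2 segments are
 * translated arithmetically; everything else is resolved page by page
 * with pmap_extract().
 */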
int
_bus_dmamap_load_vaddr(bus_dma_tag_t t, bus_dmamap_t map,
    void *buf, bus_size_t size, pmap_t pmap)
{
	vaddr_t vaddr;
	paddr_t paddr;
	vaddr_t next, end;
	int error;

	vaddr = (vaddr_t)buf;
	end = vaddr + size;

	if (pmap == pmap_kernel() &&
	    vaddr >= SH3_P1SEG_BASE && end <= SH3_P2SEG_END)
		paddr = SH3_P1SEG_TO_PHYS(vaddr);
	else {
		for (next = (vaddr + PAGE_SIZE) & ~PAGE_MASK;
		    next < end; next += PAGE_SIZE) {
			pmap_extract(pmap, vaddr, &paddr);
			error = _bus_dmamap_load_paddr(t, map,
			    paddr, vaddr, next - vaddr);
			if (error != 0)
				return (error);

			vaddr = next;
		}

		pmap_extract(pmap, vaddr, &paddr);
		size = end - vaddr;
	}

	return (_bus_dmamap_load_paddr(t, map, paddr, vaddr, size));
}
Example 2
/*
 * Map an IO request into kernel virtual address space.
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t uva, kva;
	paddr_t pa;
	vsize_t size, off;
	int npf;
	struct pmap *upmap, *kpmap;

#ifdef DIAGNOSTIC
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
#endif
	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	kpmap = vm_map_pmap(phys_map);
	bp->b_saveaddr = bp->b_data;
	uva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	size = round_page(off + len);
	kva = uvm_km_alloc(phys_map, size, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(kva + off);
	npf = btoc(size);
	while (npf--) {
		if (pmap_extract(upmap, uva, &pa) == false)
			panic("vmapbuf: null page frame");
		pmap_enter(kpmap, kva, pa,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
	pmap_update(kpmap);

	return 0;
}
Example 3
/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
    struct pmap *upmap;
    vaddr_t uva;	/* User VA (map from) */
    vaddr_t kva;	/* Kernel VA (new to) */
    paddr_t pa; 	/* physical address */
    vsize_t off;

    if ((bp->b_flags & B_PHYS) == 0)
        panic("vmapbuf");

    bp->b_saveaddr = bp->b_data;
    uva = trunc_page((vaddr_t)bp->b_data);
    off = (vaddr_t)bp->b_data - uva;
    len = round_page(off + len);
    kva = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
    bp->b_data = (void *)(kva + off);

    upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
    do {
        if (pmap_extract(upmap, uva, &pa) == FALSE)
            panic("vmapbuf: null page frame");
        /* Now map the page into kernel space. */
        pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);

        uva += PAGE_SIZE;
        kva += PAGE_SIZE;
        len -= PAGE_SIZE;
    } while (len);
    pmap_update(pmap_kernel());

    return 0;
}
Example 4
/*
 * Map an IO request into kernel virtual address space.
 */
void
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t pa;
	
#ifdef	DIAGNOSTIC
	if (!(bp->b_flags & B_PHYS))
		panic("vmapbuf");
#endif
	faddr = trunc_page((vaddr_t)(bp->b_saveaddr = bp->b_data));
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_valloc_wait(phys_map, len);
	bp->b_data = (caddr_t)(taddr + off);
	for (; len > 0; len -= NBPG) {
		pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &pa);
		pmap_enter(vm_map_pmap(phys_map), taddr, pa,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
		faddr += NBPG;
		taddr += NBPG;
	}
	pmap_update(vm_map_pmap(phys_map));
}
Example 5
/*
 * Include or exclude pages in a sparse dump, by half-open virtual
 * address interval (which may wrap around the end of the space).
 */
static void
sparse_dump_mark(vaddr_t vbegin, vaddr_t vend, int includep)
{
	pmap_t pmap;
	paddr_t p;
	vaddr_t v;

	/*
	 * If a partial page is called for, the whole page must be included.
	 */
	if (includep) {
		vbegin = rounddown(vbegin, PAGE_SIZE);
		vend = roundup(vend, PAGE_SIZE);
	} else {
		vbegin = roundup(vbegin, PAGE_SIZE);
		vend = rounddown(vend, PAGE_SIZE);
	}

	pmap = pmap_kernel();
	for (v = vbegin; v != vend; v += PAGE_SIZE) {
		if (pmap_extract(pmap, v, &p)) {
			if (includep)
				setbit(sparse_dump_physmap, p/PAGE_SIZE);
			else
				clrbit(sparse_dump_physmap, p/PAGE_SIZE);
		}
	}
}
Example 6
/*
 * Add an entry to the IOMMU table.
 */
void
viommu_enter(struct iommu_state *is, struct strbuf_ctl *sb, bus_addr_t va,
    paddr_t pa, int flags)
{
	u_int64_t tsbid = IOTSBSLOT(va, is->is_tsbsize);
	paddr_t page_list[1], addr;
	u_int64_t attr, nmapped;
	int err;

	KASSERT(sb == NULL);

#ifdef DIAGNOSTIC
	if (va < is->is_dvmabase || (va + PAGE_MASK) > is->is_dvmaend)
		panic("viommu_enter: va %#lx not in DVMA space", va);
#endif

	attr = PCI_MAP_ATTR_READ | PCI_MAP_ATTR_WRITE;
	if (flags & BUS_DMA_READ)
		attr &= ~PCI_MAP_ATTR_READ;
	if (flags & BUS_DMA_WRITE)
		attr &= ~PCI_MAP_ATTR_WRITE;

	page_list[0] = trunc_page(pa);
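	/*
	 * The hypervisor call takes the page list by physical address,
	 * so translate the on-stack page_list itself.
	 */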
	if (!pmap_extract(pmap_kernel(), (vaddr_t)page_list, &addr))
		panic("viommu_enter: pmap_extract failed");
	err = hv_pci_iommu_map(is->is_devhandle, tsbid, 1, attr,
	    addr, &nmapped);
	if (err != H_EOK || nmapped != 1)
		panic("hv_pci_iommu_map: err=%d", err);
}
Example 7
/*
 * Map an IO request into kernel virtual address space.
 */
void
vmapbuf(struct buf *bp, vsize_t len)
{
	struct pmap *pm = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	vaddr_t kva, uva;
	vsize_t size, off;

#ifdef DIAGNOSTIC
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
#endif
	bp->b_saveaddr = bp->b_data;
	uva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	size = round_page(off + len);

	kva = uvm_km_valloc_prefer_wait(phys_map, size, uva);
	bp->b_data = (caddr_t)(kva + off);
	while (size > 0) {
		paddr_t pa;

		if (pmap_extract(pm, uva, &pa) == FALSE)
			panic("vmapbuf: null page frame");
		else
			pmap_kenter_pa(kva, pa, UVM_PROT_RW);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
}
Example 8
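/*
 * mmap() support for the video driver: the hardware layer maps the
 * requested offset to a kernel virtual page, whose physical address is
 * then looked up with pmap_extract() and returned.
 */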
paddr_t
videommap(dev_t dev, off_t off, int prot)
{
    struct video_softc *sc;
    int unit;
    caddr_t p;
    paddr_t pa;

    DPRINTF(("%s: off=%d, prot=%d\n", __func__, off, prot));

    unit = VIDEOUNIT(dev);
    if (unit >= video_cd.cd_ndevs ||
            (sc = video_cd.cd_devs[unit]) == NULL)
        return (-1);

    if (sc->sc_dying)
        return (-1);

    if (sc->hw_if->mappage == NULL)
        return (-1);

    p = sc->hw_if->mappage(sc->hw_hdl, off, prot);
    if (p == NULL)
        return (-1);
    if (pmap_extract(pmap_kernel(), (vaddr_t)p, &pa) == FALSE)
        panic("videommap: invalid page");
    sc->sc_vidmode = VIDMODE_MMAP;

    return (pa);
}
Example 9
/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().   
 */
void
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	faddr = trunc_page((vaddr_t)(bp->b_saveaddr = bp->b_data));
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_valloc_wait(phys_map, len);
	bp->b_data = (caddr_t)(taddr + off);
	/*
	 * The region is locked, so we expect that pmap_pte() will return
	 * non-NULL.
	 * XXX: unwise to expect this in a multithreaded environment.
	 * anything can happen to a pmap between the time we lock a 
	 * region, release the pmap lock, and then relock it for
	 * the pmap_extract().
	 *
	 * no need to flush TLB since we expect nothing to be mapped
	 * where we just allocated (TLB will be flushed when our
	 * mapping is removed).
	 */
	while (len) {
		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &fpa);
		pmap_kenter_pa(taddr, fpa, PROT_READ | PROT_WRITE);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
}
Example 10
void
obio_iomem_unmap(void *v, bus_space_handle_t bsh, bus_size_t size)
{
	u_long va, endva;
	bus_addr_t bpa;

	if (bsh >= SH3_P2SEG_BASE && bsh <= SH3_P2SEG_END) {
		/* maybe CS0,1,2,3,4,7 */
		return;
	}

	/* CS5,6 */
	va = trunc_page(bsh);
	endva = round_page(bsh + size);

#ifdef DIAGNOSTIC
	if (endva <= va)
		panic("obio_io_unmap: overflow");
#endif

	pmap_extract(pmap_kernel(), va, &bpa);
	bpa += bsh & PGOFSET;

	pmap_kremove(va, endva - va);

	/*
	 * Free the kernel virtual mapping.
	 */
	uvm_km_free(kernel_map, va, endva - va);
}
Example 11
/*
 * Map a user I/O request into kernel virtual address space.
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;	/* Kernel VA (new to) */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	vaddr_t uva = mips_trunc_page(bp->b_data);
	const vaddr_t off = (vaddr_t)bp->b_data - uva;
	len = mips_round_page(off + len);

	kva = uvm_km_alloc(phys_map, len, atop(uva) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
	KASSERT((atop(kva ^ uva) & uvmexp.colormask) == 0);
	bp->b_saveaddr = bp->b_data;
	bp->b_data = (void *)(kva + off);
	struct pmap * const upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	do {
		paddr_t pa;	/* physical address */
		if (pmap_extract(upmap, uva, &pa) == false)
			panic("vmapbuf: null page frame");
		pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE,
		    PMAP_WIRED);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(pmap_kernel());

	return 0;
}
Example 12
/*
 *	vm_fault_unwire:
 *
 *	Unwire a range of virtual addresses in a map.
 */
void
vm_fault_unwire(vm_map_t map, vm_offset_t start, vm_offset_t end,
    boolean_t fictitious)
{
	vm_paddr_t pa;
	vm_offset_t va;
	vm_page_t m;
	pmap_t pmap;

	pmap = vm_map_pmap(map);

	/*
	 * Since the pages are wired down, we must be able to get their
	 * mappings from the physical map system.
	 */
	for (va = start; va < end; va += PAGE_SIZE) {
		pa = pmap_extract(pmap, va);
		if (pa != 0) {
			pmap_change_wiring(pmap, va, FALSE);
			if (!fictitious) {
				m = PHYS_TO_VM_PAGE(pa);
				vm_page_lock(m);
				vm_page_unwire(m, TRUE);
				vm_page_unlock(m);
			}
		}
	}
}
Example 13
/* This code was originally stolen from the alpha port. */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t pa;
	struct proc *p;
	vm_prot_t prot;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	p = bp->b_proc;
	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(taddr + off);
	len = atop(len);
	prot = bp->b_flags & B_READ ? VM_PROT_READ | VM_PROT_WRITE :
				      VM_PROT_READ;
	while (len--) {
		if (pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map), faddr,
		    &pa) == false)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), taddr, trunc_page(pa),
		    prot, prot | PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}
	pmap_update(vm_map_pmap(phys_map));

	return 0;
}
Example 14
int
mappedcopyout(void *f, void *t, size_t count)
{
	void *fromp = f, *top = t;
	vaddr_t kva;
	paddr_t upa;
	size_t len;
	int off, alignable;
	pmap_t upmap;
#define CADDR2 caddr1

#ifdef DEBUG
	if (mappedcopydebug & MDB_COPYOUT)
		printf("mappedcopyout(%p, %p, %lu), pid %d\n",
		    fromp, top, (u_long)count, curproc->p_pid);
	mappedcopyoutcount++;
#endif

	if (CADDR2 == 0)
		CADDR2 = (void *) uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
		    UVM_KMF_VAONLY);

	kva = (vaddr_t) CADDR2;
	off = (int)((u_long)top & PAGE_MASK);
	alignable = (off == ((u_long)fromp & PAGE_MASK));
	upmap = vm_map_pmap(&curproc->p_vmspace->vm_map);
	while (count > 0) {
		/*
		 * First access of a page, use subyte to make sure
		 * page is faulted in and write access allowed.
		 */
		if (subyte(top, *((char *)fromp)) == -1)
			return EFAULT;
		/*
		 * Map in the page and memcpy data out to it
		 */
		if (pmap_extract(upmap, trunc_page((vaddr_t)top), &upa)
		    == false)
			panic("mappedcopyout: null page frame");
		len = min(count, (PAGE_SIZE - off));
		pmap_enter(pmap_kernel(), kva, upa,
		    VM_PROT_READ|VM_PROT_WRITE,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
		pmap_update(pmap_kernel());
		if (len == PAGE_SIZE && alignable && off == 0)
			copypage(fromp, (void *)kva);
		else
			memcpy((void *)(kva + off), fromp, len);
		fromp += len;
		top += len;
		count -= len;
		off = 0;
	}
	pmap_remove(pmap_kernel(), kva, kva + PAGE_SIZE);
	pmap_update(pmap_kernel());
	return 0;
#undef CADDR2
}
Example 15
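/*
 * Bring up the PL111 CLCD controller on the RealView board: steal physical
 * pages for a framebuffer, program the controller with the framebuffer's
 * physical address and display timings, and hand the mode off to the
 * console initialization code.
 */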
void RealView_framebuffer_init(void)
{
    gRealviewPl111Base = ml_io_map(REALVIEW_PL111_BASE, PAGE_SIZE);

    /*
     * The hardware demands a framebuffer, but the framebuffer has to be given
     * in a hardware address.
     */
    void *framebuffer = pmap_steal_memory(1024 * 768 * 4);
    void *framebuffer_phys = pmap_extract(kernel_pmap, framebuffer);

    uint32_t depth = 2;
    uint32_t width = 1024;
    uint32_t height = 768;

    uint32_t pitch = (width * depth);
    uint32_t fb_length = (pitch * width);

    uint32_t timingRegister, controlRegister;

    /*
     * Set framebuffer address 
     */
    HARDWARE_REGISTER(gRealviewPl111Base + PL111_UPPER_FB) = framebuffer_phys;
    HARDWARE_REGISTER(gRealviewPl111Base + PL111_LOWER_FB) = framebuffer_phys;

    /*
     * Initialize timings to 1024x768x16 
     */
    HARDWARE_REGISTER(gRealviewPl111Base + PL111_TIMINGS_0) = LCDTIMING0_PPL(width);
    HARDWARE_REGISTER(gRealviewPl111Base + PL111_TIMINGS_1) = LCDTIMING1_LPP(height);

    /*
     * Enable the TFT/LCD Display 
     */
    HARDWARE_REGISTER(gRealviewPl111Base + PL111_CONTROL) = LCDCONTROL_LCDEN | LCDCONTROL_LCDTFT | LCDCONTROL_LCDPWR | LCDCONTROL_LCDBPP(5);

    PE_state.video.v_baseAddr = (unsigned long) framebuffer_phys;
    PE_state.video.v_rowBytes = width * 4;
    PE_state.video.v_width = width;
    PE_state.video.v_height = height;
    PE_state.video.v_depth = 4 * (8);   // 16bpp

    kprintf(KPRINTF_PREFIX "framebuffer initialized\n");
    bzero(framebuffer, (pitch * height));

    char tempbuf[16];
    
    if (PE_parse_boot_argn("-graphics-mode", tempbuf, sizeof(tempbuf))) {
        /*
         * BootX like framebuffer.
         */
        memset(framebuffer, 0xb9, PE_state.video.v_rowBytes * PE_state.video.v_height);
        initialize_screen((void *) &PE_state.video, kPEGraphicsMode);
    } else {
        initialize_screen((void *) &PE_state.video, kPETextMode);
    }
}
Example 16
/*
 * Create writeable aliases of memory we need
 * to write to as kernel is mapped read-only
 */
void *codepatch_maprw(vaddr_t *nva, vaddr_t dest)
{
	paddr_t kva = trunc_page((paddr_t)dest);
	paddr_t po = (paddr_t)dest & PAGE_MASK;
	paddr_t pa1, pa2;

	if (*nva == 0)
		*nva = (vaddr_t)km_alloc(2 * PAGE_SIZE, &kv_any, &kp_none,
					&kd_waitok);

	pmap_extract(pmap_kernel(), kva, &pa1);
	pmap_extract(pmap_kernel(), kva + PAGE_SIZE, &pa2);
	pmap_kenter_pa(*nva, pa1, PROT_READ | PROT_WRITE);
	pmap_kenter_pa(*nva + PAGE_SIZE, pa2, PROT_READ | PROT_WRITE);
	pmap_update(pmap_kernel());

	return (void *)(*nva + po);
}
Example 17
/*
 * Convert kernel VA to physical address
 */
int
kvtop(caddr_t addr)
{
    paddr_t pa;

    if (pmap_extract(pmap_kernel(), (vaddr_t)addr, &pa) == FALSE)
        panic("kvtop: zero page frame");
    return((int)pa);
}
Example 18
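/*
 * Translate an offset relative to a virtual base address (passed as the
 * private data pointer) into a physical address via the kernel pmap.
 */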
static vm_offset_t
boot_map(
	void *		data,	/* private data */
	vm_offset_t	offset)	/* offset to map */
{
	vm_offset_t	start_offset = (vm_offset_t) data;

	return pmap_extract(kernel_pmap, start_offset + offset);
}
Example 19
static paddr_t
getphysmem(u_long size)
{
	struct	memarr *pmemarr;	/* physical memory regions */
	int	npmemarr;		/* number of entries in pmemarr */
	struct memarr *mp;
	int i;
	extern char start[];	/* top of stack (see srt0.S) */

	/*
	 * Find the physical memory area that's in use by the boot loader.
	 * Our stack grows down from label `start'; assume we need no more
	 * than 16K of stack space.
	 * The top of the boot loader is the next 4MB boundary.
	 */
	if (pmap_extract((vaddr_t)start - (16*1024), &bstart) != 0)
		return ((paddr_t)-1);

	bend = roundup(bstart, 0x400000);

	/*
	 * Get available physical memory from the prom.
	 */
	npmemarr = prom_makememarr(NULL, 0, MEMARR_AVAILPHYS);
	pmemarr = alloc(npmemarr*sizeof(struct memarr));
	if (pmemarr == NULL)
		return ((paddr_t)-1);
	npmemarr = prom_makememarr(pmemarr, npmemarr, MEMARR_AVAILPHYS);

	/*
	 * Find a suitable loading address.
	 */
	for (mp = pmemarr, i = npmemarr; --i >= 0; mp++) {
		paddr_t pa = (paddr_t)pmemarr[i].addr;
		u_long len = (u_long)pmemarr[i].len;

		/* Check whether it will fit in front of us */
		if (pa < bstart && len >= size && (bstart - pa) >= size)
			return (pa);

		/* Skip the boot program memory */
		if (pa < bend) {
			if (len < bend - pa)
				/* Not large enough */
				continue;

			/* Shrink this segment */
			len -=  bend - pa;
			pa = bend;
		}

		/* Does it fit in the remainder of this segment? */
		if (len >= size)
			return (pa);
	}
	return ((paddr_t)-1);
}
Example 20
/*
 * Convert kernel VA to physical address
 */
int
kvtop(void *addr)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), (vaddr_t)addr, &pa) == false)
		panic("kvtop: zero page frame");
	return (int)pa;
}
Example 21
File: ldc.c Project: ryo/netbsd-src
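/*
 * Reset an LDC channel: reprogram the transmit and receive queues with the
 * hypervisor (resolving their physical addresses with pmap_extract() in the
 * non-bus_dma case) and mark the channel down.
 */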
void
ldc_reset(struct ldc_conn *lc)
{
	int err;
	vaddr_t va;
	paddr_t pa;

	DPRINTF(("Resetting connection\n"));

	mutex_enter(&lc->lc_txq->lq_mtx);

#if OPENBSD_BUSDMA
	err = hv_ldc_tx_qconf(lc->lc_id,
	    lc->lc_txq->lq_map->dm_segs[0].ds_addr, lc->lc_txq->lq_nentries);
#else
	va = lc->lc_txq->lq_va;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
	err = hv_ldc_tx_qconf(lc->lc_id, pa, lc->lc_txq->lq_nentries);
#endif
	if (err != H_EOK)
		printf("%s: hv_ldc_tx_qconf %d\n", __func__, err);

#if OPENBSD_BUSDMA
	err = hv_ldc_rx_qconf(lc->lc_id,
	    lc->lc_rxq->lq_map->dm_segs[0].ds_addr, lc->lc_rxq->lq_nentries);
#else
	va = lc->lc_rxq->lq_va;
	pa = 0;
	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		panic("pmap_extract failed %lx\n", va);
	err = hv_ldc_rx_qconf(lc->lc_id, pa, lc->lc_rxq->lq_nentries);
#endif
	if (err != H_EOK)
		printf("%s: hv_ldc_rx_qconf %d\n", __func__, err);

	lc->lc_tx_seqid = 0;
	lc->lc_state = 0;
	lc->lc_tx_state = lc->lc_rx_state = LDC_CHANNEL_DOWN;
	mutex_exit(&lc->lc_txq->lq_mtx);

	lc->lc_reset(lc);
}
Example 22
/*
 * Return 0 if all pages in the passed buffer lie within the DMA'able
 * range of RAM.
 */
int
_isa_dma_check_buffer(void *buf, bus_size_t buflen, int segcnt,
		bus_size_t boundary, struct proc *p)
{
	vaddr_t vaddr = (vaddr_t)buf;
	vaddr_t endva;
	paddr_t pa, lastpa;
	u_long pagemask = ~(boundary - 1);
	pmap_t pmap;
	int nsegs;

	endva = round_page(vaddr + buflen);

	nsegs = 1;
	lastpa = 0;

	if (p != NULL)
		pmap = p->p_vmspace->vm_map.pmap;
	else
		pmap = pmap_kernel();

	for (; vaddr < endva; vaddr += NBPG) {
		/*
		 * Get physical address for this segment.
		 */
		pmap_extract(pmap, (vaddr_t)vaddr, &pa);
		pa = trunc_page(pa);

		/*
		 * Is it below the DMA'able threshold?
		 */
		if (pa > ISA_DMA_BOUNCE_THRESHOLD)
			return (EINVAL);

		if (lastpa) {
			/*
			 * Check excessive segment count.
			 */
			if (lastpa + NBPG != pa) {
				if (++nsegs > segcnt)
					return (EFBIG);
			}

			/*
			 * Check boundary restriction.
			 */
			if (boundary) {
				if ((lastpa ^ pa) & pagemask)
					return (EINVAL);
			}
		}
		lastpa = pa;
	}

	return (0);
}
Example 23
/*
 * Convert kernel VA to physical address
 */
paddr_t
kvtop(void *addr)
{
	paddr_t pa;
	bool ret;

	ret = pmap_extract(pmap_kernel(), (vaddr_t)addr, &pa);
	KASSERT(ret == true);
	return pa;
}
Example 24
static void
vidcvideo_getdevconfig(vaddr_t dense_addr, u_int mem_size,
		struct fb_devconfig *dc)
{

	dc->dc_vaddr = dense_addr;
	(void) pmap_extract(pmap_kernel(), dc->dc_vaddr, &(dc->dc_paddr));

	vidcvideo_getmode(&dc->mode_info);

	dc->dc_width = dc->mode_info.timings.hdisplay;
	dc->dc_height = dc->mode_info.timings.vdisplay;
	dc->dc_log2_depth = dc->mode_info.log2_bpp;
	dc->dc_depth = 1 << dc->dc_log2_depth;
	dc->dc_videobase = dc->dc_vaddr;
	dc->dc_blanked = 0;

	/* this should/could be done somewhat more elegantly! */
	switch (dc->dc_depth) {
		case 1:
			dc->dc_rowbytes = dc->dc_width / 8;
			break;
		case 2:
			dc->dc_rowbytes = dc->dc_width / 4;
			break;
		case 4:
			dc->dc_rowbytes = dc->dc_width / 2;
			break;
		case 8:
			dc->dc_rowbytes = dc->dc_width;
			break;
		case 16:
			dc->dc_rowbytes = dc->dc_width * 2;
			break;
		case 32:
			dc->dc_rowbytes = dc->dc_width * 4;
			break;
		default:
			printf("Unknown colour depth %d ... what to do ?", dc->dc_depth);
			break;
	}

	/* setup the correct size */
	dc->dc_size = mem_size;

	/* initialize colormap and cursor resource */
	vidcvideo_colourmap_and_cursor_init(dc);

	/* blank the memory */
	memset((void*)dc->dc_vaddr, 0, dc->dc_size);

	/* initialise miscellaneous */
	dc->dc_writeback_delay = 0;
}
Example 25
/*
 * Get user stack from the thread.
 * This assumes the thread is unlocked, idle,
 * and 64-bit.
 */
static struct ksample_stack *
stack_capture_user(struct thread *thread)
{
        struct ksample_stack *retval = NULL;
        struct amd64_frame frame = { 0 };
        size_t depth = 0;
        static const size_t MAXDEPTH = 4096 / sizeof(vm_offset_t);
        caddr_t *pcs = NULL;
        int error = 0;
	pmap_t pmap = vmspace_pmap(thread->td_proc->p_vmspace);
	
        frame.f_frame = (void*)thread->td_frame->tf_rbp;
        pcs = malloc(sizeof(*pcs) * MAXDEPTH, M_TEMP, M_WAITOK | M_ZERO);
        pcs[depth++] = (caddr_t)thread->td_frame->tf_rip;

//      printf("%s(%d):  frame.f_frame = %x\n", __FUNCTION__, __LINE__, (unsigned int)frame.f_frame);

        while (frame.f_frame && depth < MAXDEPTH) {
                struct iovec iov;
                struct uio uio;

                iov.iov_base = (caddr_t)&frame;
                iov.iov_len = sizeof(frame);
                uio.uio_iov = &iov;
                uio.uio_iovcnt = 1;
                uio.uio_offset = (off_t)(uintptr_t)frame.f_frame;
                uio.uio_resid = sizeof(frame);
                uio.uio_segflg = UIO_SYSSPACE;
                uio.uio_rw = UIO_READ;
                uio.uio_td = curthread;
		
		// If it's not mapped in, just stop
		if (pmap_extract(pmap, (vm_offset_t)frame.f_frame) == 0) {
			break;
		}
                error = proc_rwmem(thread->td_proc, &uio);
                if (error) {
//			printf("%s(%d):  error = %d\n", __FUNCTION__, __LINE__, error);
			break;
                }
                pcs[depth++] = (caddr_t)frame.f_retaddr;
//              printf("%s(%d):  frame.f_frame = %x\n", __FUNCTION__, __LINE__, (unsigned int)frame.f_frame);
        }
//      printf("%s(%d):  depth = %u\n", __FUNCTION__, __LINE__, (unsigned int)depth);
        retval = malloc(sizeof(struct ksample_stack) + depth * sizeof(caddr_t), M_TEMP, M_WAITOK);
        if (retval) {
                retval->depth = depth;
                bcopy(pcs, retval->pcs, depth * sizeof(pcs[0]));
        }
//	printf("%s(%d)\n", __FUNCTION__, __LINE__);
        free(pcs, M_TEMP);
        return retval;
}
Example 26
/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap.
 */
void
pagemove(caddr_t from, caddr_t to, size_t size)
{
	paddr_t pa;
	boolean_t rv;

	KASSERT(((vaddr_t)from & PGOFSET) == 0);
	KASSERT(((vaddr_t)to & PGOFSET) == 0);
	KASSERT((size & PGOFSET) == 0);
	while (size > 0) {
		rv = pmap_extract(pmap_kernel(), (vaddr_t)from, &pa);
		KASSERT(rv);
		KASSERT(!pmap_extract(pmap_kernel(), (vaddr_t)to, NULL));
		pmap_kremove((vaddr_t)from, PAGE_SIZE);
		pmap_kenter_pa((vaddr_t)to, pa,
			       VM_PROT_READ|VM_PROT_WRITE);
		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}
Example 27
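/*
 * Undo an A32 VME bus_space mapping: recover the physical address of the
 * mapped window from the kernel pmap and release it with vme_unmap().
 */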
void
vme_a32_unmap(bus_space_tag_t tag, bus_space_handle_t handle, bus_size_t size)
{
	struct vme_softc *sc = (void *)vme_cd.cd_devs[0];
	vaddr_t va = (vaddr_t)handle;
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), va, &pa) == FALSE)
		return;

	return vme_unmap(sc, sc->sc_ext_a32, VME_A32, va, pa, size);
}
Example 28
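/*
 * Machine-dependent backend for AcpiOsGetPhysicalAddress(): translate a
 * kernel virtual address to its physical address.
 */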
ACPI_STATUS
acpi_md_OsGetPhysicalAddress(void *LogicalAddress,
    ACPI_PHYSICAL_ADDRESS *PhysicalAddress)
{
	paddr_t pa;

	if (pmap_extract(pmap_kernel(), (vaddr_t) LogicalAddress, &pa)) {
		*PhysicalAddress = pa;
		return (AE_OK);
	}

	return (AE_ERROR);
}
Example 29
void
kvm86_init()
{
	size_t vmdsize;
	char *buf;
	struct kvm86_data *vmd;
	struct pcb *pcb;
	paddr_t pa;
	int i;

	vmdsize = round_page(sizeof(struct kvm86_data)) + PAGE_SIZE;

	if ((buf = km_alloc(vmdsize, &kv_any, &kp_zero, &kd_waitok)) == NULL)
		return;
	
	/* first page is stack */
	vmd = (struct kvm86_data *)(buf + PAGE_SIZE);
	pcb = &vmd->pcb;

	/*
	 * derive pcb and TSS from proc0
	 * we want to access all IO ports, so we need a full-size
	 *  permission bitmap
	 * XXX do we really need the pcb or just the TSS?
	 */
	memcpy(pcb, &proc0.p_addr->u_pcb, sizeof(struct pcb));
	pcb->pcb_tss.tss_esp0 = (int)vmd;
	pcb->pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	for (i = 0; i < sizeof(vmd->iomap) / 4; i++)
		vmd->iomap[i] = 0;
	pcb->pcb_tss.tss_ioopt =
	    ((caddr_t)vmd->iomap - (caddr_t)&pcb->pcb_tss) << 16;

	/* setup TSS descriptor (including our iomap) */
	setsegment(&vmd->sd, &pcb->pcb_tss,
	    sizeof(struct pcb) + sizeof(vmd->iomap) - 1,
	    SDT_SYS386TSS, SEL_KPL, 0, 0);

	/* prepare VM for BIOS calls */
	kvm86_mapbios(vmd);
	if ((bioscallscratchpage = km_alloc(PAGE_SIZE, &kv_any, &kp_dirty,
	    &kd_waitok)) == NULL)
		return;

	pmap_extract(pmap_kernel(), (vaddr_t)bioscallscratchpage, &pa);
	kvm86_map(vmd, pa, BIOSCALLSCRATCHPAGE_VMVA);
	bioscallvmd = vmd;
	bioscalltmpva = (vaddr_t)km_alloc(PAGE_SIZE, &kv_any, &kp_none,
	    &kd_waitok);
	mtx_init(&kvm86_mp_mutex, IPL_IPI);
}
Example 30
int
vsunlock(
	user_addr_t addr,
	user_size_t len,
	__unused int dirtied)
{
#if FIXME  /* [ */
	pmap_t		pmap;
	vm_page_t	pg;
	vm_map_offset_t	vaddr;
	ppnum_t		paddr;
#endif  /* FIXME ] */
	kern_return_t	kret;
	vm_map_t	map;

	map = current_map();

#if FIXME  /* [ */
	if (dirtied) {
		pmap = get_task_pmap(current_task());
		for (vaddr = vm_map_trunc_page(addr, PAGE_MASK);
		     vaddr < vm_map_round_page(addr+len, PAGE_MASK);
		     vaddr += PAGE_SIZE) {
			paddr = pmap_extract(pmap, vaddr);
			pg = PHYS_TO_VM_PAGE(paddr);
			vm_page_set_modified(pg);
		}
	}
#endif  /* FIXME ] */
#ifdef	lint
	dirtied++;
#endif	/* lint */
	kret = vm_map_unwire(map,
			     vm_map_trunc_page(addr,
					       vm_map_page_mask(map)),
			     vm_map_round_page(addr+len,
					       vm_map_page_mask(map)),
			     FALSE);
	switch (kret) {
	case KERN_SUCCESS:
		return (0);
	case KERN_INVALID_ADDRESS:
	case KERN_NO_SPACE:
		return (ENOMEM);
	case KERN_PROTECTION_FAILURE:
		return (EACCES);
	default:
		return (EINVAL);
	}
}