Example #1
void handle_tlb_miss(struct cpu_regs *regs)
{
	uint32_t esr, dear;
	pgd_t pgd = mmu_get_current_pgd();

	esr = mfspr(SPR_ESR);
	dear = mfspr(SPR_DEAR);

	/*
	 * Search the current page global directory for the
	 * faulting address.
	 */
	if (pte_present(pgd, dear)) {

		/* get page table entry from given fault address */
		pte_t pte = vtopte(pgd, dear);

		if (page_present(pte, dear)) {
			mmu_replace_tlb_entry(dear,
					      (paddr_t)ptetopg(pte, dear),
					      pte);
			return;
		}
	}
	panic("Access denied\n");
}
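Every example in this section leans on vtopte(), which turns a virtual address into a pointer to its page-table entry. On architectures that map the page tables recursively it is usually a one-line macro; a minimal sketch, assuming i386-style names (PTmap is the recursive page-table window, i386_btop() converts bytes to pages):

/*
 * Sketch only: with the page directory mapped into itself, every PTE
 * appears inside a linear array at PTmap, so the entry for va is just
 * PTmap indexed by va's page number.
 */
#define	vtopte(va)	(PTmap + i386_btop(va))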
Example #2
munmap()
{
#ifdef notdef
	register struct a {
		caddr_t	addr;
		int	len;
	} *uap = (struct a *)u.u_ap;
	int off;
	int fv, lv;
	register struct pte *pte;

	if (((int)uap->addr & CLOFSET) || (uap->len & CLOFSET)) {
		u.u_error = EINVAL;
		return;
	}
	fv = btop(uap->addr);
	lv = btop(uap->addr + uap->len - 1);
	if (lv < fv || !isadsv(u.u_procp, fv) || !isadsv(u.u_procp, lv)) {
		u.u_error = EINVAL;
		return;
	}
	for (off = 0; off < uap->len; off += NBPG) {
		pte = vtopte(u.u_procp, fv);
		u.u_procp->p_rssize -= vmemfree(pte, 1);
		*(int *)pte = (PG_UW|PG_FOD);
		((struct fpte *)pte)->pg_fileno = PG_FZERO;
		fv++;
	}
	u.u_procp->p_flag |= SPTECHG;
#endif
}
Example #3
void
cpu_physwindow_init(int cpu)
{
	cpu_data_t		*cdp = cpu_data_ptr[cpu];
	cpu_desc_index_t	*cdi = &cdp->cpu_desc_index;
	vm_offset_t		phys_window;

	if (vm_allocate(kernel_map, &phys_window,
			PAGE_SIZE, VM_FLAGS_ANYWHERE)
				!= KERN_SUCCESS)
		panic("cpu_physwindow_init: couldn't allocate phys map window");

	/*
	 * Make sure the page that encompasses the
	 * pte pointer we're interested in actually
	 * exists in the page table.
	 */
	pmap_expand(kernel_pmap, phys_window);

	cdp->cpu_physwindow_base = phys_window;
	cdp->cpu_physwindow_ptep = vtopte(phys_window);

	cdi->cdi_gdt[sel_idx(PHYS_WINDOW_SEL)] = physwindow_desc_pattern;
	cdi->cdi_gdt[sel_idx(PHYS_WINDOW_SEL)].offset = phys_window;

	fix_desc(&cdi->cdi_gdt[sel_idx(PHYS_WINDOW_SEL)], 1);
}
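The window installed above is used by pointing its PTE at one physical page at a time; a hedged sketch of such a helper, borrowing xnu-style names (pmap_store_pte(), INTEL_PTE_*, invlpg()) but hypothetical in shape:

/*
 * Hypothetical helper: aim this CPU's physical window at the
 * page-aligned address 'pa' and return the VA to access it through.
 */
static vm_offset_t
cpu_physwindow_map(cpu_data_t *cdp, pmap_paddr_t pa)
{
	pmap_store_pte(cdp->cpu_physwindow_ptep,
	    pa | INTEL_PTE_VALID | INTEL_PTE_RW);
	invlpg(cdp->cpu_physwindow_base);	/* drop the stale TLB entry */
	return (cdp->cpu_physwindow_base);
}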
Example #4
int
s3c2xx0_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
               int flag, bus_space_handle_t * bshp)
{
    u_long startpa, endpa, pa;
    vm_offset_t va;
    pt_entry_t *pte;
    const struct pmap_devmap *pd;

    if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
        /* Device was statically mapped. */
        *bshp = pd->pd_va + (bpa - pd->pd_pa);
        return 0;
    }

    startpa = trunc_page(bpa);
    endpa = round_page(bpa + size);

    va = kmem_alloc_nofault(kernel_map, endpa - startpa);
    if (!va)
        return (ENOMEM);

    *bshp = (bus_space_handle_t) (va + (bpa - startpa));

    for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
        pmap_kenter(va, pa);
        pte = vtopte(va);
        if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0)
            *pte &= ~L2_S_CACHE_MASK;
    }
    return (0);
}
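Drivers reach this method through the generic bus_space_map() interface rather than calling it directly; a short usage sketch (the softc field, address and size are placeholders):

	bus_space_handle_t ioh;

	/* Map 0x100 bytes of device registers, uncached (flags = 0). */
	if (bus_space_map(sc->sc_iot, 0x50000000, 0x100, 0, &ioh) != 0)
		panic("cannot map device registers");
	/* Registers are now accessible via bus_space_read_4() etc. */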
Example #5
int
obio_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	const struct pmap_devmap *pd;
	paddr_t startpa, endpa, pa, offset;
	vaddr_t va;
	pt_entry_t *pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return (0);
	}

	endpa = round_page(bpa + size);
	offset = bpa & PAGE_MASK;
	startpa = trunc_page(bpa);
		
	va = uvm_km_valloc(kernel_map, endpa - startpa);
	if (va == 0)
		return ENOMEM;

	*bshp = va + offset;

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		pte = vtopte(va);
		*pte &= ~L2_S_CACHE_MASK;
		PTE_SYNC(pte);
	}
	pmap_update(pmap_kernel());

	return (0);
}
Example #6
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va, sva;
	size_t ssize;
	bus_addr_t addr;
	int curseg;
#ifdef DEBUG_DMA
	pt_entry_t *ptep;
#endif

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

	size = round_page(size);
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, &kd_nowait);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	sva = va;
	ssize = size;
	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", addr, va);
#endif	/* DEBUG_DMA */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_kenter_cache(va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    !(flags & BUS_DMA_COHERENT));

#ifdef DEBUG_DMA
			ptep = vtopte(va);
			printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif	/* DEBUG_DMA */
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return (0);
}
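Callers reach _bus_dmamem_map() as the middle step of the usual bus_dma allocation sequence; an outline, with error unwinding trimmed to the minimum:

	bus_dma_segment_t seg;
	int rseg;
	caddr_t kva;

	/* Allocate raw pages, then map them into kernel VA. */
	if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1, &rseg,
	    BUS_DMA_NOWAIT))
		return (ENOMEM);
	if (bus_dmamem_map(t, &seg, rseg, size, &kva,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) {
		bus_dmamem_free(t, &seg, rseg);
		return (ENOMEM);
	}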
Example #7
/*
 * For optimal cache cleaning we need two 16K banks of
 * virtual address space that NOTHING else will access
 * and then we alternate the cache cleaning between the
 * two banks.
 * The cache cleaning code requires 2 banks aligned
 * on the total size boundary so the banks can be alternated by
 * EORing the size bit (assumes the bank size is a power of 2).
 */
void
ixdp_ixp12x0_cc_setup(void)
{
	int loop;
	paddr_t kaddr;
	pt_entry_t *pte;

	(void) pmap_extract(pmap_kernel(), KERNEL_TEXT_BASE, &kaddr);
	for (loop = 0; loop < CPU_IXP12X0_CACHE_CLEAN_SIZE; loop += PAGE_SIZE) {
		pte = vtopte(ixp12x0_cc_base + loop);
		*pte = L2_S_PROTO | kaddr |
		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
		PTE_SYNC(pte);
	}
	ixp12x0_cache_clean_addr = ixp12x0_cc_base;
	ixp12x0_cache_clean_size = CPU_IXP12X0_CACHE_CLEAN_SIZE / 2;
}
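Since ixp12x0_cache_clean_size ends up holding the bank size (a power of two) and the banks are contiguous and aligned on their combined size, the cleaning code can hop between them by EORing that size bit into the address; a sketch (the helper name is made up):

/* Hypothetical: flip to the other cache-clean bank. */
static inline vaddr_t
next_clean_bank(vaddr_t cur)
{
	return (cur ^ ixp12x0_cache_clean_size);
}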
Example #8
int
i80321_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flag,
    bus_space_handle_t *bshp)
{
	const struct pmap_devmap *pd;
	paddr_t startpa, endpa, pa, pagecnt;
	vaddr_t va;
	pt_entry_t *pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return (0);
	}

#if 0
printf("i80321_bs_map bpa %x, size %x flag %x\n", bpa, size, flag);
#endif
	endpa = round_page(bpa + size);
	startpa = trunc_page(bpa);
	pagecnt = endpa - startpa;

	va = (vaddr_t)km_alloc(endpa - startpa, &kv_any, &kp_none, &kd_nowait);
	if (va == 0)
		return(ENOMEM);
#if 0
printf("i80321_bs_map va %x pa %x, endpa %x, sz %x\n", va, startpa,
    endpa, endpa-startpa);
#endif

	*bshp = (bus_space_handle_t)(va + (bpa - startpa));

	for (pa = startpa; pagecnt > 0;
	    pa += PAGE_SIZE, va += PAGE_SIZE, pagecnt -= PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0) {
			pte = vtopte(va);
			*pte &= ~L2_S_CACHE_MASK;
			PTE_SYNC(pte);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
Example #9
/* mem bs */
int
ixp425_pci_mem_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
	      int cacheable, bus_space_handle_t *bshp)
{
	const struct pmap_devmap	*pd;

	paddr_t		startpa;
	paddr_t		endpa;
	paddr_t		pa;
	paddr_t		offset;
	vaddr_t		va;
	pt_entry_t	*pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return 0;
	}

	endpa = round_page(bpa + size);
	offset = bpa & PAGE_MASK;
	startpa = trunc_page(bpa);

	/* Get some VM.  */
	va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (va == 0)
		return ENOMEM;

	/* Store the bus space handle */
	*bshp = va + offset;

	/* Now map the pages */
	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
		pte = vtopte(va);
		*pte &= ~L2_S_CACHE_MASK;
		PTE_SYNC(pte);
	}
	pmap_update(pmap_kernel());

	return(0);
}
Example #10
int
armv7_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
	      int flag, bus_space_handle_t *bshp)
{
	u_long startpa, endpa, pa;
	vaddr_t va;
	pt_entry_t *pte;

	if ((u_long)bpa > (u_long)KERNEL_BASE) {
		/* Some I/O registers (e.g. UART ports for the console)
		   are mapped at fixed addresses by board-specific
		   routines. */
		*bshp = bpa;
		return(0);
	}

	startpa = trunc_page(bpa);
	endpa = round_page(bpa + size);

	/* XXX use extent manager to check duplicate mapping */

	va = uvm_km_valloc(kernel_map, endpa - startpa);
	if (! va)
		return(ENOMEM);

	*bshp = (bus_space_handle_t)(va + (bpa - startpa));

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0) {
			pte = vtopte(va);
			*pte &= ~L2_S_CACHE_MASK;
			PTE_SYNC(pte);
			/* XXX: pmap_kenter_pa() also does PTE_SYNC(); a bit
			 * of a waste.
			 */
		}
	}
	pmap_update(pmap_kernel());

	return(0);
}
Example #11
/*
 * Determine if the memory at va..(va+len) is valid.
 */
int
kgdb_acc(vaddr_t va, size_t len)
{
	vaddr_t last_va;
	pt_entry_t *pte;

	last_va = va + len;
	va  &= ~PGOFSET;
	last_va &= ~PGOFSET;

	do {
		if (va < VM_MIN_KERNEL_ADDRESS)
			pte = vtopte(va);
		else
			pte = kvtopte(va);
		if ((*pte & PG_V) == 0)
			return (0);
		va += PAGE_SIZE;
	} while (va < last_va);

	return (1);
}
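kgdb_acc() exists so the stub can probe a range before dereferencing it; a hedged sketch of a guarded read (buffer and error path are placeholders):

	/* Refuse the request rather than fault inside the debugger. */
	if (!kgdb_acc(addr, len))
		return (EFAULT);
	memcpy(buf, (void *)addr, len);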
Example #12
int
generic_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	const struct pmap_devmap *pd;
	vm_paddr_t startpa, endpa, pa, offset;
	vm_offset_t va;
	pt_entry_t *pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return (0);
	}

	endpa = round_page(bpa + size);
	offset = bpa & PAGE_MASK;
	startpa = trunc_page(bpa);

	va = kmem_alloc(kernel_map, endpa - startpa);
	if (va == 0)
		return (ENOMEM);

	*bshp = va + offset;

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter(va, pa);
		pte = vtopte(va);
		if (!(flags & BUS_SPACE_MAP_CACHEABLE)) {
			*pte &= ~L2_S_CACHE_MASK;
			PTE_SYNC(pte);
		}
	}

	return (0);
}
Example #13
int
s3c2xx0_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
	       int flag, bus_space_handle_t * bshp)
{
	u_long startpa, endpa, pa;
	vaddr_t va;
	pt_entry_t *pte;
	const struct pmap_devmap	*pd;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return 0;
	}
	startpa = trunc_page(bpa);
	endpa = round_page(bpa + size);

	/* XXX use extent manager to check duplicate mapping */

	va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (!va)
		return (ENOMEM);

	*bshp = (bus_space_handle_t) (va + (bpa - startpa));

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
		pte = vtopte(va);
		if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0)
			*pte &= ~L2_S_CACHE_MASK;
	}
	pmap_update(pmap_kernel());

	return (0);
}
Example #14
static int
dcons_drv_init(int stage)
{
#if defined(__i386__) || defined(__amd64__)
	quad_t addr, size;
#endif

	if (drv_init)
		return(drv_init);

	drv_init = -1;

	bzero(&dg, sizeof(dg));
	dcons_conf = &dg;
	dg.cdev = &dcons_consdev;
	dg.buf = NULL;
	dg.size = DCONS_BUF_SIZE;

#if defined(__i386__) || defined(__amd64__)
	if (getenv_quad("dcons.addr", &addr) > 0 &&
	    getenv_quad("dcons.size", &size) > 0) {
#ifdef __i386__
		vm_paddr_t pa;
		/*
		 * Allow read/write access to dcons buffer.
		 */
		for (pa = trunc_page(addr); pa < addr + size; pa += PAGE_SIZE)
			*vtopte(KERNBASE + pa) |= PG_RW;
		invltlb();
#endif
		/* XXX P to V */
		dg.buf = (struct dcons_buf *)(vm_offset_t)(KERNBASE + addr);
		dg.size = size;
		if (dcons_load_buffer(dg.buf, dg.size, sc) < 0)
			dg.buf = NULL;
	}
#endif
	if (dg.buf != NULL)
		goto ok;

#ifndef KLD_MODULE
	if (stage == 0) { /* XXX or cold */
		/*
		 * DCONS_FORCE_CONSOLE == 1 and statically linked.
		 * Called from cninit(); can't use contigmalloc yet.
		 */
		dg.buf = (struct dcons_buf *) bssbuf;
		dcons_init(dg.buf, dg.size, sc);
	} else
#endif
	{
		/*
		 * DCONS_FORCE_CONSOLE == 0 or kernel module case.
		 * If the module is loaded after boot,
		 * bssbuf could be non-contiguous.
		 */
		dg.buf = (struct dcons_buf *) contigmalloc(dg.size,
			M_DEVBUF, 0, 0x10000, 0xffffffff, PAGE_SIZE, 0ul);
		if (dg.buf == NULL)
			return (-1);
		dcons_init(dg.buf, dg.size, sc);
	}

ok:
	dcons_buf = dg.buf;

	drv_init = 1;

	return 0;
}
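A note on the i386 branch above: the buffer handed over by the loader sits in the kernel's direct mapping at KERNBASE + pa, which may be read-only, so the loop turns on PG_RW in each PTE through vtopte() and issues one invltlb() afterwards; a single flush at the end is enough because the loop only adds permissions.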
Example #15
File: esc.c Project: ryo/netbsd-src
void
escinitialize(struct esc_softc *dev)
{
	int		 i;

	dev->sc_led_status = 0;

	TAILQ_INIT(&dev->sc_xs_pending);
	TAILQ_INIT(&dev->sc_xs_free);

/*
 * Initialize the esc_pending structs and link them into the free list. We
 * have to set vm_link_data.pages to 0 or the vm FIX won't work.
 */
	for(i=0; i<MAXPENDING; i++) {
		TAILQ_INSERT_TAIL(&dev->sc_xs_free, &dev->sc_xs_store[i],
				  link);
	}

/*
 * Calculate the correct clock conversion factor 2 <= factor <= 8, i.e. set
 * the factor to clock_freq / 5 (int).
 */
	if (dev->sc_clock_freq <= 10)
		dev->sc_clock_conv_fact = 2;
	else if (dev->sc_clock_freq <= 40)
		dev->sc_clock_conv_fact = 2+((dev->sc_clock_freq-10)/5);
	else
		panic("escinitialize: Clock frequency too high");

/* Setup and save the basic configuration registers */
	dev->sc_config1 = (dev->sc_host_id & ESC_CFG1_BUS_ID_MASK);
	dev->sc_config2 = ESC_CFG2_FEATURES_ENABLE;
	dev->sc_config3 = (dev->sc_clock_freq > 25 ? ESC_CFG3_FASTCLK : 0);

/* Precalculate timeout value and clock period. */
/* Ekkk ... floating point in the kernel !!!! */
/*	dev->sc_timeout_val  = 1+dev->sc_timeout*dev->sc_clock_freq/
				 (7.682*dev->sc_clock_conv_fact);*/
	dev->sc_timeout_val  = 1+dev->sc_timeout*dev->sc_clock_freq/
				 ((7682*dev->sc_clock_conv_fact)/1000);
	dev->sc_clock_period = 1000/dev->sc_clock_freq;

	escreset(dev, 1 | 2);	/* Reset Chip and Bus */

	dev->sc_units_disconnected = 0;
	dev->sc_msg_in_len = 0;
	dev->sc_msg_out_len = 0;

	dev->sc_flags = 0;

	for(i=0; i<8; i++)
		esc_init_nexus(dev, &dev->sc_nexus[i]);

/*
 * Setup bump buffer.
 */
	dev->sc_bump_va = (u_char *)uvm_km_alloc(kernel_map, dev->sc_bump_sz, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	(void) pmap_extract(pmap_kernel(), (vaddr_t)dev->sc_bump_va,
	    (paddr_t *)&dev->sc_bump_pa);

/*
 * Set the pages noncacheable; that way we don't have to flush the cache
 * every time we need a "bumped" transfer.
 */
	pt_entry_t * const ptep = vtopte((vaddr_t) dev->sc_bump_va);
	const pt_entry_t opte = *ptep;
	const pt_entry_t npte = opte & ~L2_C;
	l2pte_set(ptep, npte, opte);
	PTE_SYNC(ptep);
	cpu_tlb_flushD();
	cpu_dcache_wbinv_range((vaddr_t)dev->sc_bump_va, PAGE_SIZE);

	printf(" dmabuf V0x%08x P0x%08x", (u_int)dev->sc_bump_va, (u_int)dev->sc_bump_pa);
}
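Note the ordering when the bump buffer is made noncacheable: the new PTE value is stored first, PTE_SYNC() pushes the updated entry out to memory, cpu_tlb_flushD() discards the stale cacheable translation, and the final cpu_dcache_wbinv_range() evicts any lines fetched while the old mapping was still live.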
Example #16
smmap()
{
#ifdef notdef
	struct a {
		caddr_t	addr;
		int	len;
		int	prot;
		int	share;
		int	fd;
		off_t	pos;
	} *uap = (struct a *)u.u_ap;
	register struct file *fp;
	register struct inode *ip;
	register struct fpte *pte;
	int off;
	int fv, lv, pm;
	dev_t dev;
	int (*mapfun)();
	extern struct file *getinode();

	fp = getinode(uap->fd);
	if (fp == NULL)
		return;
	ip = (struct inode *)fp->f_data;
	if ((ip->i_mode & IFMT) != IFCHR) {
		u.u_error = EINVAL;
		return;
	}
	dev = ip->i_rdev;
	mapfun = cdevsw[major(dev)].d_mmap;
	if (mapfun == NULL) {
		u.u_error = EINVAL;
		return;
	}
	if (((int)uap->addr & CLOFSET) || (uap->len & CLOFSET) ||
	    (uap->pos & CLOFSET)) {
		u.u_error = EINVAL;
		return;
	}
	if ((uap->prot & PROT_WRITE) && (fp->f_flag&FWRITE) == 0) {
		u.u_error = EINVAL;
		return;
	}
	if ((uap->prot & PROT_READ) && (fp->f_flag&FREAD) == 0) {
		u.u_error = EINVAL;
		return;
	}
	if (uap->share != MAP_SHARED) {
		u.u_error = EINVAL;
		return;
	}
	fv = btop(uap->addr);
	lv = btop(uap->addr + uap->len - 1);
	if (lv < fv || !isadsv(u.u_procp, fv) || !isadsv(u.u_procp, lv)) {
		u.u_error = EINVAL;
		return;
	}
	for (off=0; off<uap->len; off += NBPG) {
		if ((*mapfun)(dev, uap->pos+off, uap->prot) == -1) {
			u.u_error = EINVAL;
			return;
		}
	}
	if (uap->prot & PROT_WRITE)
		pm = PG_UW;
	else
		pm = PG_URKR;
	for (off = 0; off < uap->len; off += NBPG) {
		pte = (struct fpte *)vtopte(u.u_procp, fv);
		u.u_procp->p_rssize -= vmemfree(pte, 1);
		*(int *)pte = pm;
		pte->pg_v = 1;
		pte->pg_fod = 1;
		pte->pg_fileno = uap->fd;
		pte->pg_blkno = (*mapfun)(dev, uap->pos+off, uap->prot);
		fv++;
	}
	u.u_procp->p_flag |= SPTECHG;
	u.u_pofile[uap->fd] |= UF_MAPPED;
#endif
}
Example #17
int
i80321_mem_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flag,
    bus_space_handle_t *bshp)
{

	struct i80321_softc *sc = t;
	vaddr_t va;
	uint32_t busbase;
	paddr_t pa, endpa, physbase;
	pt_entry_t *pte;

#if 0
printf("i80321_bs_map bpa %x, size %x flag %x : %x %x \n", bpa, size, flag,
   sc->sc_owin[0].owin_xlate_lo,
   sc->sc_owin[0].owin_xlate_lo+ VERDE_OUT_XLATE_MEM_WIN_SIZE);
#endif

	if (bpa >= sc->sc_owin[0].owin_xlate_lo &&
	    bpa < (sc->sc_owin[0].owin_xlate_lo +
		   VERDE_OUT_XLATE_MEM_WIN_SIZE)) {
		busbase = sc->sc_iwin[1].iwin_xlate;
		physbase = sc->sc_owin[0].owin_xlate_lo;
	} else
		return (EINVAL);

	if ((bpa + size) >= (sc->sc_owin[0].owin_xlate_lo +
	    VERDE_OUT_XLATE_MEM_WIN_SIZE))
		return (EINVAL);

	/*
	 * Found the window -- PCI MEM space is now mapped by allocating
	 * some kernel VA space and mapping the pages with pmap_enter().
	 * pmap_enter() will map unmanaged pages as non-cacheable.
	 */
	pa = trunc_page((bpa - busbase) + physbase);
	endpa = round_page(((bpa - busbase) + physbase) + size);

	va = (vaddr_t)km_alloc(endpa - pa, &kv_any, &kp_none, &kd_nowait);
	if (va == 0)
		return (ENOMEM);
//printf("i80321_mem_bs_map bpa %x pa %x va %x sz %x\n", bpa, pa, va, endpa-pa);

#if 0
printf("i80321_bs_map va %x pa %x, endpa %x, sz %x\n", va, pa,
    endpa, endpa-pa);
#endif

	*bshp = va + (bpa & PAGE_MASK);

	for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0) {
			pte = vtopte(va);
			*pte &= ~L2_S_CACHE_MASK;
			PTE_SYNC(pte);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
Example #18
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	pt_entry_t *ptep/*, pte*/;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", addr, va);
#endif	/* DEBUG_DMA */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
			/*
			 * If the memory must remain coherent with the
			 * cache then we must make the memory uncacheable
			 * in order to maintain virtual cache coherency.
			 * We must also guarantee the cache does not already
			 * contain the virtual addresses we are making
			 * uncacheable.
			 */
			if (flags & BUS_DMA_COHERENT) {
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
				cpu_drain_writebuf();
				ptep = vtopte(va);
				*ptep &= ~L2_S_CACHE_MASK;
				PTE_SYNC(ptep);
				tlb_flush();
			}
#ifdef DEBUG_DMA
			ptep = vtopte(va);
			printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif	/* DEBUG_DMA */
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return (0);
}
Example #19
/*
 * Write bytes to kernel address space for debugger.
 */
int
db_write_bytes(vm_offset_t addr, size_t size, char *data)
{
	jmp_buf jb;
	void *prev_jb;
	char *dst;
	pt_entry_t	*ptep0 = NULL;
	pt_entry_t	oldmap0 = 0;
	vm_offset_t	addr1;
	pt_entry_t	*ptep1 = NULL;
	pt_entry_t	oldmap1 = 0;
	int ret;

	prev_jb = kdb_jmpbuf(jb);
	ret = setjmp(jb);
	if (ret == 0) {
		if (addr > trunc_page((vm_offset_t)btext) - size &&
		    addr < round_page((vm_offset_t)etext)) {

			ptep0 = vtopte(addr);
			oldmap0 = *ptep0;
			*ptep0 |= PG_RW;

			/*
			 * Map another page if the data crosses a page
			 * boundary.
			 */
			if ((*ptep0 & PG_PS) == 0) {
				addr1 = trunc_page(addr + size - 1);
				if (trunc_page(addr) != addr1) {
					ptep1 = vtopte(addr1);
					oldmap1 = *ptep1;
					*ptep1 |= PG_RW;
				}
			} else {
				addr1 = trunc_2mpage(addr + size - 1);
				if (trunc_2mpage(addr) != addr1) {
					ptep1 = vtopte(addr1);
					oldmap1 = *ptep1;
					*ptep1 |= PG_RW;
				}
			}

			invltlb();
		}

		dst = (char *)addr;

		while (size-- > 0)
			*dst++ = *data++;
	}

	(void)kdb_jmpbuf(prev_jb);

	if (ptep0) {
		*ptep0 = oldmap0;

		if (ptep1)
			*ptep1 = oldmap1;

		invltlb();
	}

	return (ret);
}
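Because the PTE bookkeeping sits outside the setjmp()-protected region, a faulting write still longjmp()s back here with a nonzero ret, and the original read-only mappings are restored and the TLB flushed before the error is returned.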
Example #20
/*
 * Disassemble instruction at 'loc'.  'altfmt' specifies an
 * (optional) alternate format.  Return address of start of
 * next instruction.
 */
db_addr_t
db_disasm(
    db_addr_t	loc,
    bool	altfmt)
{
	int	inst;
	int	size;
	int	short_addr;
	const char *	seg;
	const struct inst *	ip;
	const char *	i_name;
	int	i_size;
	int	i_mode;
	int	regmodrm = 0;
	bool	first;
	int	displ;
	int	prefix;
	int	imm;
	int	imm2;
	int	len;
	struct i_addr	address;

#ifdef _KERNEL
	pt_entry_t *pte, *pde;

	/*
	 * Don't try to disassemble the location if the mapping is invalid.
	 * If we do, we'll fault, and end up debugging the debugger!
	 * In the case of large pages, "pte" is really the pde and "pde" is
	 * really the entry for the pdp itself.
	 */
	if ((vaddr_t)loc >= VM_MIN_KERNEL_ADDRESS)
		pte = kvtopte((vaddr_t)loc);
	else
		pte = vtopte((vaddr_t)loc);
	pde = vtopte((vaddr_t)pte);
	if ((*pde & PG_V) == 0 || (*pte & PG_V) == 0) {
		db_printf("invalid address\n");
		return (loc);
	}
#endif

	get_value_inc(inst, loc, 1, false);
	short_addr = false;
	size = LONG;
	seg = 0;

	/*
	 * Get prefixes
	 */
	prefix = true;
	do {
		switch (inst) {
		    case 0x66:		/* data16 */
			size = WORD;
			break;
		    case 0x67:
			short_addr = true;
			break;
		    case 0x26:
			seg = "%es";
			break;
		    case 0x36:
			seg = "%ss";
			break;
		    case 0x2e:
			seg = "%cs";
			break;
		    case 0x3e:
			seg = "%ds";
			break;
		    case 0x64:
			seg = "%fs";
			break;
		    case 0x65:
			seg = "%gs";
			break;
		    case 0xf0:
			db_printf("lock ");
			break;
		    case 0xf2:
			db_printf("repne ");
			break;
		    case 0xf3:
			db_printf("repe ");	/* XXX repe VS rep */
			break;
		    default:
			prefix = false;
			break;
		}
		if (prefix)
			get_value_inc(inst, loc, 1, false);
	} while (prefix);

	if (inst >= 0xd8 && inst <= 0xdf) {
		loc = db_disasm_esc(loc, inst, short_addr, size, seg);
		db_printf("\n");
		return (loc);
	}

	if (inst == 0x0f) {
		get_value_inc(inst, loc, 1, false);
		ip = db_inst_0f[inst>>4];
		if (ip == 0)
			ip = &db_bad_inst;
		else
			ip = &ip[inst&0xf];
	} else {