Example #1
int
obio_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	const struct pmap_devmap *pd;
	paddr_t startpa, endpa, pa, offset;
	vaddr_t va;
	pt_entry_t *pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return (0);
	}

	endpa = round_page(bpa + size);
	offset = bpa & PAGE_MASK;
	startpa = trunc_page(bpa);
		
	va = uvm_km_valloc(kernel_map, endpa - startpa);
	if (va == 0)
		return ENOMEM;

	*bshp = va + offset;

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		pte = vtopte(va);
		*pte &= ~L2_S_CACHE_MASK;
		PTE_SYNC(pte);
	}
	pmap_update(pmap_kernel());

	return (0);
}
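
The pmap_devmap_find_pa() call above only succeeds for regions already covered by a board-level table of fixed mappings established during bootstrap. The following is a minimal sketch of such a table; the OBIO_* values are hypothetical placeholders, and the pd_size/pd_prot/pd_cache field names and the PTE_NOCACHE constant are assumptions patterned after the pd_va/pd_pa fields used above.

#define OBIO_REGS_VBASE	0xfd000000UL	/* hypothetical fixed kernel VA */
#define OBIO_REGS_PBASE	0x48000000UL	/* hypothetical physical base   */
#define OBIO_REGS_SIZE	0x00100000UL	/* hypothetical, section-aligned */

/*
 * Hypothetical static device map, handed to the pmap early in bootstrap
 * so that obio_bs_map() can return the fixed VA without allocating KVA.
 * Only pd_va/pd_pa come from the code above; the rest is assumed.
 */
static const struct pmap_devmap obio_devmap[] = {
	{
		.pd_va    = OBIO_REGS_VBASE,
		.pd_pa    = OBIO_REGS_PBASE,
		.pd_size  = OBIO_REGS_SIZE,
		.pd_prot  = VM_PROT_READ | VM_PROT_WRITE,
		.pd_cache = PTE_NOCACHE,	/* device registers */
	},
	{ 0, 0, 0, 0, 0 }			/* table terminator */
};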
Example #2
/*
 * For optimal cache cleaning we need two 16K banks of
 * virtual address space that NOTHING else will access
 * and then we alternate the cache cleaning between the
 * two banks.
 * The cache cleaning code requires 2 banks aligned
 * on a total-size boundary so the banks can be alternated by
 * XORing the size bit (assumes the bank size is a power of 2).
 */
void
ixdp_ixp12x0_cc_setup(void)
{
	int loop;
	paddr_t kaddr;
	pt_entry_t *pte;

	(void) pmap_extract(pmap_kernel(), KERNEL_TEXT_BASE, &kaddr);
	for (loop = 0; loop < CPU_IXP12X0_CACHE_CLEAN_SIZE; loop += PAGE_SIZE) {
		pte = vtopte(ixp12x0_cc_base + loop);
		*pte = L2_S_PROTO | kaddr |
		    L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode;
		PTE_SYNC(pte);
	}
	ixp12x0_cache_clean_addr = ixp12x0_cc_base;
	ixp12x0_cache_clean_size = CPU_IXP12X0_CACHE_CLEAN_SIZE / 2;
}
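
The bank alternation described in the comment above is actually performed by the CPU-specific cache-clean routine; the helper below is only an illustrative sketch of the XOR trick, using the globals set up in this function.

/*
 * Illustrative sketch only: because the combined region is aligned on
 * its total size and each bank is a power-of-two in size, XORing the
 * clean address with the bank size flips between bank 0 and bank 1.
 */
static void
ixp12x0_cache_clean_switch(void)
{
	/* Flip to the other 16K bank for the next cleaning pass. */
	ixp12x0_cache_clean_addr ^= ixp12x0_cache_clean_size;
}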
Example #3
int
i80321_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flag,
    bus_space_handle_t *bshp)
{
	const struct pmap_devmap *pd;
	paddr_t startpa, endpa, pa, pagecnt;
	vaddr_t va;
	pt_entry_t *pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return (0);
	}

#if 0
printf("i80321_bs_map bpa %x, size %x flag %x\n", bpa, size, flag);
#endif
	endpa = round_page(bpa + size);
	startpa = trunc_page(bpa);
	pagecnt = endpa - startpa;

	va = (vaddr_t)km_alloc(endpa - startpa, &kv_any, &kp_none, &kd_nowait);
	if (va == 0)
		return(ENOMEM);
#if 0
printf("i80321_bs_map va %x pa %x, endpa %x, sz %x\n", va, startpa,
    endpa, endpa-startpa);
#endif

	*bshp = (bus_space_handle_t)(va + (bpa - startpa));

	for (pa = startpa; pagecnt > 0;
	    pa += PAGE_SIZE, va += PAGE_SIZE, pagecnt -= PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0) {
			pte = vtopte(va);
			*pte &= ~L2_S_CACHE_MASK;
			PTE_SYNC(pte);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
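
For completeness, here is a minimal sketch of the matching unmap path, assuming the usual pairing of pmap_kremove()/km_free() with the km_alloc() call above and a companion pmap_devmap_find_va() lookup; the real i80321_bs_unmap() may differ in detail.

/*
 * Sketch of the unmap counterpart (assumptions noted above): tear down
 * the wired mappings, then give the kernel VA back to km_free().
 */
void
i80321_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
{
	vaddr_t va, endva;

	if (pmap_devmap_find_va((vaddr_t)bsh, size) != NULL)
		return;		/* Statically mapped; nothing to undo. */

	va = trunc_page((vaddr_t)bsh);
	endva = round_page((vaddr_t)bsh + size);

	pmap_kremove(va, endva - va);
	pmap_update(pmap_kernel());
	km_free((void *)va, endva - va, &kv_any, &kp_none);
}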
Example #4
/* mem bs */
int
ixp425_pci_mem_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
	      int cacheable, bus_space_handle_t *bshp)
{
	const struct pmap_devmap	*pd;

	paddr_t		startpa;
	paddr_t		endpa;
	paddr_t		pa;
	paddr_t		offset;
	vaddr_t		va;
	pt_entry_t	*pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return 0;
	}

	endpa = round_page(bpa + size);
	offset = bpa & PAGE_MASK;
	startpa = trunc_page(bpa);

	/* Get some VM.  */
	va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (va == 0)
		return ENOMEM;

	/* Store the bus space handle */
	*bshp = va + offset;

	/* Now map the pages */
	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
		pte = vtopte(va);
		*pte &= ~L2_S_CACHE_MASK;
		PTE_SYNC(pte);
	}
	pmap_update(pmap_kernel());

	return(0);
}
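
A hedged caller-side sketch of how a driver reaches a map function like the one above through the machine-independent bus_space interface; the softc layout, register offset, and error handling are purely illustrative.

/* Hypothetical softc: the tag's map method is the function above. */
struct example_softc {
	bus_space_tag_t		sc_iot;
	bus_space_handle_t	sc_ioh;
	uint32_t		sc_id;
};

int
example_attach(struct example_softc *sc, bus_addr_t base, bus_size_t size)
{
	/* Map the device registers (flags 0: non-cacheable mapping). */
	if (bus_space_map(sc->sc_iot, base, size, 0, &sc->sc_ioh) != 0)
		return ENOMEM;

	/* Registers are now accessible through the returned handle. */
	sc->sc_id = bus_space_read_4(sc->sc_iot, sc->sc_ioh, 0);

	return 0;
}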
Example #5
int
armv7_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
	      int flag, bus_space_handle_t *bshp)
{
	u_long startpa, endpa, pa;
	vaddr_t va;
	pt_entry_t *pte;

	if ((u_long)bpa > (u_long)KERNEL_BASE) {
		/* Some I/O registers (e.g. UART ports for the console)
		   are mapped to a fixed address by a board-specific
		   routine. */
		*bshp = bpa;
		return(0);
	}

	startpa = trunc_page(bpa);
	endpa = round_page(bpa + size);

	/* XXX use extent manager to check duplicate mapping */

	va = uvm_km_valloc(kernel_map, endpa - startpa);
	if (! va)
		return(ENOMEM);

	*bshp = (bus_space_handle_t)(va + (bpa - startpa));

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0) {
			pte = vtopte(va);
			*pte &= ~L2_S_CACHE_MASK;
			PTE_SYNC(pte);
			/* XXX: pmap_kenter_pa() also does PTE_SYNC(). a bit of
			 *      waste.
			 */
		}
	}
	pmap_update(pmap_kernel());

	return(0);
}
Example #6
int
generic_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	const struct pmap_devmap *pd;
	vm_paddr_t startpa, endpa, pa, offset;
	vm_offset_t va;
	pt_entry_t *pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return (0);
	}

	endpa = round_page(bpa + size);
	offset = bpa & PAGE_MASK;
	startpa = trunc_page(bpa);

	va = kmem_alloc(kernel_map, endpa - startpa);
	if (va == 0)
		return (ENOMEM);

	*bshp = va + offset;

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter(va, pa);
		pte = vtopte(va);
		if (!(flags & BUS_SPACE_MAP_CACHEABLE)) {
			*pte &= ~L2_S_CACHE_MASK;
			PTE_SYNC(pte);
		}
	}

	return (0);
}
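
A sketch of the corresponding unmap path, assuming the historical kmem_free(kernel_map, ...) interface that pairs with the kmem_alloc() call above, a per-page pmap_kremove(), and a pmap_devmap_find_va() lookup; the real generic_bs_unmap() may differ.

/*
 * Sketch under the assumptions noted above: remove each wired page
 * mapping, then return the kernel VA range that was allocated.
 */
void
generic_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size)
{
	vm_offset_t va, origva, endva;

	if (pmap_devmap_find_va((vm_offset_t)h, size) != NULL)
		return;			/* Statically mapped. */

	origva = trunc_page((vm_offset_t)h);
	endva = round_page((vm_offset_t)h + size);

	for (va = origva; va < endva; va += PAGE_SIZE)
		pmap_kremove(va);

	kmem_free(kernel_map, origva, endva - origva);
}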
Example #7
File: esc.c Project: ryo/netbsd-src
void
escinitialize(struct esc_softc *dev)
{
	int		 i;

	dev->sc_led_status = 0;

	TAILQ_INIT(&dev->sc_xs_pending);
	TAILQ_INIT(&dev->sc_xs_free);

/*
 * Initialize the esc_pending structs and link them into the free list. We
 * have to set vm_link_data.pages to 0 or the vm FIX won't work.
 */
	for(i=0; i<MAXPENDING; i++) {
		TAILQ_INSERT_TAIL(&dev->sc_xs_free, &dev->sc_xs_store[i],
				  link);
	}

/*
 * Calculate the correct clock conversion factor 2 <= factor <= 8, i.e. set
 * the factor to clock_freq / 5 (int).
 */
	if (dev->sc_clock_freq <= 10)
		dev->sc_clock_conv_fact = 2;
	else if (dev->sc_clock_freq <= 40)
		dev->sc_clock_conv_fact = 2+((dev->sc_clock_freq-10)/5);
	else
		panic("escinitialize: Clock frequency too high");

/* Setup and save the basic configuration registers */
	dev->sc_config1 = (dev->sc_host_id & ESC_CFG1_BUS_ID_MASK);
	dev->sc_config2 = ESC_CFG2_FEATURES_ENABLE;
	dev->sc_config3 = (dev->sc_clock_freq > 25 ? ESC_CFG3_FASTCLK : 0);

/* Precalculate timeout value and clock period. */
/* Ekkk ... floating point in the kernel !!!! */
/*	dev->sc_timeout_val  = 1+dev->sc_timeout*dev->sc_clock_freq/
				 (7.682*dev->sc_clock_conv_fact);*/
	dev->sc_timeout_val  = 1+dev->sc_timeout*dev->sc_clock_freq/
				 ((7682*dev->sc_clock_conv_fact)/1000);
	dev->sc_clock_period = 1000/dev->sc_clock_freq;

	escreset(dev, 1 | 2);	/* Reset Chip and Bus */

	dev->sc_units_disconnected = 0;
	dev->sc_msg_in_len = 0;
	dev->sc_msg_out_len = 0;

	dev->sc_flags = 0;

	for(i=0; i<8; i++)
		esc_init_nexus(dev, &dev->sc_nexus[i]);

/*
 * Setup bump buffer.
 */
	dev->sc_bump_va = (u_char *)uvm_km_alloc(kernel_map, dev->sc_bump_sz, 0,
	    UVM_KMF_WIRED | UVM_KMF_ZERO);
	(void) pmap_extract(pmap_kernel(), (vaddr_t)dev->sc_bump_va,
	    (paddr_t *)&dev->sc_bump_pa);

/*
 * Setup pages to noncachable, that way we don't have to flush the cache
 * every time we need "bumped" transfer.
 */
	pt_entry_t * const ptep = vtopte((vaddr_t) dev->sc_bump_va);
	const pt_entry_t opte = *ptep;
	const pt_entry_t npte = opte & ~L2_C;
	l2pte_set(ptep, npte, opte);
	PTE_SYNC(ptep);
	cpu_tlb_flushD();
	cpu_dcache_wbinv_range((vaddr_t)dev->sc_bump_va, PAGE_SIZE);

	printf(" dmabuf V0x%08x P0x%08x", (u_int)dev->sc_bump_va, (u_int)dev->sc_bump_pa);
}
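
The integer replacement for the commented-out floating-point timeout formula above truncates (7682 * clock_conv_fact) / 1000 before dividing. A minimal sketch of an alternative evaluation that defers the division is shown below; the helper name is hypothetical and the overflow assumption is noted in the comment.

/*
 * Hypothetical alternative evaluation of
 *     timeout_val = 1 + timeout * clock_freq / (7.682 * clock_conv_fact)
 * in pure integer arithmetic: scale the numerator by 1000 instead of
 * pre-truncating the divisor.  Assumes timeout * clock_freq * 1000
 * fits in an int for realistic values.
 */
static inline int
esc_timeout_val(int timeout, int clock_freq, int clock_conv_fact)
{
	return 1 + (timeout * clock_freq * 1000) /
	    (7682 * clock_conv_fact);
}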
Example #8
int
i80321_mem_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flag,
    bus_space_handle_t *bshp)
{

	struct i80321_softc *sc = t;
	vaddr_t va;
	uint32_t busbase;
	paddr_t pa, endpa, physbase;
	pt_entry_t *pte;

#if 0
printf("i80321_bs_map bpa %x, size %x flag %x : %x %x \n", bpa, size, flag,
   sc->sc_owin[0].owin_xlate_lo,
   sc->sc_owin[0].owin_xlate_lo+ VERDE_OUT_XLATE_MEM_WIN_SIZE);
#endif

	if (bpa >= sc->sc_owin[0].owin_xlate_lo &&
	    bpa < (sc->sc_owin[0].owin_xlate_lo +
		   VERDE_OUT_XLATE_MEM_WIN_SIZE)) {
		busbase = sc->sc_iwin[1].iwin_xlate;
		physbase = sc->sc_owin[0].owin_xlate_lo;
	} else
		return (EINVAL);

	if ((bpa + size) >= ( sc->sc_owin[0].owin_xlate_lo +
	    VERDE_OUT_XLATE_MEM_WIN_SIZE))
		return (EINVAL);

	/*
	 * Found the window -- PCI MEM space is now mapped by allocating
	 * some kernel VA space and mapping the pages with pmap_enter().
	 * pmap_enter() will map unmanaged pages as non-cacheable.
	 */
	pa = trunc_page((bpa - busbase) + physbase);
	endpa = round_page(((bpa - busbase) + physbase) + size);

	va = (vaddr_t)km_alloc(endpa - pa, &kv_any, &kp_none, &kd_nowait);
	if (va == 0)
		return (ENOMEM);
//printf("i80321_mem_bs_map bpa %x pa %x va %x sz %x\n", bpa, pa, va, endpa-pa);

#if 0
printf("i80321_bs_map va %x pa %x, endpa %x, sz %x\n", va, pa,
    endpa, endpa-pa);
#endif

	*bshp = va + (bpa & PAGE_MASK);

	for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		if ((flag & BUS_SPACE_MAP_CACHEABLE) == 0) {
			pte = vtopte(va);
			*pte &= ~L2_S_CACHE_MASK;
			PTE_SYNC(pte);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
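
The outbound-window check and address translation above can be read as one small step; the following hypothetical helper condenses it, mirroring the same softc fields and constants used in the function.

/*
 * Hypothetical helper: reject bus addresses outside the first outbound
 * memory window, otherwise rebase them from the inbound window
 * translate value onto the outbound window base, exactly as the code
 * above does.  The caller still page-aligns the result.
 */
static int
i80321_mem_bs_translate(struct i80321_softc *sc, bus_addr_t bpa,
    bus_size_t size, paddr_t *pap)
{
	bus_addr_t busbase, physbase;

	if (bpa < sc->sc_owin[0].owin_xlate_lo ||
	    (bpa + size) >= (sc->sc_owin[0].owin_xlate_lo +
	    VERDE_OUT_XLATE_MEM_WIN_SIZE))
		return EINVAL;

	busbase = sc->sc_iwin[1].iwin_xlate;
	physbase = sc->sc_owin[0].owin_xlate_lo;
	*pap = (bpa - busbase) + physbase;
	return 0;
}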
Example #9
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	pt_entry_t *ptep/*, pte*/;

#ifdef DEBUG_DMA
	printf("dmamem_map: t=%p segs=%p nsegs=%x size=%lx flags=%x\n", t,
	    segs, nsegs, (unsigned long)size, flags);
#endif	/* DEBUG_DMA */

	size = round_page(size);
	va = uvm_km_valloc(kernel_map, size);

	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
#ifdef DEBUG_DMA
			printf("wiring p%lx to v%lx", addr, va);
#endif	/* DEBUG_DMA */
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);
			/*
			 * If the memory must remain coherent with the
			 * cache then we must make the memory uncacheable
			 * in order to maintain virtual cache coherency.
			 * We must also guarantee the cache does not already
			 * contain the virtual addresses we are making
			 * uncacheable.
			 */
			if (flags & BUS_DMA_COHERENT) {
				cpu_dcache_wbinv_range(va, PAGE_SIZE);
				cpu_drain_writebuf();
				ptep = vtopte(va);
				*ptep &= ~L2_S_CACHE_MASK;
				PTE_SYNC(ptep);
				tlb_flush();
			}
#ifdef DEBUG_DMA
			ptep = vtopte(va);
			printf(" pte=v%p *pte=%x\n", ptep, *ptep);
#endif	/* DEBUG_DMA */
		}
	}
	pmap_update(pmap_kernel());
#ifdef DEBUG_DMA
	printf("dmamem_map: =%p\n", *kvap);
#endif	/* DEBUG_DMA */
	return (0);
}
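
A hedged usage sketch of the path that ends in _bus_dmamem_map(): allocate DMA-safe segments, then map them with BUS_DMA_COHERENT so the uncacheable-PTE branch above is taken. The tag, alignment, and error handling are illustrative only.

/*
 * Illustrative caller: allocate one DMA-safe segment and map it
 * coherently (uncacheable) through the backend above.
 */
int
example_dma_buffer(bus_dma_tag_t dmat, bus_size_t size, caddr_t *kvap)
{
	bus_dma_segment_t seg;
	int nsegs, error;

	error = bus_dmamem_alloc(dmat, size, PAGE_SIZE, 0, &seg, 1,
	    &nsegs, BUS_DMA_NOWAIT);
	if (error)
		return error;

	error = bus_dmamem_map(dmat, &seg, nsegs, size, kvap,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT);
	if (error)
		bus_dmamem_free(dmat, &seg, nsegs);

	return error;
}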