Example #1
File: cache_tx39.c Project: ryo/netbsd-src
void
tx3900_icache_sync_all_16(void)
{

	tx3900_icache_do_inv_index_16(MIPS_PHYS_TO_KSEG0(0),
	    MIPS_PHYS_TO_KSEG0(mips_cache_info.mci_picache_size));
}
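For context, MIPS_PHYS_TO_KSEG0 converts a physical address into a virtual address inside KSEG0, the cached, unmapped 512 MB window of the canonical 32-bit MIPS memory map. A minimal sketch of the conversion macros, assuming the standard segment bases (the authoritative definitions live in the port's cpuregs.h):

#define MIPS_KSEG0_START	0x80000000	/* cached, unmapped */
#define MIPS_KSEG1_START	0xa0000000	/* uncached, unmapped */
#define MIPS_KSEG2_START	0xc0000000	/* TLB-mapped kernel space */
#define MIPS_PHYS_MASK		0x1fffffff

/* A physical address below 512 MB maps into KSEG0/KSEG1 by ORing in
 * the segment base; the inverse macros strip it again. */
#define MIPS_PHYS_TO_KSEG0(x)	((vaddr_t)(x) | MIPS_KSEG0_START)
#define MIPS_PHYS_TO_KSEG1(x)	((vaddr_t)(x) | MIPS_KSEG1_START)
#define MIPS_KSEG1_TO_PHYS(x)	((paddr_t)(x) & MIPS_PHYS_MASK)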
Example #2
File: cache_tx39.c Project: ryo/netbsd-src
void
tx3920_icache_sync_all_16wb(void)
{

	mips_dcache_wbinv_all();

	__asm volatile(".set push; .set mips2; sync; .set pop");

	tx3920_icache_do_inv_16(MIPS_PHYS_TO_KSEG0(0),
	    MIPS_PHYS_TO_KSEG0(mips_cache_info.mci_picache_size));
}
Example #3
File: cache_tx39.c Project: ryo/netbsd-src
void
tx3900_icache_sync_range_16(register_t va, vsize_t size)
{
	vaddr_t eva = round_line(va + size);

	va = trunc_line(va);

	if ((eva - va) >= mips_cache_info.mci_picache_size) {
		/* Just hit the whole thing. */
		va = MIPS_PHYS_TO_KSEG0(0);
		eva = MIPS_PHYS_TO_KSEG0(mips_cache_info.mci_picache_size);
	}

	tx3900_icache_do_inv_index_16(va, eva);
}
Example #4
void
r10k_icache_sync_range_index(vaddr_t va, vsize_t size)
{
	vaddr_t eva, orig_va;

	orig_va = va;

	eva = round_line(va + size);
	va = trunc_line(va);

	mips_dcache_wbinv_range_index(va, (eva - va));

	__asm volatile("sync");

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(orig_va & mips_picache_way_mask);

	eva = round_line(va + size);
	va = trunc_line(va);

	while (va < eva) {
		cache_op_r4k_line(va+0, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		cache_op_r4k_line(va+1, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 64;
	}
}
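The va+0/va+1 pair is the R10000 way-selection idiom: judging from the loop, index ops select the way of the two-way primary icache with the low address bit, so each 64-byte line is invalidated in both ways. A hedged illustration of the index-address construction above, with an assumed 2-way 32 KB icache (16 KB way, so mips_picache_way_mask == 0x3fff):

	/* Assumption: mips_picache_way_mask == 0x3fff (16 KB way). */
	vaddr_t some_va = 0x12345678;
	vaddr_t index_va = MIPS_PHYS_TO_KSEG0(some_va & 0x3fff);
	/* index_va == 0x80001678: same cache index, but a KSEG0
	 * address that is always accessible. */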
Example #5
File: cache_tx39.c Project: ryo/netbsd-src
void
tx3900_pdcache_wbinv_all_4(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_cache_info.mci_pdcache_size;
	volatile int *p;

	/*
	 * No Index Invalidate for the TX3900 -- have to execute a
	 * series of load instructions from the dummy buffer, instead.
	 */

	p = tx3900_dummy_buffer;
	while (va < eva) {
		(void) *p++; (void) *p++; (void) *p++; (void) *p++;
		(void) *p++; (void) *p++; (void) *p++; (void) *p++;
		(void) *p++; (void) *p++; (void) *p++; (void) *p++;
		(void) *p++; (void) *p++; (void) *p++; (void) *p++;
		(void) *p++; (void) *p++; (void) *p++; (void) *p++;
		(void) *p++; (void) *p++; (void) *p++; (void) *p++;
		(void) *p++; (void) *p++; (void) *p++; (void) *p++;
		(void) *p++; (void) *p++; (void) *p++; (void) *p++;
		va += (32 * 4);
	}
}
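The TX3900 offers no index-invalidate operation for the data cache, so the routine reads a cache-sized dummy buffer instead: every load displaces a line, and since the cache is write-through there is nothing dirty to lose. A hedged sketch of the buffer declaration this relies on (only the name comes from the code above; the size is an assumption):

/* Assumption: 8 KB covers the largest TX3900 primary dcache. */
#define TX3900_PDCACHE_MAX	8192
static int tx3900_dummy_buffer[TX3900_PDCACHE_MAX / sizeof(int)];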
Example #6
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * If we're only mapping 1 segment, use a direct-mapped address
	 * (XKPHYS on _LP64, otherwise KSEG0/KSEG1) to avoid TLB thrashing.
	 */
#ifdef _LP64
	if (nsegs == 1) {
		if (((mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT) == 0)
		&&  (flags & BUS_DMA_COHERENT))
			*kvap = (void *)MIPS_PHYS_TO_XKPHYS_UNCACHED(
			    segs[0].ds_addr);
		else
			*kvap = (void *)MIPS_PHYS_TO_XKPHYS_CACHED(
			    segs[0].ds_addr);
		return 0;
	}
#else
	if ((nsegs == 1) && (segs[0].ds_addr < MIPS_PHYS_MASK)) {
		if (((mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT) == 0)
		&&  (flags & BUS_DMA_COHERENT))
			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
		else
			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
		return (0);
	}
#endif	/* _LP64 */

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
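A hedged sketch of the caller's side of this interface, pairing bus_dmamem_alloc with bus_dmamem_map as the bus_dma framework intends; dma_buf_setup is a hypothetical helper and the flags are illustrative:

static int
dma_buf_setup(bus_dma_tag_t t, bus_size_t size, void **kvap)
{
	bus_dma_segment_t seg;
	int rsegs;

	/* Asking for one segment lets the map take the direct-mapped
	 * fast path above. */
	if (bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1, &rsegs,
	    BUS_DMA_NOWAIT) != 0)
		return ENOMEM;
	if (bus_dmamem_map(t, &seg, rsegs, size, kvap,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0) {
		bus_dmamem_free(t, &seg, rsegs);
		return ENOMEM;
	}
	return 0;
}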
Example #7
File: dmac3.c Project: lacombar/netbsd-alc
void
dmac3_start(struct dmac3_softc *sc, vaddr_t addr, int len, int direction)
{
	struct dmac3reg *reg = sc->sc_reg;
	paddr_t pa;
	vaddr_t start, end, v;
	volatile uint32_t *p;

	if (reg->csr & DMAC3_CSR_ENABLE)
		dmac3_reset(sc);

	start = mips_trunc_page(addr);
	end   = mips_round_page(addr + len);
	p = sc->sc_dmamap;
	for (v = start; v < end; v += PAGE_SIZE) {
		pa = kvtophys(v);
		mips_dcache_wbinv_range(MIPS_PHYS_TO_KSEG0(pa), PAGE_SIZE);
		*p++ = 0;
		*p++ = (pa >> PGSHIFT) | 0xc0000000;
	}
	*p++ = 0;
	*p++ = 0x003fffff;

	addr &= PGOFSET;
	addr += sc->sc_dmaaddr;

	reg->len = len;
	reg->addr = addr;
	reg->intr = DMAC3_INTR_EOPIE | DMAC3_INTR_INTEN;
	reg->csr = DMAC3_CSR_ENABLE | direction | BURST_MODE | APAD_MODE;
}
Example #8
void
r4k_sdcache_wbinv_range_index_128(vaddr_t va, vsize_t size)
{
	vaddr_t eva;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & (mips_cache_info.mci_sdcache_size - 1));

	eva = round_line(va + size);
	va = trunc_line(va);

	while ((eva - va) >= (32 * 128)) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 128);
	}

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += 128;
	}
}
Example #9
int
arc_bus_space_compose_handle(bus_space_tag_t bst, bus_addr_t addr,
    bus_size_t size, int flags, bus_space_handle_t *bshp)
{
	bus_space_handle_t bsh = bst->bs_vbase + (addr - bst->bs_start);

	/*
	 * Since all buses can be linearly mappable, we don't have to check
	 * BUS_SPACE_MAP_LINEAR and BUS_SPACE_MAP_PREFETCHABLE.
	 */
	if ((flags & BUS_SPACE_MAP_CACHEABLE) == 0) {
		*bshp = bsh;
		return 0;
	}
	if (bsh < MIPS_KSEG1_START) /* KUSEG or KSEG0 */
		panic("arc_bus_space_compose_handle: bad address 0x%x", bsh);
	if (bsh < MIPS_KSEG2_START) { /* KSEG1 */
		*bshp = MIPS_PHYS_TO_KSEG0(MIPS_KSEG1_TO_PHYS(bsh));
		return 0;
	}
	/*
	 * KSEG2:
	 * Do not make the page cacheable in this case, since:
	 * - the page to which this bus_space belongs might include
	 *   other bus_spaces.
	 * or
	 * - this bus might be mapped by a wired TLB entry; in that case,
	 *   we cannot manipulate the cacheable attribute with page
	 *   granularity.
	 */
#ifdef DIAGNOSTIC
	printf("arc_bus_space_compose_handle: ignore cacheable 0x%x\n", bsh);
#endif
	*bshp = bsh;
	return 0;
}
Example #10
/*
 * Perform a board-level soft-reset.
 * Note that this is not emulated by gxemul.
 */
void
platform_reset(void)
{
	char *c;

	c = (char *)MIPS_PHYS_TO_KSEG0(MALTA_SOFTRES);
	*c = MALTA_GORESET;
}
Example #11
void
r3k_icache_sync_all(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_cache_info.mci_picache_size;

	r3k_picache_do_inv(va, eva);
}
Example #12
void *
cpu_uarea_alloc(bool system)
{
	struct pglist pglist;
#ifdef _LP64
	const paddr_t high = mips_avail_end;
#else
	const paddr_t high = MIPS_KSEG1_START - MIPS_KSEG0_START;
	/*
	 * Don't allocate a direct-mapped uarea if we aren't allocating
	 * for a system lwp and we have memory that can't be mapped via
	 * KSEG0.
	 */
	if (!system && high > mips_avail_end)
		return NULL;
#endif
	int error;

	/*
	 * Allocate a new physically contiguous uarea which can be
	 * direct-mapped.
	 */
	error = uvm_pglistalloc(USPACE, mips_avail_start, high,
	    USPACE_ALIGN, 0, &pglist, 1, 1);
	if (error) {
#ifdef _LP64
		if (!system)
			return NULL;
#endif
		panic("%s: uvm_pglistalloc failed: %d", __func__, error);
	}

	/*
	 * Get the physical address from the first page.
	 */
	const struct vm_page * const pg = TAILQ_FIRST(&pglist);
	KASSERT(pg != NULL);
	const paddr_t pa = VM_PAGE_TO_PHYS(pg);
	KASSERTMSG(pa >= mips_avail_start,
	    "pa (%#"PRIxPADDR") < mips_avail_start (%#"PRIxPADDR")",
	     pa, mips_avail_start);
	KASSERTMSG(pa < mips_avail_end,
	    "pa (%#"PRIxPADDR") >= mips_avail_end (%#"PRIxPADDR")",
	     pa, mips_avail_end);

	/*
	 * We need to return a direct-mapped VA for the pa.
	 */
#ifdef _LP64
	const vaddr_t va = MIPS_PHYS_TO_XKPHYS_CACHED(pa);
#else
	const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa);
#endif

	return (void *)va;
}
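The closing #ifdef is the only 32/64-bit split in the function; factored out for illustration (this helper is a sketch, not code from the source), the pa-to-direct-mapped-va step reads:

static inline vaddr_t
mips_pa_to_direct_va(paddr_t pa)
{
#ifdef _LP64
	/* 64-bit kernels direct-map all of RAM through XKPHYS. */
	return MIPS_PHYS_TO_XKPHYS_CACHED(pa);
#else
	/* 32-bit kernels only have the 512 MB KSEG0 window. */
	return MIPS_PHYS_TO_KSEG0(pa);
#endif
}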
Example #13
void
r3k_pdcache_wbinv_all(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_cache_info.mci_pdcache_size;

	/* Cache is write-through. */

	r3k_pdcache_do_inv(va, eva);
}
Example #14
void
mipsNN_icache_sync_range_index_32(vaddr_t va, vsize_t size)
{
	struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t eva, tmpva;
	int i, stride, loopcount;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & mci->mci_picache_way_mask);

	eva = round_line32(va + size);
	va = trunc_line32(va);

	/*
	 * If we are going to flush more than is in a way, we are flushing
	 * everything.
	 */
	if (eva - va >= mci->mci_picache_way_size) {
		mipsNN_icache_sync_all_32();
		return;
	}

	/*
	 * GCC generates better code in the loops if we reference local
	 * copies of these global variables.
	 */
	stride = picache_stride;
	loopcount = picache_loopcount;

	mips_intern_dcache_wbinv_range_index(va, (eva - va));

	while ((eva - va) >= (8 * 32)) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride) {
			cache_r4k_op_8lines_32(tmpva,
			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		}
		va += 8 * 32;
	}

	while (va < eva) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride) {
			cache_op_r4k_line(tmpva,
			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		}
		va += 32;
	}
}
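The stride and loopcount globals encode the cache geometry so the inner loop issues one index op per way at every line offset. A hedged guess at how they are initialized elsewhere (an assumption inferred from the loop structure, with a hypothetical init function name):

static void
mipsNN_picache_geometry_init(void)
{
	const struct mips_cache_info * const mci = &mips_cache_info;

	/* Step a whole way per iteration; repeat once per way. */
	picache_stride = mci->mci_picache_way_size;
	picache_loopcount = mci->mci_picache_ways;
}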
Example #15
static void
mipsNN_pdcache_wbinv_range_index_32_intern(vaddr_t va, vaddr_t eva)
{
	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va);
	eva = MIPS_PHYS_TO_KSEG0(eva);

	for (; (eva - va) >= (8 * 32); va += 8 * 32) {
		cache_r4k_op_8lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
	}

	for (; va < eva; va += 32) {
		cache_op_r4k_line(va, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
	}
}
Example #16
File: cache_r4k.c Project: MarginC/kame
void
r4k_pdcache_wbinv_all_32(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_pdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_32(va,
		    CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 32);
	}
}
Example #17
/*
 * Put character to Malta LCD at given position.
 */
static void
malta_lcd_putc(int pos, char c)
{
	void *addr;
	char *ch;

	if (pos < 0 || pos > 7)
		return;
	addr = (void *)(MALTA_ASCII_BASE + malta_lcd_offs[pos]);
	ch = (char *)MIPS_PHYS_TO_KSEG0(addr);
	*ch = c;
}
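A hedged usage sketch built on the function above: malta_lcd_puts is a hypothetical helper that spells a message across the 8-character display.

static void
malta_lcd_puts(const char *s)
{
	int pos;

	for (pos = 0; pos < 8 && s[pos] != '\0'; pos++)
		malta_lcd_putc(pos, s[pos]);
}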
Example #18
void
r4k_sdcache_wbinv_all_128(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_cache_info.mci_sdcache_size;

	while (va < eva) {
		cache_r4k_op_32lines_128(va,
		    CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += (32 * 128);
	}
}
Example #19
void
r10k_pdcache_wbinv_all(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_pdcache_way_size;

	while (va < eva) {
		cache_op_r4k_line(va+0, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		cache_op_r4k_line(va+1, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}
Example #20
void
r4k_sdcache_wbinv_all_generic(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_cache_info.mci_sdcache_size;
	int line_size = mips_cache_info.mci_sdcache_line_size;

	while (va < eva) {
		cache_op_r4k_line(va, CACHE_R4K_SD|CACHEOP_R4K_INDEX_WB_INV);
		va += line_size;
	}
}
Example #21
/*
 * Like _bus_dmamap_load(), but for raw memory.
 */
int
_bus_dmamap_load_raw(bus_dma_tag_t t, bus_dmamap_t map,
    bus_dma_segment_t *segs, int nsegs, bus_size_t size, int flags)
{

	struct vmspace * const vm = vmspace_kernel();
	const bool coherent_p = (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT);
	const bool cached_p = coherent_p || (flags & BUS_DMA_COHERENT) == 0;
	bus_size_t mapsize = 0;
	bool first = true;
	int curseg = 0;
	int error = 0;

	for (; error == 0 && nsegs-- > 0; segs++) {
		void *kva;
#ifdef _LP64
		if (cached_p) {
			kva = (void *)MIPS_PHYS_TO_XKPHYS_CACHED(segs->ds_addr);
		} else {
			kva = (void *)MIPS_PHYS_TO_XKPHYS_UNCACHED(segs->ds_addr);
		}
#else
		if (segs->ds_addr >= MIPS_PHYS_MASK)
			return EFBIG;
		if (cached_p) {
			kva = (void *)MIPS_PHYS_TO_KSEG0(segs->ds_addr);
		} else {
			kva = (void *)MIPS_PHYS_TO_KSEG1(segs->ds_addr);
		}
#endif	/* _LP64 */
		mapsize += segs->ds_len;
		error = _bus_dmamap_load_buffer(t, map, kva, segs->ds_len,
		    vm, flags, &curseg, first);
		first = false;
	}
	if (error == 0) {
		map->dm_mapsize = mapsize;
		map->dm_nsegs = curseg + 1;
		map->_dm_vmspace = vm;		/* always kernel */
		/*
		 * If our cache is coherent, then the map must be coherent too.
		 */
		if (coherent_p)
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
		return 0;
	}
	/*
	 * If bus_dmamem_alloc returned memory that needs bouncing, that's
	 * a bug which we will not work around.
	 */
	return error;
}
Example #22
File: bus.c Project: ryo/netbsd-src
int
bus_space_map(bus_space_tag_t t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	int cacheable = flags & BUS_SPACE_MAP_CACHEABLE;

	if (cacheable)
		*bshp = MIPS_PHYS_TO_KSEG0(bpa);
	else
		*bshp = MIPS_PHYS_TO_KSEG1(bpa);

	return 0;
}
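A hedged usage sketch of this tag: map a device region uncached (no BUS_SPACE_MAP_CACHEABLE, so the handle lands in KSEG1) and read a register through it. The physical address, sizes, and register offset are invented for illustration:

static uint32_t
dev_status_read(bus_space_tag_t t)
{
	bus_space_handle_t ioh;
	uint32_t status;

	if (bus_space_map(t, 0x1f000000, 0x1000, 0, &ioh) != 0)
		panic("dev_status_read: cannot map registers");
	status = bus_space_read_4(t, ioh, 0x10);	/* hypothetical register */
	bus_space_unmap(t, ioh, 0x1000);
	return status;
}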
Example #23
void
r10k_pdcache_wbinv_all(void)
{
	const struct mips_cache_info * const mci = &mips_cache_info;
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mci->mci_pdcache_way_size;

	while (va < eva) {
		cache_op_r4k_line(va+0, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		cache_op_r4k_line(va+1, CACHE_R4K_D|CACHEOP_R4K_INDEX_WB_INV);
		va += 32;
	}
}
Example #24
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * If we're only mapping 1 segment, use KSEG0 or KSEG1, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		if (flags & BUS_DMA_COHERENT)
			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0]._ds_paddr);
		else
			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0]._ds_paddr);
		return 0;
	}

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return ENOMEM;

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		segs[curseg]._ds_vaddr = va;
		for (addr = segs[curseg]._ds_paddr;
		    addr < (segs[curseg]._ds_paddr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);

			/* XXX Do something about COHERENT here. */
		}
	}
	pmap_update(pmap_kernel());

	return 0;
}
Example #25
void
r4k_icache_sync_all_16(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_cache_info.mci_picache_size;

	mips_dcache_wbinv_all();

	__asm volatile("sync");

	while (va < eva) {
		cache_r4k_op_32lines_16(va, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += (32 * 16);
	}
}
Example #26
void
r10k_icache_sync_all(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_picache_way_size;

	mips_dcache_wbinv_all();

	__asm volatile("sync");

	while (va < eva) {
		cache_op_r4k_line(va+0, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		cache_op_r4k_line(va+1, CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 64;
	}
}
Example #27
File: cache_tx39.c Project: ryo/netbsd-src
void
tx3920_pdcache_wbinv_all_16wb(void)
{
	vaddr_t va = MIPS_PHYS_TO_KSEG0(0);
	vaddr_t eva = va + mips_cache_info.mci_pdcache_size;

	/*
	 * Since we're hitting the whole thing, we don't have to
	 * worry about the 2 different "ways".
	 */

	while (va < eva) {
		cache_tx39_op_32lines_16(va,
		    CACHE_TX39_D|CACHEOP_TX3920_INDEX_WB_INV);
		va += (32 * 16);
	}
}
Example #28
File: bus_dma.c Project: ryo/netbsd-src
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{

	/*
	 * If we're only mapping 1 segment, use KSEG0 or KSEG1, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		if (flags & BUS_DMA_COHERENT)
			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
		else
			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
		return 0;
	}

	return (_bus_dmamem_map_common(t, segs, nsegs, size, kvap, flags, 0));
}
Example #29
int
__BS(map)(void *v, bus_addr_t addr, bus_size_t size, int flags,
    bus_space_handle_t *hp, int acct)
{
	struct mips_bus_space_translation mbst;
	int error;

	/*
	 * Get the translation for this address.
	 */
	error = __BS(translate)(v, addr, size, flags, &mbst);
	if (error)
		return (error);

#ifdef CHIP_EXTENT
	if (acct == 0)
		goto mapit;

#ifdef EXTENT_DEBUG
	printf("xxx: allocating 0x%lx to 0x%lx\n", addr, addr + size - 1);
#endif
        error = extent_alloc_region(CHIP_EXTENT(v), addr, size,
            EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
	if (error) {
#ifdef EXTENT_DEBUG
		printf("xxx: allocation failed (%d)\n", error);
		extent_print(CHIP_EXTENT(v));
#endif
		return (error);
	}

 mapit:
#endif /* CHIP_EXTENT */
	if (flags & BUS_SPACE_MAP_CACHEABLE)
		*hp = MIPS_PHYS_TO_KSEG0(mbst.mbst_sys_start +
		    (addr - mbst.mbst_bus_start));
	else
		*hp = MIPS_PHYS_TO_KSEG1(mbst.mbst_sys_start +
		    (addr - mbst.mbst_bus_start));

	return (0);
}
Example #30
void
mipsNN_icache_sync_range_index_32(vm_offset_t va, vm_size_t size)
{
	vm_offset_t eva, tmpva;
	int i, stride, loopcount;

	/*
	 * Since we're doing Index ops, we expect to not be able
	 * to access the address we've been given.  So, get the
	 * bits that determine the cache index, and make a KSEG0
	 * address out of them.
	 */
	va = MIPS_PHYS_TO_KSEG0(va & picache_way_mask);

	eva = round_line32(va + size);
	va = trunc_line32(va);

	/*
	 * GCC generates better code in the loops if we reference local
	 * copies of these global variables.
	 */
	stride = picache_stride;
	loopcount = picache_loopcount;

	mips_intern_dcache_wbinv_range_index(va, (eva - va));

	while ((eva - va) >= (8 * 32)) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_r4k_op_8lines_32(tmpva,
			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 8 * 32;
	}

	while (va < eva) {
		tmpva = va;
		for (i = 0; i < loopcount; i++, tmpva += stride)
			cache_op_r4k_line(tmpva,
			    CACHE_R4K_I|CACHEOP_R4K_INDEX_INV);
		va += 32;
	}
}