Example #1
/*
 * Return true if we freed it, false if we didn't.
 */
bool
cpu_uarea_free(void *va)
{
#ifdef _LP64
	if (!MIPS_XKPHYS_P(va))
		return false;
	paddr_t pa = MIPS_XKPHYS_TO_PHYS(va);
#else
	if (!MIPS_KSEG0_P(va))
		return false;
	paddr_t pa = MIPS_KSEG0_TO_PHYS(va);
#endif

#ifdef MIPS3_PLUS
	if (MIPS_CACHE_VIRTUAL_ALIAS)
		mips_dcache_inv_range((vaddr_t)va, USPACE);
#endif

	for (const paddr_t epa = pa + USPACE; pa < epa; pa += PAGE_SIZE) {
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg != NULL);
		uvm_pagefree(pg);
	}
	return true;
}
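For context, here is a minimal sketch of the allocation side that this function undoes. It is not the actual NetBSD cpu_uarea_alloc(); the name and exact bounds are illustrative. It shows why the free path can recover the physical address straight from the VA: the u-area pages come from physically contiguous memory below 512MB and are handed out through a direct-mapped segment (KSEG0, or cached XKPHYS on _LP64).

/* Hypothetical sketch, not the real allocator. */
void *
cpu_uarea_alloc_sketch(void)
{
	struct pglist pglist;

	/*
	 * Contiguous physical pages below the 512MB KSEG0 limit.
	 * (On _LP64 the upper bound could be relaxed, since XKPHYS
	 * direct-maps all of physical memory.)
	 */
	if (uvm_pglistalloc(USPACE, 0, MIPS_KSEG1_START - MIPS_KSEG0_START,
	    PAGE_SIZE, 0, &pglist, 1, 1) != 0)
		return NULL;

	const paddr_t pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
#ifdef _LP64
	return (void *)MIPS_PHYS_TO_XKPHYS_CACHED(pa);	/* freed via the XKPHYS path */
#else
	return (void *)MIPS_PHYS_TO_KSEG0(pa);		/* freed via the KSEG0 path */
#endif
}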
Example #2
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((uintptr_t)kva & PGOFSET)
		panic("_bus_dmamem_unmap: bad alignment on %p", kva);
#endif

	/*
	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
	 * not in KSEG2 or XKSEG).
	 */
	if (MIPS_KSEG0_P(kva) || MIPS_KSEG1_P(kva))
		return;
#ifdef _LP64
	if (MIPS_XKPHYS_P((vaddr_t)kva))
		return;
#endif

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}
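A hedged usage sketch of the MI bus_dma(9) sequence whose teardown ends up in _bus_dmamem_unmap() above; `t` and `size` are assumed to come from the driver, and error handling is elided.

	bus_dma_segment_t seg;
	void *kva;
	int rseg;

	bus_dmamem_alloc(t, size, PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT);
	bus_dmamem_map(t, &seg, rseg, size, &kva, BUS_DMA_NOWAIT);
	/* ... DMA to/from kva ... */
	bus_dmamem_unmap(t, kva, size);	/* no-op for KSEG0/KSEG1/XKPHYS VAs */
	bus_dmamem_free(t, &seg, rseg);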
Example #3
static void *
__BS(vaddr)(void *v, bus_space_handle_t bsh)
{

#if (CHIP_ALIGN_STRIDE != 0)
	/* Linear mappings not possible. */
	return (NULL);
#elif defined(__mips_n32)
	if (MIPS_KSEG0_P(bsh) || MIPS_KSEG1_P(bsh) || MIPS_KSEG2_P(bsh))
		return ((void *)(intptr_t)bsh);
	return NULL;
#else
	return ((void *)bsh);
#endif
}
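This appears to be the MD backend for bus_space_vaddr(9), which only returns a usable pointer for regions mapped with BUS_SPACE_MAP_LINEAR. A driver-side sketch (iot, ioh, and regs_len are assumed from the surrounding driver):

	void *regs = bus_space_vaddr(iot, ioh);	/* NULL if no linear mapping */
	if (regs != NULL)
		memset(regs, 0, regs_len);	/* direct CPU access to the mapping */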
Example #4
/*
 * Map a (kernel) virtual address to a physical address.
 *
 * The MIPS processor has 3 distinct kernel address ranges:
 *
 * - kseg0 kernel "virtual address" for the   cached physical address space.
 * - kseg1 kernel "virtual address" for the uncached physical address space.
 * - kseg2 normal kernel "virtual address" mapped via the TLB.
 */
paddr_t
kvtophys(vaddr_t kva)
{
	pt_entry_t *pte;
	paddr_t phys;

	if (kva >= VM_MIN_KERNEL_ADDRESS) {
		if (kva >= VM_MAX_KERNEL_ADDRESS)
			goto overrun;

		pte = kvtopte(kva);
		if ((size_t) (pte - Sysmap) >= Sysmapsize)  {
			printf("oops: Sysmap overrun, max %d index %zd\n",
			       Sysmapsize, pte - Sysmap);
		}
		if (!mips_pg_v(pte->pt_entry)) {
			printf("kvtophys: pte not valid for %#"PRIxVADDR"\n",
			    kva);
		}
		phys = mips_tlbpfn_to_paddr(pte->pt_entry) | (kva & PGOFSET);
		return phys;
	}
	if (MIPS_KSEG1_P(kva))
		return MIPS_KSEG1_TO_PHYS(kva);

	if (MIPS_KSEG0_P(kva))
		return MIPS_KSEG0_TO_PHYS(kva);
#ifdef _LP64
	if (MIPS_XKPHYS_P(kva))
		return MIPS_XKPHYS_TO_PHYS(kva);
#endif
overrun:
	printf("Virtual address %#"PRIxVADDR": cannot map to physical\n", kva);
#ifdef DDB
	Debugger();
	return 0;	/* XXX */
#endif
	panic("kvtophys");
}
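For reference, the segment predicates and translation macros used above reduce to a mask-and-compare on the top address bits. A rough sketch under illustrative SK_* names, shown as the 32-bit view (the real MIPS_* definitions live in the machine headers; on 64-bit kernels the KSEG addresses are sign-extended):

#define SK_PHYS_MASK	0x1fffffffU	/* 512MB directly mappable */
#define SK_KSEG0_START	0x80000000U	/* cached, direct-mapped   */
#define SK_KSEG1_START	0xa0000000U	/* uncached, direct-mapped */

#define SK_KSEG0_P(va)	(((uintptr_t)(va) & ~SK_PHYS_MASK) == SK_KSEG0_START)
#define SK_KSEG1_P(va)	(((uintptr_t)(va) & ~SK_PHYS_MASK) == SK_KSEG1_START)
#define SK_KSEG0_TO_PHYS(va)	((paddr_t)((uintptr_t)(va) & SK_PHYS_MASK))

/* e.g. SK_KSEG0_TO_PHYS(0x80123456) == 0x00123456 */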
Example #5
/*
 * Map a (kernel) virtual address to a physical address.
 *
 * The MIPS processor has 3 distinct kernel address ranges:
 *
 * - kseg0 kernel "virtual address" for the   cached physical address space.
 * - kseg1 kernel "virtual address" for the uncached physical address space.
 * - kseg2 normal kernel "virtual address" mapped via the TLB.
 */
paddr_t
kvtophys(vaddr_t kva)
{
	paddr_t phys;

	if (MIPS_KSEG1_P(kva))
		return MIPS_KSEG1_TO_PHYS(kva);

	if (MIPS_KSEG0_P(kva))
		return MIPS_KSEG0_TO_PHYS(kva);

	if (kva >= VM_MIN_KERNEL_ADDRESS) {
		if (kva >= VM_MAX_KERNEL_ADDRESS)
			goto overrun;

		pt_entry_t * const ptep = pmap_pte_lookup(pmap_kernel(), kva);
		if (ptep == NULL)
			goto overrun;
		if (!pte_valid_p(*ptep)) {
			printf("kvtophys: pte not valid for %#"PRIxVADDR"\n",
			    kva);
		}
		phys = pte_to_paddr(*ptep) | (kva & PGOFSET);
		return phys;
	}
#ifdef _LP64
	if (MIPS_XKPHYS_P(kva))
		return MIPS_XKPHYS_TO_PHYS(kva);
#endif
overrun:
	printf("Virtual address %#"PRIxVADDR": cannot map to physical\n", kva);
#ifdef DDB
	Debugger();
	return 0;	/* XXX */
#endif
	panic("kvtophys");
}
Example #6
/*
 * cpu_lwp_fork: Finish a fork operation, with lwp l2 nearly set up.
 * Copy and update the pcb and trapframe, making the child ready to run.
 *
 * First LWP (l1) is the lwp being forked.  If it is &lwp0, then we are
 * creating a kthread, where return path and argument are specified
 * with `func' and `arg'.
 *
 * Rig the child's kernel stack so that it will start out in lwp_trampoline()
 * and call child_return() with l2 as an argument. This causes the
 * newly-created child process to go directly to user level with an apparent
 * return value of 0 from fork(), while the parent process returns normally.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize arguments), then set up the user stack
 * pointer accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb * const pcb1 = lwp_getpcb(l1);
	struct pcb * const pcb2 = lwp_getpcb(l2);
	struct trapframe *tf;

	KASSERT(l1 == curlwp || l1 == &lwp0);

	l2->l_md.md_ss_addr = 0;
	l2->l_md.md_ss_instr = 0;
	l2->l_md.md_astpending = 0;

	/* Copy the PCB from parent. */
	*pcb2 = *pcb1;

	/*
	 * Copy the trapframe from parent, so that return to userspace
	 * will be to right address, with correct registers.
	 */
	vaddr_t ua2 = uvm_lwp_getuarea(l2);
	tf = (struct trapframe *)(ua2 + USPACE) - 1;
	*tf = *l1->l_md.md_utf;

	/* If specified, set a different user stack for a child. */
	if (stack != NULL)
		tf->tf_regs[_R_SP] = (intptr_t)stack + stacksize;

	l2->l_md.md_utf = tf;
#if USPACE > PAGE_SIZE
	bool direct_mapped_p = MIPS_KSEG0_P(ua2);
#ifdef _LP64
	direct_mapped_p = direct_mapped_p || MIPS_XKPHYS_P(ua2);
#endif
	if (!direct_mapped_p) {
		pt_entry_t * const pte = kvtopte(ua2);
		const uint32_t x = (MIPS_HAS_R4K_MMU) ?
		    (MIPS3_PG_G | MIPS3_PG_RO | MIPS3_PG_WIRED) : MIPS1_PG_G;

		for (u_int i = 0; i < UPAGES; i++) {
			l2->l_md.md_upte[i] = pte[i].pt_entry &~ x;
		}
	}
#endif
	/*
	 * Rig kernel stack so that it would start out in lwp_trampoline()
	 * and call child_return() with l2 as an argument.  This causes the
	 * newly-created child process to go directly to user level with an
	 * apparent return value of 0 from fork(), while the parent process
	 * returns normally.
	 */

	pcb2->pcb_context.val[_L_S0] = (intptr_t)func;			/* S0 */
	pcb2->pcb_context.val[_L_S1] = (intptr_t)arg;			/* S1 */
	pcb2->pcb_context.val[MIPS_CURLWP_LABEL] = (intptr_t)l2;	/* T8 */
	pcb2->pcb_context.val[_L_SP] = (intptr_t)tf;			/* SP */
	pcb2->pcb_context.val[_L_RA] =
	   mips_locore_jumpvec.ljv_lwp_trampoline;			/* RA */
#ifdef _LP64
	KASSERT(pcb2->pcb_context.val[_L_SR] & MIPS_SR_KX);
#endif
	KASSERT(pcb2->pcb_context.val[_L_SR] & MIPS_SR_INT_IE);
}
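Illustrative only: roughly how a kernel-thread creation path hands an entry point to cpu_lwp_fork(). The real call chain goes through the MI fork/kthread code; this wrapper name is made up.

static void
example_spawn_kthread(struct lwp *l2, void (*entry)(void *), void *arg)
{
	/*
	 * Forking from &lwp0 marks this as a kthread: no user stack is
	 * given, and lwp_trampoline() will call entry(arg) using the
	 * S0/S1 values rigged into the pcb context above.
	 */
	cpu_lwp_fork(&lwp0, l2, NULL, 0, entry, arg);
}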
Example #7
struct cpu_info *
cpu_info_alloc(struct pmap_tlb_info *ti, cpuid_t cpu_id, cpuid_t cpu_package_id,
	cpuid_t cpu_core_id, cpuid_t cpu_smt_id)
{
	KASSERT(cpu_id < MAXCPUS);

#ifdef MIPS64_OCTEON
	vaddr_t exc_page = MIPS_UTLB_MISS_EXC_VEC + 0x1000*cpu_id;
	__CTASSERT(sizeof(struct cpu_info) + sizeof(struct pmap_tlb_info) <= 0x1000 - 0x280);

	struct cpu_info * const ci = ((struct cpu_info *)(exc_page + 0x1000)) - 1;
	memset((void *)exc_page, 0, PAGE_SIZE);

	if (ti == NULL) {
		ti = ((struct pmap_tlb_info *)ci) - 1;
		pmap_tlb_info_init(ti);
	}
#else
	const vaddr_t cpu_info_offset = (vaddr_t)&cpu_info_store & PAGE_MASK;
	struct pglist pglist;
	int error;

	/*
	 * Grab a page from the first 512MB (mappable by KSEG0) to use to store
	 * exception vectors and cpu_info for this cpu.
	 */
	error = uvm_pglistalloc(PAGE_SIZE,
	    0, MIPS_KSEG1_START - MIPS_KSEG0_START,
	    PAGE_SIZE, PAGE_SIZE, &pglist, 1, false);
	if (error)
		return NULL;

	const paddr_t pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
	const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa);
	struct cpu_info * const ci = (void *) (va + cpu_info_offset);
	memset((void *)va, 0, PAGE_SIZE);

	/*
	 * If we weren't passed a pmap_tlb_info to use, the caller wants us
	 * to take care of that for him.  Since we have room left over in the
	 * page we just allocated, just use a piece of that for it.
	 */
	if (ti == NULL) {
		if (cpu_info_offset >= sizeof(*ti)) {
			ti = (void *) va;
		} else {
			KASSERT(PAGE_SIZE - cpu_info_offset - sizeof(*ci) >= sizeof(*ti));
			ti = (struct pmap_tlb_info *)(va + PAGE_SIZE) - 1;
		}
		pmap_tlb_info_init(ti);
	}

	/*
	 * Attach its TLB info (which must be direct-mapped)
	 */
#ifdef _LP64
	KASSERT(MIPS_KSEG0_P(ti) || MIPS_XKPHYS_P(ti));
#else
	KASSERT(MIPS_KSEG0_P(ti));
#endif
#endif /* MIPS64_OCTEON */

	KASSERT(cpu_id != 0);
	ci->ci_cpuid = cpu_id;
	ci->ci_pmap_kern_segtab = &pmap_kern_segtab;
	ci->ci_data.cpu_package_id = cpu_package_id;
	ci->ci_data.cpu_core_id = cpu_core_id;
	ci->ci_data.cpu_smt_id = cpu_smt_id;
	ci->ci_cpu_freq = cpu_info_store.ci_cpu_freq;
	ci->ci_cctr_freq = cpu_info_store.ci_cctr_freq;
	ci->ci_cycles_per_hz = cpu_info_store.ci_cycles_per_hz;
	ci->ci_divisor_delay = cpu_info_store.ci_divisor_delay;
	ci->ci_divisor_recip = cpu_info_store.ci_divisor_recip;
	ci->ci_cpuwatch_count = cpu_info_store.ci_cpuwatch_count;

	pmap_md_alloc_ephemeral_address_space(ci);

	mi_cpu_attach(ci);

	pmap_tlb_info_attach(ti, ci);

	return ci;
}
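For orientation, the MIPS64_OCTEON branch packs the exception vectors, the cpu_info, and (when the caller passed ti == NULL) the pmap_tlb_info into a single 4KB page. The layout implied by the pointer arithmetic and the __CTASSERT above:

	exc_page + 0x0000 .. 0x0280		exception vectors (reserved)
	exc_page + 0x0280 ..			free space
	ci - sizeof(*ti)			ti (only if none was passed in)
	exc_page + 0x1000 - sizeof(*ci)		ci
	exc_page + 0x1000			end of the page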
Example #8
struct cpu_info *
cpu_info_alloc(struct pmap_tlb_info *ti, cpuid_t cpu_id, cpuid_t cpu_package_id,
	cpuid_t cpu_core_id, cpuid_t cpu_smt_id)
{
	vaddr_t cpu_info_offset = (vaddr_t)&cpu_info_store & PAGE_MASK;
	struct pglist pglist;
	int error;

	/*
	 * Grab a page from the first 512MB (mappable by KSEG0) to use to store
	 * exception vectors and cpu_info for this cpu.
	 */
	error = uvm_pglistalloc(PAGE_SIZE,
	    0, MIPS_KSEG1_START - MIPS_KSEG0_START,
	    PAGE_SIZE, PAGE_SIZE, &pglist, 1, false);
	if (error)
		return NULL;

	const paddr_t pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
	const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa);
	struct cpu_info * const ci = (void *) (va + cpu_info_offset);
	memset((void *)va, 0, PAGE_SIZE);

	/*
	 * If we weren't passed a pmap_tlb_info to use, the caller wants us
	 * to take care of that for him.  Since we have room left over in the
	 * page we just allocated, just use a piece of that for it.
	 */
	if (ti == NULL) {
		if (cpu_info_offset >= sizeof(*ti)) {
			ti = (void *) va;
		} else {
			KASSERT(PAGE_SIZE - cpu_info_offset - sizeof(*ci) >= sizeof(*ti));
			ti = (struct pmap_tlb_info *)(va + PAGE_SIZE) - 1;
		}
		pmap_tlb_info_init(ti);
	}

	ci->ci_cpuid = cpu_id;
	ci->ci_data.cpu_package_id = cpu_package_id;
	ci->ci_data.cpu_core_id = cpu_core_id;
	ci->ci_data.cpu_smt_id = cpu_smt_id;
	ci->ci_cpu_freq = cpu_info_store.ci_cpu_freq;
	ci->ci_cctr_freq = cpu_info_store.ci_cctr_freq;
	ci->ci_cycles_per_hz = cpu_info_store.ci_cycles_per_hz;
	ci->ci_divisor_delay = cpu_info_store.ci_divisor_delay;
	ci->ci_divisor_recip = cpu_info_store.ci_divisor_recip;
	ci->ci_cpuwatch_count = cpu_info_store.ci_cpuwatch_count;

	/*
	 * Attach its TLB info (which must be direct-mapped)
	 */
#ifdef _LP64
	KASSERT(MIPS_KSEG0_P(ti) || MIPS_XKPHYS_P(ti));
#else
	KASSERT(MIPS_KSEG0_P(ti));
#endif

#ifndef _LP64
	/*
	 * If we have more memory than can be mapped by KSEG0, we need to
	 * allocate enough VA so we can map pages with the right color
	 * (to avoid cache alias problems).
	 */
	if (mips_avail_end > MIPS_KSEG1_START - MIPS_KSEG0_START) {
		ci->ci_pmap_dstbase = uvm_km_alloc(kernel_map,
		    uvmexp.ncolors * PAGE_SIZE, 0, UVM_KMF_VAONLY);
		KASSERT(ci->ci_pmap_dstbase);
		ci->ci_pmap_srcbase = uvm_km_alloc(kernel_map,
		    uvmexp.ncolors * PAGE_SIZE, 0, UVM_KMF_VAONLY);
		KASSERT(ci->ci_pmap_srcbase);
	}
#endif

	mi_cpu_attach(ci);

	pmap_tlb_info_attach(ti, ci);

	return ci;
}
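The ncolors VA reservation above matters because a virtually indexed cache with more index bits than the page offset gives each page a "color", and the copy windows must match the color of the page they map. A sketch of the arithmetic (PAGE_COLOR is an illustrative name; uvmexp.ncolors is assumed to be a power of two, as UVM keeps it):

/* Illustrative: with 4KB pages and a 16KB virtually-indexed primary
 * cache, uvmexp.ncolors == 4, and pages of different colors index
 * different cache lines for the same page offset. */
#define PAGE_COLOR(va)	(((va) >> PAGE_SHIFT) & (uvmexp.ncolors - 1))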
Example #9
static void
__BS(unmap)(void *v, bus_space_handle_t h, bus_size_t size, int acct)
{
#if !defined(_LP64) || defined(CHIP_EXTENT)
	bus_addr_t addr = 0;	/* initialize to appease gcc */
#endif
#ifndef _LP64
	bool handle_is_km;

	/* determine if h is addr obtained from uvm_km_alloc */
	handle_is_km = !(MIPS_KSEG0_P(h) || MIPS_KSEG1_P(h));
#ifdef __mips_n32
	if (handle_is_km == true)
		handle_is_km = !MIPS_XKPHYS_P(h);
#endif
	if (handle_is_km == true) {
		paddr_t pa;
		vaddr_t va = (vaddr_t)trunc_page(h);
		vsize_t sz = (vsize_t)round_page((h % PAGE_SIZE) + size);
		int s;

		s = splhigh();

		if (pmap_extract(pmap_kernel(), (vaddr_t)h, &pa) == false)
			panic("%s: pmap_extract failed", __func__);
		addr = (bus_addr_t)pa;
#if 0
		printf("%s:%d: addr %#"PRIxBUSADDR", sz %#"PRIxVSIZE"\n",
			__func__, __LINE__, addr, sz);
#endif
		/* sanity check: this is why we couldn't map w/ kseg[0,1] */
		KASSERT(((addr + sz) & ~MIPS_PHYS_MASK) != 0);

		pmap_kremove(va, sz);
		pmap_update(pmap_kernel());
		uvm_km_free(kernel_map, va, sz, UVM_KMF_VAONLY);

		splx(s);
	}
#endif	/* !_LP64 */

#ifdef CHIP_EXTENT

	if (acct == 0)
		return;

#ifdef EXTENT_DEBUG
	printf("%s: freeing handle %#"PRIxBSH" for %#"PRIxBUSSIZE"\n",
		__S(__BS(unmap)), h, size);
#endif

#ifdef _LP64
	KASSERT(MIPS_XKPHYS_P(h));
	addr = MIPS_XKPHYS_TO_PHYS(h);
#else
	if (handle_is_km == false) {
		if (MIPS_KSEG0_P(h))
			addr = MIPS_KSEG0_TO_PHYS(h);
#ifdef __mips_n32
		else if (MIPS_XKPHYS_P(h))
			addr = MIPS_XKPHYS_TO_PHYS(h);
#endif
		else
			addr = MIPS_KSEG1_TO_PHYS(h);
	}
#endif

#ifdef CHIP_W1_BUS_START
	if (addr >= CHIP_W1_SYS_START(v) && addr <= CHIP_W1_SYS_END(v)) {
		addr = CHIP_W1_BUS_START(v) + (addr - CHIP_W1_SYS_START(v));
	} else
#endif
#ifdef CHIP_W2_BUS_START
	if (addr >= CHIP_W2_SYS_START(v) && addr <= CHIP_W2_SYS_END(v)) {
		addr = CHIP_W2_BUS_START(v) + (addr - CHIP_W2_SYS_START(v));
	} else
#endif
#ifdef CHIP_W3_BUS_START
	if (addr >= CHIP_W3_SYS_START(v) && addr <= CHIP_W3_SYS_END(v)) {
		addr = CHIP_W3_BUS_START(v) + (addr - CHIP_W3_SYS_START(v));
	} else
#endif
	{
		printf("\n");
#ifdef CHIP_W1_BUS_START
		printf("%s: sys window[1]=0x%lx-0x%lx\n",
		    __S(__BS(unmap)), (u_long)CHIP_W1_SYS_START(v),
		    (u_long)CHIP_W1_SYS_END(v));
#endif
#ifdef CHIP_W2_BUS_START
		printf("%s: sys window[2]=0x%lx-0x%lx\n",
		    __S(__BS(unmap)), (u_long)CHIP_W2_SYS_START(v),
		    (u_long)CHIP_W2_SYS_END(v));
#endif
#ifdef CHIP_W3_BUS_START
		printf("%s: sys window[3]=0x%lx-0x%lx\n",
		    __S(__BS(unmap)), (u_long)CHIP_W3_SYS_START(v),
		    (u_long)CHIP_W3_SYS_END(v));
#endif
		panic("%s: don't know how to unmap %#"PRIxBSH, __S(__BS(unmap)), h);
	}

#ifdef EXTENT_DEBUG
	printf("%s: freeing %#"PRIxBUSADDR" to %#"PRIxBUSADDR"\n",
	    __S(__BS(unmap)), addr, addr + size - 1);
#endif
	int error = extent_free(CHIP_EXTENT(v), addr, size,
	    EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
	if (error) {
		printf("%s: WARNING: could not unmap"
		    " %#"PRIxBUSADDR"-%#"PRIxBUSADDR" (error %d)\n",
		    __S(__BS(unmap)), addr, addr + size - 1, error);
#ifdef EXTENT_DEBUG
		extent_print(CHIP_EXTENT(v));
#endif
	}
#endif /* CHIP_EXTENT */
#if !defined(_LP64) || defined(CHIP_EXTENT)
	__USE(addr);
#endif
}
Example #10
static int
__BS(map)(void *v, bus_addr_t addr, bus_size_t size, int flags,
    bus_space_handle_t *hp, int acct)
{
	struct mips_bus_space_translation mbst;
	int error;

	/*
	 * Get the translation for this address.
	 */
	error = __BS(translate)(v, addr, size, flags, &mbst);
	if (error)
		return (error);

#ifdef CHIP_EXTENT
	if (acct == 0)
		goto mapit;

#ifdef EXTENT_DEBUG
	printf("%s: allocating %#"PRIxBUSADDR" to %#"PRIxBUSADDR"\n",
		__S(__BS(map)), addr, addr + size - 1);
#endif
	error = extent_alloc_region(CHIP_EXTENT(v), addr, size,
	    EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
	if (error) {
#ifdef EXTENT_DEBUG
		printf("%s: allocation failed (%d)\n", __S(__BS(map)), error);
		extent_print(CHIP_EXTENT(v));
#endif
		return (error);
	}

 mapit:
#endif /* CHIP_EXTENT */

	addr = mbst.mbst_sys_start + (addr - mbst.mbst_bus_start);

#if defined(__mips_n32) || defined(_LP64)
	if (flags & BUS_SPACE_MAP_CACHEABLE) {
#ifdef __mips_n32
		if (((addr + size) & ~MIPS_PHYS_MASK) == 0)
			*hp = (intptr_t)MIPS_PHYS_TO_KSEG0(addr);
		else
#endif
			*hp = MIPS_PHYS_TO_XKPHYS_CACHED(addr);
	} else if (flags & BUS_SPACE_MAP_PREFETCHABLE) {
		*hp = MIPS_PHYS_TO_XKPHYS_ACC(addr);
	} else {
#ifdef __mips_n32
		if (((addr + size) & ~MIPS_PHYS_MASK) == 0)
			*hp = (intptr_t)MIPS_PHYS_TO_KSEG1(addr);
		else
#endif
			*hp = MIPS_PHYS_TO_XKPHYS_UNCACHED(addr);
	}
#else
	if (((addr + size) & ~MIPS_PHYS_MASK) != 0) {
		vaddr_t va;
		paddr_t pa;
		int s;

		size = round_page((addr % PAGE_SIZE) + size);
		va = uvm_km_alloc(kernel_map, size, PAGE_SIZE,
			UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
		if (va == 0)
			return ENOMEM;

		/* check use of handle_is_km in BS(unmap) */
		KASSERT(!(MIPS_KSEG0_P(va) || MIPS_KSEG1_P(va)));

		*hp = va + (addr & PAGE_MASK);
		pa = trunc_page(addr);

		s = splhigh();
		while (size != 0) {
			pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
			pa += PAGE_SIZE;
			va += PAGE_SIZE;
			size -= PAGE_SIZE;
		}
		pmap_update(pmap_kernel());
		splx(s);
	} else {
		if (flags & BUS_SPACE_MAP_CACHEABLE)
			*hp = (intptr_t)MIPS_PHYS_TO_KSEG0(addr);
		else
			*hp = (intptr_t)MIPS_PHYS_TO_KSEG1(addr);
	}
#endif

	return (0);
}
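A hedged driver-side sketch exercising the map path above through the MI bus_space(9) interface; the bus address and register offset are made up.

	bus_space_handle_t ioh;

	if (bus_space_map(iot, 0x1f000000, 0x1000, 0, &ioh) == 0) {
		uint32_t sts = bus_space_read_4(iot, ioh, 0x10);
		bus_space_write_4(iot, ioh, 0x10, sts | 0x1);	/* set a bit */
		bus_space_unmap(iot, ioh, 0x1000);
	}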