Code example #1
/*
 * Common function for unmapping DMA-safe memory.  May be called by
 * bus-specific DMA memory unmapping functions.
 */
void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

#ifdef DIAGNOSTIC
	if ((uintptr_t)kva & PGOFSET)
		panic("_bus_dmamem_unmap: bad alignment on %p", kva);
#endif

	/*
	 * Nothing to do if we mapped it with KSEG0 or KSEG1 (i.e.
	 * not in KSEG2 or XKSEG).
	 */
	if (MIPS_KSEG0_P(kva) || MIPS_KSEG1_P(kva))
		return;
#ifdef _LP64
	if (MIPS_XKPHYS_P((vaddr_t)kva))
		return;
#endif

	size = round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}
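
As the header comment says, ports call this from their bus-specific unmap
routines.  A minimal sketch of such a wrapper (the mychip_ name is
hypothetical, not from the NetBSD tree):

/*
 * Hypothetical chipset wrapper: do any chipset-specific cleanup
 * first, then let the common helper tear down the KSEG2/XKSEG
 * mapping if one exists.
 */
void
mychip_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{

	/* ...chipset-specific teardown would go here... */
	_bus_dmamem_unmap(t, kva, size);
}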
Code example #2
/*
 * Return true if we freed it, false if we didn't.
 */
bool
cpu_uarea_free(void *va)
{
#ifdef _LP64
	if (!MIPS_XKPHYS_P(va))
		return false;
	paddr_t pa = MIPS_XKPHYS_TO_PHYS(va);
#else
	if (!MIPS_KSEG0_P(va))
		return false;
	paddr_t pa = MIPS_KSEG0_TO_PHYS(va);
#endif

#ifdef MIPS3_PLUS
	if (MIPS_CACHE_VIRTUAL_ALIAS)
		mips_dcache_inv_range((vaddr_t)va, USPACE);
#endif

	for (const paddr_t epa = pa + USPACE; pa < epa; pa += PAGE_SIZE) {
		struct vm_page * const pg = PHYS_TO_VM_PAGE(pa);
		KASSERT(pg != NULL);
		uvm_pagefree(pg);
	}
	return true;
}
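
Only direct-mapped (KSEG0/XKPHYS) uareas can be freed page by page like
this, so callers try this machine-dependent fast path first and fall back
to the generic path otherwise.  A hedged sketch of the calling pattern
(the fallback name is hypothetical):

	/* Try the direct-mapped fast path; fall back if the uarea
	 * was mapped through the TLB instead. */
	if (!cpu_uarea_free(va))
		generic_uarea_free(va);		/* hypothetical fallback */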
Code example #3
/*
 * Map a (kernel) virtual address to a physical address.
 *
 * The MIPS processor has three distinct kernel address ranges:
 *
 * - kseg0 kernel "virtual address" for the   cached physical address space.
 * - kseg1 kernel "virtual address" for the uncached physical address space.
 * - kseg2 normal kernel "virtual address" mapped via the TLB.
 */
paddr_t
kvtophys(vaddr_t kva)
{
	pt_entry_t *pte;
	paddr_t phys;

	if (kva >= VM_MIN_KERNEL_ADDRESS) {
		if (kva >= VM_MAX_KERNEL_ADDRESS)
			goto overrun;

		pte = kvtopte(kva);
		if ((size_t)(pte - Sysmap) >= Sysmapsize) {
			printf("oops: Sysmap overrun, max %d index %zd\n",
			       Sysmapsize, pte - Sysmap);
		}
		if (!mips_pg_v(pte->pt_entry)) {
			printf("kvtophys: pte not valid for %#"PRIxVADDR"\n",
			    kva);
		}
		phys = mips_tlbpfn_to_paddr(pte->pt_entry) | (kva & PGOFSET);
		return phys;
	}
	if (MIPS_KSEG1_P(kva))
		return MIPS_KSEG1_TO_PHYS(kva);

	if (MIPS_KSEG0_P(kva))
		return MIPS_KSEG0_TO_PHYS(kva);
#ifdef _LP64
	if (MIPS_XKPHYS_P(kva))
		return MIPS_XKPHYS_TO_PHYS(kva);
#endif
overrun:
	printf("Virtual address %#"PRIxVADDR": cannot map to physical\n", kva);
#ifdef DDB
	Debugger();
	return 0;	/* XXX */
#endif
	panic("kvtophys");
}
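
For a direct-mapped address the function reduces to pure arithmetic and
never consults Sysmap.  An illustrative call:

	/* A KSEG0 address translates without any TLB or Sysmap lookup. */
	vaddr_t kva = MIPS_PHYS_TO_KSEG0(0x1000);
	paddr_t pa = kvtophys(kva);	/* pa == 0x1000 */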
Code example #4
File: vm_machdep.c  Project: ryo/netbsd-src
/*
 * Map a (kernel) virtual address to a physical address.
 *
 * The MIPS processor has three distinct kernel address ranges:
 *
 * - kseg0 kernel "virtual address" for the   cached physical address space.
 * - kseg1 kernel "virtual address" for the uncached physical address space.
 * - kseg2 normal kernel "virtual address" mapped via the TLB.
 */
paddr_t
kvtophys(vaddr_t kva)
{
	paddr_t phys;

	if (MIPS_KSEG1_P(kva))
		return MIPS_KSEG1_TO_PHYS(kva);

	if (MIPS_KSEG0_P(kva))
		return MIPS_KSEG0_TO_PHYS(kva);

	if (kva >= VM_MIN_KERNEL_ADDRESS) {
		if (kva >= VM_MAX_KERNEL_ADDRESS)
			goto overrun;

		pt_entry_t * const ptep = pmap_pte_lookup(pmap_kernel(), kva);
		if (ptep == NULL)
			goto overrun;
		if (!pte_valid_p(*ptep)) {
			printf("kvtophys: pte not valid for %#"PRIxVADDR"\n",
			    kva);
		}
		phys = pte_to_paddr(*ptep) | (kva & PGOFSET);
		return phys;
	}
#ifdef _LP64
	if (MIPS_XKPHYS_P(kva))
		return MIPS_XKPHYS_TO_PHYS(kva);
#endif
overrun:
	printf("Virtual address %#"PRIxVADDR": cannot map to physical\n", kva);
#ifdef DDB
	Debugger();
	return 0;	/* XXX */
#endif
	panic("kvtophys");
}
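
This newer variant differs from example #3 in two ways: the direct-mapped
KSEG1/KSEG0 checks are hoisted to the top as the common fast path, and the
TLB-mapped case goes through pmap_pte_lookup()/pte_to_paddr() instead of
indexing Sysmap directly, so a missing PTE is reported through the overrun
path rather than being dereferenced.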
Code example #5
/*
 * cpu_lwp_fork: Finish a fork operation, with lwp l2 nearly set up.
 * Copy and update the pcb and trapframe, making the child ready to run.
 *
 * First LWP (l1) is the lwp being forked.  If it is &lwp0, then we are
 * creating a kthread, where the return path and argument are specified
 * with `func' and `arg'.
 *
 * Rig the child's kernel stack so that it will start out in lwp_trampoline()
 * and call child_return() with l2 as an argument. This causes the
 * newly-created child process to go directly to user level with an apparent
 * return value of 0 from fork(), while the parent process returns normally.
 *
 * If an alternate user-level stack is requested (with non-zero values
 * in both the stack and stacksize arguments), then set up the user stack
 * pointer accordingly.
 */
void
cpu_lwp_fork(struct lwp *l1, struct lwp *l2, void *stack, size_t stacksize,
    void (*func)(void *), void *arg)
{
	struct pcb * const pcb1 = lwp_getpcb(l1);
	struct pcb * const pcb2 = lwp_getpcb(l2);
	struct trapframe *tf;

	KASSERT(l1 == curlwp || l1 == &lwp0);

	l2->l_md.md_ss_addr = 0;
	l2->l_md.md_ss_instr = 0;
	l2->l_md.md_astpending = 0;

	/* Copy the PCB from parent. */
	*pcb2 = *pcb1;

	/*
	 * Copy the trapframe from parent, so that return to userspace
	 * will be to right address, with correct registers.
	 */
	vaddr_t ua2 = uvm_lwp_getuarea(l2);
	tf = (struct trapframe *)(ua2 + USPACE) - 1;
	*tf = *l1->l_md.md_utf;

	/* If specified, set a different user stack for a child. */
	if (stack != NULL)
		tf->tf_regs[_R_SP] = (intptr_t)stack + stacksize;

	l2->l_md.md_utf = tf;
#if USPACE > PAGE_SIZE
	bool direct_mapped_p = MIPS_KSEG0_P(ua2);
#ifdef _LP64
	direct_mapped_p = direct_mapped_p || MIPS_XKPHYS_P(ua2);
#endif
	if (!direct_mapped_p) {
		pt_entry_t * const pte = kvtopte(ua2);
		const uint32_t x = (MIPS_HAS_R4K_MMU) ?
		    (MIPS3_PG_G | MIPS3_PG_RO | MIPS3_PG_WIRED) : MIPS1_PG_G;

		for (u_int i = 0; i < UPAGES; i++) {
			l2->l_md.md_upte[i] = pte[i].pt_entry &~ x;
		}
	}
#endif
	/*
	 * Rig the kernel stack so that it will start out in lwp_trampoline()
	 * and call child_return() with l2 as an argument.  This causes the
	 * newly-created child process to go directly to user level with an
	 * apparent return value of 0 from fork(), while the parent process
	 * returns normally.
	 */

	pcb2->pcb_context.val[_L_S0] = (intptr_t)func;			/* S0 */
	pcb2->pcb_context.val[_L_S1] = (intptr_t)arg;			/* S1 */
	pcb2->pcb_context.val[MIPS_CURLWP_LABEL] = (intptr_t)l2;	/* T8 */
	pcb2->pcb_context.val[_L_SP] = (intptr_t)tf;			/* SP */
	pcb2->pcb_context.val[_L_RA] =
	    (intptr_t)mips_locore_jumpvec.ljv_lwp_trampoline;		/* RA */
#ifdef _LP64
	KASSERT(pcb2->pcb_context.val[_L_SR] & MIPS_SR_KX);
#endif
	KASSERT(pcb2->pcb_context.val[_L_SR] & MIPS_SR_INT_IE);
}
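
The pcb_context slots filled in above are consumed by the first
cpu_switchto() into l2.  A rough sketch of the trampoline side, as implied
by the register comments (illustrative pseudo-assembly, not the actual
locore source):

	/*
	 * lwp_trampoline (sketch):
	 *	s0 = func, s1 = arg, t8 = l2	<- restored from pcb_context
	 *	call lwp_startup()		<- MI bookkeeping for the new lwp
	 *	move	a0, s1
	 *	jalr	s0			<- func(arg); child_return(l2) for fork
	 *	...then out to user level via the trapframe at sp
	 */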
Code example #6
/*
 * Common function for loading a direct-mapped DMA map with a linear
 * buffer.  Called by bus-specific DMA map load functions with the
 * OR value appropriate for indicating "direct-mapped" for that
 * chipset.
 */
int
_bus_dmamap_load(bus_dma_tag_t t, bus_dmamap_t map, void *buf,
    bus_size_t buflen, struct proc *p, int flags)
{
	int seg, error;
	struct vmspace *vm;

	if (map->dm_nsegs > 0) {
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
		struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
		if (cookie != NULL) {
			if (cookie->id_flags & _BUS_DMA_IS_BOUNCING) {
				STAT_INCR(bounced_unloads);
				cookie->id_flags &= ~_BUS_DMA_IS_BOUNCING;
			}
			cookie->id_buftype = _BUS_DMA_BUFTYPE_INVALID;
		} else
#endif
		STAT_INCR(unloads);
	}
	/*
	 * Make sure that on an error condition we return "no valid mappings".
	 */
	map->dm_mapsize = 0;
	map->dm_nsegs = 0;
	KASSERT(map->dm_maxsegsz <= map->_dm_maxmaxsegsz);

	if (buflen > map->_dm_size)
		return (EINVAL);

	if (p != NULL) {
		vm = p->p_vmspace;
	} else {
		vm = vmspace_kernel();
	}

	seg = 0;
	error = _bus_dmamap_load_buffer(t, map, buf, buflen,
	    vm, flags, &seg, true);
	if (error == 0) {
		map->dm_mapsize = buflen;
		map->dm_nsegs = seg + 1;
		map->_dm_vmspace = vm;

		STAT_INCR(loads);

		/*
		 * For linear buffers, we support marking the mapping
		 * as COHERENT.
		 *
		 * XXX Check TLB entries for cache-inhibit bits?
		 */
		if (mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT)
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
		else if (MIPS_KSEG1_P(buf))
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
#ifdef _LP64
		else if (MIPS_XKPHYS_P((vaddr_t)buf)
		    && MIPS_XKPHYS_TO_CCA((vaddr_t)buf) == MIPS3_PG_TO_CCA(MIPS3_PG_UNCACHED))
			map->_dm_flags |= _BUS_DMAMAP_COHERENT;
#endif
		return 0;
	}
#ifdef _MIPS_NEED_BUS_DMA_BOUNCE
	struct mips_bus_dma_cookie *cookie = map->_dm_cookie;
	if (cookie != NULL && (cookie->id_flags & _BUS_DMA_MIGHT_NEED_BOUNCE)) {
		error = _bus_dma_load_bouncebuf(t, map, buf, buflen,
		    _BUS_DMA_BUFTYPE_LINEAR, flags);
	}
#endif
	return (error);
}
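
From a driver's point of view this sits behind the MI bus_dma interface:
the map is created once, then loaded and synced per transfer.  A hedged
usage sketch (the sc_ names are illustrative):

	error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, buf, len,
	    NULL /* kernel buffer */, BUS_DMA_NOWAIT);
	if (error == 0) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_dmamap, 0, len,
		    BUS_DMASYNC_PREWRITE);
		/* ...start the DMA transfer... */
	}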
Code example #7
File: cpu_subr.c  Project: ryo/netbsd-src
struct cpu_info *
cpu_info_alloc(struct pmap_tlb_info *ti, cpuid_t cpu_id, cpuid_t cpu_package_id,
	cpuid_t cpu_core_id, cpuid_t cpu_smt_id)
{
	KASSERT(cpu_id < MAXCPUS);

#ifdef MIPS64_OCTEON
	vaddr_t exc_page = MIPS_UTLB_MISS_EXC_VEC + 0x1000*cpu_id;
	__CTASSERT(sizeof(struct cpu_info) + sizeof(struct pmap_tlb_info) <= 0x1000 - 0x280);

	struct cpu_info * const ci = ((struct cpu_info *)(exc_page + 0x1000)) - 1;
	memset((void *)exc_page, 0, PAGE_SIZE);

	if (ti == NULL) {
		ti = ((struct pmap_tlb_info *)ci) - 1;
		pmap_tlb_info_init(ti);
	}
#else
	const vaddr_t cpu_info_offset = (vaddr_t)&cpu_info_store & PAGE_MASK;
	struct pglist pglist;
	int error;

	/*
	 * Grab a page from the first 512MB (mappable by KSEG0) to use to store
	 * exception vectors and cpu_info for this cpu.
	 */
	error = uvm_pglistalloc(PAGE_SIZE,
	    0, MIPS_KSEG1_START - MIPS_KSEG0_START,
	    PAGE_SIZE, PAGE_SIZE, &pglist, 1, false);
	if (error)
		return NULL;

	const paddr_t pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
	const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa);
	struct cpu_info * const ci = (void *) (va + cpu_info_offset);
	memset((void *)va, 0, PAGE_SIZE);

	/*
	 * If we weren't passed a pmap_tlb_info to use, the caller wants us
	 * to take care of that for him.  Since we have room left over in the
	 * page we just allocated, just use a piece of that for it.
	 */
	if (ti == NULL) {
		if (cpu_info_offset >= sizeof(*ti)) {
			ti = (void *) va;
		} else {
			KASSERT(PAGE_SIZE - cpu_info_offset + sizeof(*ci) >= sizeof(*ti));
			ti = (struct pmap_tlb_info *)(va + PAGE_SIZE) - 1;
		}
		pmap_tlb_info_init(ti);
	}

	/*
	 * Attach its TLB info (which must be direct-mapped)
	 */
#ifdef _LP64
	KASSERT(MIPS_KSEG0_P(ti) || MIPS_XKPHYS_P(ti));
#else
	KASSERT(MIPS_KSEG0_P(ti));
#endif
#endif /* MIPS64_OCTEON */

	KASSERT(cpu_id != 0);
	ci->ci_cpuid = cpu_id;
	ci->ci_pmap_kern_segtab = &pmap_kern_segtab;
	ci->ci_data.cpu_package_id = cpu_package_id;
	ci->ci_data.cpu_core_id = cpu_core_id;
	ci->ci_data.cpu_smt_id = cpu_smt_id;
	ci->ci_cpu_freq = cpu_info_store.ci_cpu_freq;
	ci->ci_cctr_freq = cpu_info_store.ci_cctr_freq;
	ci->ci_cycles_per_hz = cpu_info_store.ci_cycles_per_hz;
	ci->ci_divisor_delay = cpu_info_store.ci_divisor_delay;
	ci->ci_divisor_recip = cpu_info_store.ci_divisor_recip;
	ci->ci_cpuwatch_count = cpu_info_store.ci_cpuwatch_count;

	pmap_md_alloc_ephemeral_address_space(ci);

	mi_cpu_attach(ci);

	pmap_tlb_info_attach(ti, ci);

	return ci;
}
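
On MIPS64_OCTEON the per-CPU structures are carved out of the top of the
exception-vector page itself.  The layout implied by the code above:

	/*
	 * exc_page + 0x0000	exception vectors (first 0x280 bytes)
	 * ...
	 * exc_page + 0x1000 - sizeof(*ci) - sizeof(*ti)
	 *			struct pmap_tlb_info (only if ti == NULL)
	 * exc_page + 0x1000 - sizeof(*ci)
	 *			struct cpu_info
	 *
	 * The __CTASSERT guarantees both structures fit without
	 * overlapping the vector area.
	 */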
Code example #8
struct cpu_info *
cpu_info_alloc(struct pmap_tlb_info *ti, cpuid_t cpu_id, cpuid_t cpu_package_id,
	cpuid_t cpu_core_id, cpuid_t cpu_smt_id)
{
	vaddr_t cpu_info_offset = (vaddr_t)&cpu_info_store & PAGE_MASK;
	struct pglist pglist;
	int error;

	/*
	 * Grab a page from the first 512MB (mappable by KSEG0) to use to store
	 * exception vectors and cpu_info for this cpu.
	 */
	error = uvm_pglistalloc(PAGE_SIZE,
	    0, MIPS_KSEG1_START - MIPS_KSEG0_START,
	    PAGE_SIZE, PAGE_SIZE, &pglist, 1, false);
	if (error)
		return NULL;

	const paddr_t pa = VM_PAGE_TO_PHYS(TAILQ_FIRST(&pglist));
	const vaddr_t va = MIPS_PHYS_TO_KSEG0(pa);
	struct cpu_info * const ci = (void *) (va + cpu_info_offset);
	memset((void *)va, 0, PAGE_SIZE);

	/*
	 * If we weren't passed a pmap_tlb_info to use, the caller wants us
	 * to take care of that for him.  Since we have room left over in the
	 * page we just allocated, just use a piece of that for it.
	 */
	if (ti == NULL) {
		if (cpu_info_offset >= sizeof(*ti)) {
			ti = (void *) va;
		} else {
			KASSERT(PAGE_SIZE - cpu_info_offset + sizeof(*ci) >= sizeof(*ti));
			ti = (struct pmap_tlb_info *)(va + PAGE_SIZE) - 1;
		}
		pmap_tlb_info_init(ti);
	}

	ci->ci_cpuid = cpu_id;
	ci->ci_data.cpu_package_id = cpu_package_id;
	ci->ci_data.cpu_core_id = cpu_core_id;
	ci->ci_data.cpu_smt_id = cpu_smt_id;
	ci->ci_cpu_freq = cpu_info_store.ci_cpu_freq;
	ci->ci_cctr_freq = cpu_info_store.ci_cctr_freq;
	ci->ci_cycles_per_hz = cpu_info_store.ci_cycles_per_hz;
	ci->ci_divisor_delay = cpu_info_store.ci_divisor_delay;
	ci->ci_divisor_recip = cpu_info_store.ci_divisor_recip;
	ci->ci_cpuwatch_count = cpu_info_store.ci_cpuwatch_count;

	/*
	 * Attach its TLB info (which must be direct-mapped)
	 */
#ifdef _LP64
	KASSERT(MIPS_KSEG0_P(ti) || MIPS_XKPHYS_P(ti));
#else
	KASSERT(MIPS_KSEG0_P(ti));
#endif

#ifndef _LP64
	/*
	 * If we have more memory than can be mapped by KSEG0, we need to
	 * allocate enough VA so we can map pages with the right color
	 * (to avoid cache alias problems).
	 */
	if (mips_avail_end > MIPS_KSEG1_START - MIPS_KSEG0_START) {
		ci->ci_pmap_dstbase = uvm_km_alloc(kernel_map,
		    uvmexp.ncolors * PAGE_SIZE, 0, UVM_KMF_VAONLY);
		KASSERT(ci->ci_pmap_dstbase);
		ci->ci_pmap_srcbase = uvm_km_alloc(kernel_map,
		    uvmexp.ncolors * PAGE_SIZE, 0, UVM_KMF_VAONLY);
		KASSERT(ci->ci_pmap_srcbase);
	}
#endif

	mi_cpu_attach(ci);

	pmap_tlb_info_attach(ti, ci);

	return ci;
}
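
The ncolors sizing in the !_LP64 branch is easiest to see with numbers.
An illustrative configuration (not taken from the source):

	/*
	 * A 16 KB virtually indexed, direct-mapped D-cache with 4 KB pages
	 * gives uvmexp.ncolors = 16K / 4K = 4, so ci_pmap_srcbase and
	 * ci_pmap_dstbase each reserve 4 pages of VA -- one window per
	 * color -- letting pmap copy/zero pages at alias-free addresses.
	 */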
Code example #9
static void
__BS(unmap)(void *v, bus_space_handle_t h, bus_size_t size, int acct)
{
#if !defined(_LP64) || defined(CHIP_EXTENT)
	bus_addr_t addr = 0;	/* initialize to appease gcc */
#endif
#ifndef _LP64
	bool handle_is_km;

	/* determine if h is an address obtained from uvm_km_alloc */
	handle_is_km = !(MIPS_KSEG0_P(h) || MIPS_KSEG1_P(h));
#ifdef __mips_n32
	if (handle_is_km)
		handle_is_km = !MIPS_XKPHYS_P(h);
#endif
	if (handle_is_km) {
		paddr_t pa;
		vaddr_t va = (vaddr_t)trunc_page(h);
		vsize_t sz = (vsize_t)round_page((h % PAGE_SIZE) + size);
		int s;

		s = splhigh();

		if (!pmap_extract(pmap_kernel(), (vaddr_t)h, &pa))
			panic("%s: pmap_extract failed", __func__);
		addr = (bus_addr_t)pa;
#if 0
		printf("%s:%d: addr %#"PRIxBUSADDR", sz %#"PRIxVSIZE"\n",
			__func__, __LINE__, addr, sz);
#endif
		/* sanity check: this is why we couldn't map w/ kseg[0,1] */
		KASSERT(((addr + sz) & ~MIPS_PHYS_MASK) != 0);

		pmap_kremove(va, sz);
		pmap_update(pmap_kernel());
		uvm_km_free(kernel_map, va, sz, UVM_KMF_VAONLY);

		splx(s);
	}
#endif	/* !_LP64 */

#ifdef CHIP_EXTENT

	if (acct == 0)
		return;

#ifdef EXTENT_DEBUG
	printf("%s: freeing handle %#"PRIxBSH" for %#"PRIxBUSSIZE"\n",
		__S(__BS(unmap)), h, size);
#endif

#ifdef _LP64
	KASSERT(MIPS_XKPHYS_P(h));
	addr = MIPS_XKPHYS_TO_PHYS(h);
#else
	if (!handle_is_km) {
		if (MIPS_KSEG0_P(h))
			addr = MIPS_KSEG0_TO_PHYS(h);
#ifdef __mips_n32
		else if (MIPS_XKPHYS_P(h))
			addr = MIPS_XKPHYS_TO_PHYS(h);
#endif
		else
			addr = MIPS_KSEG1_TO_PHYS(h);
	}
#endif

#ifdef CHIP_W1_BUS_START
	if (addr >= CHIP_W1_SYS_START(v) && addr <= CHIP_W1_SYS_END(v)) {
		addr = CHIP_W1_BUS_START(v) + (addr - CHIP_W1_SYS_START(v));
	} else
#endif
#ifdef CHIP_W2_BUS_START
	if (addr >= CHIP_W2_SYS_START(v) && addr <= CHIP_W2_SYS_END(v)) {
		addr = CHIP_W2_BUS_START(v) + (addr - CHIP_W2_SYS_START(v));
	} else
#endif
#ifdef CHIP_W3_BUS_START
	if (addr >= CHIP_W3_SYS_START(v) && addr <= CHIP_W3_SYS_END(v)) {
		addr = CHIP_W3_BUS_START(v) + (addr - CHIP_W3_SYS_START(v));
	} else
#endif
	{
		printf("\n");
#ifdef CHIP_W1_BUS_START
		printf("%s: sys window[1]=0x%lx-0x%lx\n",
		    __S(__BS(unmap)), (u_long)CHIP_W1_SYS_START(v),
		    (u_long)CHIP_W1_SYS_END(v));
#endif
#ifdef CHIP_W2_BUS_START
		printf("%s: sys window[2]=0x%lx-0x%lx\n",
		    __S(__BS(unmap)), (u_long)CHIP_W2_SYS_START(v),
		    (u_long)CHIP_W2_SYS_END(v));
#endif
#ifdef CHIP_W3_BUS_START
		printf("%s: sys window[3]=0x%lx-0x%lx\n",
		    __S(__BS(unmap)), (u_long)CHIP_W3_SYS_START(v),
		    (u_long)CHIP_W3_SYS_END(v));
#endif
		panic("%s: don't know how to unmap %#"PRIxBSH, __S(__BS(unmap)), h);
	}

#ifdef EXTENT_DEBUG
	printf("%s: freeing %#"PRIxBUSADDR" to %#"PRIxBUSADDR"\n",
	    __S(__BS(unmap)), addr, addr + size - 1);
#endif
	int error = extent_free(CHIP_EXTENT(v), addr, size,
	    EX_NOWAIT | (CHIP_EX_MALLOC_SAFE(v) ? EX_MALLOCOK : 0));
	if (error) {
		printf("%s: WARNING: could not unmap"
		    " %#"PRIxBUSADDR"-%#"PRIxBUSADDR" (error %d)\n",
		    __S(__BS(unmap)), addr, addr + size - 1, error);
#ifdef EXTENT_DEBUG
		extent_print(CHIP_EXTENT(v));
#endif
	}
#endif /* CHIP_EXTENT */
#if !defined(_LP64) || defined(CHIP_EXTENT)
	__USE(addr);
#endif
}
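
This routine comes from a bus-space template: __BS(unmap) pastes a
chipset-specific prefix onto the name (yielding, hypothetically, something
like mychip_unmap), and the result is installed in that port's bus_space
method table, so drivers reach it through the ordinary
bus_space_unmap(t, h, size) entry point.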