Example #1
/*
 * Perform assorted dump-related initialization tasks.  Assumes that
 * the maximum physical memory address will not increase afterwards.
 */
static void
dump_misc_init(void)
{
	int i;

	if (dump_headerbuf != NULL)
		return; /* already called */

	for (i = 0; i < mem_cluster_cnt; ++i) {
		paddr_t top = mem_clusters[i].start + mem_clusters[i].size;
		if (max_paddr < top)
			max_paddr = top;
	}
#ifdef DEBUG
	printf("dump_misc_init: max_paddr = 0x%lx\n",
	    (unsigned long)max_paddr);
#endif

	sparse_dump_physmap = (void*)uvm_km_alloc(kernel_map,
	    roundup(max_paddr / (PAGE_SIZE * NBBY), PAGE_SIZE),
	    PAGE_SIZE, UVM_KMF_WIRED|UVM_KMF_ZERO);
	dump_headerbuf = (void*)uvm_km_alloc(kernel_map,
	    dump_headerbuf_size,
	    PAGE_SIZE, UVM_KMF_WIRED|UVM_KMF_ZERO);
	/* XXXjld should check for failure here, disable dumps if so. */
}
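The XXX note above flags a real gap: neither allocation result is checked. Below is a hedged sketch of the kind of check it asks for, placed at the end of dump_misc_init() where the XXX comment sits. It assumes uvm_km_alloc() returns 0 on failure; dump_disabled is a hypothetical flag standing in for whatever mechanism would mark dumps unusable, and the cleanup with uvm_km_free() simply undoes whichever allocation did succeed.

	if (sparse_dump_physmap == NULL || dump_headerbuf == NULL) {
		/* Free whichever half succeeded, with matching size and type. */
		if (sparse_dump_physmap != NULL)
			uvm_km_free(kernel_map, (vaddr_t)sparse_dump_physmap,
			    roundup(max_paddr / (PAGE_SIZE * NBBY), PAGE_SIZE),
			    UVM_KMF_WIRED);
		if (dump_headerbuf != NULL)
			uvm_km_free(kernel_map, (vaddr_t)dump_headerbuf,
			    dump_headerbuf_size, UVM_KMF_WIRED);
		sparse_dump_physmap = NULL;
		dump_headerbuf = NULL;
		dump_disabled = true;	/* hypothetical flag, see note above */
	}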
Example #2
void
cpu_set_tss_gates(struct cpu_info *ci)
{
	struct segment_descriptor sd;

	ci->ci_doubleflt_stack = (char *)uvm_km_alloc(kernel_map, USPACE);
	cpu_init_tss(&ci->ci_doubleflt_tss, ci->ci_doubleflt_stack,
	    IDTVEC(tss_trap08));
	setsegment(&sd, &ci->ci_doubleflt_tss, sizeof(struct i386tss) - 1,
	    SDT_SYS386TSS, SEL_KPL, 0, 0);
	ci->ci_gdt[GTRAPTSS_SEL].sd = sd;
	setgate(&idt[8], NULL, 0, SDT_SYSTASKGT, SEL_KPL,
	    GSEL(GTRAPTSS_SEL, SEL_KPL));

#if defined(DDB) && defined(MULTIPROCESSOR)
	/*
	 * Set up separate handler for the DDB IPI, so that it doesn't
	 * stomp on a possibly corrupted stack.
	 *
	 * XXX overwriting the gate set in db_machine_init.
	 * Should rearrange the code so that it's set only once.
	 */
	ci->ci_ddbipi_stack = (char *)uvm_km_alloc(kernel_map, USPACE);
	cpu_init_tss(&ci->ci_ddbipi_tss, ci->ci_ddbipi_stack,
	    Xintrddbipi);

	setsegment(&sd, &ci->ci_ddbipi_tss, sizeof(struct i386tss) - 1,
	    SDT_SYS386TSS, SEL_KPL, 0, 0);
	ci->ci_gdt[GIPITSS_SEL].sd = sd;

	setgate(&idt[ddb_vec], NULL, 0, SDT_SYSTASKGT, SEL_KPL,
	    GSEL(GIPITSS_SEL, SEL_KPL));
#endif
}
Example #3
void
kvm86_init(void)
{
	size_t vmdsize;
	char *buf;
	struct kvm86_data *vmd;
	struct pcb *pcb;
	paddr_t pa;
	int i;

	vmdsize = round_page(sizeof(struct kvm86_data)) + PAGE_SIZE;

	if ((buf = (char *)uvm_km_zalloc(kernel_map, vmdsize)) == NULL)
		return;
	
	/* first page is stack */
	vmd = (struct kvm86_data *)(buf + PAGE_SIZE);
	pcb = &vmd->pcb;

	/*
	 * Derive the pcb and TSS from proc0.  We want to access all
	 * I/O ports, so we need a full-size permission bitmap.
	 * XXX do we really need the pcb or just the TSS?
	 */
	memcpy(pcb, &proc0.p_addr->u_pcb, sizeof(struct pcb));
	pcb->pcb_tss.tss_esp0 = (int)vmd;
	pcb->pcb_tss.tss_ss0 = GSEL(GDATA_SEL, SEL_KPL);
	for (i = 0; i < sizeof(vmd->iomap) / 4; i++)
		vmd->iomap[i] = 0;
	pcb->pcb_tss.tss_ioopt =
	    ((caddr_t)vmd->iomap - (caddr_t)&pcb->pcb_tss) << 16;

	/* setup TSS descriptor (including our iomap) */
	setsegment(&vmd->sd, &pcb->pcb_tss,
	    sizeof(struct pcb) + sizeof(vmd->iomap) - 1,
	    SDT_SYS386TSS, SEL_KPL, 0, 0);

	/* prepare VM for BIOS calls */
	kvm86_mapbios(vmd);
	if ((bioscallscratchpage = (void *)uvm_km_alloc(kernel_map, PAGE_SIZE))
	    == 0)
		return;

	pmap_extract(pmap_kernel(), (vaddr_t)bioscallscratchpage, &pa);
	kvm86_map(vmd, pa, BIOSCALLSCRATCHPAGE_VMVA);
	bioscallvmd = vmd;
	bioscalltmpva = uvm_km_alloc(kernel_map, PAGE_SIZE);
	mtx_init(&kvm86_mp_mutex, IPL_IPI);
}
Example #4
int
mainbus_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags, bus_space_handle_t *bshp)
{
	u_long startpa, endpa, pa;
	vaddr_t va;

	if ((u_long)bpa > (u_long)KERNEL_BASE) {
		/* XXX This is a temporary hack to aid transition. */
		*bshp = bpa;
		return(0);
	}

	startpa = trunc_page(bpa);
	endpa = round_page(bpa + size);

	/* XXX use extent manager to check duplicate mapping */

	va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (! va)
		return(ENOMEM);

	*bshp = (bus_space_handle_t)(va + (bpa - startpa));

	const int pmapflags =
	    (flags & (BUS_SPACE_MAP_CACHEABLE|BUS_SPACE_MAP_PREFETCHABLE))
		? 0
		: PMAP_NOCACHE; 
	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE, pmapflags);
	}
	pmap_update(pmap_kernel());

	return(0);
}
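A hedged sketch of the matching unmap path for the function above, assuming the handle holds a kernel VA from the uvm_km_alloc() call (i.e. ignoring the temporary KERNEL_BASE hack): the pages entered with pmap_kenter_pa() are removed and the VA range is returned with the same UVM_KMF_VAONLY type it was allocated with. The name mainbus_bs_unmap is assumed from the naming convention; this is illustrative, not the port's actual bus_space unmap.

void
mainbus_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
{
	vaddr_t va = trunc_page((vaddr_t)bsh);
	vsize_t sz = round_page((vaddr_t)bsh + size) - va;

	pmap_kremove(va, sz);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, sz, UVM_KMF_VAONLY);
}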
Example #5
/*
 * uvm_emap_sysinit: initialize the emap subsystem.
 */
void
uvm_emap_sysinit(void)
{
	struct uvm_cpu *ucpu;
	size_t qmax;
	u_int i;

	uvm_emap_size = roundup(uvm_emap_size, PAGE_SIZE);
	qmax = 16 * PAGE_SIZE;
#if 0
	uvm_emap_va = uvm_km_alloc(kernel_map, uvm_emap_size, 0,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	if (uvm_emap_va == 0) {
		panic("uvm_emap_init: KVA allocation failed");
	}

	uvm_emap_vmem = vmem_create("emap", uvm_emap_va, uvm_emap_size,
	    PAGE_SIZE, NULL, NULL, NULL, qmax, VM_SLEEP, IPL_NONE);
	if (uvm_emap_vmem == NULL) {
		panic("uvm_emap_init: vmem creation failed");
	}
#else
	uvm_emap_va = 0;
	uvm_emap_vmem = NULL;
#endif
	/* Initial generation value is 1. */
	uvm_emap_gen = 1;
	for (i = 0; i < maxcpus; i++) {
		ucpu = uvm.cpus[i];
		if (ucpu != NULL) {
			ucpu->emap_gen = 1;
		}
	}
}
Example #6
/*
 * Map an IO request into kernel virtual address space.
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t uva, kva;
	paddr_t pa;
	vsize_t size, off;
	int npf;
	struct pmap *upmap, *kpmap;

#ifdef DIAGNOSTIC
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
#endif
	upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	kpmap = vm_map_pmap(phys_map);
	bp->b_saveaddr = bp->b_data;
	uva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	size = round_page(off + len);
	kva = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(kva + off);
	npf = btoc(size);
	while (npf--) {
		if (pmap_extract(upmap, uva, &pa) == false)
			panic("vmapbuf: null page frame");
		pmap_enter(kpmap, kva, pa,
		    VM_PROT_READ | VM_PROT_WRITE, PMAP_WIRED);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
	}
	pmap_update(kpmap);

	return 0;
}
Example #7
/* This code was originally stolen from the alpha port. */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t pa;
	struct proc *p;
	vm_prot_t prot;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	p = bp->b_proc;
	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(taddr + off);
	len = atop(len);
	prot = bp->b_flags & B_READ ? VM_PROT_READ | VM_PROT_WRITE :
				      VM_PROT_READ;
	while (len--) {
		if (pmap_extract(vm_map_pmap(&p->p_vmspace->vm_map), faddr,
		    &pa) == false)
			panic("vmapbuf: null page frame");
		pmap_enter(vm_map_pmap(phys_map), taddr, trunc_page(pa),
		    prot, prot | PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
	}
	pmap_update(vm_map_pmap(phys_map));

	return 0;
}
Example #8
/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
    struct pmap *upmap;
    vaddr_t uva;	/* User VA (map from) */
    vaddr_t kva;	/* Kernel VA (map to) */
    paddr_t pa; 	/* physical address */
    vsize_t off;

    if ((bp->b_flags & B_PHYS) == 0)
        panic("vmapbuf");

    bp->b_saveaddr = bp->b_data;
    uva = trunc_page((vaddr_t)bp->b_data);
    off = (vaddr_t)bp->b_data - uva;
    len = round_page(off + len);
    kva = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
    bp->b_data = (void *)(kva + off);

    upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
    do {
        if (pmap_extract(upmap, uva, &pa) == FALSE)
            panic("vmapbuf: null page frame");
        /* Now map the page into kernel space. */
        pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);

        uva += PAGE_SIZE;
        kva += PAGE_SIZE;
        len -= PAGE_SIZE;
    } while (len);
    pmap_update(pmap_kernel());

    return 0;
}
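Since this example allocates VA only (UVM_KMF_VAONLY) and wires the user pages in with pmap_kenter_pa(), its teardown typically mirrors it step for step. Below is a hedged sketch patterned on the function above, not necessarily the port's exact vunmapbuf().

void
vunmapbuf(struct buf *bp, vsize_t len)
{
    vaddr_t kva;
    vsize_t off;

    if ((bp->b_flags & B_PHYS) == 0)
        panic("vunmapbuf");

    kva = trunc_page((vaddr_t)bp->b_data);
    off = (vaddr_t)bp->b_data - kva;
    len = round_page(off + len);

    /* Drop the wired kernel mappings, then give the VA range back. */
    pmap_kremove(kva, len);
    pmap_update(pmap_kernel());
    uvm_km_free(kernel_map, kva, len, UVM_KMF_VAONLY);

    bp->b_data = bp->b_saveaddr;
    bp->b_saveaddr = NULL;
}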
Example #9
/*
 * Allocate shadow GDT for a slave CPU.
 */
void
gdt_alloc_cpu(struct cpu_info *ci)
{
    int max_len = MAXGDTSIZ * sizeof(gdt[0]);
    int min_len = MINGDTSIZ * sizeof(gdt[0]);
    struct vm_page *pg;
    vaddr_t va;

    ci->ci_gdt = (union descriptor *)uvm_km_alloc(kernel_map, max_len,
                 0, UVM_KMF_VAONLY);
    for (va = (vaddr_t)ci->ci_gdt; va < (vaddr_t)ci->ci_gdt + min_len;
            va += PAGE_SIZE) {
        while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO))
                == NULL) {
            uvm_wait("gdt_alloc_cpu");
        }
        pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
                       VM_PROT_READ | VM_PROT_WRITE);
    }
    pmap_update(pmap_kernel());
    memset(ci->ci_gdt, 0, min_len);
    memcpy(ci->ci_gdt, gdt, gdt_count[0] * sizeof(gdt[0]));
    setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, 0xfffff,
               SDT_MEMRWA, SEL_KPL, 1, 1);
}
Example #10
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * If we're only mapping 1 segment, use a direct-mapped segment
	 * (KSEG0/KSEG1, or XKPHYS on _LP64) to avoid TLB thrashing.
	 */
#ifdef _LP64
	if (nsegs == 1) {
		if (((mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT) == 0)
		&&  (flags & BUS_DMA_COHERENT))
			*kvap = (void *)MIPS_PHYS_TO_XKPHYS_UNCACHED(
			    segs[0].ds_addr);
		else
			*kvap = (void *)MIPS_PHYS_TO_XKPHYS_CACHED(
			    segs[0].ds_addr);
		return 0;
	}
#else
	if ((nsegs == 1) && (segs[0].ds_addr < MIPS_PHYS_MASK)) {
		if (((mips_options.mips_cpu_flags & CPU_MIPS_D_CACHE_COHERENT) == 0)
		&&  (flags & BUS_DMA_COHERENT))
			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0].ds_addr);
		else
			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0].ds_addr);
		return (0);
	}
#endif	/* _LP64 */

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return (ENOMEM);

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    PMAP_WIRED | VM_PROT_READ | VM_PROT_WRITE);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
Example #11
/*
 * Map a user I/O request into kernel virtual address space.
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;	/* Kernel VA (map to) */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	vaddr_t uva = mips_trunc_page(bp->b_data);
	const vaddr_t off = (vaddr_t)bp->b_data - uva;
	len = mips_round_page(off + len);

	kva = uvm_km_alloc(phys_map, len, atop(uva) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
	KASSERT((atop(kva ^ uva) & uvmexp.colormask) == 0);
	bp->b_saveaddr = bp->b_data;
	bp->b_data = (void *)(kva + off);
	struct pmap * const upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	do {
		paddr_t pa;	/* physical address */
		if (pmap_extract(upmap, uva, &pa) == false)
			panic("vmapbuf: null page frame");
		pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE,
		    PMAP_WIRED);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(pmap_kernel());

	return 0;
}
Example #12
/*
 * allocate anons
 */
void
uvm_anon_init()
{
	struct vm_anon *anon;
	int nanon = uvmexp.free - (uvmexp.free / 16); /* XXXCDC ??? */
	int lcv;

	/*
	 * Allocate the initial anons.
	 */
	anon = (struct vm_anon *)uvm_km_alloc(kernel_map,
	    sizeof(*anon) * nanon);
	if (anon == NULL) {
		printf("uvm_anon_init: can not allocate %d anons\n", nanon);
		panic("uvm_anon_init");
	}

	memset(anon, 0, sizeof(*anon) * nanon);
	uvm.afree = NULL;
	uvmexp.nanon = uvmexp.nfreeanon = nanon;
	for (lcv = 0 ; lcv < nanon ; lcv++) {
		anon[lcv].u.an_nxt = uvm.afree;
		uvm.afree = &anon[lcv];
	}
	simple_lock_init(&uvm.afreelock);
}
Example #13
int
mappedcopyout(void *f, void *t, size_t count)
{
	void *fromp = f, *top = t;
	vaddr_t kva;
	paddr_t upa;
	size_t len;
	int off, alignable;
	pmap_t upmap;
#define CADDR2 caddr1

#ifdef DEBUG
	if (mappedcopydebug & MDB_COPYOUT)
		printf("mappedcopyout(%p, %p, %lu), pid %d\n",
		    fromp, top, (u_long)count, curproc->p_pid);
	mappedcopyoutcount++;
#endif

	if (CADDR2 == 0)
		CADDR2 = (void *) uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
		    UVM_KMF_VAONLY);

	kva = (vaddr_t) CADDR2;
	off = (int)((u_long)top & PAGE_MASK);
	alignable = (off == ((u_long)fromp & PAGE_MASK));
	upmap = vm_map_pmap(&curproc->p_vmspace->vm_map);
	while (count > 0) {
		/*
		 * First access of a page, use subyte to make sure
		 * page is faulted in and write access allowed.
		 */
		if (subyte(top, *((char *)fromp)) == -1)
			return EFAULT;
		/*
		 * Map in the page and memcpy data out to it
		 */
		if (pmap_extract(upmap, trunc_page((vaddr_t)top), &upa)
		    == false)
			panic("mappedcopyout: null page frame");
		len = min(count, (PAGE_SIZE - off));
		pmap_enter(pmap_kernel(), kva, upa,
		    VM_PROT_READ|VM_PROT_WRITE,
		    VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
		pmap_update(pmap_kernel());
		if (len == PAGE_SIZE && alignable && off == 0)
			copypage(fromp, (void *)kva);
		else
			memcpy((void *)(kva + off), fromp, len);
		fromp += len;
		top += len;
		count -= len;
		off = 0;
	}
	pmap_remove(pmap_kernel(), kva, kva + PAGE_SIZE);
	pmap_update(pmap_kernel());
	return 0;
#undef CADDR2
}
Example #14
int
au_himem_map(void *cookie, bus_addr_t addr, bus_size_t size,
    int flags, bus_space_handle_t *bshp, int acct)
{
	au_himem_cookie_t	*c = (au_himem_cookie_t *)cookie;
	int			err;
	paddr_t			pa;
	vaddr_t			va;
	vsize_t			realsz;
	int			s;

	/* make sure we can map this bus address */
	if (addr < c->c_start || (addr + size) > c->c_end) {
		return EINVAL;
	}

	/* physical address, page aligned */
	pa = TRUNC_PAGE(c->c_physoff + addr);

	/*
	 * we are only going to work with whole pages.  the
	 * calculation is the offset into the first page, plus the
	 * intended size, rounded up to a whole number of pages.
	 */
	realsz = ROUND_PAGE((addr % PAGE_SIZE) + size);

	va = uvm_km_alloc(kernel_map,
	    realsz, PAGE_SIZE, UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (va == 0) {
		return ENOMEM;
	}

	/* virtual address in handle (offset appropriately) */
	*bshp = va + (addr % PAGE_SIZE);

	/* map the pages in the kernel pmap */
	s = splhigh();
	while (realsz) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
		realsz -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	splx(s);

	/* record our allocated range of bus addresses */
	if (acct && c->c_extent != NULL) {
		err = extent_alloc_region(c->c_extent, addr, size, EX_NOWAIT);
		if (err) {
			au_himem_unmap(cookie, *bshp, size, 0);
			return err;
		}
	}

	return 0;
}
Example #15
static SLJIT_INLINE void* alloc_chunk(sljit_uw size)
{
#ifdef _KERNEL
    return (void *)uvm_km_alloc(module_map, size,
                                PAGE_SIZE, UVM_KMF_WIRED | UVM_KMF_ZERO | UVM_KMF_EXEC);
#else
    void* retval = mmap(0, size, PROT_READ | PROT_WRITE | PROT_EXEC, MAP_PRIVATE | MAP_ANON, -1, 0);
    return (retval != MAP_FAILED) ? retval : NULL;
#endif
}
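A hedged sketch of the release side that mirrors alloc_chunk() above: in the kernel build the chunk goes back through uvm_km_free() with UVM_KMF_WIRED (the allocation type used above), otherwise through munmap(). Illustrative only; the allocator's real free routine may differ in detail.

static SLJIT_INLINE void free_chunk(void *chunk, sljit_uw size)
{
#ifdef _KERNEL
    /* Return the wired, executable chunk to module_map. */
    uvm_km_free(module_map, (vaddr_t)chunk, size, UVM_KMF_WIRED);
#else
    munmap(chunk, size);
#endif
}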
Example #16
/*
 * dev_mem_getva: get a special virtual address.  If architecture requires,
 * allocate VA according to PA, which avoids cache-aliasing issues.  Use a
 * constant, general mapping address otherwise.
 */
static inline vaddr_t
dev_mem_getva(paddr_t pa)
{
#ifdef __HAVE_MM_MD_CACHE_ALIASING
	return uvm_km_alloc(kernel_map, PAGE_SIZE,
	    atop(pa) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
#else
	return dev_mem_addr;
#endif
}
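The release side of dev_mem_getva() has to distinguish the same two cases: a colored VA obtained from kernel_map must be returned with the matching UVM_KMF_VAONLY flag, while the constant mapping address is simply reused and never freed. A hedged sketch follows; the name and signature are assumed for symmetry with dev_mem_getva().

static inline void
dev_mem_relva(paddr_t pa, vaddr_t va)
{
#ifdef __HAVE_MM_MD_CACHE_ALIASING
	/* Per-PA (colored) VA: hand it back to kernel_map. */
	uvm_km_free(kernel_map, va, PAGE_SIZE, UVM_KMF_VAONLY);
#else
	KASSERT(dev_mem_addr == va);
#endif
}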
Example #17
int
footbridge_mem_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	bus_addr_t startpa, endpa, pa;
	vaddr_t va;

	/* Round the allocation to page boundaries */
	startpa = trunc_page(bpa);
	endpa = round_page(bpa + size);

	/*
	 * Check for mappings below 1MB as we have this space already
	 * mapped. In practice it is only the VGA hole that takes
	 * advantage of this.
	 */
	if (endpa < DC21285_PCI_ISA_MEM_VSIZE) {
		/* Store the bus space handle */
		*bshp = DC21285_PCI_ISA_MEM_VBASE + bpa;
		return 0;
	}

	/*
	 * Eventually this function will do the mapping check for overlapping / 
	 * multiple mappings
	 */

	va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (va == 0)
		return ENOMEM;

	/* Store the bus space handle */
	*bshp = va + (bpa & PGOFSET);

	/* Now map the pages */
	/* The cookie is the physical base address for the I/O area */
	const int pmapflags =
	    (flags & (BUS_SPACE_MAP_CACHEABLE|BUS_SPACE_MAP_PREFETCHABLE))
		? 0
		: PMAP_NOCACHE;

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, (bus_addr_t)t + pa,
		    VM_PROT_READ | VM_PROT_WRITE, pmapflags);
	}
	pmap_update(pmap_kernel());

/*	if (bpa >= DC21285_PCI_MEM_VSIZE && bpa != DC21285_ARMCSR_VBASE)
		panic("footbridge_bs_map: Address out of range (%08lx)", bpa);
*/
	return(0);
}
Example #18
/*
 * mm_init: initialize memory device driver.
 */
void
mm_init(void)
{
	vaddr_t pg;

	mutex_init(&dev_mem_lock, MUTEX_DEFAULT, IPL_NONE);

	/* Read-only zero-page. */
	pg = uvm_km_alloc(kernel_map, PAGE_SIZE, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
	KASSERT(pg != 0);
	pmap_protect(pmap_kernel(), pg, pg + PAGE_SIZE, VM_PROT_READ);
	pmap_update(pmap_kernel());
	dev_zero_page = (void *)pg;

#ifndef __HAVE_MM_MD_CACHE_ALIASING
	/* KVA for mappings during I/O. */
	dev_mem_addr = uvm_km_alloc(kernel_map, PAGE_SIZE, 0,
	    UVM_KMF_VAONLY|UVM_KMF_WAITVA);
	KASSERT(dev_mem_addr != 0);
#else
	dev_mem_addr = 0;
#endif
}
Example #19
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	/*
	 * If we're only mapping 1 segment, use KSEG0 or KSEG1, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		if (flags & BUS_DMA_COHERENT)
			*kvap = (void *)MIPS_PHYS_TO_KSEG1(segs[0]._ds_paddr);
		else
			*kvap = (void *)MIPS_PHYS_TO_KSEG0(segs[0]._ds_paddr);
		return 0;
	}

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return ENOMEM;

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		segs[curseg]._ds_vaddr = va;
		for (addr = segs[curseg]._ds_paddr;
		    addr < (segs[curseg]._ds_paddr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);

			/* XXX Do something about COHERENT here. */
		}
	}
	pmap_update(pmap_kernel());

	return 0;
}
Example #20
int
sun68k_bus_map(bus_space_tag_t t, bus_type_t iospace, bus_addr_t addr,
    bus_size_t size, int flags, vaddr_t vaddr, bus_space_handle_t *hp)
{
	bus_size_t	offset;
	vaddr_t v;

	/*
	 * If we suspect there might be one, try to find
	 * and use a PROM mapping.
	 */
	if ((flags & _SUN68K_BUS_MAP_USE_PROM) != 0 &&
	     find_prom_map(addr, iospace, size, &v) == 0) {
		*hp = (bus_space_handle_t)v;
		return (0);
	}

	/*
	 * Adjust the user's request to be page-aligned.
	 */
	offset = addr & PGOFSET;
	addr -= offset;
	size += offset;
	size = m68k_round_page(size);
	if (size == 0) {
		printf("sun68k_bus_map: zero size\n");
		return (EINVAL);
	}

	/* Get some kernel virtual address space. */
	if (vaddr)
		v = vaddr;
	else
		v = uvm_km_alloc(kernel_map, size, 0,
		    UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	if (v == 0)
		panic("sun68k_bus_map: no memory");

	/* note: preserve page offset */
	*hp = (bus_space_handle_t)(v | offset);

	/*
	 * Map the device.  
	 */
	addr |= iospace | PMAP_NC;
	pmap_map(v, addr, addr + size, VM_PROT_ALL);

	return (0);
}
Example #21
/*
 * Allocate actual memory pages in DVMA space.
 * (idea for implementation borrowed from Chris Torek.)
 */
void *
dvma_malloc(size_t bytes)
{
	void *new_mem;
	vsize_t new_size;

	if (bytes == 0)
		return NULL;
	new_size = m68k_round_page(bytes);
	new_mem = (void *)uvm_km_alloc(phys_map, new_size, 0, UVM_KMF_WIRED);
	if (new_mem == 0)
		panic("dvma_malloc: no space in phys_map");
	/* The pmap code always makes DVMA pages non-cached. */
	return new_mem;
}
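The matching release for dvma_malloc(), sketched under the assumption that callers pass back the same byte count they allocated with; the space returns to phys_map with UVM_KMF_WIRED, the type used above. This is illustrative, not necessarily the port's exact dvma_free().

void
dvma_free(void *addr, size_t bytes)
{
	vsize_t sz = m68k_round_page(bytes);

	uvm_km_free(phys_map, (vaddr_t)addr, sz, UVM_KMF_WIRED);
}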
Example #22
/*
 * Initialize the GDT subsystem.  Called from autoconf().
 */
void
gdt_init()
{
    size_t max_len, min_len;
    union descriptor *old_gdt;
    struct vm_page *pg;
    vaddr_t va;
    struct cpu_info *ci = &cpu_info_primary;

    mutex_init(&gdt_lock_store, MUTEX_DEFAULT, IPL_NONE);

    max_len = MAXGDTSIZ * sizeof(gdt[0]);
    min_len = MINGDTSIZ * sizeof(gdt[0]);

    gdt_size[0] = MINGDTSIZ;
    gdt_count[0] = NGDT;
    gdt_next[0] = NGDT;
    gdt_free[0] = GNULL_SEL;
#ifdef XEN
    max_len = max_len * 2;
    gdt_size[1] = 0;
    gdt_count[1] = MAXGDTSIZ;
    gdt_next[1] = MAXGDTSIZ;
    gdt_free[1] = GNULL_SEL;
#endif

    old_gdt = gdt;
    gdt = (union descriptor *)uvm_km_alloc(kernel_map, max_len,
                                           0, UVM_KMF_VAONLY);
    for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + min_len; va += PAGE_SIZE) {
        pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
        if (pg == NULL) {
            panic("gdt_init: no pages");
        }
        pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
                       VM_PROT_READ | VM_PROT_WRITE);
    }
    pmap_update(pmap_kernel());
    memcpy(gdt, old_gdt, NGDT * sizeof(gdt[0]));
    ci->ci_gdt = gdt;
    setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, 0xfffff,
               SDT_MEMRWA, SEL_KPL, 1, 1);

    gdt_init_cpu(ci);
}
Example #23
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	size = round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);

	if (va == 0)
		return ENOMEM;

	*kvap = (void *)va;

	for (curseg = 0; curseg < nsegs; curseg++) {
		for (addr = segs[curseg].ds_addr;
		    addr < (segs[curseg].ds_addr + segs[curseg].ds_len);
		    addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_enter(pmap_kernel(), va, addr,
			    VM_PROT_READ | VM_PROT_WRITE,
			    VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);

			/* Cache-inhibit the page if necessary */
			if ((flags & BUS_DMA_COHERENT) != 0)
				_pmap_set_page_cacheinhibit(pmap_kernel(), va);

			segs[curseg]._ds_flags &= ~BUS_DMA_COHERENT;
			segs[curseg]._ds_flags |= (flags & BUS_DMA_COHERENT);
		}
	}
	pmap_update(pmap_kernel());

	if ((flags & BUS_DMA_COHERENT) != 0)
		TBIAS();

	return 0;
}
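A hedged sketch of the unmap that pairs with the function above, ignoring the cache-inhibit bookkeeping: mappings created with pmap_enter() are torn down with pmap_remove(), and the UVM_KMF_VAONLY range goes back to kernel_map with the same flag it was allocated with.

void
_bus_dmamem_unmap(bus_dma_tag_t t, void *kva, size_t size)
{
	size = m68k_round_page(size);
	pmap_remove(pmap_kernel(), (vaddr_t)kva, (vaddr_t)kva + size);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, (vaddr_t)kva, size, UVM_KMF_VAONLY);
}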
Example #24
void
seminit(void)
{
	int i, sz;
	vaddr_t v;

	mutex_init(&semlock, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&sem_realloc_cv, "semrealc");
	sem_realloc_state = false;
	semtot = 0;
	sem_waiters = 0;

	/* Allocate the wired memory for our structures */
	sz = ALIGN(seminfo.semmni * sizeof(struct semid_ds)) +
	    ALIGN(seminfo.semmns * sizeof(struct __sem)) +
	    ALIGN(seminfo.semmni * sizeof(kcondvar_t)) +
	    ALIGN(seminfo.semmnu * seminfo.semusz);
	sz = round_page(sz);
	v = uvm_km_alloc(kernel_map, sz, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
	if (v == 0)
		panic("sysv_sem: cannot allocate memory");
	sema = (void *)v;
	sem = (void *)((uintptr_t)sema +
	    ALIGN(seminfo.semmni * sizeof(struct semid_ds)));
	semcv = (void *)((uintptr_t)sem +
	    ALIGN(seminfo.semmns * sizeof(struct __sem)));
	semu = (void *)((uintptr_t)semcv +
	    ALIGN(seminfo.semmni * sizeof(kcondvar_t)));

	for (i = 0; i < seminfo.semmni; i++) {
		sema[i]._sem_base = 0;
		sema[i].sem_perm.mode = 0;
		cv_init(&semcv[i], "semwait");
	}
	for (i = 0; i < seminfo.semmnu; i++) {
		struct sem_undo *suptr = SEMU(semu, i);
		suptr->un_proc = NULL;
	}
	semu_list = NULL;
	exithook_establish(semexit, NULL);

	sysvipcinit();
}
Example #25
/*
 * Utility to allocate an aligned kernel virtual address range
 */
vaddr_t 
_bus_dma_valloc_skewed(size_t size, u_long boundary, u_long align, u_long skew)
{
	vaddr_t va;

	/*
	 * Find a region of kernel virtual addresses that is aligned
	 * to the given address modulo the requested alignment, i.e.
	 *
	 *	(va - skew) == 0 mod align
	 *
	 * The following conditions apply to the arguments:
	 *
	 *	- `size' must be a multiple of the VM page size
	 *	- `align' must be a power of two
	 *	   and greater than or equal to the VM page size
	 *	- `skew' must be smaller than `align'
	 *	- `size' must be smaller than `boundary'
	 */

#ifdef DIAGNOSTIC
	if ((size & PAGE_MASK) != 0)
		panic("_bus_dma_valloc_skewed: invalid size %lx", (unsigned long) size);
	if ((align & PAGE_MASK) != 0)
		panic("_bus_dma_valloc_skewed: invalid alignment %lx", align);
	if (align < skew)
		panic("_bus_dma_valloc_skewed: align %lx < skew %lx",
			align, skew);
#endif

	/* XXX - Implement this! */
	if (boundary || skew)
		panic("_bus_dma_valloc_skewed: not implemented");

	/*
	 * First, find a region large enough to contain any aligned chunk
	 */
	va = uvm_km_alloc(kernel_map, size, align, UVM_KMF_VAONLY);
	if (va == 0)
		return (ENOMEM);

	return (va);
}
Example #26
/*
 * Handle ioctl MD_SETCONF for (sc_type == MD_KMEM_ALLOCATED)
 * Just allocate some kernel memory and return.
 */
static int
md_ioctl_kalloc(struct md_softc *sc, struct md_conf *umd,
                struct lwp *l)
{
    vaddr_t addr;
    vsize_t size;

    /* Sanity check the size. */
    size = umd->md_size;
    addr = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_WIRED|UVM_KMF_ZERO);
    if (!addr)
        return ENOMEM;

    /* This unit is now configured. */
    sc->sc_addr = (void *)addr; 	/* kernel space */
    sc->sc_size = (size_t)size;
    sc->sc_type = MD_KMEM_ALLOCATED;
    return 0;
}
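The "sanity check" comment in md_ioctl_kalloc() is not matched by any check in the body. A hedged sketch of what such a check might do just before the allocation; the rounding policy and the EINVAL choice are assumptions, shown only to keep sc_size and any later free in whole pages.

    /* Hypothetical check: work in whole pages and reject a zero size. */
    size = round_page(umd->md_size);
    if (size == 0)
        return EINVAL;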
Example #27
/* mem bs */
int
ixp425_pci_mem_bs_map(void *t, bus_addr_t bpa, bus_size_t size,
	      int cacheable, bus_space_handle_t *bshp)
{
	const struct pmap_devmap	*pd;

	paddr_t		startpa;
	paddr_t		endpa;
	paddr_t		pa;
	paddr_t		offset;
	vaddr_t		va;
	pt_entry_t	*pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return 0;
	}

	endpa = round_page(bpa + size);
	offset = bpa & PAGE_MASK;
	startpa = trunc_page(bpa);

	/* Get some VM.  */
	va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (va == 0)
		return ENOMEM;

	/* Store the bus space handle */
	*bshp = va + offset;

	/* Now map the pages */
	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
		pte = vtopte(va);
		*pte &= ~L2_S_CACHE_MASK;
		PTE_SYNC(pte);
	}
	pmap_update(pmap_kernel());

	return(0);
}
Example #28
/*
 * Common function for mapping DMA-safe memory.  May be called by
 * bus-specific DMA memory map functions.
 */
int 
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, void **kvap, int flags)
{
	struct vm_page *m;
	vaddr_t va;
	struct pglist *mlist;
	const uvm_flag_t kmflags =
	    (flags & BUS_DMA_NOWAIT) != 0 ? UVM_KMF_NOWAIT : 0;

	if (nsegs != 1)
		panic("_bus_dmamem_map: nsegs = %d", nsegs);

	size = m68k_round_page(size);

	va = uvm_km_alloc(kernel_map, size, 0, UVM_KMF_VAONLY | kmflags);
	if (va == 0)
		return (ENOMEM);

	segs[0]._ds_va = va;
	*kvap = (void *)va;

	mlist = segs[0]._ds_mlist;
	for (m = TAILQ_FIRST(mlist); m != NULL; m = TAILQ_NEXT(m,pageq.queue)) {
		paddr_t pa;

		if (size == 0)
			panic("_bus_dmamem_map: size botch");

		pa = VM_PAGE_TO_PHYS(m);
		pmap_enter(pmap_kernel(), va, pa | PMAP_NC,
			   VM_PROT_READ | VM_PROT_WRITE,
			   VM_PROT_READ | VM_PROT_WRITE | PMAP_WIRED);

		va += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return (0);
}
Example #29
int
ifpga_mem_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int cacheable, bus_space_handle_t *bshp)
{
    bus_addr_t startpa, endpa;
    vaddr_t va;
    const struct pmap_devmap *pd;
    bus_addr_t pa = bpa + (bus_addr_t) t;

    if ((pd = pmap_devmap_find_pa(pa, size)) != NULL) {
        /* Device was statically mapped. */
        *bshp = pd->pd_va + (pa - pd->pd_pa);
        return 0;
    }

    /* Round the allocation to page boundaries */
    startpa = trunc_page(bpa);
    endpa = round_page(bpa + size);

    /* Get some VM.  */
    va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
                      UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
    if (va == 0)
        return ENOMEM;

    /* Store the bus space handle */
    *bshp = va + (bpa & PGOFSET);

    /* Now map the pages */
    /* The cookie is the physical base address for the I/O area */
    while (startpa < endpa) {
        /* XXX pmap_kenter_pa maps pages cacheable -- not what
           we want.  */
        pmap_enter(pmap_kernel(), va, (bus_addr_t)t + startpa,
                   VM_PROT_READ | VM_PROT_WRITE, 0);
        va += PAGE_SIZE;
        startpa += PAGE_SIZE;
    }
    pmap_update(pmap_kernel());

    return 0;
}
Example #30
/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;


#ifdef PMAP_DEBUG
	if (pmap_debug_level > 0)
		printf("vmapbuf: bp=%08x buf=%08x len=%08x\n", (u_int)bp,
		    (u_int)bp->b_data, (u_int)len);
#endif	/* PMAP_DEBUG */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	bp->b_saveaddr = bp->b_data;
	faddr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_alloc(phys_map, len, atop(faddr) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
	bp->b_data = (void *)(taddr + off);

	/*
	 * The region is locked, so we expect pmap_extract() to find a
	 * valid mapping for every page.
	 */
	while (len) {
		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &fpa);
		pmap_enter(pmap_kernel(), taddr, fpa,
			VM_PROT_READ|VM_PROT_WRITE, VM_PROT_READ|VM_PROT_WRITE|PMAP_WIRED);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());

	return 0;
}