/*
 * Quiesce CPUs in a multiprocessor machine before resuming. We need to do
 * this since the APs will be hatched (but waiting for CPUF_GO), and we don't
 * want the APs to be executing code and causing side effects during the
 * unpack operation.
 */
void
hibernate_quiesce_cpus(void)
{
	struct cpu_info *ci;
	u_long i;

	KASSERT(CPU_IS_PRIMARY(curcpu()));

	pmap_kenter_pa(ACPI_TRAMPOLINE, ACPI_TRAMPOLINE, PROT_READ | PROT_EXEC);
	pmap_kenter_pa(ACPI_TRAMP_DATA, ACPI_TRAMP_DATA,
		PROT_READ | PROT_WRITE);

	for (i = 0; i < MAXCPUS; i++) {
		ci = cpu_info[i];
		if (ci == NULL)
			continue;
		if (ci->ci_idle_pcb == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		if (ci->ci_flags & (CPUF_BSP | CPUF_SP | CPUF_PRIMARY))
			continue;
		atomic_setbits_int(&ci->ci_flags, CPUF_GO | CPUF_PARK);
	}

	/* Wait a bit for the APs to park themselves */
	delay(500000);

	pmap_kremove(ACPI_TRAMPOLINE, PAGE_SIZE);
	pmap_kremove(ACPI_TRAMP_DATA, PAGE_SIZE);
}
Example #2
/*
 * Grow the GDT.
 */
void
gdt_grow(int which)
{
    size_t old_len, new_len;
    CPU_INFO_ITERATOR cii;
    struct cpu_info *ci;
    struct vm_page *pg;
    vaddr_t va;

    old_len = gdt_size[which] * sizeof(gdt[0]);
    gdt_size[which] <<= 1;
    new_len = old_len << 1;

#ifdef XEN
    if (which != 0) {
        size_t max_len = MAXGDTSIZ * sizeof(gdt[0]);
        if (old_len == 0) {
            gdt_size[which] = MINGDTSIZ;
            new_len = gdt_size[which] * sizeof(gdt[0]);
        }
        for(va = (vaddr_t)(cpu_info_primary.ci_gdt) + old_len + max_len;
                va < (vaddr_t)(cpu_info_primary.ci_gdt) + new_len + max_len;
                va += PAGE_SIZE) {
            while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO))
                    == NULL) {
                uvm_wait("gdt_grow");
            }
            pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
                           VM_PROT_READ | VM_PROT_WRITE);
        }
        return;
    }
#endif

    for (CPU_INFO_FOREACH(cii, ci)) {
        for (va = (vaddr_t)(ci->ci_gdt) + old_len;
                va < (vaddr_t)(ci->ci_gdt) + new_len;
                va += PAGE_SIZE) {
            while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO)) ==
                    NULL) {
                uvm_wait("gdt_grow");
            }
            pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
                           VM_PROT_READ | VM_PROT_WRITE);
        }
    }

    pmap_update(pmap_kernel());
}
Example #3
/*
 * Initialize the GDT.
 */
void
gdt_init(void)
{
	char *old_gdt;
	struct vm_page *pg;
	vaddr_t va;
	struct cpu_info *ci = &cpu_info_primary;

	gdt_next = 0;
	gdt_free = GNULL_SEL;

	old_gdt = gdtstore;
	gdtstore = (char *)uvm_km_valloc(kernel_map, MAXGDTSIZ);
	for (va = (vaddr_t)gdtstore; va < (vaddr_t)gdtstore + MAXGDTSIZ;
	    va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL) {
			panic("gdt_init: no pages");
		}
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
	}
	bcopy(old_gdt, gdtstore, DYNSEL_START);
	ci->ci_gdt = gdtstore;
	set_sys_segment(GDT_ADDR_SYS(gdtstore, GLDT_SEL), ldtstore,
	    LDT_SIZE - 1, SDT_SYSLDT, SEL_KPL, 0);

	gdt_init_cpu(ci);
}
Example #4
int
mainbus_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags, bus_space_handle_t *bshp)
{
	u_long startpa, endpa, pa;
	vaddr_t va;

	if ((u_long)bpa > (u_long)KERNEL_BASE) {
		/* XXX This is a temporary hack to aid transition. */
		*bshp = bpa;
		return(0);
	}

	startpa = trunc_page(bpa);
	endpa = round_page(bpa + size);

	/* XXX use extent manager to check duplicate mapping */

	va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (! va)
		return(ENOMEM);

	*bshp = (bus_space_handle_t)(va + (bpa - startpa));

	const int pmapflags =
	    (flags & (BUS_SPACE_MAP_CACHEABLE|BUS_SPACE_MAP_PREFETCHABLE))
		? 0
		: PMAP_NOCACHE; 
	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ|VM_PROT_WRITE, pmapflags);
	}
	pmap_update(pmap_kernel());

	return(0);
}
Example #5
int
acpi_map(paddr_t pa, size_t len, struct acpi_mem_map *handle)
{
	paddr_t pgpa = trunc_page(pa);
	paddr_t endpa = round_page(pa + len);
	vaddr_t va = (vaddr_t)km_alloc(endpa - pgpa, &kv_any, &kp_none,
	    &kd_nowait);

	if (va == 0)
		return (ENOMEM);

	handle->baseva = va;
	handle->va = (u_int8_t *)(va + (pa & PGOFSET));
	handle->vsize = endpa - pgpa;
	handle->pa = pa;

	do {
		pmap_kenter_pa(va, pgpa, VM_PROT_READ | VM_PROT_WRITE);
		va += NBPG;
		pgpa += NBPG;
	} while (pgpa < endpa);
	pmap_update(pmap_kernel());

	return 0;
}
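The reverse operation tears down the wired entries and returns the virtual range to the kernel allocator. Below is a minimal sketch of an acpi_unmap counterpart, assuming only the struct acpi_mem_map fields filled in above; it is illustrative, not a quote of the real routine.
void
acpi_unmap(struct acpi_mem_map *handle)
{
	/* Remove the wired kernel mappings entered by acpi_map(). */
	pmap_kremove(handle->baseva, handle->vsize);
	pmap_update(pmap_kernel());

	/* Give the virtual range back to the kernel allocator. */
	km_free((void *)handle->baseva, handle->vsize, &kv_any, &kp_none);
}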
Example #6
/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
    struct pmap *upmap;
    vaddr_t uva;	/* User VA (map from) */
    vaddr_t kva;	/* Kernel VA (new to) */
    paddr_t pa; 	/* physical address */
    vsize_t off;

    if ((bp->b_flags & B_PHYS) == 0)
        panic("vmapbuf");

    bp->b_saveaddr = bp->b_data;
    uva = trunc_page((vaddr_t)bp->b_data);
    off = (vaddr_t)bp->b_data - uva;
    len = round_page(off + len);
    kva = uvm_km_alloc(kernel_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
    bp->b_data = (void *)(kva + off);

    upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
    do {
        if (pmap_extract(upmap, uva, &pa) == FALSE)
            panic("vmapbuf: null page frame");
        /* Now map the page into kernel space. */
        pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);

        uva += PAGE_SIZE;
        kva += PAGE_SIZE;
        len -= PAGE_SIZE;
    } while (len);
    pmap_update(pmap_kernel());

    return 0;
}
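A matching vunmapbuf has to drop those wired mappings, free the kernel VA, and restore b_data; a minimal sketch under the same assumptions (NetBSD-style uvm_km_alloc/uvm_km_free, B_PHYS buffers), not the verbatim tree code:
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;
	vsize_t off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	kva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - kva;
	len = round_page(off + len);

	/* Remove the wired mappings and flush before freeing the KVA. */
	pmap_kremove(kva, len);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, kva, len, UVM_KMF_VAONLY);

	/* Restore the original user buffer address. */
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}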
Example #7
/*
 * Map a user I/O request into kernel virtual address space.
 */
int
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;	/* Kernel VA (new to) */

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");

	vaddr_t uva = mips_trunc_page(bp->b_data);
	const vaddr_t off = (vaddr_t)bp->b_data - uva;
	len = mips_round_page(off + len);

	kva = uvm_km_alloc(phys_map, len, atop(uva) & uvmexp.colormask,
	    UVM_KMF_VAONLY | UVM_KMF_WAITVA | UVM_KMF_COLORMATCH);
	KASSERT((atop(kva ^ uva) & uvmexp.colormask) == 0);
	bp->b_saveaddr = bp->b_data;
	bp->b_data = (void *)(kva + off);
	struct pmap * const upmap = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	do {
		paddr_t pa;	/* physical address */
		if (pmap_extract(upmap, uva, &pa) == false)
			panic("vmapbuf: null page frame");
		pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE,
		    PMAP_WIRED);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	} while (len);
	pmap_update(pmap_kernel());

	return 0;
}
Example #8
/*
 * Initialize the GDT subsystem.  Called from autoconf().
 */
void
gdt_init()
{
	size_t max_len, min_len;
	struct vm_page *pg;
	vaddr_t va;
	struct cpu_info *ci = &cpu_info_primary;

	simple_lock_init(&gdt_simplelock);
	lockinit(&gdt_lock_store, PZERO, "gdtlck", 0, 0);

	max_len = MAXGDTSIZ * sizeof(union descriptor);
	min_len = MINGDTSIZ * sizeof(union descriptor);

	gdt_size = MINGDTSIZ;
	gdt_count = NGDT;
	gdt_next = NGDT;
	gdt_free = GNULL_SEL;

	gdt = (union descriptor *)uvm_km_valloc(kernel_map, max_len);
	for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + min_len; va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL)
			panic("gdt_init: no pages");
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    VM_PROT_READ | VM_PROT_WRITE);
	}
	bcopy(bootstrap_gdt, gdt, NGDT * sizeof(union descriptor));
	ci->ci_gdt = gdt;
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
	    SDT_MEMRWA, SEL_KPL, 0, 0);

	gdt_init_cpu(ci);
}
Example #9
/*
 * Grow the GDT.
 */
void
gdt_grow()
{
	size_t old_len, new_len;
	CPU_INFO_ITERATOR cii;
	struct cpu_info *ci;
	struct vm_page *pg;
	vaddr_t va;

	old_len = gdt_size * sizeof(union descriptor);
	gdt_size <<= 1;
	new_len = old_len << 1;

	CPU_INFO_FOREACH(cii, ci) {
		for (va = (vaddr_t)(ci->ci_gdt) + old_len;
		     va < (vaddr_t)(ci->ci_gdt) + new_len;
		     va += PAGE_SIZE) {
			while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO))
			    == NULL) {
				uvm_wait("gdt_grow");
			}
			pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ | VM_PROT_WRITE);
		}
	}
}
Example #10
vaddr_t vmm_km_zalloc(size_t size) {
  // Pre-kernel-heap unmanaged memory allocator.
  // This should only be used before kheap_init has been called.
  static vaddr_t placement_addr = 0;
  if(placement_addr == 0) {
    pmap_virtual_space(NULL, &kernel_vend);
    placement_addr = kernel_vend;
  }

  // Make sure enough memory is left!
  kassert((UINT32_MAX - placement_addr) >= size);

  vaddr_t start = placement_addr;
  vaddr_t end = placement_addr + size;

  // Map more pages if the request extends past the mapped kernel area
  if(end >= kernel_vend) {
    // Allocate and map pages until the requested size can be served
    for( ; kernel_vend < end; kernel_vend += PAGESIZE) {
      paddr_t pa = pmm_alloc();
      pmap_kenter_pa(kernel_vend, pa, VM_PROT_DEFAULT, PMAP_WRITE_BACK);
    }
  }

  // Zero the memory
  memset((void*)placement_addr, 0x0, size);

  placement_addr = end;

  return(start);
}
Example #11
// TODO: This function shouldn't need to exist. Find another way
vaddr_t vmm_km_heap_extend(size_t size) {
  vregion_t *region = &vmap_kernel()->regions[2];
  kassert((UINT32_MAX - region->vend) > ROUND_PAGE(size));

  vaddr_t prev_vend = region->vend;
  region->vend += ROUND_PAGE(size);

  for(vaddr_t va = prev_vend; va < region->vend; va += PAGESIZE) {
    // Allocate a free page if one should be available else panic
    paddr_t pa = pmm_alloc();
    kassert(pa != UINTPTR_MAX);

    // TODO: Use pmap_enter here instead
    pmap_kenter_pa(va, pa, region->vm_prot, PMAP_WIRED | PMAP_WRITE_COMBINE);

    // Enter the information into the amap
    region->aref.amap->aslots[(va - region->vstart) / PAGESIZE]->page->vaddr = va;
  }

  memset((void*)prev_vend, 0, PAGESIZE);
  vmap_kernel()->heap_end = region->vend;

  uint32_t new_size = region->vend - region->vstart;
  region->aref.amap->maxslots = region->aref.amap->nslots = new_size / PAGESIZE;

  return prev_vend;
}
Example #12
int
vme_map_r(const struct vme_range *r, paddr_t pa, psize_t len, int flags,
    vm_prot_t prot, vaddr_t *rva)
{
	vaddr_t ova, va;
	u_int pg;

	ova = va = uvm_km_valloc(kernel_map, len);
	if (va == 0)
		return ENOMEM;

	pa += r->vr_base;
	for (pg = atop(len); pg != 0; pg--) {
		pmap_kenter_pa(va, pa, prot);
		va += PAGE_SIZE;
		pa += PAGE_SIZE;
	}
	if (flags & BUS_SPACE_MAP_CACHEABLE)
		pmap_cache_ctrl(ova, ova + len, CACHE_GLOBAL);
	pmap_update(pmap_kernel());

	*rva = ova;

	return 0;
}
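Teardown is symmetric: remove the kernel entries and release the VA range. The vme_unmap_r name below is hypothetical; the sketch assumes the older three-argument uvm_km_free() that pairs with uvm_km_valloc():
void
vme_unmap_r(vaddr_t va, psize_t len)
{
	vaddr_t sva = trunc_page(va);
	vsize_t sz = round_page(len);

	/* Drop the mappings entered by vme_map_r(). */
	pmap_kremove(sva, sz);
	pmap_update(pmap_kernel());

	/* Release the kernel virtual range. */
	uvm_km_free(kernel_map, sva, sz);
}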
Example #13
vaddr_t pmap_steal_memory(size_t vsize) {
	// pmap_init must be called before this function can be used, otherwise
	// kernel_vend will be an incorrect value.
	// kernel_vend and kernel_pend should be page aligned.
	// This function should only be used before pmm_init is called.
	static vaddr_t placement_addr = 0;
	placement_addr = (placement_addr == 0) ? kernel_vend : placement_addr;

	// Make sure enough memory is left!
	kassert((UINT32_MAX - placement_addr) >= vsize);

	vaddr_t start = placement_addr;
	vaddr_t end = placement_addr + vsize;

	// Map more pages if the request extends past the mapped kernel area
	if(end >= kernel_vend) {
		// Map pages with pmap_kenter_pa, advancing kernel_pend and kernel_vend
		for(; kernel_vend < end; kernel_vend += PAGESIZE, kernel_pend += PAGESIZE) {
			pmap_kenter_pa(kernel_vend, kernel_pend, VM_PROT_DEFAULT, PMAP_WRITE_BACK);
		}
	}

	// Zero the memory
	memset((void*)placement_addr, 0x0, vsize);

	placement_addr = end;

	return(start);
}
Example #14
/*
 * MD-specific resume preparation (creating resume time pagetables,
 * stacks, etc).
 */
void
hibernate_prepare_resume_machdep(union hibernate_info *hib_info)
{
	paddr_t pa, piglet_end;
	vaddr_t va;

	/*
	 * At this point, we are sure that the piglet's phys space is going to
	 * have been unused by the suspending kernel, but the vaddrs used by
	 * the suspending kernel may or may not be available to us here in the
	 * resuming kernel, so we allocate a new range of VAs for the piglet.
	 * Those VAs will be temporary and will cease to exist as soon as we
	 * switch to the resume PT, so we need to ensure that any VAs required
	 * during inflate are also entered into that map.
	 */

	hib_info->piglet_va = (vaddr_t)km_alloc(HIBERNATE_CHUNK_SIZE*3,
	    &kv_any, &kp_none, &kd_nowait);
	if (!hib_info->piglet_va)
		panic("Unable to allocate vaddr for hibernate resume piglet\n");

	piglet_end = hib_info->piglet_pa + HIBERNATE_CHUNK_SIZE*3;

	for (pa = hib_info->piglet_pa, va = hib_info->piglet_va;
	    pa <= piglet_end; pa += PAGE_SIZE, va += PAGE_SIZE)
		pmap_kenter_pa(va, pa, VM_PROT_ALL);

	pmap_activate(curproc);
}
Example #15
/*
 * Map an IO request into kernel virtual address space.
 */
void
vmapbuf(struct buf *bp, vsize_t len)
{
	struct pmap *pm = vm_map_pmap(&bp->b_proc->p_vmspace->vm_map);
	vaddr_t kva, uva;
	vsize_t size, off;

#ifdef DIAGNOSTIC
	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
#endif
	bp->b_saveaddr = bp->b_data;
	uva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	size = round_page(off + len);

	kva = uvm_km_valloc_prefer_wait(phys_map, size, uva);
	bp->b_data = (caddr_t)(kva + off);
	while (size > 0) {
		paddr_t pa;

		if (pmap_extract(pm, uva, &pa) == FALSE)
			panic("vmapbuf: null page frame");
		else
			pmap_kenter_pa(kva, pa, UVM_PROT_RW);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
}
Example #16
/*
 * Allocate shadow GDT for a slave CPU.
 */
void
gdt_alloc_cpu(struct cpu_info *ci)
{
    int max_len = MAXGDTSIZ * sizeof(gdt[0]);
    int min_len = MINGDTSIZ * sizeof(gdt[0]);
    struct vm_page *pg;
    vaddr_t va;

    ci->ci_gdt = (union descriptor *)uvm_km_alloc(kernel_map, max_len,
                 0, UVM_KMF_VAONLY);
    for (va = (vaddr_t)ci->ci_gdt; va < (vaddr_t)ci->ci_gdt + min_len;
            va += PAGE_SIZE) {
        while ((pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO))
                == NULL) {
            uvm_wait("gdt_alloc_cpu");
        }
        pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
                       VM_PROT_READ | VM_PROT_WRITE);
    }
    pmap_update(pmap_kernel());
    memset(ci->ci_gdt, 0, min_len);
    memcpy(ci->ci_gdt, gdt, gdt_count[0] * sizeof(gdt[0]));
    setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, 0xfffff,
               SDT_MEMRWA, SEL_KPL, 1, 1);
}
Example #17
/*
 * Map a user I/O request into kernel virtual address space.
 * Note: the pages are already locked by uvm_vslock(), so we
 * do not need to pass an access_type to pmap_enter().   
 */
void
vmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t faddr, taddr, off;
	paddr_t fpa;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vmapbuf");
	faddr = trunc_page((vaddr_t)(bp->b_saveaddr = bp->b_data));
	off = (vaddr_t)bp->b_data - faddr;
	len = round_page(off + len);
	taddr = uvm_km_valloc_wait(phys_map, len);
	bp->b_data = (caddr_t)(taddr + off);
	/*
	 * The region is locked, so we expect that pmap_pte() will return
	 * non-NULL.
	 * XXX: unwise to expect this in a multithreaded environment.
	 * anything can happen to a pmap between the time we lock a 
	 * region, release the pmap lock, and then relock it for
	 * the pmap_extract().
	 *
	 * no need to flush TLB since we expect nothing to be mapped
	 * where we just allocated (TLB will be flushed when our
	 * mapping is removed).
	 */
	while (len) {
		(void) pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    faddr, &fpa);
		pmap_kenter_pa(taddr, fpa, PROT_READ | PROT_WRITE);
		faddr += PAGE_SIZE;
		taddr += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
}
Example #18
/*
 * Initialize the GDT subsystem.  Called from autoconf().
 */
void
gdt_init(void)
{
	struct vm_page *pg;
	vaddr_t va;
	struct cpu_info *ci = &cpu_info_primary;

	gdt_next = NGDT;
	gdt_free = GNULL_SEL;

	gdt = (union descriptor *)uvm_km_valloc(kernel_map, MAXGDTSIZ);
	for (va = (vaddr_t)gdt; va < (vaddr_t)gdt + MAXGDTSIZ;
	    va += PAGE_SIZE) {
		pg = uvm_pagealloc(NULL, 0, NULL, UVM_PGA_ZERO);
		if (pg == NULL)
			panic("gdt_init: no pages");
		pmap_kenter_pa(va, VM_PAGE_TO_PHYS(pg),
		    PROT_READ | PROT_WRITE);
	}
	bcopy(bootstrap_gdt, gdt, NGDT * sizeof(union descriptor));
	ci->ci_gdt = gdt;
	setsegment(&ci->ci_gdt[GCPU_SEL].sd, ci, sizeof(struct cpu_info)-1,
	    SDT_MEMRWA, SEL_KPL, 0, 0);

	gdt_init_cpu(ci);
}
Example #19
int
obio_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	const struct pmap_devmap *pd;
	paddr_t startpa, endpa, pa, offset;
	vaddr_t va;
	pt_entry_t *pte;

	if ((pd = pmap_devmap_find_pa(bpa, size)) != NULL) {
		/* Device was statically mapped. */
		*bshp = pd->pd_va + (bpa - pd->pd_pa);
		return (0);
	}

	endpa = round_page(bpa + size);
	offset = bpa & PAGE_MASK;
	startpa = trunc_page(bpa);
		
	va = uvm_km_valloc(kernel_map, endpa - startpa);
	if (va == 0)
		return ENOMEM;

	*bshp = va + offset;

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		pte = vtopte(va);
		*pte &= ~L2_S_CACHE_MASK;
		PTE_SYNC(pte);
	}
	pmap_update(pmap_kernel());

	return (0);
}
Example #20
/*
 * dev_mem_readwrite: helper for DEV_MEM (/dev/mem) case of R/W.
 */
static int
dev_mem_readwrite(struct uio *uio, struct iovec *iov)
{
	paddr_t paddr;
	vaddr_t vaddr;
	vm_prot_t prot;
	size_t len, offset;
	bool have_direct;
	int error;

	/* Check for wrap around. */
	if ((intptr_t)uio->uio_offset != uio->uio_offset) {
		return EFAULT;
	}
	paddr = uio->uio_offset & ~PAGE_MASK;
	prot = (uio->uio_rw == UIO_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;
	error = mm_md_physacc(paddr, prot);
	if (error) {
		return error;
	}
	offset = uio->uio_offset & PAGE_MASK;
	len = MIN(uio->uio_resid, PAGE_SIZE - offset);

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	/* Is physical address directly mapped?  Return VA. */
	have_direct = mm_md_direct_mapped_phys(paddr, &vaddr);
#else
	vaddr = 0;
	have_direct = false;
#endif
	if (!have_direct) {
		/* Get a special virtual address. */
		const vaddr_t va = dev_mem_getva(paddr);

		/* Map selected KVA to physical address. */
		mutex_enter(&dev_mem_lock);
		pmap_kenter_pa(va, paddr, prot, 0);
		pmap_update(pmap_kernel());

		/* Perform I/O. */
		vaddr = va + offset;
		error = uiomove((void *)vaddr, len, uio);

		/* Unmap, flush before unlock. */
		pmap_kremove(va, PAGE_SIZE);
		pmap_update(pmap_kernel());
		mutex_exit(&dev_mem_lock);

		/* "Release" the virtual address. */
		dev_mem_relva(paddr, va);
	} else {
		/* Direct map, just perform I/O. */
		vaddr += offset;
		error = uiomove((void *)vaddr, len, uio);
	}
	return error;
}
Example #21
int
au_himem_map(void *cookie, bus_addr_t addr, bus_size_t size,
    int flags, bus_space_handle_t *bshp, int acct)
{
	au_himem_cookie_t	*c = (au_himem_cookie_t *)cookie;
	int			err;
	paddr_t			pa;
	vaddr_t			va;
	vsize_t			realsz;
	int			s;

	/* make sure we can map this bus address */
	if (addr < c->c_start || (addr + size) > c->c_end) {
		return EINVAL;
	}

	/* physical address, page aligned */
	pa = TRUNC_PAGE(c->c_physoff + addr);

	/*
	 * we are only going to work with whole pages.  the
	 * calculation is the offset into the first page, plus the
	 * intended size, rounded up to a whole number of pages.
	 */
	realsz = ROUND_PAGE((addr % PAGE_SIZE) + size);

	va = uvm_km_alloc(kernel_map,
	    realsz, PAGE_SIZE, UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (va == 0) {
		return ENOMEM;
	}

	/* virtual address in handle (offset appropriately) */
	*bshp = va + (addr % PAGE_SIZE);

	/* map the pages in the kernel pmap */
	s = splhigh();
	while (realsz) {
		pmap_kenter_pa(va, pa, VM_PROT_READ | VM_PROT_WRITE);
		pa += PAGE_SIZE;
		va += PAGE_SIZE;
		realsz -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	splx(s);

	/* record our allocated range of bus addresses */
	if (acct && c->c_extent != NULL) {
		err = extent_alloc_region(c->c_extent, addr, size, EX_NOWAIT);
		if (err) {
			au_himem_unmap(cookie, *bshp, size, 0);
			return err;
		}
	}

	return 0;
}
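The error path above already leans on au_himem_unmap(); a simplified sketch of what such a teardown has to do, assuming the same TRUNC_PAGE/ROUND_PAGE helpers and leaving out the extent accounting for brevity:
void
au_himem_unmap(void *cookie, bus_space_handle_t bsh, bus_size_t size, int acct)
{
	vaddr_t		va;
	vsize_t		realsz;
	int		s;

	/* recover the page-aligned VA range that was mapped */
	va = (vaddr_t)TRUNC_PAGE(bsh);
	realsz = ROUND_PAGE((bsh % PAGE_SIZE) + size);

	s = splhigh();
	pmap_kremove(va, realsz);
	pmap_update(pmap_kernel());
	splx(s);

	/* return the kernel VA */
	uvm_km_free(kernel_map, va, realsz, UVM_KMF_VAONLY);

	/* a full version would also extent_free() the bus range when acct is set */
}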
Example #22
/*
 * Create writeable aliases of memory we need
 * to write to as kernel is mapped read-only
 */
void *codepatch_maprw(vaddr_t *nva, vaddr_t dest)
{
	paddr_t kva = trunc_page((paddr_t)dest);
	paddr_t po = (paddr_t)dest & PAGE_MASK;
	paddr_t pa1, pa2;

	if (*nva == 0)
		*nva = (vaddr_t)km_alloc(2 * PAGE_SIZE, &kv_any, &kp_none,
					&kd_waitok);

	pmap_extract(pmap_kernel(), kva, &pa1);
	pmap_extract(pmap_kernel(), kva + PAGE_SIZE, &pa2);
	pmap_kenter_pa(*nva, pa1, PROT_READ | PROT_WRITE);
	pmap_kenter_pa(*nva + PAGE_SIZE, pa2, PROT_READ | PROT_WRITE);
	pmap_update(pmap_kernel());

	return (void *)(*nva + po);
}
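Once the patch has been written through the alias, the writeable mapping should go away again; a minimal sketch of the matching unmap step, assuming the two-page alias allocated above:
void
codepatch_unmaprw(vaddr_t nva)
{
	if (nva == 0)
		return;

	/* Drop the two writeable alias pages set up by codepatch_maprw(). */
	pmap_kremove(nva, 2 * PAGE_SIZE);
	pmap_update(pmap_kernel());
}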
Example #23
/*
 * Make a kernel mapping valid for I/O, e.g. non-cachable.
 * Alignment and length constraints are as-if NBPG==PAGE_SIZE.
 */
int
ioaccess(vaddr_t vaddr, paddr_t paddr, vsize_t len)
{

	while (len > PAGE_SIZE) {
		pmap_kenter_pa(vaddr, paddr, VM_PROT_WRITE, 0);
		len -= PAGE_SIZE;
		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	if (len) {
		/* We could warn.. */
		pmap_kenter_pa(vaddr, paddr, VM_PROT_WRITE, 0);
	}

	/* BUGBUG should use pmap_enter() instead and check results! */
	return 0;
}
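The inverse is simply an unmap of the same range; a minimal iounaccess-style sketch under the same page-granularity assumptions (paddr is kept only for symmetry with ioaccess()):
int
iounaccess(vaddr_t vaddr, paddr_t paddr, vsize_t len)
{
	/* Remove the kernel I/O mappings covering the range. */
	pmap_kremove(vaddr, round_page(len));
	pmap_update(pmap_kernel());
	return 0;
}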
Example #24
int
obio_iomem_add_mapping(bus_addr_t bpa, bus_size_t size, int type,
    bus_space_handle_t *bshp)
{
	u_long pa, endpa;
	vaddr_t va;
	pt_entry_t *pte;
	unsigned int m = 0;
	int io_type = type & ~OBIO_IOMEM_PCMCIA_8BIT;

	pa = trunc_page(bpa);
	endpa = round_page(bpa + size);

#ifdef DIAGNOSTIC
	if (endpa <= pa)
		panic("obio_iomem_add_mapping: overflow");
#endif

	va = uvm_km_valloc(kernel_map, endpa - pa);
	if (va == 0)
		return (ENOMEM);

	*bshp = (bus_space_handle_t)(va + (bpa & PGOFSET));

#define MODE(t, s)							\
	((t) & OBIO_IOMEM_PCMCIA_8BIT) ?				\
		_PG_PCMCIA_ ## s ## 8 :					\
		_PG_PCMCIA_ ## s ## 16
	switch (io_type) {
	default:
		panic("unknown pcmcia space.");
		/* NOTREACHED */
	case OBIO_IOMEM_PCMCIA_IO:
		m = MODE(type, IO);
		break;
	case OBIO_IOMEM_PCMCIA_MEM:
		m = MODE(type, MEM);
		break;
	case OBIO_IOMEM_PCMCIA_ATT:
		m = MODE(type, ATTR);
		break;
	}
#undef MODE

	for (; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
		pte = __pmap_kpte_lookup(va);
		KDASSERT(pte);
		*pte |= m;  /* PTEA PCMCIA assistant bit */
		sh_tlb_update(0, va, *pte);
	}

	return (0);
}
Example #25
int
footbridge_mem_bs_map(void *t, bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	bus_addr_t startpa, endpa, pa;
	vaddr_t va;

	/* Round the allocation to page boundaries */
	startpa = trunc_page(bpa);
	endpa = round_page(bpa + size);

	/*
	 * Check for mappings below 1MB as we have this space already
	 * mapped. In practice it is only the VGA hole that takes
	 * advantage of this.
	 */
	if (endpa < DC21285_PCI_ISA_MEM_VSIZE) {
		/* Store the bus space handle */
		*bshp = DC21285_PCI_ISA_MEM_VBASE + bpa;
		return 0;
	}

	/*
	 * Eventually this function will do the mapping check for overlapping / 
	 * multiple mappings
	 */

	va = uvm_km_alloc(kernel_map, endpa - startpa, 0,
	    UVM_KMF_VAONLY | UVM_KMF_NOWAIT);
	if (va == 0)
		return ENOMEM;

	/* Store the bus space handle */
	*bshp = va + (bpa & PGOFSET);

	/* Now map the pages */
	/* The cookie is the physical base address for the I/O area */
	const int pmapflags =
	    (flags & (BUS_SPACE_MAP_CACHEABLE|BUS_SPACE_MAP_PREFETCHABLE))
		? 0
		: PMAP_NOCACHE;

	for (pa = startpa; pa < endpa; pa += PAGE_SIZE, va += PAGE_SIZE) {
		pmap_kenter_pa(va, (bus_addr_t)t + pa,
		    VM_PROT_READ | VM_PROT_WRITE, pmapflags);
	}
	pmap_update(pmap_kernel());

/*	if (bpa >= DC21285_PCI_MEM_VSIZE && bpa != DC21285_ARMCSR_VBASE)
		panic("footbridge_bs_map: Address out of range (%08lx)", bpa);
*/
	return(0);
}
Example #26
void
buf_map(struct buf *bp)
{
	vaddr_t va;

	splassert(IPL_BIO);

	if (bp->b_data == NULL) {
		unsigned long i;

		/*
		 * First, just use the pre-allocated space until we run out.
		 */
		if (buf_kva_start < buf_kva_end) {
			va = buf_kva_start;
			buf_kva_start += MAXPHYS;
			bcstats.kvaslots_avail--;
		} else {
			struct buf *vbp;

			/*
			 * Find some buffer we can steal the space from.
			 */
			while ((vbp = TAILQ_FIRST(&buf_valist)) == NULL) {
				buf_needva++;
				buf_nkvmsleep++;
				tsleep(&buf_needva, PRIBIO, "buf_needva", 0);
			}
			va = buf_unmap(vbp);
		}

		mtx_enter(&bp->b_pobj->vmobjlock);
		for (i = 0; i < atop(bp->b_bufsize); i++) {
			struct vm_page *pg = uvm_pagelookup(bp->b_pobj,
			    bp->b_poffs + ptoa(i));

			KASSERT(pg != NULL);

			pmap_kenter_pa(va + ptoa(i), VM_PAGE_TO_PHYS(pg),
			    VM_PROT_READ|VM_PROT_WRITE);
		}
		mtx_leave(&bp->b_pobj->vmobjlock);
		pmap_update(pmap_kernel());
		bp->b_data = (caddr_t)va;
	} else {
		TAILQ_REMOVE(&buf_valist, bp, b_valist);
		bcstats.kvaslots_avail--;
	}

	bcstats.busymapped++;

	CLR(bp->b_flags, B_NOTMAPPED);
}
Example #27
void
uvm_emap_enter(vaddr_t va, struct vm_page **pgs, u_int npages)
{
	paddr_t pa;
	u_int n;

	for (n = 0; n < npages; n++, va += PAGE_SIZE) {
		pa = VM_PAGE_TO_PHYS(pgs[n]);
		pmap_kenter_pa(va, pa, VM_PROT_READ, 0);
	}
	pmap_update(pmap_kernel());
}
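The ephemeral mapping is torn down the same way it was entered; a minimal sketch of the matching remove, assuming the same page-aligned range:
void
uvm_emap_remove(vaddr_t sva, vsize_t len)
{
	/* Tear down the read-only ephemeral mappings. */
	pmap_kremove(sva, len);
	pmap_update(pmap_kernel());
}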
Example #28
int
mp_cpu_start(struct cpu_info *ci)
{
	unsigned short dwordptr[2];

	/*
	 * "The BSP must initialize CMOS shutdown code to 0Ah ..."
	 */

	outb(IO_RTC, NVRAM_RESET);
	outb(IO_RTC+1, NVRAM_RESET_JUMP);

	/*
	 * "and the warm reset vector (DWORD based at 40:67) to point
	 * to the AP startup code ..."
	 */

	dwordptr[0] = 0;
	dwordptr[1] = MP_TRAMPOLINE >> 4;

	pmap_activate(curproc);

	pmap_kenter_pa(0, 0, PROT_READ | PROT_WRITE);
	memcpy((u_int8_t *)0x467, dwordptr, 4);
	pmap_kremove(0, PAGE_SIZE);

#if NLAPIC > 0
	/*
	 * ... prior to executing the following sequence:"
	 */

	if (ci->ci_flags & CPUF_AP) {
		i386_ipi_init(ci->ci_apicid);

		delay(10000);

		if (cpu_feature & CPUID_APIC) {
			i386_ipi(MP_TRAMPOLINE / PAGE_SIZE, ci->ci_apicid,
			    LAPIC_DLMODE_STARTUP);
			delay(200);

			i386_ipi(MP_TRAMPOLINE / PAGE_SIZE, ci->ci_apicid,
			    LAPIC_DLMODE_STARTUP);
			delay(200);
		}
	}
#endif
	return (0);
}
Example #29
int
_bus_dmamem_map(bus_dma_tag_t t, bus_dma_segment_t *segs, int nsegs,
    size_t size, caddr_t *kvap, int flags)
{
	vaddr_t va;
	bus_addr_t addr;
	int curseg;
	const struct kmem_dyn_mode *kd;

	DPRINTF(("bus_dmamem_map: t = %p, segs = %p, nsegs = %d, size = %d, kvap = %p, flags = %x\n", t, segs, nsegs, size, kvap, flags));

	/*
	 * If we're only mapping 1 segment, use P2SEG, to avoid
	 * TLB thrashing.
	 */
	if (nsegs == 1) {
		if (flags & BUS_DMA_COHERENT) {
			*kvap = (caddr_t)SH3_PHYS_TO_P2SEG(segs[0].ds_addr);
		} else {
			*kvap = (caddr_t)SH3_PHYS_TO_P1SEG(segs[0].ds_addr);
		}
		DPRINTF(("bus_dmamem_map: addr = 0x%08lx, kva = %p\n", segs[0].ds_addr, *kvap));
		return 0;
	}

	/* Always round the size. */
	size = round_page(size);
	kd = flags & BUS_DMA_NOWAIT ? &kd_trylock : &kd_waitok;
	va = (vaddr_t)km_alloc(size, &kv_any, &kp_none, kd);
	if (va == 0)
		return (ENOMEM);

	*kvap = (caddr_t)va;
	for (curseg = 0; curseg < nsegs; curseg++) {
		DPRINTF(("bus_dmamem_map: segs[%d]: ds_addr = 0x%08lx, ds_len = %ld\n", curseg, segs[curseg].ds_addr, segs[curseg].ds_len));
		for (addr = segs[curseg].ds_addr;
		     addr < segs[curseg].ds_addr + segs[curseg].ds_len;
		     addr += PAGE_SIZE, va += PAGE_SIZE, size -= PAGE_SIZE) {
			if (size == 0)
				panic("_bus_dmamem_map: size botch");
			pmap_kenter_pa(va, addr,
			    PROT_READ | PROT_WRITE);
		}
	}
	pmap_update(pmap_kernel());

	return (0);
}
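The unmap side has to recognize the single-segment case, which handed out P1SEG/P2SEG addresses and never consumed kernel VA. A sketch of a matching _bus_dmamem_unmap, assuming sh3 address-space bounds macros (SH3_P1SEG_BASE, SH3_P2SEG_END) alongside the SH3_PHYS_TO_P1SEG()/P2SEG() converters used above:
void
_bus_dmamem_unmap(bus_dma_tag_t t, caddr_t kva, size_t size)
{
	vaddr_t va = (vaddr_t)kva;

	/* Single-segment mappings are direct P1SEG/P2SEG addresses: nothing to free. */
	if (va >= SH3_P1SEG_BASE && va <= SH3_P2SEG_END)
		return;

	size = round_page(size);
	pmap_kremove(va, size);
	pmap_update(pmap_kernel());
	km_free((void *)va, size, &kv_any, &kp_none);
}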
Example #30
int
mbus_add_mapping(bus_addr_t bpa, bus_size_t size, int flags,
    bus_space_handle_t *bshp)
{
	paddr_t spa, epa;
	int bank, off;

	if ((bank = vm_physseg_find(atop(bpa), &off)) >= 0)
		panic("mbus_add_mapping: mapping real memory @0x%lx", bpa);

	for (spa = trunc_page(bpa), epa = bpa + size;
	     spa < epa; spa += PAGE_SIZE)
		pmap_kenter_pa(spa, spa, UVM_PROT_RW);

	*bshp = bpa;
	return (0);
}