Example #1
/*
 * Quiesce CPUs in a multiprocessor machine before resuming. We need to do
 * this since the APs will be hatched (but waiting for CPUF_GO), and we don't
 * want the APs to be executing code and causing side effects during the
 * unpack operation.
 */
void
hibernate_quiesce_cpus(void)
{
	struct cpu_info *ci;
	u_long i;

	KASSERT(CPU_IS_PRIMARY(curcpu()));

	pmap_kenter_pa(ACPI_TRAMPOLINE, ACPI_TRAMPOLINE, PROT_READ | PROT_EXEC);
	pmap_kenter_pa(ACPI_TRAMP_DATA, ACPI_TRAMP_DATA,
		PROT_READ | PROT_WRITE);

	for (i = 0; i < MAXCPUS; i++) {
		ci = cpu_info[i];
		if (ci == NULL)
			continue;
		if (ci->ci_idle_pcb == NULL)
			continue;
		if ((ci->ci_flags & CPUF_PRESENT) == 0)
			continue;
		if (ci->ci_flags & (CPUF_BSP | CPUF_SP | CPUF_PRIMARY))
			continue;
		atomic_setbits_int(&ci->ci_flags, CPUF_GO | CPUF_PARK);
	}

	/* Wait a bit for the APs to park themselves */
	delay(500000);

	pmap_kremove(ACPI_TRAMPOLINE, PAGE_SIZE);
	pmap_kremove(ACPI_TRAMP_DATA, PAGE_SIZE);
}
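The function above uses the idiom behind most of these examples: establish a temporary wired mapping with pmap_kenter_pa(), use it, then remove it with pmap_kremove(). A minimal hypothetical sketch of that idiom, assuming the OpenBSD three-argument pmap_kenter_pa() and a caller-supplied scratch VA (scratch_window_zero is not from any of the sources quoted here):
/*
 * Hypothetical sketch: temporarily map 'pa' at the scratch address
 * 'va', zero the page through the window, then tear the mapping down
 * and flush with pmap_update() so no stale TLB entry survives.
 */
static void
scratch_window_zero(vaddr_t va, paddr_t pa)
{
	pmap_kenter_pa(va, pa, PROT_READ | PROT_WRITE);
	memset((void *)va, 0, PAGE_SIZE);
	pmap_kremove(va, PAGE_SIZE);
	pmap_update(pmap_kernel());
}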
Example #2
/*
 * This function is expected to be called in a critical section since it
 * changes the per-CPU PCI config space VA-to-PA mappings.
 */
static vm_offset_t
zbpci_config_space_va(int bus, int slot, int func, int reg, int bytes)
{
	int cpu;
	vm_offset_t va_page;
	vm_paddr_t pa, pa_page;

	if (bus <= PCI_BUSMAX && slot <= PCI_SLOTMAX && func <= PCI_FUNCMAX &&
	    reg <= PCI_REGMAX && (bytes == 1 || bytes == 2 || bytes == 4) &&
	    ((reg & (bytes - 1)) == 0)) {
		cpu = PCPU_GET(cpuid);
		va_page = zbpci_config_space[cpu].vaddr;
		pa = CFG_PADDR_BASE |
		     (bus << 16) | (slot << 11) | (func << 8) | reg;
#if _BYTE_ORDER == _BIG_ENDIAN
		pa = pa ^ (4 - bytes);
#endif
		pa_page = rounddown2(pa, PAGE_SIZE);
		if (zbpci_config_space[cpu].paddr != pa_page) {
			pmap_kremove(va_page);
			pmap_kenter_attr(va_page, pa_page, PTE_C_UNCACHED);
			zbpci_config_space[cpu].paddr = pa_page;
		}
		return (va_page + (pa - pa_page));
	} else {
		return (0);
	}
}
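A hypothetical caller sketch for the function above (zbpci_cfg_read_sketch is not part of the driver): the critical section keeps the thread on one CPU, so the per-CPU window cannot be remapped underneath the access.
/* Sketch: read a 32-bit config register through the per-CPU window. */
static uint32_t
zbpci_cfg_read_sketch(int bus, int slot, int func, int reg)
{
	vm_offset_t va;
	uint32_t data;

	critical_enter();	/* pin to this CPU across the access */
	va = zbpci_config_space_va(bus, slot, func, reg, 4);
	data = (va != 0) ? *(volatile uint32_t *)va : 0xffffffff;
	critical_exit();
	return (data);
}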
Example #3
void
mpbios_unmap(struct mp_map *handle)
{
    pmap_kremove(handle->baseva, handle->vsize);
    pmap_update(pmap_kernel());
    km_free((void *)handle->baseva, handle->vsize, &kv_any, &kp_none);
}
Example #4
void
obio_iomem_unmap(void *v, bus_space_handle_t bsh, bus_size_t size)
{
	u_long va, endva;
	bus_addr_t bpa;

	if (bsh >= SH3_P2SEG_BASE && bsh <= SH3_P2SEG_END) {
		/* maybe CS0,1,2,3,4,7 */
		return;
	}

	/* CS5,6 */
	va = trunc_page(bsh);
	endva = round_page(bsh + size);

#ifdef DIAGNOSTIC
	if (endva <= va)
		panic("obio_io_unmap: overflow");
#endif

	pmap_extract(pmap_kernel(), va, &bpa);
	bpa += bsh & PGOFSET;

	pmap_kremove(va, endva - va);

	/*
	 * Free the kernel virtual mapping.
	 */
	uvm_km_free(kernel_map, va, endva - va);
}
Example #5
void
codepatch_unmaprw(vaddr_t nva)
{
	if (nva == 0)
		return;
	pmap_kremove(nva, 2 * PAGE_SIZE);
	km_free((void *)nva, 2 * PAGE_SIZE, &kv_any, &kp_none);
}
Example #6
static inline void
mpbios_unmap(struct mp_map *handle)
{
	pmap_kremove(handle->baseva, handle->vsize);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, handle->baseva, handle->vsize, UVM_KMF_VAONLY);
}
Example #7
/*
 * Opposite to the above: just forget the mapping.
 */
int
iounaccess(vaddr_t vaddr, vsize_t len)
{

	pmap_kremove(vaddr, len);
	return 0;
}
Example #8
void
uvm_emap_remove(vaddr_t sva, vsize_t len)
{

	pmap_kremove(sva, len);
	pmap_update(pmap_kernel());
}
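The routines above pair every pmap_kremove() with a pmap_update() on the kernel pmap. The split exists so removals can be batched: pmap_kremove() only edits the page tables, while pmap_update() publishes the change (including any TLB shootdown the port needs). A hedged sketch of that batching, with hypothetical ranges:
/* Sketch: two removals share a single pmap_update() flush. */
static void
batch_unmap_sketch(vaddr_t va1, vaddr_t va2, vsize_t len)
{
	pmap_kremove(va1, len);
	pmap_kremove(va2, len);
	pmap_update(pmap_kernel());	/* one flush covers both ranges */
}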
Example #9
/*
 * dev_mem_readwrite: helper for DEV_MEM (/dev/mem) case of R/W.
 */
static int
dev_mem_readwrite(struct uio *uio, struct iovec *iov)
{
	paddr_t paddr;
	vaddr_t vaddr;
	vm_prot_t prot;
	size_t len, offset;
	bool have_direct;
	int error;

	/* Check for wrap around. */
	if ((intptr_t)uio->uio_offset != uio->uio_offset) {
		return EFAULT;
	}
	paddr = uio->uio_offset & ~PAGE_MASK;
	prot = (uio->uio_rw == UIO_WRITE) ? VM_PROT_WRITE : VM_PROT_READ;
	error = mm_md_physacc(paddr, prot);
	if (error) {
		return error;
	}
	offset = uio->uio_offset & PAGE_MASK;
	len = MIN(uio->uio_resid, PAGE_SIZE - offset);

#ifdef __HAVE_MM_MD_DIRECT_MAPPED_PHYS
	/* Is physical address directly mapped?  Return VA. */
	have_direct = mm_md_direct_mapped_phys(paddr, &vaddr);
#else
	vaddr = 0;
	have_direct = false;
#endif
	if (!have_direct) {
		/* Get a special virtual address. */
		const vaddr_t va = dev_mem_getva(paddr);

		/* Map selected KVA to physical address. */
		mutex_enter(&dev_mem_lock);
		pmap_kenter_pa(va, paddr, prot, 0);
		pmap_update(pmap_kernel());

		/* Perform I/O. */
		vaddr = va + offset;
		error = uiomove((void *)vaddr, len, uio);

		/* Unmap, flush before unlock. */
		pmap_kremove(va, PAGE_SIZE);
		pmap_update(pmap_kernel());
		mutex_exit(&dev_mem_lock);

		/* "Release" the virtual address. */
		dev_mem_relva(paddr, va);
	} else {
		/* Direct map, just perform I/O. */
		vaddr += offset;
		error = uiomove((void *)vaddr, len, uio);
	}
	return error;
}
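On ports with a direct physical map, the code above skips the pmap_kenter_pa()/pmap_kremove() round trip entirely. A hypothetical sketch of the hook it calls, for an imagined port with a linear window at DIRECT_BASE (both DIRECT_BASE and physmem_limit are assumptions, not NetBSD symbols):
bool
mm_md_direct_mapped_phys(paddr_t paddr, vaddr_t *vaddr)
{
	if (paddr >= physmem_limit)	/* assumed port-specific bound */
		return false;
	*vaddr = DIRECT_BASE + paddr;	/* linear VA = base + PA */
	return true;
}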
Example #10
int
gnttab_suspend(void)
{
    int i;

    for (i = 0; i < nr_grant_frames; i++)
        pmap_kremove((vm_offset_t) shared + i * PAGE_SIZE);

    return (0);
}
Example #11
static void
dpt_unphysmap(u_int8_t * vaddr, vm_size_t size)
{
	int             ndx;

	for (ndx = 0; ndx < size; ndx += PAGE_SIZE) {
		pmap_kremove((vm_offset_t) vaddr + ndx);
	}

	kmem_free(kernel_map, (vm_offset_t) vaddr, size);
}
Example #12
/*
 * Undo vmaprange.
 */
void
vunmaprange(vaddr_t kaddr, vsize_t len)
{
	vaddr_t addr;
	vsize_t off;

	addr = trunc_page(kaddr);
	off = kaddr - addr;
	len = round_page(off + len);
	pmap_kremove(addr, len);
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
}
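A hedged reconstruction of the forward operation vunmaprange() undoes; this is a sketch under assumptions, not the port's actual vmaprange(), and it takes a physical address directly to stay self-contained:
static vaddr_t
vmaprange_sketch(paddr_t pa, vsize_t len)
{
	vsize_t off = pa & PAGE_MASK;
	vsize_t sz = round_page(off + len);
	vaddr_t kva;
	vsize_t n;

	/* Carve VA from the same map vunmaprange() frees into. */
	kva = uvm_km_alloc(phys_map, sz, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	for (n = 0; n < sz; n += PAGE_SIZE)
		pmap_kenter_pa(kva + n, trunc_page(pa) + n,
		    VM_PROT_READ | VM_PROT_WRITE, 0);
	pmap_update(pmap_kernel());
	return kva + off;	/* preserve the sub-page offset */
}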
Example #13
void
i80321_mem_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
{
	vaddr_t va, endva;

	va = trunc_page((vaddr_t)bsh);
	endva = round_page(va + size);

	pmap_kremove(va, endva - va);
	pmap_update(pmap_kernel());
	km_free((void *)va, endva - va, &kv_any, &kp_none);
}
Example #14
int
mp_cpu_start(struct cpu_info *ci)
{
	unsigned short dwordptr[2];

	/*
	 * "The BSP must initialize CMOS shutdown code to 0Ah ..."
	 */

	outb(IO_RTC, NVRAM_RESET);
	outb(IO_RTC+1, NVRAM_RESET_JUMP);

	/*
	 * "and the warm reset vector (DWORD based at 40:67) to point
	 * to the AP startup code ..."
	 */

	dwordptr[0] = 0;
	dwordptr[1] = MP_TRAMPOLINE >> 4;

	pmap_activate(curproc);

	pmap_kenter_pa(0, 0, PROT_READ | PROT_WRITE);
	memcpy((u_int8_t *)0x467, dwordptr, 4);
	pmap_kremove(0, PAGE_SIZE);

#if NLAPIC > 0
	/*
	 * ... prior to executing the following sequence:"
	 */

	if (ci->ci_flags & CPUF_AP) {
		i386_ipi_init(ci->ci_apicid);

		delay(10000);

		if (cpu_feature & CPUID_APIC) {
			i386_ipi(MP_TRAMPOLINE / PAGE_SIZE, ci->ci_apicid,
			    LAPIC_DLMODE_STARTUP);
			delay(200);

			i386_ipi(MP_TRAMPOLINE / PAGE_SIZE, ci->ci_apicid,
			    LAPIC_DLMODE_STARTUP);
			delay(200);
		}
	}
#endif
	return (0);
}
Example #15
/*
 * Only used by bread_cluster. 
 */
void
buf_fix_mapping(struct buf *bp, vsize_t newsize)
{
	vaddr_t va = (vaddr_t)bp->b_data;

	if (newsize < bp->b_bufsize) {
		pmap_kremove(va + newsize, bp->b_bufsize - newsize);
		pmap_update(pmap_kernel());
		/*
		 * Note: the size we lost actually belongs to the other
		 * buffers read in by bread_cluster.
		 */
		bp->b_bufsize = newsize;
	}
}
Example #16
void
arm_unmap_nocache(void *addr, vm_size_t size)
{
	vm_offset_t raddr = (vm_offset_t)addr;
	int i;

	size = round_page(size);
	i = (raddr - arm_nocache_startaddr) / (PAGE_SIZE);
	for (; size > 0; size -= PAGE_SIZE, i++) {
		arm_nocache_allocated[i / BITS_PER_INT] &= ~(1 << (i % 
		    BITS_PER_INT));
		pmap_kremove(raddr);
		raddr += PAGE_SIZE;
	}
}
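A hedged sketch of the allocation side implied by the bitmap bookkeeping above. arm_nocache_startaddr and arm_nocache_allocated are the example's own globals; the window size constant and the single-page first-fit search are assumptions (the real allocator handles multi-page runs):
/* Sketch: claim one free page slot in the nocache window. */
static vm_offset_t
arm_nocache_getpage_sketch(void)
{
	int i;

	for (i = 0; i < ARM_NOCACHE_KVA_SIZE / PAGE_SIZE; i++) {
		if ((arm_nocache_allocated[i / BITS_PER_INT] &
		    (1 << (i % BITS_PER_INT))) == 0) {
			arm_nocache_allocated[i / BITS_PER_INT] |=
			    1 << (i % BITS_PER_INT);
			return (arm_nocache_startaddr + i * PAGE_SIZE);
		}
	}
	return (0);	/* window exhausted */
}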
Example #17
void
bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size)
{
	struct extent *ex;
	u_long va, endva;
	bus_addr_t bpa;

	/*
	 * Find the correct extent and bus physical address.
	 */
	if (t == X86_BUS_SPACE_IO) {
		ex = ioport_ex;
		bpa = bsh;
	} else if (t == X86_BUS_SPACE_MEM) {
		ex = iomem_ex;
		bpa = (bus_addr_t)ISA_PHYSADDR(bsh);
		if (IOM_BEGIN <= bpa && bpa <= IOM_END)
			goto ok;

		va = trunc_page(bsh);
		endva = round_page(bsh + size);

#ifdef DIAGNOSTIC
		if (endva <= va)
			panic("bus_space_unmap: overflow");
#endif

		(void)pmap_extract(pmap_kernel(), va, &bpa);
		bpa += (bsh & PGOFSET);

		pmap_kremove(va, endva - va);
		pmap_update(pmap_kernel());

		/*
		 * Free the kernel virtual mapping.
		 */
		uvm_km_free(kernel_map, va, endva - va);
	} else
		panic("bus_space_unmap: bad bus space tag");

ok:
	if (extent_free(ex, bpa, size,
	    EX_NOWAIT | (ioport_malloc_safe ? EX_MALLOCOK : 0))) {
		printf("bus_space_unmap: %s 0x%lx, size 0x%lx\n",
		    (t == X86_BUS_SPACE_IO) ? "port" : "pa", bpa, size);
		printf("bus_space_unmap: can't free region\n");
	}
}
Example #18
/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

	KASSERT((bp->b_flags & B_PHYS) != 0);

	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);
	pmap_kremove(addr, len);
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, addr, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}
Example #19
/* Unmap memory previously mapped with table_map(). */
static void
table_unmap(void *data, vm_offset_t length)
{
	vm_offset_t va, off;

	va = (vm_offset_t)data;
	off = va & PAGE_MASK;
	length = roundup(length + off, PAGE_SIZE);
	va &= ~PAGE_MASK;
	while (length > 0) {
		pmap_kremove(va);
		invlpg(va);
		va += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
}
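A hypothetical counterpart showing what table_unmap() assumes was done on the way in; table_map_sketch is not the real table_map() and takes a caller-provided VA range to stay self-contained:
static void *
table_map_sketch(vm_offset_t va, vm_paddr_t pa, vm_offset_t length)
{
	vm_offset_t off, n;

	off = pa & PAGE_MASK;
	length = roundup(length + off, PAGE_SIZE);
	for (n = 0; n < length; n += PAGE_SIZE)
		pmap_kenter(va + n, (pa & ~(vm_paddr_t)PAGE_MASK) + n);
	return ((void *)(va + off));	/* table_unmap() re-derives 'off' */
}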
Example #20
void
obio_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
{
	vaddr_t va, endva;

	if (pmap_devmap_find_va(bsh, size) != NULL) {
		/* Device was statically mapped; nothing to do. */
		return;
	}

	endva = round_page(bsh + size);
	va = trunc_page(bsh);

	pmap_kremove(va, endva - va);
	uvm_km_free(kernel_map, va, endva - va, UVM_KMF_VAONLY);
}
Example #21
/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t addr, off;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");
	addr = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - addr;
	len = round_page(off + len);
	pmap_kremove(addr, len);
	pmap_update(pmap_kernel());
	uvm_km_free_wakeup(phys_map, addr, len);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = 0;
}
Example #22
/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
	vaddr_t kva;

	if ((bp->b_flags & B_PHYS) == 0)
		panic("vunmapbuf");

	kva = mips_trunc_page(bp->b_data);
	len = mips_round_page((vaddr_t)bp->b_data - kva + len);
	pmap_kremove(kva, len);
	pmap_update(pmap_kernel());
	uvm_km_free(phys_map, kva, len, UVM_KMF_VAONLY);
	bp->b_data = bp->b_saveaddr;
	bp->b_saveaddr = NULL;
}
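For context, a hedged sketch of the setup every vunmapbuf() in this list undoes, using the newer NetBSD allocator API to match the example above (allocator names differ in older trees, e.g. the uvm_km_free_wakeup() variant earlier):
/* Sketch: double-map a user buffer into phys_map for physio. */
int
vmapbuf_sketch(struct buf *bp, vsize_t len)
{
	vaddr_t uva, kva, off;
	paddr_t pa;

	KASSERT(bp->b_flags & B_PHYS);
	bp->b_saveaddr = bp->b_data;
	uva = trunc_page((vaddr_t)bp->b_data);
	off = (vaddr_t)bp->b_data - uva;
	len = round_page(off + len);
	kva = uvm_km_alloc(phys_map, len, 0, UVM_KMF_VAONLY | UVM_KMF_WAITVA);
	bp->b_data = (void *)(kva + off);
	while (len) {
		(void)pmap_extract(vm_map_pmap(&bp->b_proc->p_vmspace->vm_map),
		    uva, &pa);
		pmap_kenter_pa(kva, pa, VM_PROT_READ | VM_PROT_WRITE, 0);
		uva += PAGE_SIZE;
		kva += PAGE_SIZE;
		len -= PAGE_SIZE;
	}
	pmap_update(pmap_kernel());
	return 0;
}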
Example #23
void
vme_unmap(struct vme_softc *sc, struct extent *ext, u_int awidth,
    vaddr_t vaddr, paddr_t paddr, bus_size_t size)
{
	const struct vme_range *r;
	vaddr_t va;
	paddr_t pa, addr;
	psize_t len;

	va = trunc_page(vaddr);
	pa = trunc_page(paddr);
	len = round_page(paddr + size) - pa;

	/*
	 * Retrieve the address range this mapping comes from.
	 */
	for (r = sc->sc_ranges; r->vr_width != 0; r++) {
		if (r->vr_width != awidth)
			continue;
		addr = paddr - r->vr_base;
		if (r->vr_start <= addr && r->vr_end >= addr + size - 1)
			break;
	}
	if (r->vr_width == 0) {
#ifdef DIAGNOSTIC
		printf("%s: nonsensical A%d mapping at va 0x%08lx pa 0x%08lx\n",
		    __func__, AWIDTH(awidth), vaddr, paddr);
#endif
		return;
	}

	/*
	 * Undo the mapping.
	 */
	pmap_kremove(va, len);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, len);

	/*
	 * Unregister mapping.
	 */
	if (ext != NULL) {
		pa -= r->vr_base;
		extent_free(ext, atop(pa), atop(len), EX_NOWAIT | EX_MALLOCOK);
	}
}
Example #24
/*
 * Unmap a previously-mapped user I/O request.
 */
void
vunmapbuf(struct buf *bp, vsize_t len)
{
    vaddr_t kva;
    vsize_t off;

    if ((bp->b_flags & B_PHYS) == 0)
        panic("vunmapbuf");

    kva = trunc_page((vaddr_t)bp->b_data);
    off = (vaddr_t)bp->b_data - kva;
    len = round_page(off + len);
    pmap_kremove(kva, len);
    uvm_km_free(kernel_map, kva, len, UVM_KMF_VAONLY);
    bp->b_data = bp->b_saveaddr;
    bp->b_saveaddr = NULL;
}
Example #25
/*
 * Detach a mapped page and release resources back to the system.
 */
void
sf_buf_free(struct sf_buf *sf)
{

	mtx_lock(&sf_buf_lock);
	sf->ref_count--;
	if (sf->ref_count == 0) {
		TAILQ_INSERT_TAIL(&sf_buf_freelist, sf, free_entry);
		nsfbufsused--;
		pmap_kremove(sf->kva);
		sf->m = NULL;
		LIST_REMOVE(sf, list_entry);
		if (sf_buf_alloc_want > 0)
			wakeup(&sf_buf_freelist);
	}
	mtx_unlock(&sf_buf_lock);
}
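A hedged usage sketch of the lifecycle sf_buf_free() completes. sf_buf_alloc() and sf_buf_kva() are the FreeBSD API the example belongs to; the wiring of the page and the len <= PAGE_SIZE bound are assumed handled by the caller:
/* Sketch: borrow a temporary KVA for page 'm', copy out of it,
 * then drop the reference so sf_buf_free() can unmap the slot. */
static void
copy_from_page_sketch(vm_page_t m, void *dst, size_t len)
{
	struct sf_buf *sf;

	sf = sf_buf_alloc(m, 0);	/* may sleep for a free slot */
	memcpy(dst, (const void *)sf_buf_kva(sf), len);
	sf_buf_free(sf);		/* last ref drops the mapping */
}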
Example #26
void
i80321_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
{
	vaddr_t va, endva;

	if (pmap_devmap_find_va(bsh, size) != NULL) {
		/* Device was statically mapped; nothing to do. */
		return;
	}

	va = trunc_page((vaddr_t)bsh);
	endva = round_page(bsh + size);

	pmap_kremove(va, endva - va);
	pmap_update(pmap_kernel());
	km_free((void *)va, endva - va, &kv_any, &kp_none);
}
Example #27
void
mpcore_bs_unmap(void *t, bus_space_handle_t bsh, bus_size_t size)
{
	vaddr_t	va;
	vsize_t	sz;

	if (pmap_devmap_find_va(bsh, size) != NULL) {
		/* Device was statically mapped; nothing to do. */
		return;
	}

	va = trunc_page(bsh);
	sz = round_page(bsh + size) - va;

	pmap_kremove(va, sz);
	pmap_update(pmap_kernel());
	uvm_km_free(kernel_map, va, sz, UVM_KMF_VAONLY);
}
Example #28
void
generic_bs_unmap(void *t, bus_space_handle_t h, bus_size_t size)
{
	vm_offset_t va, endva, origva;

	if (pmap_devmap_find_va((vm_offset_t)h, size) != NULL) {
		/* Device was statically mapped; nothing to do. */
		return;
	}

	endva = round_page((vm_offset_t)h + size);
	origva = va = trunc_page((vm_offset_t)h);

	while (va < endva) {
		pmap_kremove(va);
		va += PAGE_SIZE;
	}
	kmem_free(kernel_map, origva, endva - origva);
}
Example #29
/*
 * void _bus_space_unmap(bus_space_tag bst, bus_space_handle bsh,
 *                        bus_size_t size, bus_addr_t *adrp)
 *
 *   This function unmaps memory- or I/O-space mapped by
 *   _bus_space_map().  It works much like bus_space_unmap(), but it
 *   does not consult the kernel's built-in extents, and it returns
 *   the physical address of the bus space for the convenience of an
 *   extra extent manager.
 */
void
_bus_space_unmap(bus_space_tag_t t, bus_space_handle_t bsh, bus_size_t size,
    bus_addr_t *adrp)
{
	u_long va, endva;
	bus_addr_t bpa;

	/*
	 * Find the correct bus physical address.
	 */
	if (t == X86_BUS_SPACE_IO) {
		bpa = bsh;
	} else if (t == X86_BUS_SPACE_MEM) {
		bpa = (bus_addr_t)ISA_PHYSADDR(bsh);
		if (IOM_BEGIN <= bpa && bpa <= IOM_END)
			goto ok;

		va = trunc_page(bsh);
		endva = round_page(bsh + size);

#ifdef DIAGNOSTIC
		if (endva <= va)
			panic("_bus_space_unmap: overflow");
#endif

		(void) pmap_extract(pmap_kernel(), va, &bpa);
		bpa += (bsh & PGOFSET);

		pmap_kremove(va, endva - va);
		pmap_update(pmap_kernel());

		/*
		 * Free the kernel virtual mapping.
		 */
		uvm_km_free(kernel_map, va, endva - va);
	} else
		panic("bus_space_unmap: bad bus space tag");

ok:
	if (adrp != NULL)
		*adrp = bpa;
}
Example #30
/*
 * Move pages from one kernel virtual address to another.
 * Both addresses are assumed to reside in the Sysmap.
 */
void
pagemove(caddr_t from, caddr_t to, size_t size)
{
	paddr_t pa;
	boolean_t rv;

	KASSERT(((vaddr_t)from & PGOFSET) == 0);
	KASSERT(((vaddr_t)to & PGOFSET) == 0);
	KASSERT((size & PGOFSET) == 0);
	while (size > 0) {
		rv = pmap_extract(pmap_kernel(), (vaddr_t)from, &pa);
		KASSERT(rv);
		KASSERT(!pmap_extract(pmap_kernel(), (vaddr_t)to, NULL));
		pmap_kremove((vaddr_t)from, PAGE_SIZE);
		pmap_kenter_pa((vaddr_t)to, pa,
			       VM_PROT_READ|VM_PROT_WRITE);
		from += PAGE_SIZE;
		to += PAGE_SIZE;
		size -= PAGE_SIZE;
	}
}