/*
 * UMA backend: release one page previously handed out by uma_small_alloc().
 * "mem" is a region-7 (identity-mapped) kernel virtual address; masking off
 * the region bits recovers the physical address for the page lookup.
 */
void
uma_small_free(void *mem, int size, u_int8_t flags)
{
	vm_page_t pg;

	pg = PHYS_TO_VM_PAGE(IA64_RR_MASK((u_int64_t)mem));
	/* Drop the wiring taken at allocation time, then free the page. */
	pg->wire_count--;
	vm_page_free(pg);
	atomic_subtract_int(&cnt.v_wire_count, 1);
}
static u_long acpi_get_root_from_efi(void) { static struct uuid acpi_root_uuid = EFI_TABLE_ACPI20; void *acpi_root; acpi_root = efi_get_table(&acpi_root_uuid); if (acpi_root != NULL) return (IA64_RR_MASK((uintptr_t)acpi_root)); return (0); }
/*
 * Translate a kernel virtual address to a physical address.  Only direct-
 * mapped region-7 addresses are supported; anything else is reported and
 * rejected by zeroing *len and returning NULL.
 */
static void *
va2pa(vm_offset_t va, size_t *len)
{
	if (va >= IA64_RR_BASE(7)) {
		uint64_t phys = IA64_RR_MASK(va);
		return ((void *)phys);
	}

	/* Not a region-7 address: diagnose and signal failure to the caller. */
	printf("\n%s: va=%lx, *len=%lx\n", __func__, va, *len);
	*len = 0;
	return (NULL);
}
/* ARGSUSED */
/*
 * Read/write handler for the /dev/mem and /dev/kmem character devices.
 * /dev/mem addresses physical memory directly; /dev/kmem addresses kernel
 * virtual memory, except that region-6/7 offsets are treated as physical
 * (their region bits are masked off and the physical path is taken).
 * Returns 0 on success or an errno value.
 */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	off_t ofs;
	vm_offset_t addr;
	void *ptr;
	u_long limit;
	int count, error, phys, rw;

	error = 0;
	rw = (uio->uio_rw == UIO_READ) ? VM_PROT_READ : VM_PROT_WRITE;

	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		/* Skip exhausted iovec entries; a negative count is a bug. */
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}

		ofs = uio->uio_offset;

		/*
		 * /dev/mem is always physical.  For /dev/kmem, offsets at or
		 * above the region-6 base are identity-mapped, so strip the
		 * region bits and fall through to the physical path.
		 */
		phys = (dev2unit(dev) == CDEV_MINOR_MEM) ? 1 : 0;
		if (phys == 0 && ofs >= IA64_RR_BASE(6)) {
			ofs = IA64_RR_MASK(ofs);
			phys++;
		}

		if (phys) {
			/*
			 * Map the physical range; "limit" bounds how many
			 * contiguous bytes are accessible through "ptr".
			 */
			error = mem_phys2virt(ofs, rw, &ptr, &limit);
			if (error)
				return (error);

			count = min(uio->uio_resid, limit);
			error = uiomove(ptr, count, uio);
		} else {
			ptr = (void *)ofs;
			count = iov->iov_len;

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			limit = round_page(ofs + count);
			addr = trunc_page(ofs);
			/* Refuse user-space addresses through /dev/kmem. */
			if (addr < VM_MAXUSER_ADDRESS)
				return (EINVAL);
			for (; addr < limit; addr += PAGE_SIZE) {
				if (pmap_kextract(addr) == 0)
					return (EFAULT);
			}
			if (!kernacc(ptr, count, rw))
				return (EFAULT);
			error = uiomove(ptr, count, uio);
		}
		/* else panic! */
	}
	return (error);
}
/* ARGSUSED */
/*
 * Read/write handler for the /dev/mem and /dev/kmem character devices
 * (older variant).  /dev/mem performs page-at-a-time physical access after
 * an ia64_pa_access() permission check; /dev/kmem accesses kernel virtual
 * memory, jumping to the physical path for region-6+ offsets.  Returns 0
 * on success or an errno value.
 */
int
memrw(struct cdev *dev, struct uio *uio, int flags)
{
	struct iovec *iov;
	vm_offset_t addr, eaddr, o, v;
	int c, error, rw;

	error = 0;

	while (uio->uio_resid > 0 && !error) {
		iov = uio->uio_iov;
		/* Skip exhausted iovec entries; a negative count is a bug. */
		if (iov->iov_len == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			if (uio->uio_iovcnt < 0)
				panic("memrw");
			continue;
		}

		if (dev2unit(dev) == CDEV_MINOR_MEM) {
			v = uio->uio_offset;
kmemphys:
			/* Allow reads only in RAM. */
			rw = (uio->uio_rw == UIO_READ)
			    ? VM_PROT_READ : VM_PROT_WRITE;
			if ((ia64_pa_access(v) & rw) != rw) {
				error = EFAULT;
				c = 0;
				break;
			}

			/* Copy at most to the end of the current page. */
			o = uio->uio_offset & PAGE_MASK;
			c = min(uio->uio_resid, (int)(PAGE_SIZE - o));
			/* Access physical memory through the region-7 map. */
			error = uiomove((caddr_t)IA64_PHYS_TO_RR7(v), c, uio);
			continue;
		} else if (dev2unit(dev) == CDEV_MINOR_KMEM) {
			v = uio->uio_offset;

			/*
			 * Region-6+ offsets are identity-mapped: strip the
			 * region bits and take the physical path above.
			 */
			if (v >= IA64_RR_BASE(6)) {
				v = IA64_RR_MASK(v);
				goto kmemphys;
			}

			c = min(iov->iov_len, MAXPHYS);

			/*
			 * Make sure that all of the pages are currently
			 * resident so that we don't create any zero-fill
			 * pages.
			 */
			addr = trunc_page(v);
			eaddr = round_page(v + c);
			for (; addr < eaddr; addr += PAGE_SIZE) {
				if (pmap_extract(kernel_pmap, addr) == 0)
					return (EFAULT);
			}
			if (!kernacc((caddr_t)v, c,
			    (uio->uio_rw == UIO_READ)
			    ? VM_PROT_READ : VM_PROT_WRITE))
				return (EFAULT);
			error = uiomove((caddr_t)v, c, uio);
			continue;
		}
		/* else panic! */
	}
	return (error);
}