void *
uma_small_alloc(uma_zone_t zone, int bytes, u_int8_t *flags, int wait)
{
	vm_paddr_t pa;
	vm_page_t m;
	int pflags;
	void *va;

	/* Slab is not backed by a kernel map; it is freed by uma_small_free(). */
	*flags = UMA_SLAB_PRIV;

	/* Translate the malloc(9) wait flags into a VM allocation class. */
	if ((wait & (M_NOWAIT|M_USE_RESERVE)) == M_NOWAIT)
		pflags = VM_ALLOC_INTERRUPT;
	else
		pflags = VM_ALLOC_SYSTEM;

	/* Retry until a direct-mappable page is available, unless M_NOWAIT. */
	for (;;) {
		m = pmap_alloc_direct_page(0, pflags);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			else
				pmap_grow_direct_page_cache();
		} else
			break;
	}

	/* Use the direct mapping; no page-table setup is required. */
	pa = VM_PAGE_TO_PHYS(m);
	va = (void *)MIPS_PHYS_TO_DIRECT(pa);
	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero(va, PAGE_SIZE);
	return (va);
}
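/*
 * For context, the payoff of allocating a direct-mappable page above is
 * that MIPS_PHYS_TO_DIRECT() is a pure address computation.  A minimal
 * sketch of the idea, assuming a 32-bit kernel whose direct map is KSEG0
 * (the EXAMPLE_* names are illustrative; the real macros live in the MIPS
 * machine headers and also handle the n64 XKPHYS window):
 */
#define	EXAMPLE_KSEG0_START	0x80000000UL
#define	EXAMPLE_PHYS_TO_DIRECT(pa)	\
	((void *)((uintptr_t)(pa) | EXAMPLE_KSEG0_START))
/* Such an address is translated in hardware; no TLB entry is consumed. */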
void *
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
{
	vm_paddr_t pa;
	vm_page_t m;
	int pflags;
	void *va;

	*flags = UMA_SLAB_PRIV;
	/* Flag translation is now centralized; the page is kept wired. */
	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;

	/* Allocate from the freelist of direct-mappable pages. */
	for (;;) {
		m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, pflags);
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			else
				pmap_grow_direct_page_cache();
		} else
			break;
	}

	pa = VM_PAGE_TO_PHYS(m);
	va = (void *)MIPS_PHYS_TO_DIRECT(pa);
	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero(va, PAGE_SIZE);
	return (va);
}
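/*
 * malloc2vm_flags() replaces the open-coded flag translation of the
 * previous revision.  A hedged sketch of roughly what it does (the
 * authoritative definition lives in the VM headers and has changed in
 * detail across FreeBSD versions; example_malloc2vm_flags is an
 * illustrative name):
 */
static inline int
example_malloc2vm_flags(int malloc_flags)
{
	int pflags;

	/* M_USE_RESERVE callers may dig into the reserved page pool. */
	pflags = (malloc_flags & M_USE_RESERVE) != 0 ?
	    VM_ALLOC_INTERRUPT : VM_ALLOC_SYSTEM;
	if ((malloc_flags & M_ZERO) != 0)
		pflags |= VM_ALLOC_ZERO;
	if ((malloc_flags & M_NODUMP) != 0)
		pflags |= VM_ALLOC_NODUMP;
	return (pflags);
}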
void *
uma_small_alloc(uma_zone_t zone, vm_size_t bytes, u_int8_t *flags, int wait)
{
	vm_paddr_t pa;
	vm_page_t m;
	int pflags;
	void *va;

	*flags = UMA_SLAB_PRIV;
	pflags = malloc2vm_flags(wait) | VM_ALLOC_WIRED;

	for (;;) {
#ifdef MIPS64_NEW_PMAP
		m = vm_page_alloc(NULL, 0, pflags | VM_ALLOC_NOOBJ);
#else /* ! MIPS64_NEW_PMAP */
		m = vm_page_alloc_freelist(VM_FREELIST_DIRECT, pflags);
#endif /* ! MIPS64_NEW_PMAP */
#ifndef __mips_n64
		/*
		 * On 32-bit kernels only pages below the KSEG0 limit are
		 * direct-mappable; try to reclaim one before giving up.
		 */
		if (m == NULL && vm_page_reclaim_contig(pflags, 1, 0,
		    MIPS_KSEG0_LARGEST_PHYS, PAGE_SIZE, 0))
			continue;
#endif
		if (m == NULL) {
			if (wait & M_NOWAIT)
				return (NULL);
			else
				VM_WAIT;
		} else
			break;
	}

	pa = VM_PAGE_TO_PHYS(m);
	/* Register the page with minidumps unless the caller opted out. */
	if ((wait & M_NODUMP) == 0)
		dump_add_page(pa);
	va = (void *)MIPS_PHYS_TO_DIRECT(pa);
	if ((wait & M_ZERO) && (m->flags & PG_ZERO) == 0)
		bzero(va, PAGE_SIZE);
	return (va);
}
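/*
 * The matching free path reverses the direct-map translation and hands
 * the page back to the VM system.  This is a sketch modeled on
 * uma_small_free() from sys/mips/mips/uma_machdep.c of the same era;
 * the wire-count accounting in particular differs between versions:
 */
void
example_uma_small_free(void *mem, vm_size_t size, u_int8_t flags)
{
	vm_page_t m;
	vm_paddr_t pa;

	pa = MIPS_DIRECT_TO_PHYS((vm_offset_t)mem);
	dump_drop_page(pa);		/* undo dump_add_page() */
	m = PHYS_TO_VM_PAGE(pa);
	m->wire_count--;		/* undo VM_ALLOC_WIRED */
	vm_page_free(m);
	atomic_subtract_int(&vm_cnt.v_wire_count, 1);
}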
/*
 * Implement uiomove(9) from physical memory using a combination
 * of the direct mapping and sf_bufs to reduce the creation and
 * destruction of ephemeral mappings.
 */
int
uiomove_fromphys(vm_page_t ma[], vm_offset_t offset, int n, struct uio *uio)
{
	struct sf_buf *sf;
	struct thread *td = curthread;
	struct iovec *iov;
	void *cp;
	vm_offset_t page_offset;
	vm_paddr_t pa;
	vm_page_t m;
	size_t cnt;
	int error = 0;
	int save = 0;

	KASSERT(uio->uio_rw == UIO_READ || uio->uio_rw == UIO_WRITE,
	    ("uiomove_fromphys: mode"));
	KASSERT(uio->uio_segflg != UIO_USERSPACE || uio->uio_td == curthread,
	    ("uiomove_fromphys proc"));

	save = td->td_pflags & TDP_DEADLKTREAT;
	td->td_pflags |= TDP_DEADLKTREAT;
	while (n > 0 && uio->uio_resid) {
		iov = uio->uio_iov;
		cnt = iov->iov_len;
		if (cnt == 0) {
			uio->uio_iov++;
			uio->uio_iovcnt--;
			continue;
		}
		if (cnt > n)
			cnt = n;
		/* Clamp the copy to the remainder of the current page. */
		page_offset = offset & PAGE_MASK;
		cnt = ulmin(cnt, PAGE_SIZE - page_offset);

		m = ma[offset >> PAGE_SHIFT];
		pa = VM_PAGE_TO_PHYS(m);
		if (MIPS_DIRECT_MAPPABLE(pa)) {
			sf = NULL;
			cp = (char *)MIPS_PHYS_TO_DIRECT(pa) + page_offset;
			/*
			 * Flush all mappings to this page, the KSEG0 address
			 * first, so that it is overwritten by correct data.
			 */
			mips_dcache_wbinv_range((vm_offset_t)cp, cnt);
			pmap_flush_pvcache(m);
		} else {
			/* Fall back to an ephemeral sf_buf mapping. */
			sf = sf_buf_alloc(m, 0);
			cp = (char *)sf_buf_kva(sf) + page_offset;
		}
		switch (uio->uio_segflg) {
		case UIO_USERSPACE:
			maybe_yield();
			if (uio->uio_rw == UIO_READ)
				error = copyout(cp, iov->iov_base, cnt);
			else
				error = copyin(iov->iov_base, cp, cnt);
			if (error) {
				if (sf != NULL)
					sf_buf_free(sf);
				goto out;
			}
			break;
		case UIO_SYSSPACE:
			if (uio->uio_rw == UIO_READ)
				bcopy(cp, iov->iov_base, cnt);
			else
				bcopy(iov->iov_base, cp, cnt);
			break;
		case UIO_NOCOPY:
			break;
		}
		if (sf != NULL)
			sf_buf_free(sf);
		else
			mips_dcache_wbinv_range((vm_offset_t)cp, cnt);
		iov->iov_base = (char *)iov->iov_base + cnt;
		iov->iov_len -= cnt;
		uio->uio_resid -= cnt;
		uio->uio_offset += cnt;
		offset += cnt;
		n -= cnt;
	}
out:
	if (save == 0)
		td->td_pflags &= ~TDP_DEADLKTREAT;
	return (error);
}
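/*
 * Hypothetical caller sketch (example_read and its parameters are
 * illustrative, not FreeBSD API): a read routine that copies `len'
 * bytes starting at byte `off' within a run of physical pages straight
 * into the user buffer described by `uio', letting uiomove_fromphys()
 * choose the direct map or an sf_buf for each page it touches:
 */
static int
example_read(vm_page_t pages[], vm_offset_t off, int len, struct uio *uio)
{

	return (uiomove_fromphys(pages, off, len, uio));
}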