int
arch_phys_wc_add(unsigned long base, unsigned long size)
{
#if defined(MTRR)
        struct mtrr *mtrr;
        int n = 1;
        int id;
        int ret;

        /* Set up a write-combining MTRR covering [base, base + size). */
        mtrr = kmem_alloc(sizeof(*mtrr), KM_SLEEP);
        mtrr->base = base;
        mtrr->len = size;
        mtrr->type = MTRR_TYPE_WC;
        mtrr->flags = MTRR_VALID;

        /* XXX errno NetBSD->Linux */
        ret = -mtrr_set(mtrr, &n, NULL, MTRR_GETSET_KERNEL);
        if (ret) {
                KASSERT(n == 0);
                goto fail0;
        }
        KASSERT(n == 1);

        /* Hand back an idr handle that arch_phys_wc_del() can look up later. */
        idr_preload(GFP_KERNEL);
        mutex_spin_enter(&linux_writecomb.lock);
        id = idr_alloc(&linux_writecomb.idr, mtrr, 0, 0, GFP_NOWAIT);
        mutex_spin_exit(&linux_writecomb.lock);
        idr_preload_end();
        if (id < 0)
                goto fail1;

        return id;

fail1:  KASSERT(id < 0);
        mtrr->type = 0;
        mtrr->flags = 0;
        /* XXX errno NetBSD->Linux */
        ret = -mtrr_set(mtrr, &n, NULL, MTRR_GETSET_KERNEL);
        KASSERT(ret == 0);
        KASSERT(n == 1);
        ret = id;

fail0:  KASSERT(ret < 0);
        kmem_free(mtrr, sizeof(*mtrr));
        return ret;
#else
        return -1;
#endif
}
int
amd64_set_mtrr(struct proc *p, void *args, register_t *retval)
{
        int error, n;
        struct amd64_set_mtrr_args ua;

        if (mtrr_funcs == NULL)
                return ENOSYS;

        error = suser(p, 0);
        if (error != 0)
                return error;

        error = copyin(args, &ua, sizeof ua);
        if (error != 0)
                return error;

        error = copyin(ua.n, &n, sizeof n);
        if (error != 0)
                return error;

        error = mtrr_set(ua.mtrrp, &n, p, MTRR_GETSET_USER);
        if (n != 0)
                mtrr_commit();
        copyout(&n, ua.n, sizeof n);

        return error;
}
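A minimal sketch of a userland caller for the handler above, invoked through sysarch(2). The struct fields (mtrrp, n) and the mtrr fields mirror the kernel code in this section; the request name AMD64_SET_MTRR, the wrapper name set_wc_range, and the exact field types are assumptions for illustration.

/*
 * Hypothetical userland caller.  AMD64_SET_MTRR and the field types are
 * assumptions; the handler copies n back out, so n == 0 afterwards means
 * no range was actually installed.
 */
#include <sys/types.h>
#include <machine/sysarch.h>
#include <machine/mtrr.h>

static int
set_wc_range(uint64_t base, uint64_t len)
{
        struct mtrr m = {
                .base = base,
                .len = len,
                .type = MTRR_TYPE_WC,
                .flags = MTRR_VALID,
        };
        int n = 1;
        struct amd64_set_mtrr_args a = { .mtrrp = &m, .n = &n };

        return sysarch(AMD64_SET_MTRR, &a);
}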
void
arch_phys_wc_del(int id)
{
#if defined(MTRR)
        struct mtrr *mtrr;
        int n = 1;
        int ret __diagused;

        KASSERT(0 <= id);

        /* Look up and detach the MTRR record for this handle. */
        mutex_spin_enter(&linux_writecomb.lock);
        mtrr = idr_find(&linux_writecomb.idr, id);
        idr_remove(&linux_writecomb.idr, id);
        mutex_spin_exit(&linux_writecomb.lock);

        if (mtrr != NULL) {
                /* Invalidate the single MTRR entry, then free the record. */
                mtrr->type = 0;
                mtrr->flags = 0;
                /* XXX errno NetBSD->Linux */
                ret = -mtrr_set(mtrr, &n, NULL, MTRR_GETSET_KERNEL);
                KASSERT(ret == 0);
                KASSERT(n == 1);
                kmem_free(mtrr, sizeof(*mtrr));
        }
#endif
}
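A sketch of how a driver would pair these two calls, for example around a framebuffer BAR. The names fb_base, fb_size, and wc_cookie are placeholders; the only assumptions are what the two functions above guarantee: arch_phys_wc_add() returns a non-negative idr handle or a negative value on failure, and arch_phys_wc_del() asserts a non-negative id.

/* Illustrative caller; fb_base/fb_size/wc_cookie are placeholder names. */
static int wc_cookie;

static void
enable_fb_write_combining(unsigned long fb_base, unsigned long fb_size)
{
        /* A negative cookie means no MTRR was reserved; not fatal. */
        wc_cookie = arch_phys_wc_add(fb_base, fb_size);
}

static void
disable_fb_write_combining(void)
{
        /* Guard the call: arch_phys_wc_del() asserts 0 <= id. */
        if (wc_cookie >= 0)
                arch_phys_wc_del(wc_cookie);
}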
int
drm_mtrr_del(int __unused handle, unsigned long offset, size_t size, int flags)
{
#if defined(MTRR) && defined(MTRR_GETSET_KERNEL)
        struct mtrr mtrrmap;
        int one = 1;

        mtrrmap.base = offset;
        mtrrmap.len = size;
        mtrrmap.type = flags;
        mtrrmap.flags = 0;

        return mtrr_set(&mtrrmap, &one, NULL, MTRR_GETSET_KERNEL);
#else
        return 0;
#endif
}
int
drm_mtrr_del(int __unused handle, unsigned long offset, size_t size, int flags)
{
#ifndef DRM_NO_MTRR
        struct mtrr mtrrmap;
        int one = 1;

        DRM_DEBUG("offset=%lx size=%ld\n", (long)offset, (long)size);
        mtrrmap.base = offset;
        mtrrmap.len = size;
        mtrrmap.type = flags;
        mtrrmap.flags = 0;

        return mtrr_set(&mtrrmap, &one, NULL, MTRR_GETSET_KERNEL);
#else
        return 0;
#endif
}
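Both drm_mtrr_del() variants simply re-program the range rather than consulting the unused handle. A minimal sketch of a teardown caller, assuming DRM_MTRR_WC aliases the platform's write-combining MTRR type as in the BSD DRM ports; map_offset and map_size are placeholder names.

/*
 * Illustrative teardown path.  DRM_MTRR_WC is assumed to map to
 * MTRR_TYPE_WC in this port's headers; the handle argument is unused.
 */
static void
teardown_map_wc(unsigned long map_offset, size_t map_size)
{
        (void)drm_mtrr_del(0, map_offset, map_size, DRM_MTRR_WC);
}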
static int
x86_64_set_mtrr32(struct lwp *l, void *args, register_t *retval)
{
        struct x86_64_set_mtrr_args32 args32;
        struct mtrr32 *m32p, m32;
        struct mtrr *m64p, *mp;
        int error, i;
        int32_t n;
        size_t size;

        m64p = NULL;

        if (mtrr_funcs == NULL)
                return ENOSYS;

        error = kauth_authorize_machdep(l->l_cred, KAUTH_MACHDEP_MTRR_SET,
            NULL, NULL, NULL, NULL);
        if (error)
                return (error);

        error = copyin(args, &args32, sizeof args32);
        if (error != 0)
                return error;

        error = copyin((void *)(uintptr_t)args32.n, &n, sizeof n);
        if (error != 0)
                return error;

        if (n <= 0 || n > (MTRR_I686_NFIXED_SOFT + MTRR_I686_NVAR_MAX)) {
                error = EINVAL;
                goto fail;
        }

        size = n * sizeof(struct mtrr);
        m64p = kmem_zalloc(size, KM_SLEEP);
        if (m64p == NULL) {
                error = ENOMEM;
                goto fail;
        }

        /* Convert each 32-bit mtrr record to the native 64-bit layout. */
        m32p = (struct mtrr32 *)(uintptr_t)args32.mtrrp;
        mp = m64p;
        for (i = 0; i < n; i++) {
                error = copyin(m32p, &m32, sizeof m32);
                if (error != 0)
                        goto fail;
                mp->base = m32.base;
                mp->len = m32.len;
                mp->type = m32.type;
                mp->flags = m32.flags;
                mp->owner = m32.owner;
                m32p++;
                mp++;
        }

        error = mtrr_set(m64p, &n, l->l_proc, 0);

fail:
        if (m64p != NULL)
                kmem_free(m64p, size);
        if (error != 0)
                n = 0;
        /* Report back how many entries were actually set. */
        copyout(&n, (void *)(uintptr_t)args32.n, sizeof n);
        return error;
}