/*
 * svm_disable() - Called to disable SVM extensions on every processor.
 */
static int
svm_disable(void)
{
	uint64_t efer;
	int origcpu;
	int i;

	/* XXX Wait till no vmms are running? */

	KKASSERT(svm_enabled == 1);

	origcpu = mycpuid;
	for (i = 0; i < ncpus; i++) {
		lwkt_migratecpu(i);

		/* Clear and free this cpu's VM Host Save Area */
		wrmsr(MSR_AMD_VM_HSAVE_PA, 0);
		contigfree((void *)vm_hsave_va[i], 4096, M_TEMP);
		vm_hsave_va[i] = 0;

		/* Clear EFER.SVME to disable SVM on this cpu */
		efer = rdmsr(MSR_EFER);
		efer &= ~EFER_SVME;
		wrmsr(MSR_EFER, efer);
	}
	lwkt_migratecpu(origcpu);

	svm_enabled = 0;

	return (0);
}
/*
 * svm_enable() - Called to enable SVM extensions on every processor.
 */
static int
svm_enable(void)
{
	uint64_t efer;
	int origcpu;
	int i;
	vm_paddr_t vm_hsave_pa;

	if (!svm_available)
		return (ENODEV);

	KKASSERT(svm_enabled == 0);

	/* Set EFER.SVME and allocate a VM Host Save Area on every cpu */
	origcpu = mycpuid;
	for (i = 0; i < ncpus; i++) {
		lwkt_migratecpu(i);

		efer = rdmsr(MSR_EFER);
		efer |= EFER_SVME;
		wrmsr(MSR_EFER, efer);

		/* 4KB-aligned, zeroed save area below 4GB */
		vm_hsave_va[i] = (vm_offset_t)contigmalloc(4096, M_TEMP,
		    M_WAITOK | M_ZERO, 0, 0xffffffff, 4096, 0);
		vm_hsave_pa = vtophys(vm_hsave_va[i]);
		wrmsr(MSR_AMD_VM_HSAVE_PA, vm_hsave_pa);
	}
	lwkt_migratecpu(origcpu);

	svm_enabled = 1;

	return (0);
}
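
/*
 * A minimal sketch (not part of the original source) of the per-cpu
 * iteration pattern that svm_enable() and svm_disable() above rely on:
 * migrate the calling thread to each cpu in turn, do the per-cpu work
 * there (e.g. the rdmsr()/wrmsr() calls), then migrate back to the
 * originating cpu.  The helper name and callback type are hypothetical;
 * only lwkt_migratecpu(), mycpuid and ncpus come from the code above.
 */
static void
svm_foreach_cpu_sketch(void (*func)(void *), void *arg)
{
	int origcpu;
	int i;

	origcpu = mycpuid;
	for (i = 0; i < ncpus; i++) {
		lwkt_migratecpu(i);	/* now running on cpu i */
		func(arg);		/* per-cpu work runs on cpu i */
	}
	lwkt_migratecpu(origcpu);	/* return to the original cpu */
}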
static int
udp_getcred(SYSCTL_HANDLER_ARGS)
{
	struct sockaddr_in addrs[2];
	struct ucred cred0, *cred = NULL;
	struct inpcb *inp;
	int error, cpu, origcpu;

	error = priv_check(req->td, PRIV_ROOT);
	if (error)
		return (error);
	error = SYSCTL_IN(req, addrs, sizeof addrs);
	if (error)
		return (error);

	origcpu = mycpuid;
	cpu = udp_addrcpu(addrs[1].sin_addr.s_addr, addrs[1].sin_port,
	    addrs[0].sin_addr.s_addr, addrs[0].sin_port);

	lwkt_migratecpu(cpu);

	inp = in_pcblookup_hash(&udbinfo[cpu],
	    addrs[1].sin_addr, addrs[1].sin_port,
	    addrs[0].sin_addr, addrs[0].sin_port, TRUE, NULL);
	if (inp == NULL || inp->inp_socket == NULL) {
		error = ENOENT;
	} else if (inp->inp_socket->so_cred != NULL) {
		/*
		 * Copy the credentials into the local cred0 so they can
		 * still be returned after migrating back to origcpu.
		 */
		cred0 = *(inp->inp_socket->so_cred);
		cred = &cred0;
	}

	lwkt_migratecpu(origcpu);

	if (error)
		return (error);

	return SYSCTL_OUT(req, cred, sizeof(struct ucred));
}