/*
 * Initialize Xen HVM support for the requested phase.
 *
 * init_type selects the phase: XEN_HVM_INIT_LATE on first boot,
 * XEN_HVM_INIT_RESUME after a suspend/resume cycle, and
 * XEN_HVM_INIT_CANCELLED_SUSPEND when a suspend was aborted, in which
 * case nothing needs to be redone and the function returns immediately.
 */
static void
xen_hvm_init(enum xen_hvm_init_type init_type)
{
	int error;
	int i;

	/* A cancelled suspend leaves all Xen state intact: nothing to do. */
	if (init_type == XEN_HVM_INIT_CANCELLED_SUSPEND)
		return;

	error = xen_hvm_init_hypercall_stubs(init_type);

	switch (init_type) {
	case XEN_HVM_INIT_LATE:
		/* On first boot a stub failure is non-fatal: just bail out. */
		if (error != 0)
			return;

		/*
		 * If xen_domain_type is not set at this point
		 * it means we are inside a (PV)HVM guest, because
		 * for PVH the guest type is set much earlier
		 * (see hammer_time_xen).
		 */
		if (!xen_domain()) {
			xen_domain_type = XEN_HVM_DOMAIN;
			vm_guest = VM_GUEST_XEN;
		}

		setup_xen_features();
#ifdef SMP
		cpu_ops = xen_hvm_cpu_ops;
#endif
		break;
	case XEN_HVM_INIT_RESUME:
		/*
		 * On resume the hypercall stubs must come back up; there is
		 * no way to continue without them.
		 */
		if (error != 0)
			panic("Unable to init Xen hypercall stubs on resume");

		/* Clear stale vcpu_info. */
		CPU_FOREACH(i)
			DPCPU_ID_SET(i, vcpu_info, NULL);
		break;
	default:
		panic("Unsupported HVM initialization type");
	}

	/* Reset event-channel callback state before re-registration. */
	xen_vector_callback_enabled = 0;
	xen_evtchn_needs_ack = false;
	xen_hvm_set_callback(NULL);

	/*
	 * On (PV)HVM domains we need to request the hypervisor to
	 * fill the shared info page, for PVH guest the shared_info page
	 * is passed inside the start_info struct and is already set, so this
	 * functions are no-ops.
	 */
	xen_hvm_init_shared_info_page();
	xen_hvm_disable_emulated_devices();
}
/*
 * Release the FPU context acquired by the matching enter call and
 * return it to the per-CPU cache for reuse.  A session that never
 * saved FPU state (fpu_saved <= 0) requires no cleanup.
 */
static void
pefs_aesni_leave(struct pefs_session *xses)
{
	struct pefs_aesni_ses *as = &xses->o.ps_aesni;

	if (as->fpu_saved > 0) {
		fpu_kern_leave(as->td, as->fpu_ctx);
		/* Park the context back on the CPU it was borrowed from. */
		DPCPU_ID_SET(as->fpu_cpuid, pefs_aesni_fpu, as->fpu_ctx);
	}
}
void pefs_aesni_init(struct pefs_alg *pa) { struct fpu_kern_ctx *fpu_ctx; u_long enable = 1; u_int cpuid; TUNABLE_ULONG_FETCH(AESNI_ENABLE_ENV, &enable); if (enable != 0 && (cpu_feature2 & CPUID2_AESNI) != 0) { printf("pefs: AESNI hardware acceleration enabled\n"); pa->pa_uninit = pefs_aesni_uninit; pa->pa_enter = pefs_aesni_enter; pa->pa_leave = pefs_aesni_leave; pa->pa_keysetup = pefs_aesni_keysetup; pa->pa_encrypt = pefs_aesni_encrypt; pa->pa_decrypt = pefs_aesni_decrypt; CPU_FOREACH(cpuid) { fpu_ctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL); DPCPU_ID_SET(cpuid, pefs_aesni_fpu, fpu_ctx); } } else