static int handle_test_block(struct kvm_vcpu *vcpu)
{
	unsigned long hva;
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_real_to_abs(vcpu, addr);

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (clear_user((void __user *)hva, PAGE_SIZE) != 0)
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}
void kvm_let_guest_timer_more_accurat(struct kvm_vcpu *vcpu)
{
	extern struct timezone sys_tz;
	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vcpu->arch.regs[0]);
	gfn_t gfn = gpa >> PAGE_SHIFT;
	unsigned long hva = gfn_to_hva(vcpu->kvm, gfn);
	struct timeval *tv;

	/* Bail out if the guest address is not backed by any memslot. */
	if (kvm_is_error_hva(hva))
		return;

	/* Fill the guest's timeval through its host-virtual mapping. */
	tv = (struct timeval *)(hva + (gpa & ~PAGE_MASK));
	do_gettimeofday(tv);

	if (vcpu->arch.regs[1] != 0) {
		struct timezone *tz;

		gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vcpu->arch.regs[1]);
		gfn = gpa >> PAGE_SHIFT;
		hva = gfn_to_hva(vcpu->kvm, gfn);
		if (kvm_is_error_hva(hva))
			return;

		/* Copy the host's timezone into the guest buffer as well. */
		tz = (struct timezone *)(hva + (gpa & ~PAGE_MASK));
		*tz = sys_tz;
	}
}
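For comparison, here is a minimal sketch (not taken from any kernel tree) of the same hypercall written with the generic guest-memory helper kvm_write_guest() instead of dereferencing the raw hva returned by gfn_to_hva(); kvm_write_guest() does the memslot lookup and user-copy internally. It assumes the same older interfaces the snippet above relies on (the two-argument gva_to_gpa() callback, struct timeval and do_gettimeofday()).

void kvm_let_guest_timer_more_accurat(struct kvm_vcpu *vcpu)
{
	extern struct timezone sys_tz;
	struct timeval tv;
	gpa_t gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vcpu->arch.regs[0]);

	do_gettimeofday(&tv);
	/* kvm_write_guest() returns non-zero if the gpa is not backed. */
	if (kvm_write_guest(vcpu->kvm, gpa, &tv, sizeof(tv)))
		return;

	if (vcpu->arch.regs[1] != 0) {
		gpa = vcpu->arch.mmu.gva_to_gpa(vcpu, vcpu->arch.regs[1]);
		kvm_write_guest(vcpu->kvm, gpa, &sys_tz, sizeof(sys_tz));
	}
}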
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
		       struct kvm_arch_async_pf *arch)
{
	struct kvm_async_pf *work;

	if (vcpu->async_pf.queued >= ASYNC_PF_PER_VCPU)
		return 0;

	/* setup delayed work */

	/*
	 * do alloc nowait since if we are going to sleep anyway we
	 * may as well sleep faulting in page
	 */
	work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
	if (!work)
		return 0;

	work->wakeup_all = false;
	work->vcpu = vcpu;
	work->gva = gva;
	work->addr = gfn_to_hva(vcpu->kvm, gfn);
	work->arch = *arch;
	work->mm = current->mm;
	atomic_inc(&work->mm->mm_users);
	kvm_get_kvm(work->vcpu->kvm);

	/* this can't really happen otherwise gfn_to_pfn_async would succeed */
	if (unlikely(kvm_is_error_hva(work->addr)))
		goto retry_sync;

	INIT_WORK(&work->work, async_pf_execute);
	if (!schedule_work(&work->work))
		goto retry_sync;

	list_add_tail(&work->queue, &vcpu->async_pf.queue);
	vcpu->async_pf.queued++;
	kvm_arch_async_page_not_present(vcpu, work);
	return 1;
retry_sync:
	kvm_put_kvm(work->vcpu->kvm);
	mmput(work->mm);
	kmem_cache_free(async_pf_cache, work);
	return 0;
}
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  struct kvm_memory_slot *memslot,
			  unsigned long fault_status)
{
	int ret;
	bool write_fault, writable, hugetlb = false, force_pte = false;
	unsigned long mmu_seq;
	gfn_t gfn = fault_ipa >> PAGE_SHIFT;
	unsigned long hva = gfn_to_hva(vcpu->kvm, gfn);
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	struct vm_area_struct *vma;
	pfn_t pfn;

	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
	if (fault_status == FSC_PERM && !write_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/* Let's check if we will get back a huge page backed by hugetlbfs */
	down_read(&current->mm->mmap_sem);
	vma = find_vma_intersection(current->mm, hva, hva + 1);
	if (is_vm_hugetlb_page(vma)) {
		hugetlb = true;
		gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
	} else {
		/*
		 * Pages belonging to memslots that don't have the same
		 * alignment for userspace and IPA cannot be mapped using
		 * block descriptors even if the pages belong to a THP for
		 * the process, because the stage-2 block descriptor will
		 * cover more than a single THP and we lose atomicity for
		 * unmapping, updates, and splits of the THP or other pages
		 * in the stage-2 block range.
		 */
		if ((memslot->userspace_addr & ~PMD_MASK) !=
		    ((memslot->base_gfn << PAGE_SHIFT) & ~PMD_MASK))
			force_pte = true;
	}
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  gfn_t gfn, struct kvm_memory_slot *memslot,
			  unsigned long fault_status)
{
	pte_t new_pte;
	pfn_t pfn;
	int ret;
	bool write_fault, writable;
	unsigned long mmu_seq;
	unsigned long hva = gfn_to_hva(vcpu->kvm, gfn);
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	pgprot_t mem_type = PAGE_S2;

	write_fault = kvm_is_write_fault(vcpu);
	if (fault_status == FSC_PERM && !write_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/* We need minimum second+third level pages */
	ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to getting unmapped before we have
	 * a chance to grab the mmu_lock, which ensures that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 */
	smp_rmb();

	pfn = gfn_to_pfn_prot(vcpu->kvm, gfn, write_fault, &writable);
	if (is_error_pfn(pfn))
		return -EFAULT;

	if (kvm_is_device_pfn(pfn))
		mem_type = PAGE_S2_DEVICE;

	new_pte = pfn_pte(pfn, mem_type);
	coherent_cache_guest_page(vcpu, hva, PAGE_SIZE);

	spin_lock(&vcpu->kvm->mmu_lock);
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
		goto out_unlock;
	if (writable) {
		kvm_set_s2pte_writable(&new_pte);
		kvm_set_pfn_dirty(pfn);
	}
	stage2_set_pte(vcpu->kvm, memcache, fault_ipa, &new_pte,
		       pgprot_val(mem_type) == pgprot_val(PAGE_S2_DEVICE));

out_unlock:
	spin_unlock(&vcpu->kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return 0;
}
static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
			  struct kvm_memory_slot *memslot,
			  unsigned long fault_status)
{
	int ret;
	bool write_fault, writable, hugetlb = false;
	unsigned long mmu_seq;
	gfn_t gfn = fault_ipa >> PAGE_SHIFT;
	unsigned long hva = gfn_to_hva(vcpu->kvm, gfn);
	struct kvm *kvm = vcpu->kvm;
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	struct vm_area_struct *vma;
	pfn_t pfn;

	write_fault = kvm_is_write_fault(kvm_vcpu_get_hsr(vcpu));
	if (fault_status == FSC_PERM && !write_fault) {
		kvm_err("Unexpected L2 read permission error\n");
		return -EFAULT;
	}

	/* Let's check if we will get back a huge page backed by hugetlbfs */
	down_read(&current->mm->mmap_sem);
	vma = find_vma_intersection(current->mm, hva, hva + 1);
	if (is_vm_hugetlb_page(vma)) {
		hugetlb = true;
		gfn = (fault_ipa & PMD_MASK) >> PAGE_SHIFT;
	}
	up_read(&current->mm->mmap_sem);

	/* We need minimum second+third level pages */
	ret = mmu_topup_memory_cache(memcache, 2, KVM_NR_MEM_OBJS);
	if (ret)
		return ret;

	mmu_seq = vcpu->kvm->mmu_notifier_seq;
	/*
	 * Ensure the read of mmu_notifier_seq happens before we call
	 * gfn_to_pfn_prot (which calls get_user_pages), so that we don't risk
	 * the page we just got a reference to getting unmapped before we have
	 * a chance to grab the mmu_lock, which ensures that if the page gets
	 * unmapped afterwards, the call to kvm_unmap_hva will take it away
	 * from us again properly. This smp_rmb() interacts with the smp_wmb()
	 * in kvm_mmu_notifier_invalidate_<page|range_end>.
	 */
	smp_rmb();

	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writable);
	if (is_error_pfn(pfn))
		return -EFAULT;

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;
	if (hugetlb) {
		pmd_t new_pmd = pfn_pmd(pfn, PAGE_S2);
		new_pmd = pmd_mkhuge(new_pmd);
		if (writable) {
			kvm_set_s2pmd_writable(&new_pmd);
			kvm_set_pfn_dirty(pfn);
		}
		coherent_icache_guest_page(kvm, hva & PMD_MASK, PMD_SIZE);
		ret = stage2_set_pmd_huge(kvm, memcache, fault_ipa, &new_pmd);
	} else {
		pte_t new_pte = pfn_pte(pfn, PAGE_S2);
		if (writable) {
			kvm_set_s2pte_writable(&new_pte);
			kvm_set_pfn_dirty(pfn);
		}
		coherent_icache_guest_page(kvm, hva, PAGE_SIZE);
		ret = stage2_set_pte(kvm, memcache, fault_ipa, &new_pte, false);
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	return ret;
}