static unsigned long hva_to_gpa(struct vmx_vcpu *vcpu, struct mm_struct *mm, unsigned long hva) { uintptr_t mmap_start, stack_start; uintptr_t phys_end = (1ULL << boot_cpu_data.x86_phys_bits); uintptr_t gpa; BUG_ON(!mm); mmap_start = LG_ALIGN(mm->mmap_base) - GPA_MAP_SIZE; stack_start = LG_ALIGN(mm->start_stack) - GPA_STACK_SIZE; if (hva >= stack_start) { if (hva - stack_start >= GPA_STACK_SIZE) return ADDR_INVAL; gpa = hva - stack_start + phys_end - GPA_STACK_SIZE; } else if (hva >= mmap_start) { if (hva - mmap_start >= GPA_MAP_SIZE) return ADDR_INVAL; gpa = hva - mmap_start + phys_end - GPA_STACK_SIZE - GPA_MAP_SIZE; } else { if (hva >= phys_end - GPA_STACK_SIZE - GPA_MAP_SIZE) return ADDR_INVAL; gpa = hva; } return gpa; }
static long dune_dev_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) { long r = -EINVAL; struct dune_config conf; struct dune_layout layout; switch (ioctl) { case DUNE_ENTER: r = copy_from_user(&conf, (int __user *) arg, sizeof(struct dune_config)); if (r) { r = -EIO; goto out; } r = dune_enter(&conf, &conf.ret); if (r) break; r = copy_to_user((void __user *)arg, &conf, sizeof(struct dune_config)); if (r) { r = -EIO; goto out; } break; case DUNE_GET_SYSCALL: rdmsrl(MSR_LSTAR, r); printk(KERN_INFO "R %lx\n", (unsigned long) r); break; case DUNE_GET_LAYOUT: layout.phys_limit = (1UL << boot_cpu_data.x86_phys_bits); layout.base_map = LG_ALIGN(current->mm->mmap_base) - GPA_MAP_SIZE; layout.base_stack = LG_ALIGN(current->mm->start_stack) - GPA_STACK_SIZE; r = copy_to_user((void __user *)arg, &layout, sizeof(struct dune_layout)); if (r) { r = -EIO; goto out; } break; case DUNE_TRAP_ENABLE: r = dune_trap_enable(arg); break; case DUNE_TRAP_DISABLE: r = dune_trap_disable(arg); break; default: return -ENOTTY; } out: return r; }
static unsigned long gpa_to_hva(struct vmx_vcpu *vcpu, struct mm_struct *mm, unsigned long gpa) { uintptr_t phys_end = (1ULL << boot_cpu_data.x86_phys_bits); if (gpa < phys_end - GPA_STACK_SIZE - GPA_MAP_SIZE) return gpa; else if (gpa < phys_end - GPA_STACK_SIZE) return gpa - (phys_end - GPA_STACK_SIZE - GPA_MAP_SIZE) + LG_ALIGN(mm->mmap_base) - GPA_MAP_SIZE; else if (gpa < phys_end) return gpa - (phys_end - GPA_STACK_SIZE) + LG_ALIGN(mm->start_stack) - GPA_STACK_SIZE; else return ADDR_INVAL; }
/*
 * gpa_to_hva - translate a guest-physical address back to a host virtual
 * address (tag-based GPA encoding variant)
 *
 * The high bits (~GPA_MASK) select the region tag; the low bits (GPA_MASK)
 * hold the offset:
 *   GPA_ADDR_PROC  -> identity mapping (offset is the HVA)
 *   GPA_ADDR_MAP   -> offset into the host mmap region
 *   GPA_ADDR_STACK -> offset rebased onto the host stack's high bits
 *
 * Returns GPA_ADDR_INVAL for an unrecognized tag.
 */
static unsigned long gpa_to_hva(struct vmx_vcpu *vcpu,
				struct mm_struct *mm,
				unsigned long addr)
{
	unsigned long region = addr & ~GPA_MASK;
	unsigned long off = addr & GPA_MASK;

	if (region == GPA_ADDR_PROC)
		return off;
	if (region == GPA_ADDR_MAP)
		return off + LG_ALIGN(mm->mmap_base) - GPA_SIZE;
	if (region == GPA_ADDR_STACK)
		return off | (mm->start_stack & ~GPA_MASK);

	return GPA_ADDR_INVAL;
}
static unsigned long hva_to_gpa(struct vmx_vcpu *vcpu, struct mm_struct *mm, unsigned long addr) { uintptr_t mmap_start; if (!mm) { printk(KERN_ERR "ept: proc has no MM %d\n", current->pid); return GPA_ADDR_INVAL; } BUG_ON(!mm); mmap_start = LG_ALIGN(mm->mmap_base) - GPA_SIZE; if ((addr & ~GPA_MASK) == 0) return (addr & GPA_MASK) | GPA_ADDR_PROC; else if (addr < LG_ALIGN(mm->mmap_base) && addr >= mmap_start) return (addr - mmap_start) | GPA_ADDR_MAP; else if ((addr & ~GPA_MASK) == (mm->start_stack & ~GPA_MASK)) return (addr & GPA_MASK) | GPA_ADDR_STACK; else return GPA_ADDR_INVAL; }