Example #1
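// Try to deliver an asynchronous IPC to a thread running in vCPU mode:
// refuse if the thread is engaged in an IPC operation or has vIRQs
// disabled; otherwise force the vCPU into kernel mode (spilling the user
// state if it is currently executing on this CPU) and trace the event.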
PRIVATE inline
Receiver::Rcv_state
Receiver::vcpu_async_ipc(Sender const *sender) const
{
  if (EXPECT_FALSE(state() & Thread_ipc_mask))
    return Rs_not_receiving;

  Vcpu_state *vcpu = vcpu_state().access();

  if (EXPECT_FALSE(!vcpu_irqs_enabled(vcpu)))
    return Rs_not_receiving;

  Receiver *self = const_cast<Receiver*>(this);

  if (this == current())
    self->spill_user_state();

  if (self->vcpu_enter_kernel_mode(vcpu))
    vcpu = vcpu_state().access();

  LOG_TRACE("VCPU events", "vcpu", this, Vcpu_log,
      l->type = 1;
      l->state = vcpu->_saved_state;
      l->ip = Mword(sender);
      l->sp = regs()->sp();
      l->space = ~0; //vcpu_user_space() ? static_cast<Task*>(vcpu_user_space())->dbg_id() : ~0;
      );
Example #2
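/*
 * Check that the guest's CR4.OSXSAVE bit stays in sync with the OSXSAVE
 * CPUID bit: at every sync point the host clears CR4.OSXSAVE behind the
 * guest's back, and the guest aborts if CPUID disagrees with CR4.
 */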
int main(int argc, char *argv[])
{
	struct kvm_run *run;
	struct kvm_vm *vm;
	struct kvm_sregs sregs;
	struct kvm_cpuid_entry2 *entry;
	struct ucall uc;
	int rc;

	entry = kvm_get_supported_cpuid_entry(1);
	if (!(entry->ecx & X86_FEATURE_XSAVE)) {
		printf("XSAVE feature not supported, skipping test\n");
		return 0;
	}

	/* Tell stdout not to buffer its content */
	setbuf(stdout, NULL);

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
	run = vcpu_state(vm, VCPU_ID);

	while (1) {
		rc = _vcpu_run(vm, VCPU_ID);

		TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Unexpected exit reason: %u (%s),\n",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_SYNC:
			/* emulate hypervisor clearing CR4.OSXSAVE */
			vcpu_sregs_get(vm, VCPU_ID, &sregs);
			sregs.cr4 &= ~X86_CR4_OSXSAVE;
			vcpu_sregs_set(vm, VCPU_ID, &sregs);
			break;
		case UCALL_ABORT:
			TEST_ASSERT(false, "Guest CR4 bit (OSXSAVE) unsynchronized with CPUID bit.");
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
		}
	}

done:
	kvm_vm_free(vm);
	return 0;
}
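Example #3

// Reflect a page fault into the vCPU: if the vCPU has page faults enabled,
// enter kernel mode, record the fault address and error code in the saved
// trap state and deliver an upcall to the vCPU entry vector.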
bool
Thread::vcpu_pagefault(Address pfa, Mword err, Mword ip)
{
  (void)ip;
  Vcpu_state *vcpu = vcpu_state().access();
  if (vcpu_pagefaults_enabled(vcpu))
    {
      spill_user_state();
      vcpu_enter_kernel_mode(vcpu);
      LOG_TRACE("VCPU events", "vcpu", this, Vcpu_log,
	  l->type = 3;
	  l->state = vcpu->_saved_state;
	  l->ip = ip;
	  l->sp = pfa;
	  l->space = vcpu_user_space() ? static_cast<Task*>(vcpu_user_space())->dbg_id() : ~0;
	  );
      vcpu->_ts.set_pagefault(pfa, err);
      vcpu_save_state_and_upcall();
      return true;
    }

  return false;
}
Example #4
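/*
 * Save/restore test: run the guest to a sync point, snapshot the vCPU
 * state (including nested VMX state when KVM_CAP_NESTED_STATE is
 * available), tear the VM down, restore the snapshot into a fresh VM and
 * verify that the register state survives the round trip.
 */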
int main(int argc, char *argv[])
{
	struct vmx_pages *vmx_pages = NULL;
	vm_vaddr_t vmx_pages_gva = 0;

	struct kvm_regs regs1, regs2;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_x86_state *state;
	struct ucall uc;
	int stage;

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
	run = vcpu_state(vm, VCPU_ID);

	vcpu_regs_get(vm, VCPU_ID, &regs1);

	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
		vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
		vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
	} else {
		printf("will skip nested state checks\n");
		vcpu_args_set(vm, VCPU_ID, 1, 0);
	}

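	/*
	 * One iteration per guest stage: run the guest to its next ucall,
	 * validate it, then snapshot the vCPU, tear the VM down, restore the
	 * snapshot into a fresh VM and compare register state.
	 */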
	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Unexpected exit reason: %u (%s),\n",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vm, VCPU_ID, &regs1);
		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
				    __FILE__, uc.args[1]);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
		}

		/* UCALL_SYNC is handled here.  */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx",
			    (ulong)stage, (ulong)uc.args[1]);

		state = vcpu_save_state(vm, VCPU_ID);
		kvm_vm_release(vm);

		/* Restore state in a new VM.  */
		kvm_vm_restart(vm, O_RDWR);
		vm_vcpu_add(vm, VCPU_ID, 0, 0);
		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
		vcpu_load_state(vm, VCPU_ID, state);
		run = vcpu_state(vm, VCPU_ID);
		free(state);

		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vm, VCPU_ID, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}

done:
	kvm_vm_free(vm);
}
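// vCPU resume syscall: bind the requested user task, map any send items
// into it, deliver a pending vIRQ as an upcall if vIRQs are enabled, and
// finally switch the thread to the vCPU's saved state.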
PRIVATE inline
L4_msg_tag
Thread_object::sys_vcpu_resume(L4_msg_tag const &tag, Utcb *utcb)
{
  if (this != current() || !(state() & Thread_vcpu_enabled))
    return commit_result(-L4_err::EInval);

  Space *s = space();
  Vcpu_state *vcpu = vcpu_state().access(true);

  L4_obj_ref user_task = vcpu->user_task;
  if (user_task.valid())
    {
      L4_fpage::Rights task_rights = L4_fpage::Rights(0);
      Task *task = Kobject::dcast<Task*>(s->lookup_local(user_task.cap(),
                                                         &task_rights));

      if (EXPECT_FALSE(task && !(task_rights & L4_fpage::Rights::W())))
        return commit_result(-L4_err::EPerm);

      if (task != vcpu_user_space())
        vcpu_set_user_space(task);

      vcpu->user_task = L4_obj_ref();
    }
  else if (user_task.op() == L4_obj_ref::Ipc_reply)
    vcpu_set_user_space(0);

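  // Map the send items from the UTCB into the vCPU user task; the CPU lock
  // is dropped around fpage_map() because mapping can take a while.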
  L4_snd_item_iter snd_items(utcb, tag.words());
  int items = tag.items();
  if (vcpu_user_space())
    for (; items && snd_items.more(); --items)
      {
        if (EXPECT_FALSE(!snd_items.next()))
          break;

        Lock_guard<Lock> guard;
        if (!guard.check_and_lock(&static_cast<Task *>(vcpu_user_space())->existence_lock))
          return commit_result(-L4_err::ENoent);

        cpu_lock.clear();

        L4_snd_item_iter::Item const *const item = snd_items.get();
        L4_fpage sfp(item->d);

        Reap_list rl;
        L4_error err = fpage_map(space(), sfp,
                                 vcpu_user_space(), L4_fpage::all_spaces(),
                                 item->b, &rl);
        rl.del();

        cpu_lock.lock();

        if (EXPECT_FALSE(!err.ok()))
          return commit_error(utcb, err);
      }

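  // A vIRQ is pending while the vCPU has IRQs enabled: receive it with a
  // zero timeout and deliver it as an IPC upcall instead of resuming.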
  if ((vcpu->_saved_state & Vcpu_state::F_irqs)
      && (vcpu->sticky_flags & Vcpu_state::Sf_irq_pending))
    {
      assert_kdb(cpu_lock.test());
      do_ipc(L4_msg_tag(), 0, 0, true, 0,
	     L4_timeout_pair(L4_timeout::Zero, L4_timeout::Zero),
	     &vcpu->_ipc_regs, L4_fpage::Rights::FULL());

      vcpu = vcpu_state().access(true);

      if (EXPECT_TRUE(!vcpu->_ipc_regs.tag().has_error()
	              || this->utcb().access(true)->error.error() == L4_error::R_timeout))
	{
	  vcpu->_ts.set_ipc_upcall();

	  Address sp;

	  // tried to resume to user mode, so an IRQ enters from user mode
	  if (vcpu->_saved_state & Vcpu_state::F_user_mode)
            sp = vcpu->_entry_sp;
	  else
            sp = vcpu->_ts.sp();

          arch_load_vcpu_kern_state(vcpu, true);

	  LOG_TRACE("VCPU events", "vcpu", this, Vcpu_log,
	      l->type = 4;
	      l->state = vcpu->state;
	      l->ip = vcpu->_entry_ip;
	      l->sp = sp;
	      l->space = static_cast<Task*>(_space.vcpu_aware())->dbg_id();
	      );
	  if (vcpu->_saved_state & Vcpu_state::F_user_mode)
            sp = vcpu->_entry_sp;
	  else
            sp = vcpu->_ts.sp();

          arch_load_vcpu_kern_state(vcpu, true);

	  LOG_TRACE("VCPU events", "vcpu", this, Vcpu_log,
	      l->type = 4;
	      l->state = vcpu->state;
	      l->ip = vcpu->_entry_ip;
	      l->sp = sp;
	      l->space = static_cast<Task*>(_space.vcpu_aware())->dbg_id();
	      );

	  fast_return_to_user(vcpu->_entry_ip, sp, vcpu_state().usr().get());
	}
    }

  vcpu->state = vcpu->_saved_state;
  Task *target_space = nonull_static_cast<Task*>(space());
  bool user_mode = false;

  if (vcpu->state & Vcpu_state::F_user_mode)
    {
      if (!vcpu_user_space())
        return commit_result(-L4_err::ENoent);

      user_mode = true;

      if (!(vcpu->state & Vcpu_state::F_fpu_enabled))
Example #7
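/*
 * Variant of the save/restore test with Hyper-V enlightened VMCS enabled:
 * checks the EVMCS version range reported by KVM, then cycles the guest
 * through the same save/teardown/restore stages.
 */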
int main(int argc, char *argv[])
{
	vm_vaddr_t vmx_pages_gva = 0;

	struct kvm_regs regs1, regs2;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_x86_state *state;
	struct ucall uc;
	int stage;
	uint16_t evmcs_ver;
	struct kvm_enable_cap enable_evmcs_cap = {
		.cap = KVM_CAP_HYPERV_ENLIGHTENED_VMCS,
		 .args[0] = (unsigned long)&evmcs_ver
	};

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code);

	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

	if (!kvm_check_cap(KVM_CAP_NESTED_STATE) ||
	    !kvm_check_cap(KVM_CAP_HYPERV_ENLIGHTENED_VMCS)) {
		printf("capabilities not available, skipping test\n");
		exit(KSFT_SKIP);
	}

	vcpu_ioctl(vm, VCPU_ID, KVM_ENABLE_CAP, &enable_evmcs_cap);

	/* KVM should return supported EVMCS version range */
	TEST_ASSERT(((evmcs_ver >> 8) >= (evmcs_ver & 0xff)) &&
		    (evmcs_ver & 0xff) > 0,
		    "Incorrect EVMCS version range: %x:%x\n",
		    evmcs_ver & 0xff, evmcs_ver >> 8);

	run = vcpu_state(vm, VCPU_ID);

	vcpu_regs_get(vm, VCPU_ID, &regs1);

	vcpu_alloc_vmx(vm, &vmx_pages_gva);
	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);

	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s),\n",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_ASSERT(false, "%s at %s:%d", (const char *)uc.args[0],
				    __FILE__, uc.args[1]);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
		}

		/* UCALL_SYNC is handled here.  */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage, "Unexpected register values vmexit #%lx, got %lx",
			    (ulong)stage, (ulong)uc.args[1]);

		state = vcpu_save_state(vm, VCPU_ID);
		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vm, VCPU_ID, &regs1);

		kvm_vm_release(vm);

		/* Restore state in a new VM.  */
		kvm_vm_restart(vm, O_RDWR);
		vm_vcpu_add(vm, VCPU_ID, 0, 0);
		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
		vcpu_load_state(vm, VCPU_ID, state);
		run = vcpu_state(vm, VCPU_ID);
		free(state);

		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vm, VCPU_ID, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}

done:
	kvm_vm_free(vm);
}