Example No. 1
PRIVATE inline
bool NO_INSTRUMENT
Switch_lock::set_lock_owner(Context *o)
{
  // True if o currently holds no Switch_locks at all.
  bool have_no_locks = access_once(&o->_lock_cnt) < 1;

  if (have_no_locks)
    {
      assert_kdb (current_cpu() == o->home_cpu());
      // First lock for o: spin until we can claim the running-under-lock
      // flag with an atomic compare-and-swap.
      for (;;)
        {
          if (EXPECT_FALSE(access_once(&o->_running_under_lock)))
            continue;
          if (EXPECT_TRUE(mp_cas(&o->_running_under_lock, Mword(false), Mword(true))))
            break;
        }
    }
  else
    assert_kdb (o->_running_under_lock);

  // Make the flag store visible before publishing ourselves as owner.
  Mem::mp_wmb();

  if (EXPECT_FALSE(!mp_cas(&_lock_owner, Mword(0), Address(o))))
    {
      // Lost the race for the lock: roll back the flag if we set it.
      if (have_no_locks)
        {
          Mem::mp_wmb();
          write_now(&o->_running_under_lock, Mword(false));
        }
      return false;
    }

  return true;
}
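
The acquire loop above is the classic test-and-test-and-set pattern: read the flag cheaply first, and only attempt the atomic claim when it looks free. Below is a minimal user-level sketch of the same pattern, assuming standard C++ atomics in place of the kernel's access_once/mp_cas primitives; the name acquire_running_flag is hypothetical, for illustration only.

#include <atomic>

// Hypothetical stand-in for Context::_running_under_lock.
static std::atomic<bool> running_under_lock{false};

void acquire_running_flag()
{
  for (;;)
    {
      // Cheap read first; avoids hammering the cache line with CAS.
      if (running_under_lock.load(std::memory_order_relaxed))
        continue;

      bool expected = false;
      if (running_under_lock.compare_exchange_weak(expected, true,
                                                   std::memory_order_acquire))
        break;
    }
}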
Example No. 2
bool
Sched_context::deblock(unsigned cpu, Sched_context *crs, bool lazy_q = false)
{
  assert_kdb(cpu_lock.test());

  Sched_context *cs = rq(cpu).current_sched();
  bool res = true;
  if (this == cs)
    {
      // We are the current scheduling context: we lose only if the
      // current candidate (crs) has higher priority.
      if (crs->dominates(this))
        res = false;
    }
  else
    {
      deblock_refill(cpu);

      // We lose if either the current scheduling context or the
      // candidate dominates us.
      if ((EXPECT_TRUE(cs != 0) && cs->dominates(this)) || crs->dominates(this))
        res = false;
    }

  if (res && lazy_q)
    return true;   // caller switches to us directly; skip the ready queue

  ready_enqueue(cpu);
  return res;
}
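
deblock decides whether a woken scheduling context should run immediately: dominates() is a priority comparison, and lazy_q lets the caller skip the ready queue when the woken context wins anyway. The following is a simplified standalone sketch of that decision; Ent and deblock_sketch are hypothetical stand-ins for Sched_context, and the two cases of the original are collapsed into one check.

struct Ent
{
  int prio;
  bool dominates(Ent const *o) const { return prio > o->prio; }
};

// Returns true if 'woken' should run now; enqueues it unless the
// caller can switch to it directly (lazy_q).
bool deblock_sketch(Ent *woken, Ent *current, Ent *candidate, bool lazy_q)
{
  bool wins = !(current && current->dominates(woken))
              && !(candidate && candidate->dominates(woken));
  if (wins && lazy_q)
    return true;              // direct switch, no enqueue needed
  // ready_enqueue(woken) would go here
  return wins;
}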
Example No. 3
template< typename SPACE >
void
Obj_space_virt<SPACE>::caps_free()
{
  Mem_space *ms = SPACE::mem_space(this);
  if (EXPECT_FALSE(!ms || !ms->dir()))
    return;

  Kmem_alloc *a = Kmem_alloc::allocator();
  // Return every allocated capability page to the quota-charged allocator.
  for (Cap_index i = Cap_index(0); i < obj_map_max_address();
       i += Cap_diff(Obj::Caps_per_page))
    {
      Entry *c = get_cap(i);
      if (!c)
        continue;

      // Translate the cap page back into a freeable kernel virtual address.
      Address cp = Address(ms->virt_to_phys(Address(c)));
      assert_kdb (cp != ~0UL);
      void *cv = (void*)Mem_layout::phys_to_pmem(cp);
      Obj::remove_cap_page_dbg_info(cv);

      a->q_unaligned_free(SPACE::ram_quota(this), Config::PAGE_SIZE, cv);
    }
  // Finally tear down the page tables covering the capability area.
  ms->dir()->destroy(Virt_addr(Mem_layout::Caps_start),
                     Virt_addr(Mem_layout::Caps_end-1),
                     Pdir::Super_level,
                     Pdir::Depth,
                     Kmem_alloc::q_allocator(SPACE::ram_quota(this)));
}
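
The loop walks the capability space in cap-page strides and frees every populated page before the page tables themselves are destroyed. Here is a stripped-down sketch of that stride walk, assuming a hypothetical get_cap_page lookup and plain free() in place of the quota-charged kernel allocator:

#include <cstddef>
#include <cstdlib>

enum { Page_size = 4096, Caps_per_page = Page_size / sizeof(void *) };

void caps_free_sketch(std::size_t max_index,
                      void *(*get_cap_page)(std::size_t))
{
  // Visit one capability page per iteration; unpopulated slots are null.
  for (std::size_t i = 0; i < max_index; i += Caps_per_page)
    {
      void *page = get_cap_page(i);
      if (!page)
        continue;
      std::free(page);   // kernel version: q_unaligned_free with quota
    }
}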
Example No. 4
template< typename SPACE >
void
Generic_obj_space<SPACE>::caps_free()
{
  Mem_space *ms = mem_space();
  if (EXPECT_FALSE(!ms || !ms->dir()))
    return;

  Mapped_allocator *a = Mapped_allocator::allocator();
  for (unsigned long i = 0; i < map_max_address().value();
       i += Caps_per_page)
    {
      Entry *c = get_cap(i);
      if (!c)
	continue;

      Address cp = Address(ms->virt_to_phys(Address(c)));
      assert_kdb (cp != ~0UL);
      void *cv = (void*)Mem_layout::phys_to_pmem(cp);
      remove_dbg_info(cv);

      a->q_unaligned_free(ram_quota(), Config::PAGE_SIZE, cv);
    }
#if defined (CONFIG_ARM)
  ms->dir()->free_page_tables((void*)Mem_layout::Caps_start, (void*)Mem_layout::Caps_end);
#else
  ms->dir()->Pdir::alloc_cast<Mem_space_q_alloc>()
    ->destroy(Virt_addr(Mem_layout::Caps_start),
              Virt_addr(Mem_layout::Caps_end), Pdir::Depth - 1,
              Mem_space_q_alloc(ram_quota(), Mapped_allocator::allocator()));
#endif
}
Example No. 5
PUBLIC
void
Thread_object::destroy(Kobject ***rl)
{
  // Unregister the kobject; pieces to free go onto the reap list *rl.
  Kobject::destroy(rl);
  // The thread must actually die at this point.
  check_kdb(kill());
  assert_kdb(_magic == magic);
}
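
The rl argument is a reap list: destroy() only unlinks the object and records what must be freed, and the memory is released later, once all locks are dropped. Below is a minimal sketch of that deferred-deletion idea with hypothetical types (not Fiasco's actual Kobject machinery):

#include <vector>

struct Obj { virtual ~Obj() {} };

// Phase 1: under the lock, unlink and remember the object.
void destroy_deferred(Obj *o, std::vector<Obj *> &reap_list)
{
  // ... unlink o from live data structures here ...
  reap_list.push_back(o);
}

// Phase 2: with no locks held, actually release the memory.
void reap(std::vector<Obj *> &reap_list)
{
  for (Obj *o : reap_list)
    delete o;
  reap_list.clear();
}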
Example No. 6
template< typename SPACE, typename M >
inline
Mword v_delete(M &m, Mword flush_rights, bool full_flush)
{
  SPACE* child_space = m->space();
  assert_opt (child_space);
  Mword res = child_space->v_delete(m.page(), m.size(), flush_rights);
  (void) full_flush;  // only referenced by the assertion below
  // After a full flush, the page must no longer be mapped in the child.
  assert_kdb (full_flush != child_space->v_lookup(m.page()));
  return res;
}
Example No. 7
/** Thread context switch-in.  Called on every re-activation of a thread
    (switch_exec()).  This method is public only because it is called
    from assembly code in switch_cpu().
 */
IMPLEMENT
void
Context::switchin_context(Context *from)
{
  assert_kdb (this == current());
  assert_kdb (state() & Thread_ready_mask);

  // Set kernel-esp in case we want to return to the user.
  // kmem::kernel_sp() returns a pointer to the kernel SP (in the
  // TSS) the CPU uses when next switching from user to kernel mode.
  // regs() + 1 returns a pointer to the end of our kernel stack.
  Cpu::cpus.cpu(cpu()).kernel_sp() = reinterpret_cast<Address>(regs() + 1);

  // switch to our page directory if necessary
  vcpu_aware_space()->switchin_context(from->vcpu_aware_space());

  // load new segment selectors
  load_segments();

  // update the global UTCB pointer to make the thread find its UTCB
  // using fs:[0]
  Mem_layout::user_utcb_ptr(current_cpu()) = utcb().usr();
}
Example No. 8
PUBLIC virtual
void
Thread::modify_label(Mword const *todo, int cnt)
{
  assert_kdb (_snd_regs);
  Mword l = _snd_regs->from_spec();
  // todo holds cnt rules of four words each:
  // (test_mask, test_value, del_mask, add_mask).
  for (int i = 0; i < cnt * 4; i += 4)
    {
      Mword const test_mask = todo[i];
      Mword const test      = todo[i+1];
      if ((l & test_mask) == test)
        {
          Mword const del_mask = todo[i+2];
          Mword const add_mask = todo[i+3];

          // The first matching rule rewrites the label and ends the scan.
          l = (l & ~del_mask) | add_mask;
          _snd_regs->from(l);
          return;
        }
    }
}
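
modify_label scans cnt rules of four words each: (test_mask, test_value, del_mask, add_mask). The first rule whose masked compare matches rewrites the label. The following standalone sketch applies the same rule format; apply_label_rules is a hypothetical helper, not the kernel API.

#include <cstdint>

std::uint64_t apply_label_rules(std::uint64_t label,
                                std::uint64_t const *rules, int cnt)
{
  for (int i = 0; i < cnt * 4; i += 4)
    {
      if ((label & rules[i]) == rules[i + 1])          // test_mask, test_value
        return (label & ~rules[i + 2]) | rules[i + 3]; // del_mask, add_mask
    }
  return label;   // no rule matched: label unchanged
}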
Example No. 9
PRIVATE inline
L4_msg_tag
Thread_object::sys_vcpu_resume(L4_msg_tag const &tag, Utcb *utcb)
{
  if (this != current() || !(state() & Thread_vcpu_enabled))
    return commit_result(-L4_err::EInval);

  Space *s = space();
  Vcpu_state *vcpu = vcpu_state().access(true);

  // If the caller passed a task capability, install it as the vCPU's
  // user task.
  L4_obj_ref user_task = vcpu->user_task;
  if (user_task.valid())
    {
      L4_fpage::Rights task_rights = L4_fpage::Rights(0);
      Task *task = Kobject::dcast<Task*>(s->lookup_local(user_task.cap(),
                                                         &task_rights));

      if (EXPECT_FALSE(task && !(task_rights & L4_fpage::Rights::W())))
        return commit_result(-L4_err::EPerm);

      if (task != vcpu_user_space())
        vcpu_set_user_space(task);

      vcpu->user_task = L4_obj_ref();
    }
  else if (user_task.op() == L4_obj_ref::Ipc_reply)
    vcpu_set_user_space(0);

  // Map the send items from the UTCB into the vCPU user task.
  L4_snd_item_iter snd_items(utcb, tag.words());
  int items = tag.items();
  if (vcpu_user_space())
    for (; items && snd_items.more(); --items)
      {
        if (EXPECT_FALSE(!snd_items.next()))
          break;

        Lock_guard<Lock> guard;
        if (!guard.check_and_lock(&static_cast<Task *>(vcpu_user_space())->existence_lock))
          return commit_result(-L4_err::ENoent);

        cpu_lock.clear();

        L4_snd_item_iter::Item const *const item = snd_items.get();
        L4_fpage sfp(item->d);

        Reap_list rl;
        L4_error err = fpage_map(space(), sfp,
                                 vcpu_user_space(), L4_fpage::all_spaces(),
                                 item->b, &rl);
        rl.del();

        cpu_lock.lock();

        if (EXPECT_FALSE(!err.ok()))
          return commit_error(utcb, err);
      }

  // If the vCPU has IRQs enabled and one is pending, receive it with a
  // zero timeout so it can be delivered as an upcall before resuming.
  if ((vcpu->_saved_state & Vcpu_state::F_irqs)
      && (vcpu->sticky_flags & Vcpu_state::Sf_irq_pending))
    {
      assert_kdb(cpu_lock.test());
      do_ipc(L4_msg_tag(), 0, 0, true, 0,
	     L4_timeout_pair(L4_timeout::Zero, L4_timeout::Zero),
	     &vcpu->_ipc_regs, L4_fpage::Rights::FULL());

      vcpu = vcpu_state().access(true);

      if (EXPECT_TRUE(!vcpu->_ipc_regs.tag().has_error()
	              || this->utcb().access(true)->error.error() == L4_error::R_timeout))
	{
	  vcpu->_ts.set_ipc_upcall();

	  Address sp;

	  // tried to resume to user mode, so an IRQ enters from user mode
	  if (vcpu->_saved_state & Vcpu_state::F_user_mode)
            sp = vcpu->_entry_sp;
	  else
            sp = vcpu->_ts.sp();

          arch_load_vcpu_kern_state(vcpu, true);

	  LOG_TRACE("VCPU events", "vcpu", this, Vcpu_log,
	      l->type = 4;
	      l->state = vcpu->state;
	      l->ip = vcpu->_entry_ip;
	      l->sp = sp;
	      l->space = static_cast<Task*>(_space.vcpu_aware())->dbg_id();
	      );