Code example #1
0
File: cap_delete.c  Project: achreto/barrelfish
/**
 * \brief Mark capabilities for a revoke operation.
 * \param base The data for the capability being revoked
 * \param revoked The revoke target if it is on this core. This specific
 *        capability copy will not be marked. If supplied, is_copy(base,
 *        &revoked->cap) must hold.
 * \returns
 *        - SYS_ERR_CAP_NOT_FOUND if no copies or descendants are present
 *          on this core.
 *        - SYS_ERR_OK otherwise.
 */
errval_t caps_mark_revoke(struct capability *base, struct cte *revoked)
{
    assert(base);
    // a supplied revoke target must be owned by this core
    assert(!revoked || revoked->mdbnode.owner == my_core_id);

    // to avoid multiple mdb_find_greater, we store the predecessor of the
    // current position
    struct cte *prev = mdb_find_greater(base, true), *next = NULL;
    if (!prev || !(is_copy(base, &prev->cap)
                   || is_ancestor(&prev->cap, base)))
    {
        // neither a copy nor a descendant of base exists locally
        return SYS_ERR_CAP_NOT_FOUND;
    }

    // first loop: mark the copies of base that follow prev in the MDB
    // (copies are ordered adjacently); prev itself is handled at the end
    for (next = mdb_successor(prev);
         next && is_copy(base, &next->cap);
         next = mdb_successor(prev))
    {
        // note: if next is a copy of base, prev will also be a copy
        if (next == revoked) {
            // do not delete the revoked capability, use it as the new prev
            // instead, and delete the old prev.
            next = prev;
            prev = revoked;
        }
        // only foreign copies remain unless a local revoke target was given
        assert(revoked || next->mdbnode.owner != my_core_id);
        caps_mark_revoke_copy(next);
    }

    // second loop: mark all descendants of base that follow prev
    for (next = mdb_successor(prev);
         next && is_ancestor(&next->cap, base);
         next = mdb_successor(prev))
    {
        caps_mark_revoke_generic(next);
        if (next->cap.type) {
            // the cap has not been deleted, so we must use it as the new prev
            prev = next;
        }
    }

    // finally, mark prev itself, unless it is the revoke target or is
    // already in the middle of a delete
    if (prev != revoked && !prev->mdbnode.in_delete) {
        if (is_copy(base, &prev->cap)) {
            caps_mark_revoke_copy(prev);
        }
        else {
            // due to early termination of the loop condition, prev must be a
            // descendant
            assert(is_ancestor(&prev->cap, base));
            caps_mark_revoke_generic(prev);
        }
    }

    return SYS_ERR_OK;
}
Code example #2
0
// TODO: XXX: multiple mappings?
/**
 * \brief Find the page-table (VNode) capability a frame mapping refers to.
 *
 * \param mapping_cte CTE holding a frame mapping capability; must be non-NULL.
 * \param next        Out: on SYS_ERR_OK, a CTE whose cap is of VNode type.
 *
 * \returns
 *        - SYS_ERR_VNODE_NOT_INSTALLED if the mapping has no live ptable.
 *        - SYS_ERR_VNODE_LOOKUP_NEXT if neither the ptable cap nor any of
 *          its MDB-adjacent copies is of VNode type.
 *        - SYS_ERR_OK otherwise.
 */
static inline errval_t find_next_ptable(struct cte *mapping_cte, struct cte **next)
{
    assert(mapping_cte);
    struct Frame_Mapping *mapping = &mapping_cte->cap.u.frame_mapping;

    if (!mapping->ptable || mapping->ptable->cap.type == ObjType_Null)
    {
        return SYS_ERR_VNODE_NOT_INSTALLED;
    }
    *next = mapping->ptable;

    if (!type_is_vnode((*next)->cap.type)) {
        // check if there's a copy of *next that is a vnode, and return that
        // copy, if found. Copies are adjacent in the MDB, so scan both
        // directions from *next.
        // NOTE: mdb_predecessor/mdb_successor may return NULL at the ends
        // of the database, so guard before dereferencing.
        struct cte *tmp = mdb_predecessor(*next);
        while (tmp && is_copy(&tmp->cap, &(*next)->cap)) {
            if (type_is_vnode(tmp->cap.type)) {
                *next = tmp;
                return SYS_ERR_OK;
            }
            tmp = mdb_predecessor(tmp);
        }
        tmp = mdb_successor(*next);
        while (tmp && is_copy(&tmp->cap, &(*next)->cap)) {
            if (type_is_vnode(tmp->cap.type)) {
                *next = tmp;
                return SYS_ERR_OK;
            }
            tmp = mdb_successor(tmp);
        }

        debug(SUBSYS_CAPS, "found cap not a VNode\n");
        // no copy was vnode
        return SYS_ERR_VNODE_LOOKUP_NEXT;
    }
    return SYS_ERR_OK;
}
Code example #3
0
File: monitor.c  Project: MichaelFQuigley/barrelfish
/**
 * \brief Set or clear the lock bit on a capability and all its local copies.
 *
 * Copies are adjacent in the MDB, so we walk predecessors and successors
 * from \p cte until a non-copy is reached.
 *
 * \param cte  The capability slot to (un)lock.
 * \param lock New value for the mdbnode lock bit.
 */
static void sys_lock_cap_common(struct cte *cte, bool lock)
{
    struct cte *pred = cte;
    do {
        pred->mdbnode.locked = lock;
        pred = mdb_predecessor(pred);
        // guard: mdb_predecessor returns NULL at the start of the database
    } while (pred && is_copy(&pred->cap, &cte->cap));

    struct cte *succ = cte;
    do {
        succ->mdbnode.locked = lock;
        succ = mdb_successor(succ);
        // guard: mdb_successor returns NULL at the end of the database
    } while (succ && is_copy(&succ->cap, &cte->cap));
}
Code example #4
0
// Patch a breakpoint instruction into a *copy* of the code.  A copy keeps
// no undo information, so a breakpoint can never be removed from it.
void breakpoint_Relocation::set_copy_active(bool b) {
  assert(is_copy(), "must be operating on a copy");

  if (!b) {
    fatal("cannot remove a breakpoint from a code copy");
  }
  pd_swap_in_breakpoint (addr(), NULL, instrlen());
}
Code example #5
0
File: monitor.c  Project: MichaelFQuigley/barrelfish
/**
 * \brief Syscall handler: look up a capability and return its owning core.
 */
struct sysret sys_get_cap_owner(capaddr_t root_addr, uint8_t root_bits, capaddr_t cptr, uint8_t bits)
{
    struct cte *target = NULL;

    // resolve the (root, cptr) pair to a capability slot
    errval_t err = sys_double_lookup(root_addr, root_bits, cptr, bits, &target);
    if (err_is_fail(err)) {
        printf("%s: error in double_lookup: %"PRIuERRV"\n", __FUNCTION__, err);
        return SYSRET(err);
    }

    struct sysret ret = { .error = SYS_ERR_OK, .value = target->mdbnode.owner };
    return ret;
}

/**
 * \brief Syscall handler: set the owning core of a capability and all of
 *        its local copies.
 *
 * Copies are adjacent in the MDB, so after updating the looked-up slot we
 * walk predecessors and successors until a non-copy is reached.
 */
struct sysret sys_set_cap_owner(capaddr_t root_addr, uint8_t root_bits, capaddr_t cptr, uint8_t bits, coreid_t owner)
{
    errval_t err;

    // resolve the (root, cptr) pair to a capability slot
    struct cte *cte;
    err = sys_double_lookup(root_addr, root_bits, cptr, bits, &cte);
    if (err_is_fail(err)) {
        printf("%s: error in double_lookup: %"PRIuERRV"\n", __FUNCTION__, err);
        return SYSRET(err);
    }

    cte->mdbnode.owner = owner;

    TRACE_CAP(cte);

    struct cte *pred = cte;
    do {
        pred->mdbnode.owner = owner;
        pred = mdb_predecessor(pred);
        // guard: mdb_predecessor returns NULL at the start of the database
    } while (pred && is_copy(&pred->cap, &cte->cap));

    struct cte *succ = cte;
    do {
        succ->mdbnode.owner = owner;
        succ = mdb_successor(succ);
        // guard: mdb_successor returns NULL at the end of the database
    } while (succ && is_copy(&succ->cap, &cte->cap));

    return SYSRET(SYS_ERR_OK);
}
Code example #6
0
// Redirect this call-type relocation to a new target address.
// \param x new call destination
// \param o extra platform-specific operand passed through to
//          pd_set_call_destination -- presumably an offset/format hint;
//          TODO(review): confirm against the platform implementation.
// No-op when built with CORE (no compiled-code cache).
void CallRelocation::set_destination(address x, intptr_t o) {
#ifndef CORE
  #ifdef ASSERT
  if (!is_copy() && type() == relocInfo::runtime_call_type) {
    // the runtime stubs are part of CodeCache, therefore the assertion is not valid
    // assert(!CodeCache::contains(x), "new destination must be external");
  }
  #endif
  pd_set_call_destination(x, o);
#endif // !CORE
}
Code example #7
0
File: cap_delete.c  Project: achreto/barrelfish
/**
 * \brief Delete all copies of a foreign cap.
 *
 * \param cte Slot holding a capability owned by a remote core.
 *
 * \returns
 *        - SYS_ERR_DELETE_REMOTE_LOCAL if this core owns the capability.
 *        - SYS_ERR_OK otherwise (internal failures panic).
 */
errval_t caps_delete_foreigns(struct cte *cte)
{
    errval_t err;
    struct cte *next;
    // refuse to treat a locally-owned capability as foreign
    if (cte->mdbnode.owner == my_core_id) {
        debug(SUBSYS_CAPS, "%s called on %d for %p, owner=%d\n",
                __FUNCTION__, my_core_id, cte, cte->mdbnode.owner);
        return SYS_ERR_DELETE_REMOTE_LOCAL;
    }
    assert(cte->mdbnode.owner != my_core_id);
    // foreign caps can be deleted immediately, so in_delete should never
    // be set on one
    if (cte->mdbnode.in_delete) {
        printk(LOG_WARN,
               "foreign caps with in_delete set,"
               " this should not happen");
    }

    TRACE_CAP_MSG("del copies of", cte);

    // delete all copies following cte in the MDB (copies are adjacent)
    // XXX: should we go predecessor as well?
    for (next = mdb_successor(cte);
         next && is_copy(&cte->cap, &next->cap);
         next = mdb_successor(cte))
    {
        // XXX: should this be == or != ?
        assert(next->mdbnode.owner != my_core_id);
        if (next->mdbnode.in_delete) {
            printk(LOG_WARN,
                   "foreign caps with in_delete set,"
                   " this should not happen");
        }
        err = cleanup_copy(next);
        if (err_is_fail(err)) {
            panic("error while deleting extra foreign copy for remote_delete:"
                  " %"PRIuERRV"\n", err);
        }
    }

    // The capabilities should all be foreign, by nature of the request.
    // Foreign capabilities are rarely locked, since they can be deleted
    // immediately. The only time a foreign capability is locked is during
    // move and retrieve operations. In either case, the lock on the same
    // capability must also be acquired on the owner for the operation to
    // succeed. Thus, we can safely unlock any capability here iff the
    // monitor guarantees that this operation is only executed when the
    // capability is locked on the owner.
    cte->mdbnode.locked = false;
    err = caps_try_delete(cte);
    if (err_is_fail(err)) {
        panic("error while deleting foreign copy for remote_delete:"
              " %"PRIuERRV"\n", err);
    }

    return SYS_ERR_OK;
}
Code example #8
0
// Flip the "enabled" state of this breakpoint.  Disabling also removes any
// physically installed breakpoint instruction first.
void breakpoint_Relocation::set_enabled(bool b) {
  assert(!is_copy(), "cannot change breakpoint state when working on a copy");

  if (enabled() == b)  return;   // already in the requested state

  if (!b) {
    set_active(false);		// remove the actual breakpoint insn, if any
    set_bits(bits() & ~enabled_state);
  } else {
    set_bits(bits() | enabled_state);
  }
}
Code example #9
0
// Re-arm breakpoints after patching has completed.
void PatchingRelocIterator:: postpass() {
  if (is_copy()) {
    // working on a copy of the code: breakpoints stay off
    return;
  }
  (RelocIterator&)(*this) = _init_state;	// reset cursor again
  while (next()) {
    if (type() != relocInfo::breakpoint_type)  continue;
    breakpoint_Relocation* bpt = breakpoint_reloc();
    bpt->set_active(bpt->enabled());            // restore to its enabled state
  }
}
Code example #10
0
void PatchingRelocIterator:: prepass() {
  // turn breakpoints off during patching
  _init_state = (*this);	// save cursor
  while (next()) {
    if (type() == relocInfo::breakpoint_type) {
      if (is_copy())
	breakpoint_reloc()->set_copy_active(false);
      else
	breakpoint_reloc()->set_active(false);
    }
  }
  (RelocIterator&)(*this) = _init_state;	// reset cursor for client
}
Code example #11
0
// Install or remove the physical breakpoint instruction for this relocation.
// Only legal on original code (never a copy); activating additionally
// requires the breakpoint to be enabled.
void breakpoint_Relocation::set_active(bool b) {
  assert(!is_copy(), "cannot change breakpoint state when working on a copy");

  assert(!b || enabled(), "cannot activate a disabled breakpoint");

  if (active() == b)  return;   // nothing to change

  // %%% should probably seize a lock here (might not be the right lock)
  //MutexLockerEx ml_patch(Patching_lock, true);
  //if (active() == b)  return;		// recheck state after locking

  if (!b) {
    // deactivate: clear the flag, then restore the saved instruction
    set_bits(bits() & ~active_state);
    pd_swap_out_breakpoint(addr(), instrs(), instrlen());
    return;
  }

  // activate: set the flag, then patch the breakpoint instruction in
  set_bits(bits() | active_state);
  if (instrlen() == 0) {
    fatal("breakpoints in original code must be undoable");
  }
  pd_swap_in_breakpoint (addr(), instrs(), instrlen());
}
Code example #12
0
// Called when the code containing this jsr relocation moves by 'delta'.
// A jsr is encoded self-relatively, so an internal destination needs no
// adjustment; in debug builds we verify the destination really is internal
// (or that we are looking at a detached copy, where code() bounds do not
// apply).  'delta' is intentionally unused.
void jsr_Relocation::fix_relocation_at_move(intptr_t delta) {
#ifndef CORE
  // a self-relative reference to an internal routine:  no change
  assert(is_copy() || code()->contains(destination()), "destination must be internal");
#endif // CORE
}
Code example #13
0
File: ir_cp.cpp  Project: stevenknown/xoc
//'usevec': scratch vector used to record, per copy statement, the USE
//occurrences of its defined value (local to this function).
//
//Perform copy propagation within basic block 'bb': for each copy statement,
//collect the uses of its result (via SSA info or the classic DU set) and
//replace each legal use with the propagated value.
//Returns true if any IR was changed.
bool IR_CP::doProp(IN IRBB * bb, Vector<IR*> & usevec)
{
    bool change = false;
    C<IR*> * cur_iter, * next_iter;

    for (BB_irlist(bb).get_head(&cur_iter),
         next_iter = cur_iter; cur_iter != NULL; cur_iter = next_iter) {

        IR * def_stmt = cur_iter->val();

        //Advance next_iter now; it may be re-computed below if the next
        //stmt gets removed by refinement.
        BB_irlist(bb).get_next(&next_iter);

        //Only copy statements are propagation candidates.
        if (!is_copy(def_stmt)) { continue; }

        DUSet const* useset = NULL;
        UINT num_of_use = 0;
        SSAInfo * ssainfo = NULL;
        bool ssadu = false;
        if ((ssainfo = def_stmt->get_ssainfo()) != NULL &&
            SSA_uses(ssainfo).get_elem_count() != 0) {
            //Record use_stmt in another vector to facilitate this function
            //if it is not in use-list any more after copy-propagation.
            SEGIter * sc;
            for    (INT u = SSA_uses(ssainfo).get_first(&sc);
                 u >= 0; u = SSA_uses(ssainfo).get_next(u, &sc)) {
                IR * use = m_ru->get_ir(u);
                ASSERT0(use);
                usevec.set(num_of_use, use);
                num_of_use++;
            }
            ssadu = true;
        } else if (def_stmt->get_exact_ref() == NULL &&
                   !def_stmt->is_void()) {
            //Allowing copy propagate exact or VOID value.
            continue;
        } else if ((useset = def_stmt->readDUSet()) != NULL &&
                   useset->get_elem_count() != 0) {
            //Record use_stmt in another vector to facilitate this function
            //if it is not in use-list any more after copy-propagation.
            DUIter di = NULL;
            for (INT u = useset->get_first(&di);
                 u >= 0; u = useset->get_next(u, &di)) {
                IR * use = m_ru->get_ir(u);
                usevec.set(num_of_use, use);
                num_of_use++;
            }
        } else  {
            //No recorded uses for this definition; nothing to propagate.
            continue;
        }

        IR const* prop_value = get_propagated_value(def_stmt);

        for (UINT i = 0; i < num_of_use; i++) {
            IR * use = usevec.get(i);
            ASSERT0(use->is_exp());
            IR * use_stmt = use->get_stmt();
            ASSERT0(use_stmt->is_stmt());

            ASSERT0(use_stmt->get_bb() != NULL);
            IRBB * use_bb = use_stmt->get_bb();
            if (!ssadu &&
                !(bb == use_bb && bb->is_dom(def_stmt, use_stmt, true)) &&
                !m_cfg->is_dom(BB_id(bb), BB_id(use_bb))) {
                //'def_stmt' must dominate 'use_stmt'.
                //e.g:
                //    if (...) {
                //        g = 10; //S1
                //    }
                //    ... = g; //S2
                //g can not be propagated since S1 does not dominate S2.
                continue;
            }

            if (!is_available(def_stmt, prop_value, use_stmt)) {
                //The value that will be propagated can
                //not be killed during 'ir' and 'use_stmt'.
                //e.g:
                //    g = a; //S1
                //    if (...) {
                //        a = ...; //S3
                //    }
                //    ... = g; //S2
                //g can not be propagated since a is killed by S3.
                continue;
            }

            if (!ssadu && !m_du->isExactAndUniqueDef(def_stmt, use)) {
                //Only single definition is allowed.
                //e.g:
                //    g = 20; //S3
                //    if (...) {
                //        g = 10; //S1
                //    }
                //    ... = g; //S2
                //g can not be propagated since there are
                //more than one definitions are able to get to S2.
                continue;
            }

            if (!canBeCandidate(prop_value)) {
                continue;
            }

            CPCtx lchange;
            IR * old_use_stmt = use_stmt;

            replaceExp(use, prop_value, lchange, ssadu);

            ASSERT(use_stmt && use_stmt->is_stmt(),
                    ("ensure use_stmt still legal"));
            change |= CPC_change(lchange);

            if (!CPC_change(lchange)) { continue; }

            //Indicate whether use_stmt is the next stmt of def_stmt.
            bool is_next = false;
            if (next_iter != NULL && use_stmt == next_iter->val()) {
                is_next = true;
            }

            RefineCtx rf;
            use_stmt = m_ru->refineIR(use_stmt, change, rf);
            if (use_stmt == NULL && is_next) {
                //use_stmt has been optimized and removed by refineIR().
                //Recompute next_iter so the outer loop stays valid.
                next_iter = cur_iter;
                BB_irlist(bb).get_next(&next_iter);
            }

            if (use_stmt != NULL && use_stmt != old_use_stmt) {
                //use_stmt has been removed and new stmt generated.
                ASSERT(old_use_stmt->is_undef(), ("the old one should be freed"));

                //Splice the new stmt into the old one's position in its BB.
                C<IR*> * irct = NULL;
                BB_irlist(use_bb).find(old_use_stmt, &irct);
                ASSERT0(irct);
                BB_irlist(use_bb).insert_before(use_stmt, irct);
                BB_irlist(use_bb).remove(irct);
            }
        } //end for each USE
    } //end for IR
    return change;
}