void SialPrinterForTests::do_print_block(const BlockId& id, Block::BlockPtr block, int line_number) {
  int MAX_TO_PRINT = 1024;
  int size = block->size();
  int OUTPUT_ROW_SIZE = block->shape().segment_sizes_[0];
  double* data = block->get_data();
  out_.precision(14);
  out_.setf(std::ios_base::fixed);
  out_ << line_number << ": ";
  if (size == 1) {
    out_ << "printing " << id.str(sip_tables_) << " = ";
    out_ << *(data);
  } else {
    out_ << "printing " << (size < MAX_TO_PRINT ? size : MAX_TO_PRINT);
    out_ << " of " << size << " elements of block " << id.str(sip_tables_); //BlockId2String(id);
    out_ << " in the order stored in memory ";
    int i;
    for (i = 0; i < size && i < MAX_TO_PRINT; ++i) {
      if (i % OUTPUT_ROW_SIZE == 0)
        out_ << std::endl;
      out_ << *(data + i) << " ";
    }
    if (i == MAX_TO_PRINT) {
      out_ << "....";
    }
  }
  out_ << std::endl;
}
Block::BlockPtr ContiguousArrayManager::get_block(const BlockId& block_id, int& rank,
    Block::BlockPtr& contiguous, sip::offset_array_t& offsets) {
  //get contiguous array that contains block block_id, which must exist,
  //and get its selectors and shape
  int array_id = block_id.array_id();
  rank = sip_tables_.array_rank(array_id);
  contiguous = get_array(array_id);
  sip::check(contiguous != NULL, "contiguous array not allocated");
  const sip::index_selector_t& selector = sip_tables_.selectors(array_id);
  BlockShape array_shape = sip_tables_.contiguous_array_shape(array_id); //shape of containing contiguous array

  //get offsets of block_id in the containing array
  for (int i = 0; i < rank; ++i) {
    offsets[i] = sip_tables_.offset_into_contiguous(selector[i], block_id.index_values(i));
  }
  //set offsets of unused indices to 0
  std::fill(offsets + rank, offsets + MAX_RANK, 0);

  //get shape of subblock
  BlockShape block_shape = sip_tables_.shape(block_id);

  //allocate a new block and copy data from contiguous block
  Block::BlockPtr block = new Block(block_shape);
  contiguous->extract_slice(rank, offsets, block);
  return block;
}
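// The call to extract_slice above copies a rectangular sub-block out of the
// containing contiguous array. For reference only, the following self-contained
// sketch shows one way such a copy can be written for a row-major array; it is
// not the actual Block::extract_slice implementation, and all names here
// (copy_sub_block, row_major_strides) are hypothetical.
#include <cstddef>
#include <vector>

// Strides of a row-major array with the given extents (last dimension fastest).
static std::vector<std::size_t> row_major_strides(const std::vector<std::size_t>& extents) {
  std::vector<std::size_t> strides(extents.size(), 1);
  for (int d = static_cast<int>(extents.size()) - 2; d >= 0; --d)
    strides[d] = strides[d + 1] * extents[d + 1];
  return strides;
}

// Copy the sub-block starting at `offsets` with extents `block_extents` from
// `source` (a row-major array with extents `source_extents`) into the dense
// row-major buffer `dest`, which must hold prod(block_extents) doubles.
static void copy_sub_block(const double* source,
                           const std::vector<std::size_t>& source_extents,
                           const std::vector<std::size_t>& offsets,
                           const std::vector<std::size_t>& block_extents,
                           double* dest) {
  const std::vector<std::size_t> strides = row_major_strides(source_extents);
  std::vector<std::size_t> index(block_extents.size(), 0);
  std::size_t total = 1;
  for (std::size_t e : block_extents) total *= e;
  for (std::size_t n = 0; n < total; ++n) {
    std::size_t src_pos = 0;
    for (std::size_t d = 0; d < index.size(); ++d)
      src_pos += (offsets[d] + index[d]) * strides[d];
    *dest++ = source[src_pos];
    // advance the multi-dimensional index, last dimension fastest.
    for (int d = static_cast<int>(index.size()) - 1; d >= 0; --d) {
      if (++index[d] < block_extents[d]) break;
      index[d] = 0;
    }
  }
}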
void LookupInsert(Cache_BlockCFG *cache, BlockId *id)
{
  Assert(id->Kind() == B_Function || id->Kind() == B_Loop);

  String *function = id->Function();
  const char *function_name = function->Value();

  if (!DoLookupTransaction(BODY_DATABASE, function_name, &scratch_buf)) {
    // no CFGs stored for this function; cache a NULL entry for the id.
    id->IncRef(cache);
    cache->Insert(id, NULL);
    return;
  }

  Buffer read_buf(scratch_buf.base, scratch_buf.pos - scratch_buf.base);
  Vector<BlockCFG*> cfg_list;
  BlockCFG::ReadList(&read_buf, &cfg_list);
  scratch_buf.Reset();

  // insert every CFG read for this function under its own id.
  for (size_t ind = 0; ind < cfg_list.Size(); ind++) {
    BlockCFG *cfg = cfg_list[ind];
    BlockId *cfg_id = cfg->GetId();  // distinct from the id argument
    cfg_id->IncRef(cache);
    cfg->MoveRef(NULL, cache);
    cache->Insert(cfg_id, cfg);
  }
}
void BlockCFGCacheAddListWithRefs(const Vector<BlockCFG*> &cfgs)
{
  for (size_t ind = 0; ind < cfgs.Size(); ind++) {
    BlockCFG *cfg = cfgs[ind];
    BlockId *id = cfg->GetId();
    id->IncRef(&BlockCFGCache);
    cfg->IncRef(&BlockCFGCache);
    BlockCFGCache.Insert(id, cfg);
  }
}
// Renders a block id such as "a[1,3]", or "contiguous local a[1:4,3:6]" when
// the array is a contiguous local.
std::string SialPrinterForTests::BlockId2String(const BlockId& id) {
  std::stringstream ss;
  bool contiguous_local = sip_tables_.is_contiguous_local(id.array_id());
  if (contiguous_local)
    ss << "contiguous local ";
  int rank = sip_tables_.array_rank(id.array_id());
  ss << sip_tables_.array_name(id.array_id());
  ss << '[';
  int i;
  for (i = 0; i < rank; ++i) {
    ss << (i == 0 ? "" : ",") << id.index_values(i);
    if (contiguous_local)
      ss << ":" << id.upper_index_values(i);
  }
  ss << ']';
  return ss.str();
}
Block::BlockPtr SialOpsParallel::get_block_for_updating(const BlockId& id) {
  int array_id = id.array_id();
  check(!(sip_tables_.is_distributed(array_id) || sip_tables_.is_served(array_id)),
      "attempting to update distributed or served block", current_line());
  return block_manager_.get_block_for_updating(id);
}
Block::BlockPtr SialOpsParallel::get_block_for_reading(const BlockId& id, int line) {
  int array_id = id.array_id();
  if (sip_tables_.is_distributed(array_id) || sip_tables_.is_served(array_id)) {
    check_and_set_mode(array_id, READ);
    return wait_and_check(block_manager_.get_block_for_reading(id), line);
  }
  return block_manager_.get_block_for_reading(id);
}
bool BlockManager::has_wild_value(const BlockId& id) {
  for (int i = 0; i < MAX_RANK; ++i) {
    if (id.index_values(i) == wild_card_value)
      return true;
  }
  return false;
}
//TODO TEMPORARY FIX WHILE SEMANTICS BEING WORKED OUT
Block::BlockPtr BlockManager::get_block_for_reading(const BlockId& id) {
  Block::BlockPtr blk = block(id);
  sial_check(blk != NULL, "Attempting to read non-existent block " + id.str(sip_tables_),
      current_line());
//#ifdef HAVE_CUDA
//  // Lazy copying of data from gpu to host if needed.
//  lazy_gpu_read_on_host(blk);
//#endif //HAVE_CUDA
  return blk;
}
Block::BlockPtr SialOpsParallel::get_block_for_writing(const BlockId& id, bool is_scope_extent) {
  int array_id = id.array_id();
  if (sip_tables_.is_distributed(array_id) || sip_tables_.is_served(array_id)) {
    check(!is_scope_extent, "sip bug: asking for scope-extent dist or served block");
    check_and_set_mode(array_id, WRITE);
  }
  return block_manager_.get_block_for_writing(id, is_scope_extent);
}
void WherePostcondition::PrintHook(OutStream &out) const
{
  BlockId *id = m_frame->CFG()->GetId();
  Variable *func_var = id->BaseVar();

  PEdge *edge = m_frame->CFG()->GetSingleOutgoingEdge(m_point);

  if (edge->IsLoop()) {
    PEdgeLoop *nedge = edge->AsLoop();
    out << nedge->GetLoopId()->Loop()->Value() << " "
        << func_var->GetName()->Value();
  }
  else {
    PEdgeCall *nedge = edge->AsCall();

    if (Variable *callee = nedge->GetDirectFunction()) {
      // direct call, just one hook function.
      out << "post " << callee->GetName()->Value();
    }
    else {
      // indirect call, one hook function for each callee.
      CallEdgeSet *callees = CalleeCache.Lookup(func_var);
      bool found_callee = false;

      if (callees) {
        for (size_t eind = 0; eind < callees->GetEdgeCount(); eind++) {
          const CallEdge &edge = callees->GetEdge(eind);
          if (edge.where.id == id && edge.where.point == m_point) {
            if (found_callee)
              out << "$";  // add the separator
            found_callee = true;

            out << "post " << edge.callee->GetName()->Value();
          }
        }
      }

      CalleeCache.Release(func_var);
    }
  }
}
/* gets block for reading and writing. The block should already exist. */
Block::BlockPtr BlockManager::get_block_for_updating(const BlockId& id) {
//  std::cout << "calling get_block_for_updating for " << id << current_line() << std::endl << std::flush;
  Block::BlockPtr blk = block(id);
  if (blk == NULL) {
    std::cout << *this;
  }
  sial_check(blk != NULL, "Attempting to update non-existent block " + id.str(sip_tables_),
      current_line());

#ifdef HAVE_CUDA
  // Lazy copying of data from gpu to host if needed.
  lazy_gpu_update_on_host(blk);
#endif

  return blk;
}
void BlockManager::gen(const BlockId& id, int rank, const int pos,
    std::vector<int> prefix /*pass by value*/, int to_append, std::vector<BlockId>& list) {
  if (pos != 0) {
    prefix.push_back(to_append);
  }
  if (pos < rank) {
    int curr_index = id.index_values(pos);
    if (curr_index == wild_card_value) {
      int index_slot = sip_tables_.selectors(id.array_id())[pos];
      int lower = sip_tables_.lower_seg(index_slot);
      int upper = lower + sip_tables_.num_segments(index_slot);
      for (int i = lower; i < upper; ++i) {
        gen(id, rank, pos + 1, prefix, i, list);
      }
    } else {
      gen(id, rank, pos + 1, prefix, curr_index, list);
    }
  } else {
    list.push_back(BlockId(id.array_id(), rank, prefix));
  }
}
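// gen() above expands a block id whose index values may contain wild cards
// into the list of all matching concrete block ids. The same expansion
// pattern, written against plain STL types so it can be compiled and tested
// in isolation, looks roughly like this; WILD, expand, and the range pairs
// are hypothetical stand-ins for wild_card_value and the sip_tables_ lookups.
#include <utility>
#include <vector>

namespace sketch {
const int WILD = -1;  // hypothetical wild-card marker

// pattern: one value per slot, either concrete or WILD.
// ranges:  per-slot half-open ranges [lower, upper) used when a slot is WILD.
void expand(const std::vector<int>& pattern,
            const std::vector<std::pair<int, int> >& ranges,
            std::size_t pos, std::vector<int>& prefix,
            std::vector<std::vector<int> >& out) {
  if (pos == pattern.size()) {
    out.push_back(prefix);  // every slot is bound; emit one concrete id
    return;
  }
  int lo = pattern[pos], hi = pattern[pos] + 1;
  if (pattern[pos] == WILD) {
    lo = ranges[pos].first;
    hi = ranges[pos].second;
  }
  for (int v = lo; v < hi; ++v) {
    prefix.push_back(v);
    expand(pattern, ranges, pos + 1, prefix, out);
    prefix.pop_back();
  }
}
}  // namespace sketch
// For example, pattern {WILD, 3} with the first slot ranging over [1, 5)
// expands to {1,3}, {2,3}, {3,3}, {4,3}.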
// assign the final names to all loops within cfg. loop naming is done after
// the CFGs have been finalized as it depends on the topo ordering of points.
static void FillLoopNames(BlockCFG *cfg, const char *prefix,
                          const Vector<BlockCFG*> &cfg_list)
{
  size_t found_loops = 0;

  for (size_t eind = 0; eind < cfg->GetEdgeCount(); eind++) {
    PEdgeLoop *edge = cfg->GetEdge(eind)->IfLoop();
    if (!edge)
      continue;

    BlockId *loop = edge->GetLoopId();

    // check for a duplicate. there can be multiple summary edges for
    // a loop if we reduced some irreducible loops or if there are
    // isomorphic points in the outer body of a nested loop.
    if (loop->WriteLoop() != NULL)
      continue;

    char name_buf[100];
    snprintf(name_buf, sizeof(name_buf), "%s#%d", prefix, (int) found_loops);

    String *write_name = String::Make(name_buf);
    loop->SetWriteLoop(write_name);
    found_loops++;

    // recurse on the CFG for the loop itself, to get any nested loops.
    bool found = false;
    for (size_t ind = 0; ind < cfg_list.Size(); ind++) {
      if (cfg_list[ind]->GetId() == loop) {
        Assert(!found);
        found = true;
        FillLoopNames(cfg_list[ind], name_buf, cfg_list);
      }
    }
    Assert(found);
  }
}
//TODO optimize this. Can reduce searches in block map.
void SialOpsParallel::get(BlockId& block_id) {

  //check for "data race"
  check_and_set_mode(block_id, READ);

  //if block already exists, or has pending request, just return
  Block::BlockPtr block = block_manager_.block(block_id);
  if (block != NULL)
    return;

  //send get message to block's server, and post receive
  int server_rank = data_distribution_.get_server_rank(block_id);
  int get_tag;
  get_tag = barrier_support_.make_mpi_tag_for_GET();

  sip::check(server_rank >= 0 && server_rank < sip_mpi_attr_.global_size(),
      "invalid server rank", current_line());

  SIP_LOG(std::cout << "W " << sip_mpi_attr_.global_rank()
      << " : sending GET for block " << block_id
      << " to server " << server_rank << std::endl);

  // Construct int array to send to server.
  const int to_send_size = BlockId::MPI_BLOCK_ID_COUNT + 2;
  const int line_num_offset = BlockId::MPI_BLOCK_ID_COUNT;
  const int section_num_offset = line_num_offset + 1;
  int to_send[to_send_size]; // BlockId & line number
  int *serialized_block_id = block_id.to_mpi_array();
  std::copy(serialized_block_id + 0,
      serialized_block_id + BlockId::MPI_BLOCK_ID_COUNT, to_send);
  to_send[line_num_offset] = current_line();
  to_send[section_num_offset] = barrier_support_.section_number();

  SIPMPIUtils::check_err(
      MPI_Send(to_send, to_send_size, MPI_INT, server_rank, get_tag, MPI_COMM_WORLD));

  //allocate block, and insert in block map, using block data as buffer
  block = block_manager_.get_block_for_writing(block_id, true);

  //post an asynchronous receive and store the request in the block's state
  MPI_Request request;
  SIPMPIUtils::check_err(
      MPI_Irecv(block->get_data(), block->size(), MPI_DOUBLE, server_rank,
          get_tag, MPI_COMM_WORLD, &request));
  block->state().mpi_request_ = request;
}
/**
 * A put appears in a SIAL program as
 *     put target(i,j,k,l) += source(i,j,k,l)
 * So we need the target block id, but the source block data.
 * Accumulation is done by the server.
 *
 * The implementation will be more complicated if asynchronous send is used.
 *
 * @param target
 * @param source_ptr
 */
void SialOpsParallel::put_accumulate(BlockId& target_id,
    const Block::BlockPtr source_block) {

  //partial check for data races
  check_and_set_mode(target_id, WRITE);

  //send message with target block's id to server
  int my_rank = sip_mpi_attr_.global_rank();
  int server_rank = data_distribution_.get_server_rank(target_id);

  int put_accumulate_tag, put_accumulate_data_tag;
  put_accumulate_tag = barrier_support_.make_mpi_tags_for_PUT_ACCUMULATE(
      put_accumulate_data_tag);

  sip::check(server_rank >= 0 && server_rank < sip_mpi_attr_.global_size(),
      "invalid server rank", current_line());

  SIP_LOG(std::cout << "W " << sip_mpi_attr_.global_rank()
      << " : sending PUT_ACCUMULATE for block " << target_id
      << " to server " << server_rank << std::endl);

  // Construct int array to send to server.
  const int to_send_size = BlockId::MPI_BLOCK_ID_COUNT + 2;
  const int line_num_offset = BlockId::MPI_BLOCK_ID_COUNT;
  const int section_num_offset = line_num_offset + 1;
  int to_send[to_send_size]; // BlockId & line number
  int *serialized_block_id = target_id.to_mpi_array();
  std::copy(serialized_block_id + 0,
      serialized_block_id + BlockId::MPI_BLOCK_ID_COUNT, to_send);
  to_send[line_num_offset] = current_line();
  to_send[section_num_offset] = barrier_support_.section_number();

  //send block id
  SIPMPIUtils::check_err(
      MPI_Send(to_send, to_send_size, MPI_INT, server_rank,
          put_accumulate_tag, MPI_COMM_WORLD));
  //immediately follow with the data
  SIPMPIUtils::check_err(
      MPI_Send(source_block->get_data(), source_block->size(), MPI_DOUBLE,
          server_rank, put_accumulate_data_tag, MPI_COMM_WORLD));

  //ack
  ack_handler_.expect_ack_from(server_rank, put_accumulate_data_tag);

  SIP_LOG(std::cout << "W " << sip_mpi_attr_.global_rank()
      << " : Done with PUT_ACCUMULATE for block " << target_id
      << " to server rank " << server_rank << std::endl);
}
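// For context only: the server side has to receive the metadata message and
// then the matching data message sent above. This sketch shows the general
// shape of that pairing with plain MPI calls; it is not the SIP server
// implementation, and receive_put_accumulate_sketch and its parameters are
// hypothetical.
#include <mpi.h>
#include <vector>

void receive_put_accumulate_sketch(int meta_tag, int data_tag, int id_count) {
  MPI_Status status;

  // metadata message: serialized block id, then line number, then section number.
  std::vector<int> meta(id_count + 2);
  MPI_Recv(meta.data(), static_cast<int>(meta.size()), MPI_INT, MPI_ANY_SOURCE,
           meta_tag, MPI_COMM_WORLD, &status);
  int worker = status.MPI_SOURCE;
  int line_number = meta[id_count];
  int section_number = meta[id_count + 1];

  // the data payload follows from the same worker on the paired data tag.
  MPI_Probe(worker, data_tag, MPI_COMM_WORLD, &status);
  int count = 0;
  MPI_Get_count(&status, MPI_DOUBLE, &count);
  std::vector<double> data(count);
  MPI_Recv(data.data(), count, MPI_DOUBLE, worker, data_tag,
           MPI_COMM_WORLD, &status);

  // ...accumulate `data` into the block identified by meta[0..id_count)...
  (void)line_number;
  (void)section_number;
}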
bool CheckFrame(CheckerState *state, CheckerFrame *frame,
                CheckerPropagate *propagate)
{
  Assert(!state->GetReportKind());

  BlockMemory *mcfg = frame->Memory();
  BlockCFG *cfg = mcfg->GetCFG();
  BlockId *id = cfg->GetId();

  if (checker_verbose.IsSpecified()) {
    logout << "CHECK: " << frame << ": Entering " << id << endl;
    if (propagate)
      propagate->Print();
  }

  Where *where = propagate ? propagate->m_where : NULL;

  // check if we should terminate the search at this point (with or without
  // generating a report).
  if (where && where->IsNone()) {
    WhereNone *nwhere = where->AsNone();
    ReportKind kind = nwhere->GetReportKind();

    if (kind == RK_None) {
      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame << ": Ignoring" << endl;
      return false;
    }
    else {
      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame << ": Propagation failed" << endl;
      state->SetReport(kind);
      return true;
    }
  }

  // check for other propagations on the stack with frames for the same block,
  // and block the recursion if we exceed the checker's depth. we assume that
  // if we're ever going to terminate in the presence of recursion, we will
  // do so quickly.
  if (propagate) {
    if (uint32_t depth = checker_depth.UIntValue()) {
      Vector<CheckerFrame*> recurse_frames;

      for (size_t ind = 0; ind < state->m_stack.Size(); ind++) {
        CheckerFrame *other_frame = state->m_stack[ind]->m_frame;
        if (other_frame != frame && other_frame->Memory() == mcfg &&
            !recurse_frames.Contains(other_frame))
          recurse_frames.PushBack(other_frame);
      }

      if (recurse_frames.Size() >= depth) {
        state->SetReport(RK_Recursion);
        return true;
      }
    }
  }

  // check if we are propagating into some callee.
  if (where && where->IsPostcondition()) {
    WherePostcondition *nwhere = where->AsPostcondition();

    // expand the callee at the specified point.
    PPoint point = nwhere->GetPoint();
    PEdge *edge = cfg->GetSingleOutgoingEdge(point);

    if (edge->IsLoop()) {
      // expanding data from a loop. first try the case that the loop
      // does not execute at all.

      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame
               << ": Trying to skip loop at " << point << endl;

      state->PushContext();

      if (CheckSkipLoop(state, frame, point, nwhere))
        return true;

      state->PopContext();
    }

    if (BlockId *callee = edge->GetDirectCallee()) {
      // easy case, there is only a single callee.

      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame
               << ": Expanding single callee at " << point
               << ": " << callee << endl;

      state->PushContext();

      if (CheckSingleCallee(state, frame, point, nwhere, callee, true))
        return true;

      state->PopContext();
    }
    else {
      // iterate through all the possible callees

      Variable *function = id->BaseVar();
      CallEdgeSet *callees = CalleeCache.Lookup(function);

      Vector<Variable*> callee_vars;

      if (callees) {
        for (size_t eind = 0; eind < callees->GetEdgeCount(); eind++) {
          const CallEdge &edge = callees->GetEdge(eind);
          if (edge.where.id == id && edge.where.point == point)
            callee_vars.PushBack(edge.callee);
        }
      }

      SortVector<Variable*,Variable>(&callee_vars);

      for (size_t cind = 0; cind < callee_vars.Size(); cind++) {
        Variable *callee = callee_vars[cind];

        if (checker_verbose.IsSpecified())
          logout << "CHECK: " << frame
                 << ": Expanding indirect callee at " << point
                 << ": " << callee << endl;

        callee->IncRef();
        BlockId *callee_id = BlockId::Make(B_Function, callee);

        state->PushContext();

        if (CheckSingleCallee(state, frame, point, nwhere, callee_id, false)) {
          CalleeCache.Release(function);
          return true;
        }

        state->PopContext();
      }

      if (callee_vars.Empty()) {
        if (checker_verbose.IsSpecified())
          logout << "CHECK: " << frame
                 << ": No callees to expand at " << point << endl;
      }

      CalleeCache.Release(function);
    }

    return false;
  }

  // any precondition we have to propagate up to the callers.
  WherePrecondition *precondition = NULL;
  if (where)
    precondition = where->IfPrecondition();

  // whether we will be reconnecting to the caller without any
  // propagation information.
  bool reconnect_caller = false;

  if (precondition) {
    Bit *bit = precondition->GetBit();
    WherePrecondition *dupe_precondition = new WherePrecondition(mcfg, bit);
    state->m_precondition_list.PushBack(dupe_precondition);
  }
  else {
    // we will propagate to the caller regardless if there is already a caller
    // hooked up or if we are inside a loop body.

    if (frame->GetCaller().id != NULL)
      reconnect_caller = true;

    if (frame->Kind() == B_Loop)
      reconnect_caller = true;
  }

  if (propagate && reconnect_caller) {
    // check to see if we are delaying any heap propagation.
    if (where->IsInvariant()) {
      Assert(state->m_delayed_propagate_heap == NULL);
      state->m_delayed_propagate_heap = propagate;
    }
  }
  else if (!precondition && !reconnect_caller) {
    // check to see if we are performing heap propagation.

    if (state->m_delayed_propagate_heap) {
      Assert(propagate == NULL);
      CheckerPropagate *heap_propagate = state->m_delayed_propagate_heap;
      state->m_delayed_propagate_heap = NULL;

      WhereInvariant *invariant = heap_propagate->m_where->AsInvariant();

      if (CheckHeapWrites(state, frame, heap_propagate->m_frame, invariant))
        return true;

      state->m_delayed_propagate_heap = heap_propagate;
      return false;
    }
    else if (where && where->IsInvariant()) {
      return CheckHeapWrites(state, frame, frame, where->AsInvariant());
    }

    Assert(propagate);

    // don't need to expand the callers or anything else.
    // we can finally terminate propagation with an error report.

    if (checker_verbose.IsSpecified())
      logout << "CHECK: " << frame
             << ": Nothing to expand, finishing" << endl;

    state->SetReport(RK_Finished);
    return true;
  }

  if (frame->GetCaller().id != NULL) {
    // just propagate to the existing caller.

    if (checker_verbose.IsSpecified())
      logout << "CHECK: " << frame << ": Returning to caller" << endl;

    state->PushContext();

    if (CheckSingleCaller(state, frame, precondition, frame->GetCaller()))
      return true;

    state->PopContext();
  }
  else if (id->Kind() == B_Function) {
    // propagate to all callers to the function.

    Variable *function = id->BaseVar();
    CallEdgeSet *callers = CallerCache.Lookup(function);

    Vector<BlockPPoint> caller_points;
    for (size_t eind = 0; callers && eind < callers->GetEdgeCount(); eind++) {
      const CallEdge &edge = callers->GetEdge(eind);
      Assert(edge.callee == function);
      caller_points.PushBack(edge.where);
    }

    SortVector<BlockPPoint,BlockPPoint>(&caller_points);

    for (size_t cind = 0; cind < caller_points.Size(); cind++) {
      BlockPPoint caller = caller_points[cind];

      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame
               << ": Checking caller: " << caller << endl;

      state->PushContext();

      if (CheckSingleCaller(state, frame, precondition, caller)) {
        CallerCache.Release(function);
        return true;
      }

      state->PopContext();
    }

    if (caller_points.Empty()) {
      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame << ": No callers to expand" << endl;
    }

    CallerCache.Release(function);
  }
  else if (id->Kind() == B_Loop) {
    // check all possible callers of the loop. unroll an iteration before
    // checking the parents so that if we can't figure out a sufficient
    // condition for the loop we will stop exploration quickly.

    // unroll another iteration of the loop.

    if (checker_verbose.IsSpecified())
      logout << "CHECK: " << frame << ": Unrolling loop iteration" << endl;

    state->PushContext();

    BlockPPoint recursive_caller(id, cfg->GetExitPoint());
    if (CheckSingleCaller(state, frame, precondition, recursive_caller))
      return true;

    state->PopContext();

    // check the parents which can initially invoke this loop.

    if (frame->GetLoopParent().id != NULL) {
      if (checker_verbose.IsSpecified())
        logout << "CHECK: " << frame
               << ": Checking existing loop parent: "
               << frame->GetLoopParent() << endl;

      state->PushContext();

      if (CheckSingleCaller(state, frame, precondition, frame->GetLoopParent()))
        return true;

      state->PopContext();
    }
    else {
      for (size_t pind = 0; pind < cfg->GetLoopParentCount(); pind++) {
        BlockPPoint where = cfg->GetLoopParent(pind);

        if (checker_verbose.IsSpecified())
          logout << "CHECK: " << frame
                 << ": Checking loop parent: " << where << endl;

        state->PushContext();

        if (CheckSingleCaller(state, frame, precondition, where))
          return true;

        state->PopContext();
      }
    }
  }
  else if (id->Kind() == B_Initializer) {
    // initializers don't have callers, can just ignore this.
    // TODO: should address why this code is being reached in the first place.

    if (checker_verbose.IsSpecified())
      logout << "CHECK: " << frame << ": Initializer has no callers" << endl;

    return false;
  }
  else {
    // unknown type of block.
    Assert(false);
  }

  // if we set the state's delayed heap propagation then unset it.
  if (propagate && state->m_delayed_propagate_heap == propagate)
    state->m_delayed_propagate_heap = NULL;

  return false;
}
// check propagation for each point bit in the specified frame. this is called
// both for the initial and intermediate checks of the assertion. assert_safe
// indicates that this is an initial check or an intermediate check of a heap
// invariant, and should be marked as a base bit/frame in the state.
bool CheckFrameList(CheckerState *state, CheckerFrame *frame,
                    PPoint point, bool allow_point, bool assert_safe,
                    Bit *base_bit, const GuardBitVector &point_list)
{
  // check if we are ignoring this function outright.
  BlockId *id = frame->CFG()->GetId();
  if (id->Kind() != B_Initializer && IgnoreFunction(id->BaseVar())) {
    if (checker_verbose.IsSpecified())
      logout << "CHECK: " << frame << ": Ignoring function" << endl;
    return false;
  }

  Solver *solver = state->GetSolver();

  if (!solver->IsSatisfiable()) {
    if (checker_verbose.IsSpecified())
      logout << "CHECK: " << frame << ": List unsatisfiable" << endl;
    return false;
  }

  for (size_t ind = 0; ind < point_list.Size(); ind++) {
    const GuardBit &gb = point_list[ind];

    state->PushContext();

    // the guard for the paths this safe bit takes are an extra assumed bit.
    frame->PushAssumedBit(gb.guard);

    // add side conditions and pending information from the bit.
    solver->AddSideConditions(frame->Id(), gb.bit);

    if (assert_safe)
      state->PushBaseBit(gb.bit, frame);

    if (TestErrorSatisfiable(state, frame, gb.bit)) {
      // error is feasible along these paths, construct a propagation
      // for the safe bit and continue exploration.
      CheckerPropagate propagate(frame, point, allow_point);
      propagate.m_id = state->GetPropagateId();

      propagate.FindTest(base_bit, gb.bit);

      state->m_stack.PushBack(&propagate);

      // check the frame against this propagation.
      if (CheckFrame(state, frame, &propagate))
        return true;

      // check if there was a soft timeout while we were finished
      // exploring this path. when the timeout occurs all satisfiable
      // queries become false so we will end up here.
      if (TimerAlarm::ActiveExpired()) {
        logout << "Timeout: ";
        PrintTime(TimerAlarm::ActiveElapsed());
        logout << endl;

        state->SetReport(RK_Timeout);
        return true;
      }

      state->m_stack.PopBack();
    }

    // no error along these paths, unwind the changes we made beforehand.
    if (assert_safe)
      state->PopBaseBit();
    frame->PopAssumedBit();
    state->PopContext();
  }

  return false;
}
bool SialOpsParallel::check_and_set_mode(const BlockId& id, array_mode mode) {
  int array_id = id.array_id();
  return check_and_set_mode(array_id, mode);
}
void BlockModset::ComputeModset(BlockMemory *mcfg, bool indirect)
{
  static BaseTimer compute_timer("modset_compute");
  Timer _timer(&compute_timer);

  // get any indirect callees for this function, provided they have been
  // computed and stored in the callee database (indirect is set).
  CallEdgeSet *indirect_callees = NULL;
  if (indirect)
    indirect_callees = CalleeCache.Lookup(m_id->BaseVar());

  BlockCFG *cfg = mcfg->GetCFG();
  for (size_t eind = 0; eind < cfg->GetEdgeCount(); eind++) {
    PEdge *edge = cfg->GetEdge(eind);
    PPoint point = edge->GetSource();

    if (edge->IsAssign() || edge->IsCall()) {
      // process direct assignments along this edge.

      const Vector<GuardAssign>* assigns = mcfg->GetAssigns(point);
      if (assigns) {
        for (size_t aind = 0; aind < assigns->Size(); aind++) {
          const GuardAssign &gasn = assigns->At(aind);
          ProcessUpdatedLval(mcfg, gasn.left, NULL, true, false);

          Exp *use_lval = NULL;
          Exp *kind = mcfg->GetTerminateAssign(point, gasn.left, gasn.right, &use_lval);
          if (kind) {
            ProcessUpdatedLval(mcfg, use_lval, kind, false, false);
            kind->DecRef();
          }
        }
      }
    }

    // pull in modsets from the direct and indirect callees of the edge.
    if (BlockId *callee = edge->GetDirectCallee()) {
      ComputeModsetCall(mcfg, edge, callee, NULL);
      callee->DecRef();
    }
    else if (edge->IsCall() && indirect_callees) {
      for (size_t ind = 0; ind < indirect_callees->GetEdgeCount(); ind++) {
        const CallEdge &cedge = indirect_callees->GetEdge(ind);

        // when comparing watch out for the case that this is a temporary
        // modset and does not share the same block kind as the edge point.
        if (cedge.where.version == cfg->GetVersion() &&
            cedge.where.point == point &&
            cedge.where.id->Function() == m_id->Function() &&
            cedge.where.id->Loop() == m_id->Loop()) {
          cedge.callee->IncRef();
          BlockId *callee = BlockId::Make(B_Function, cedge.callee);

          ComputeModsetCall(mcfg, edge, callee, cedge.rfld_chain);
          callee->DecRef();
        }
      }
    }
  }

  // sort the modset exps to ensure a consistent representation.
  if (m_modset_list)
    SortVector<PointValue,compare_PointValue>(m_modset_list);
  if (m_assign_list)
    SortVector<GuardAssign,compare_GuardAssign>(m_assign_list);

  if (indirect)
    CalleeCache.Release(m_id->BaseVar());
}
void BlockSummary::GetAssumedBits(BlockMemory *mcfg, PPoint end_point,
                                  Vector<AssumeInfo> *assume_list)
{
  BlockId *id = mcfg->GetId();
  BlockCFG *cfg = mcfg->GetCFG();

  BlockSummary *sum = GetBlockSummary(id);

  const Vector<Bit*> *assumes = sum->GetAssumes();
  size_t assume_count = VectorSize<Bit*>(assumes);

  // pull in assumptions from the summary for mcfg. in some cases these
  // assumptions won't be useful, e.g. describing the state at exit
  // for functions. for now we're just adding all of them though. TODO: fix.
  for (size_t ind = 0; ind < assume_count; ind++) {
    Bit *bit = assumes->At(ind);
    bit->IncRef(assume_list);

    AssumeInfo info;
    info.bit = bit;
    assume_list->PushBack(info);
  }

  sum->DecRef();

  Vector<BlockCFG*> *annot_list = BodyAnnotCache.Lookup(id->Function());

  // add assumes at function entry for any preconditions.
  if (id->Kind() == B_Function) {
    for (size_t ind = 0; annot_list && ind < annot_list->Size(); ind++) {
      BlockCFG *annot_cfg = annot_list->At(ind);

      if (annot_cfg->GetAnnotationKind() != AK_Precondition &&
          annot_cfg->GetAnnotationKind() != AK_PreconditionAssume)
        continue;

      Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg);
      if (!bit) continue;

      annot_cfg->IncRef(assume_list);
      bit->IncRef(assume_list);

      AssumeInfo info;
      info.annot = annot_cfg;
      info.bit = bit;
      assume_list->PushBack(info);
    }
  }

  // add assumptions from points within the block.
  for (size_t pind = 0; pind < cfg->GetPointAnnotationCount(); pind++) {
    PointAnnotation pann = cfg->GetPointAnnotation(pind);
    if (end_point && pann.point >= end_point)
      continue;

    BlockCFG *annot_cfg = GetAnnotationCFG(pann.annot);
    if (!annot_cfg) continue;

    Assert(annot_cfg->GetAnnotationKind() != AK_AssertRuntime);

    if (Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg)) {
      // get the annotation bit in terms of block entry.
      Bit *point_bit = NULL;
      mcfg->TranslateBit(TRK_Point, pann.point, bit, &point_bit);
      point_bit->MoveRef(&point_bit, assume_list);

      annot_cfg->IncRef(assume_list);

      AssumeInfo info;
      info.annot = annot_cfg;
      info.point = pann.point;
      info.bit = point_bit;
      assume_list->PushBack(info);
    }

    annot_cfg->DecRef();
  }

  // add assumptions from annotation edges within the block, invariants
  // on values accessed by the block, and from the summaries of any callees.
  for (size_t eind = 0; eind < cfg->GetEdgeCount(); eind++) {
    PEdge *edge = cfg->GetEdge(eind);
    PPoint point = edge->GetSource();

    if (end_point && point >= end_point)
      continue;

    InvariantAssumeVisitor visitor(mcfg, point, assume_list);
    edge->DoVisit(&visitor);

    if (PEdgeAnnotation *nedge = edge->IfAnnotation()) {
      // add an assumption for this annotation.
      BlockCFG *annot_cfg = GetAnnotationCFG(nedge->GetAnnotationId());
      if (!annot_cfg) continue;

      Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg);

      // don't incorporate AssertRuntimes, these are not assumed.
      if (bit && annot_cfg->GetAnnotationKind() != AK_AssertRuntime) {
        // get the annotation bit in terms of block entry.
        Bit *point_bit = NULL;
        mcfg->TranslateBit(TRK_Point, point, bit, &point_bit);
        point_bit->MoveRef(&point_bit, assume_list);

        annot_cfg->IncRef(assume_list);

        AssumeInfo info;
        info.annot = annot_cfg;
        info.point = point;
        info.bit = point_bit;
        assume_list->PushBack(info);
      }

      annot_cfg->DecRef();
    }

    if (BlockId *callee = edge->GetDirectCallee()) {
      GetCallAssumedBits(mcfg, edge, callee, false, assume_list);
      callee->DecRef();
    }
    else if (edge->IsCall()) {
      // add conditional assumes for the indirect targets of the call.
      // this is most useful for baked information and annotations, where
      // we sometimes need to attach information at indirect calls.

      CallEdgeSet *callees = CalleeCache.Lookup(id->BaseVar());
      size_t old_count = assume_list->Size();

      if (callees) {
        for (size_t cind = 0; cind < callees->GetEdgeCount(); cind++) {
          const CallEdge &cedge = callees->GetEdge(cind);
          if (cedge.where.id == id && cedge.where.point == point) {
            cedge.callee->IncRef();
            BlockId *callee = BlockId::Make(B_Function, cedge.callee);

            GetCallAssumedBits(mcfg, edge, callee, true, assume_list);
            callee->DecRef();
          }
        }
      }

      if (assume_list->Size() != old_count) {
        // we managed to do something at this indirect call site.
        // add another assumption restricting the possible callees to
        // only those identified by our callgraph.

        GuardExpVector receiver_list;
        mcfg->TranslateReceiver(point, &receiver_list);

        for (size_t rind = 0; rind < receiver_list.Size(); rind++) {
          const GuardExp &gs = receiver_list[rind];
          gs.guard->IncRef();

          // make a bit: !when || rcv == callee0 || rcv == callee1 || ...
          Bit *extra_bit = Bit::MakeNot(gs.guard);

          for (size_t cind = 0; cind < callees->GetEdgeCount(); cind++) {
            const CallEdge &cedge = callees->GetEdge(cind);
            if (cedge.where.id == id && cedge.where.point == point) {
              Variable *callee_var = cedge.callee;
              callee_var->IncRef();

              Exp *callee_exp = Exp::MakeVar(callee_var);
              gs.exp->IncRef();

              Bit *equal = Exp::MakeCompareBit(B_Equal, callee_exp, gs.exp);
              extra_bit = Bit::MakeOr(extra_bit, equal);
            }
          }

          extra_bit->MoveRef(NULL, assume_list);

          AssumeInfo info;
          info.bit = extra_bit;
          assume_list->PushBack(info);
        }
      }

      CalleeCache.Release(id->BaseVar());
    }
  }

  BodyAnnotCache.Release(id->Function());

  // add assumptions from heap invariants describing values mentioned
  // in added assumptions. we could keep doing this transitively but don't,
  // to ensure termination.
  size_t count = assume_list->Size();
  for (size_t ind = 0; ind < count; ind++) {
    InvariantAssumeVisitor visitor(NULL, 0, assume_list);
    assume_list->At(ind).bit->DoVisit(&visitor);
  }

  CombineAssumeList(assume_list);
}
void Visit(Exp *exp)
{
  if (ExpFld *nexp = exp->IfFld()) {
    // pick up any type invariants from the host type.
    String *csu_name = nexp->GetField()->GetCSUType()->GetCSUName();
    Vector<BlockCFG*> *annot_list = CompAnnotCache.Lookup(csu_name);

    for (size_t ind = 0; annot_list && ind < annot_list->Size(); ind++) {
      BlockCFG *annot_cfg = annot_list->At(ind);

      Assert(annot_cfg->GetAnnotationKind() == AK_Invariant ||
             annot_cfg->GetAnnotationKind() == AK_InvariantAssume);
      BlockId *id = annot_cfg->GetId();

      Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg);
      if (!bit) continue;

      // get the *this expression. we'll replace this with the actual CSU
      // lvalue to get the assumed bit.
      id->IncRef();
      Variable *this_var = Variable::Make(id, VK_This, NULL, 0, NULL);
      Exp *this_exp = Exp::MakeVar(this_var);
      Exp *this_drf = Exp::MakeDrf(this_exp);

      Exp *target = nexp->GetTarget();

      GuardExpVector lval_res;
      if (mcfg) {
        mcfg->TranslateExp(TRK_Point, point, target, &lval_res);
      }
      else {
        target->IncRef();
        lval_res.PushBack(GuardExp(target, Bit::MakeConstant(true)));
      }

      for (size_t lind = 0; lind < lval_res.Size(); lind++) {
        // ignore the guard component of the result here. this means that
        // accessing a field of a value means related invariants hold for
        // the value along all paths. which is normally right, except when
        // the value is the result of a cast, and could have a different type
        // along other paths. TODO: sort this out.
        const GuardExp &gs = lval_res[lind];

        Bit *new_bit = BitReplaceExp(bit, this_drf, gs.exp);
        new_bit->MoveRef(NULL, assume_list);

        annot_cfg->IncRef(assume_list);

        AssumeInfo info;
        info.annot = annot_cfg;
        info.point = 0;
        info.bit = new_bit;
        assume_list->PushBack(info);
      }

      this_drf->DecRef();
    }

    CompAnnotCache.Release(csu_name);
  }

  if (ExpVar *nexp = exp->IfVar()) {
    if (nexp->GetVariable()->Kind() == VK_Glob) {
      String *var_name = nexp->GetVariable()->GetName();
      Vector<BlockCFG*> *annot_list = InitAnnotCache.Lookup(var_name);

      for (size_t ind = 0; annot_list && ind < annot_list->Size(); ind++) {
        BlockCFG *annot_cfg = annot_list->At(ind);

        Assert(annot_cfg->GetAnnotationKind() == AK_Invariant ||
               annot_cfg->GetAnnotationKind() == AK_InvariantAssume);

        Bit *bit = BlockMemory::GetAnnotationBit(annot_cfg);
        if (!bit) continue;

        bit->IncRef(assume_list);
        annot_cfg->IncRef(assume_list);

        AssumeInfo info;
        info.annot = annot_cfg;
        info.point = 0;
        info.bit = bit;
        assume_list->PushBack(info);
      }

      InitAnnotCache.Release(var_name);
    }
  }
}