// Returns true when FDecl has (or is already known to have) arguments that
// must be checked for null before the call is executed.
bool isDeclCandidate(FunctionDecl* FDecl) {
  if (m_NonNullArgIndexs.count(FDecl))
    return true;

  // C++ methods are always candidates: the implicit 'this' must be non-null.
  if (llvm::isa<CXXMethodDecl>(FDecl))
    return true;

  // Collect the argument positions that carry a nonnull attribute.
  std::bitset<32> ArgIndexs;
  for (specific_attr_iterator<NonNullAttr>
         I = FDecl->specific_attr_begin<NonNullAttr>(),
         E = FDecl->specific_attr_end<NonNullAttr>(); I != E; ++I) {
    NonNullAttr *NonNull = *I;
    for (NonNullAttr::args_iterator i = NonNull->args_begin(),
           e = NonNull->args_end(); i != e; ++i) {
      ArgIndexs.set(*i);
    }
  }

  // Cache the result so later call sites only pay for a map lookup.
  if (ArgIndexs.any()) {
    m_NonNullArgIndexs.insert(std::make_pair(FDecl, ArgIndexs));
    return true;
  }
  return false;
}
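The useful part of isDeclCandidate is the per-declaration cache: once a declaration is known to carry nonnull attributes, later call sites can look up which argument positions need a guard without re-walking the attribute list. Below is a minimal, self-contained sketch of that caching pattern; the Decl struct, its nonNullArgs field, and the map name are simplified stand-ins, not the Clang/cling API.

#include <bitset>
#include <cstdio>
#include <map>
#include <vector>

// Simplified stand-in for a function declaration with nonnull argument indexes.
struct Decl {
    std::vector<unsigned> nonNullArgs;
};

// Mirrors m_NonNullArgIndexs: declaration -> bitset of nonnull argument positions.
static std::map<const Decl*, std::bitset<32>> NonNullArgIndexs;

static bool isDeclCandidate(const Decl* D) {
    if (NonNullArgIndexs.count(D))
        return true;  // already analyzed, reuse the cached bitset

    std::bitset<32> ArgIndexs;
    for (unsigned Idx : D->nonNullArgs)
        ArgIndexs.set(Idx);

    if (ArgIndexs.any()) {
        NonNullArgIndexs.insert({D, ArgIndexs});
        return true;
    }
    return false;
}

int main() {
    Decl strcpyLike{{0, 1}};  // both pointer arguments declared nonnull
    if (isDeclCandidate(&strcpyLike))
        for (unsigned i = 0; i < 32; ++i)
            if (NonNullArgIndexs[&strcpyLike].test(i))
                std::printf("argument %u needs a null check\n", i);
}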
Example #2
static void
updateSSAForUseOfInst(SILSSAUpdater &Updater,
                      SmallVectorImpl<SILArgument*> &InsertedPHIs,
                      const llvm::DenseMap<ValueBase *, SILValue> &ValueMap,
                      SILBasicBlock *Header, SILBasicBlock *EntryCheckBlock,
                      ValueBase *Inst) {
    if (Inst->use_empty())
        return;

    // Find the mapped instruction.
    assert(ValueMap.count(Inst) && "Expected to find value in map!");
    SILValue MappedValue = ValueMap.find(Inst)->second;
    assert(MappedValue);

    // If the instruction produces a result value, rewrite every use of it.
    if (Inst->hasValue()) {
        SILValue Res(Inst);
        assert(Res->getType() == MappedValue->getType() && "The types must match");

        InsertedPHIs.clear();
        Updater.Initialize(Res->getType());
        Updater.AddAvailableValue(Header, Res);
        Updater.AddAvailableValue(EntryCheckBlock, MappedValue);

        // Because of the way phi nodes are represented, we have to collect all
        // uses before we update SSA: modifying one phi node can invalidate
        // another, unrelated phi node's operands through the common branch
        // instruction (which also has to be modified), and that would
        // invalidate a plain ValueUseIterator. Instead we collect the uses,
        // wrapping uses in branches specially, so that we can reconstruct a
        // use even after its branch has been modified.
        SmallVector<UseWrapper, 8> StoredUses;
        for (auto *U : Res->getUses())
            StoredUses.push_back(UseWrapper(U));
        for (auto U : StoredUses) {
            Operand *Use = U;
            SILInstruction *User = Use->getUser();
            assert(User && "Missing user");

            // Ignore uses in the same basic block.
            if (User->getParent() == Header)
                continue;

            assert(User->getParent() != EntryCheckBlock &&
                   "The entry check block should dominate the header");
            Updater.RewriteUse(*Use);
        }
        // Canonicalize inserted phis to avoid extra BB Args.
        for (SILArgument *Arg : InsertedPHIs) {
            if (SILInstruction *Inst = replaceBBArgWithCast(Arg)) {
                Arg->replaceAllUsesWith(Inst);
                // DCE+SimplifyCFG runs as a post-pass cleanup.
                // DCE replaces dead arg values with undef.
                // SimplifyCFG deletes the dead BB arg.
            }
        }
    }
}
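The constraint described in that comment, collect the uses first and only then rewrite, is a general pattern: never mutate a def-use list while iterating it directly. A tiny standalone illustration of the snapshot-then-rewrite shape follows; it uses plain std containers and indices as the stable handles, standing in for what UseWrapper does for SIL operands.

#include <cstddef>
#include <cstdio>
#include <vector>

// Stand-in for an operand list that may be rebuilt wholesale during rewriting,
// which would invalidate raw pointers or iterators into it.
struct Operand { int value; };

int main() {
    std::vector<Operand> operands = {{1}, {2}, {3}};

    // Snapshot stable handles (indices) before any mutation, in the spirit of
    // collecting UseWrapper objects before calling RewriteUse.
    std::vector<std::size_t> stored;
    for (std::size_t i = 0; i < operands.size(); ++i)
        stored.push_back(i);

    for (std::size_t i : stored) {
        // Simulate a rewrite that replaces the whole operand list; pointers
        // taken before this point would dangle, but the saved indices still work.
        std::vector<Operand> rebuilt = operands;
        rebuilt[i].value += 100;
        operands = std::move(rebuilt);
    }

    for (const Operand& op : operands)
        std::printf("%d\n", op.value);  // prints 101, 102, 103
}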
Example #3
// Count edges whose source and destination nodes are both tainted.
int Graph::getTaintedEdges() {
	int countEdges = 0;

	for (llvm::DenseMap<GraphNode*, bool>::iterator it = taintedMap.begin(); it != taintedMap.end(); ++it) {
		std::map<GraphNode*, edgeType> succs = it->first->getSuccessors();
		for (std::map<GraphNode*, edgeType>::iterator succ = succs.begin(), s_end = succs.end(); succ != s_end; succ++) {
			if (taintedMap.count(succ->first) > 0) {
				countEdges++;
			}
		}
	}
	return countEdges;
}
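The logic reduces to: for every tainted node, count the successors that are themselves tainted. A self-contained sketch of the same counting, with integer node ids in place of GraphNode pointers:

#include <cstdio>
#include <map>
#include <set>

int main() {
    // Adjacency list: node id -> successor ids (stand-ins for GraphNode*).
    std::map<int, std::set<int>> succs = {
        {1, {2, 3}}, {2, {3}}, {3, {}}, {4, {1}}
    };
    std::set<int> tainted = {1, 3};

    int countEdges = 0;
    for (int n : tainted)
        for (int s : succs[n])
            if (tainted.count(s))
                countEdges++;

    std::printf("tainted edges: %d\n", countEdges);  // only 1 -> 3 qualifies
}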
Example #4
std::unique_ptr<ICInfo> registerCompiledPatchpoint(uint8_t* start_addr, uint8_t* slowpath_start_addr,
                                                   uint8_t* continue_addr, uint8_t* slowpath_rtn_addr,
                                                   const ICSetupInfo* ic, StackInfo stack_info, LiveOutSet live_outs) {
    assert(slowpath_start_addr - start_addr >= ic->num_slots * ic->slot_size);
    assert(slowpath_rtn_addr > slowpath_start_addr);
    assert(slowpath_rtn_addr <= start_addr + ic->totalSize());

    assembler::GenericRegister return_register;
    assert(ic->getCallingConvention() == llvm::CallingConv::C
           || ic->getCallingConvention() == llvm::CallingConv::PreserveAll);

    if (ic->hasReturnValue()) {
        static const int DWARF_RAX = 0;
        // It's possible that the return value doesn't get used, in which case
        // we can avoid copying back into RAX at the end
        live_outs.clear(DWARF_RAX);

        // TODO we only need to do this if 0 was in live_outs, since if it wasn't, that indicates
        // the return value won't be used and we can optimize based on that.
        return_register = assembler::RAX;
    }

    // We could let the user just slide down the nop section, but instead we
    // emit jumps to the end. It is not clear whether this is worth it.
    for (int i = 0; i < ic->num_slots; i++) {
        uint8_t* start = start_addr + i * ic->slot_size;
        // std::unique_ptr<MCWriter> writer(createMCWriter(start, ic->slot_size * (ic->num_slots - i), 0));
        // writer->emitNop();
        // writer->emitGuardFalse();

        Assembler writer(start, ic->slot_size);
        writer.nop();
        // writer.trap();
        // writer.jmp(JumpDestination::fromStart(ic->slot_size * (ic->num_slots - i)));
        writer.jmp(JumpDestination::fromStart(slowpath_start_addr - start));
    }

    ICInfo* icinfo = new ICInfo(start_addr, slowpath_rtn_addr, continue_addr, stack_info, ic->num_slots, ic->slot_size,
                                ic->getCallingConvention(), std::move(live_outs), return_register, ic->type_recorder);

    assert(!ics_by_return_addr.count(slowpath_rtn_addr));
    ics_by_return_addr[slowpath_rtn_addr] = icinfo;

    registerGCTrackedICInfo(icinfo);

    return std::unique_ptr<ICInfo>(icinfo);
}
Example #5
void deregisterCompiledPatchpoint(ICInfo* ic) {
    assert(ics_by_return_addr.count(ic->slowpath_rtn_addr));
    ics_by_return_addr.erase(ic->slowpath_rtn_addr);

    deregisterGCTrackedICInfo(ic);
}
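registerCompiledPatchpoint and deregisterCompiledPatchpoint form a matched pair keyed by the slowpath return address. The registry idiom itself is small; here is a hypothetical sketch of just that part (the Info type, function names, and map are illustrative, not Pyston's actual API):

#include <cassert>
#include <cstdint>
#include <map>
#include <memory>

struct Info {
    uint8_t* slowpath_rtn_addr;
    explicit Info(uint8_t* addr) : slowpath_rtn_addr(addr) {}
};

// Registry keyed by the slowpath return address, mirroring ics_by_return_addr.
static std::map<uint8_t*, Info*> registry;

std::unique_ptr<Info> registerInfo(uint8_t* slowpath_rtn_addr) {
    assert(!registry.count(slowpath_rtn_addr) && "address registered twice");
    auto info = std::make_unique<Info>(slowpath_rtn_addr);
    registry[slowpath_rtn_addr] = info.get();
    return info;
}

void deregisterInfo(Info* info) {
    assert(registry.count(info->slowpath_rtn_addr) && "unknown patchpoint");
    registry.erase(info->slowpath_rtn_addr);
}

int main() {
    uint8_t buffer[16];
    auto info = registerInfo(buffer + 8);  // pretend this is the slowpath return address
    deregisterInfo(info.get());
}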
Example #6
/// Process an apply instruction which uses a partial_apply
/// as its callee.
/// Returns true on success.
bool PartialApplyCombiner::processSingleApply(FullApplySite AI) {
    Builder.setInsertionPoint(AI.getInstruction());
    Builder.setCurrentDebugScope(AI.getDebugScope());

    // Prepare the args.
    SmallVector<SILValue, 8> Args;
    // First the ApplyInst args.
    for (auto Op : AI.getArguments())
        Args.push_back(Op);

    SILInstruction *InsertionPoint = &*Builder.getInsertionPoint();
    // Next, the partial apply args.

    // Pre-process partial_apply arguments only once, lazily.
    if (isFirstTime) {
        isFirstTime = false;
        if (!allocateTemporaries())
            return false;
    }

    // Now, copy over the partial apply args.
    for (auto Op : PAI->getArguments()) {
        auto Arg = Op;
        // If there is a new temporary for this argument, use it instead.
        if (isa<AllocStackInst>(Arg)) {
            if (ArgToTmp.count(Arg)) {
                Op = ArgToTmp.lookup(Arg);
            }
        }
        Args.push_back(Op);
    }

    Builder.setInsertionPoint(InsertionPoint);
    Builder.setCurrentDebugScope(AI.getDebugScope());

    // The thunk that implements the partial apply calls the closure function,
    // which expects all of its arguments to be consumed. However, the captured
    // arguments are not arguments of *this* apply, so they are not
    // pre-incremented. When we combine the partial_apply and this apply into a
    // new apply, we need to retain all of the closure's non-address-type
    // arguments.
    auto ParamInfo = PAI->getSubstCalleeType()->getParameters();
    auto PartialApplyArgs = PAI->getArguments();
    // Set of arguments that need to be released after each invocation.
    SmallVector<SILValue, 8> ToBeReleasedArgs;
    for (unsigned i = 0, e = PartialApplyArgs.size(); i < e; ++i) {
        SILValue Arg = PartialApplyArgs[i];
        if (!Arg->getType().isAddress()) {
            // Retain the argument as the callee may consume it.
            Builder.emitRetainValueOperation(PAI->getLoc(), Arg);
            // For non-consumed parameters (e.g. guaranteed), we also need to
            // insert releases after each apply instruction that we create.
            if (!ParamInfo[ParamInfo.size() - PartialApplyArgs.size() + i].
                    isConsumed())
                ToBeReleasedArgs.push_back(Arg);
        }
    }

    auto *F = FRI->getReferencedFunction();
    SILType FnType = F->getLoweredType();
    SILType ResultTy = F->getLoweredFunctionType()->getSILResult();
    ArrayRef<Substitution> Subs = PAI->getSubstitutions();
    if (!Subs.empty()) {
        FnType = FnType.substGenericArgs(PAI->getModule(), Subs);
        ResultTy = FnType.getAs<SILFunctionType>()->getSILResult();
    }

    FullApplySite NAI;
    if (auto *TAI = dyn_cast<TryApplyInst>(AI))
        NAI =
            Builder.createTryApply(AI.getLoc(), FRI, FnType, Subs, Args,
                                   TAI->getNormalBB(), TAI->getErrorBB());
    else
        NAI =
            Builder.createApply(AI.getLoc(), FRI, FnType, ResultTy, Subs, Args,
                                cast<ApplyInst>(AI)->isNonThrowing());

    // We also need to release the partial_apply instruction itself because it
    // is consumed by the apply instruction.
    if (auto *TAI = dyn_cast<TryApplyInst>(AI)) {
        Builder.setInsertionPoint(TAI->getNormalBB()->begin());
        for (auto Arg : ToBeReleasedArgs) {
            Builder.emitReleaseValueOperation(PAI->getLoc(), Arg);
        }
        Builder.createStrongRelease(AI.getLoc(), PAI, Atomicity::Atomic);
        Builder.setInsertionPoint(TAI->getErrorBB()->begin());
        // Release the non-consumed parameters.
        for (auto Arg : ToBeReleasedArgs) {
            Builder.emitReleaseValueOperation(PAI->getLoc(), Arg);
        }
        Builder.createStrongRelease(AI.getLoc(), PAI, Atomicity::Atomic);
        Builder.setInsertionPoint(AI.getInstruction());
    } else {
        // Release the non-consumed parameters.
        for (auto Arg : ToBeReleasedArgs) {
            Builder.emitReleaseValueOperation(PAI->getLoc(), Arg);
        }
        Builder.createStrongRelease(AI.getLoc(), PAI, Atomicity::Atomic);
    }

    SilCombiner->replaceInstUsesWith(*AI.getInstruction(), NAI.getInstruction());
    SilCombiner->eraseInstFromFunction(*AI.getInstruction());
    return true;
}
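One easily misread step above is the index arithmetic ParamInfo[ParamInfo.size() - PartialApplyArgs.size() + i]: the partial apply supplies the trailing parameters of the callee, so its i-th captured argument lines up with the i-th entry counted from the end of the full parameter list. A small standalone check of that mapping (the parameter names are illustrative):

#include <cassert>
#include <string>
#include <vector>

int main() {
    // Full callee parameter list; the last two are captured by the partial apply.
    std::vector<std::string> ParamInfo = {"a", "b", "captured0", "captured1"};
    std::vector<std::string> PartialApplyArgs = {"captured0", "captured1"};

    for (size_t i = 0; i < PartialApplyArgs.size(); ++i) {
        // Captured argument i corresponds to the tail of the parameter list.
        const std::string& Param =
            ParamInfo[ParamInfo.size() - PartialApplyArgs.size() + i];
        assert(Param == PartialApplyArgs[i]);
    }
    return 0;
}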
Example #7
// Returns true if F has been registered as a closure scope.
bool isClosureScope(SILFunction *F) { return scopeToIndexMap.count(F); }
Example #8
Graph Graph::generateSubGraph(Value *src, Value *dst) {
        Graph G(this->AS);

        std::map<GraphNode*, GraphNode*> nodeMap;

        std::set<GraphNode*> visitedNodes1;
        std::set<GraphNode*> visitedNodes2;


        GraphNode* source = findOpNode(src);
        if (!source) source = findNode(src);

        GraphNode* destination = findNode(dst);

        if (source == NULL || destination == NULL) {
                return G;
        }

        dfsVisit(source, destination, visitedNodes1);
        dfsVisitBack(destination, source, visitedNodes2);

        // Keep only the nodes visited in both directions.
        for (std::set<GraphNode*>::iterator it = visitedNodes1.begin(); it != visitedNodes1.end(); ++it) {
                if (visitedNodes2.count(*it) > 0) {
                        nodeMap[*it] = (*it)->clone();
                        // Store the original nodes in the static tainted map.
                        if (taintedMap.count(*it)==0) {
                        	taintedMap[*it] = true;
                        }
                }
        }

        // Connect the new vertices.
        for (std::map<GraphNode*, GraphNode*>::iterator it = nodeMap.begin(); it != nodeMap.end(); ++it) {

                std::map<GraphNode*, edgeType> succs = it->first->getSuccessors();

                for (std::map<GraphNode*, edgeType>::iterator succ = succs.begin(), s_end = succs.end(); succ != s_end; succ++) {
                        if (nodeMap.count(succ->first) > 0) {
                                it->second->connect(nodeMap[succ->first], succ->second);
                        }
                }

                if ( !G.nodes.count(it->second)) {
                	G.nodes.insert(it->second);

                	if (isa<VarNode>(it->second)) {
                		G.varNodes[dyn_cast<VarNode>(it->second)->getValue()] = dyn_cast<VarNode>(it->second);
                	}

                	if (isa<MemNode>(it->second)) {
                		G.memNodes[dyn_cast<MemNode>(it->second)->getAliasSetId()] = dyn_cast<MemNode>(it->second);
                	}

                	if (isa<OpNode>(it->second)) {
                		G.opNodes[dyn_cast<OpNode>(it->second)->getValue()] = dyn_cast<OpNode>(it->second);

                		if (isa<CallNode>(it->second)) {
                    		G.callNodes[dyn_cast<CallNode>(it->second)->getCallInst()] = dyn_cast<CallNode>(it->second);

                    	}
                	}

                }

        }


        return G;
}
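generateSubGraph keeps exactly the nodes visited both forward from src and backward from dst, i.e. the nodes that can lie on some path from src to dst, and then re-connects their clones. A self-contained sketch of that intersection idea with integer node ids (the dfs helper here is illustrative, not the Graph API):

#include <cstdio>
#include <map>
#include <set>
#include <vector>

using Adj = std::map<int, std::vector<int>>;

// Plain depth-first search collecting every node reachable from n.
static void dfs(const Adj& g, int n, std::set<int>& seen) {
    if (!seen.insert(n).second)
        return;
    auto it = g.find(n);
    if (it == g.end())
        return;
    for (int s : it->second)
        dfs(g, s, seen);
}

int main() {
    Adj fwd = {{0, {1, 4}}, {1, {2}}, {2, {3}}, {4, {5}}};

    // Reverse adjacency, so the backward walk from dst is a forward DFS here.
    Adj bwd;
    for (const auto& entry : fwd)
        for (int s : entry.second)
            bwd[s].push_back(entry.first);

    std::set<int> fromSrc, toDst;
    dfs(fwd, 0, fromSrc);  // everything reachable from src = 0
    dfs(bwd, 3, toDst);    // everything that can reach dst = 3

    // The subgraph keeps the intersection: nodes on some src-to-dst path.
    for (int n : fromSrc)
        if (toDst.count(n))
            std::printf("keep node %d\n", n);  // prints nodes 0, 1, 2, 3
}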