Example No. 1
/// IsOperandAMemoryOperand - Check to see if all uses of OpVal by the specified
/// inline asm call are due to memory operands.  If so, return true, otherwise
/// return false.
static bool IsOperandAMemoryOperand(CallInst *CI, InlineAsm *IA, Value *OpVal,
                                    const TargetLowering &TLI) {
  TargetLowering::AsmOperandInfoVector TargetConstraints =
      TLI.ParseConstraints(ImmutableCallSite(CI));
  for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
    TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];
    // Compute the constraint code and ConstraintType to use.
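    // An empty SDValue is passed because we are still operating on IR here,
    // before any SelectionDAG node exists for the operand.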
    TLI.ComputeConstraintToUse(OpInfo, SDValue());

    // If this asm operand is our Value*, and if it isn't an indirect memory
    // operand, we can't fold it!
    if (OpInfo.CallOperandVal == OpVal &&
        (OpInfo.ConstraintType != TargetLowering::C_Memory ||
         !OpInfo.isIndirect))
      return false;
  }

  return true;
}
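A hypothetical caller (an assumption for illustration, not part of the example above) would walk the users of an address value and reject the fold if any inline asm use is not a plain indirect memory operand. The helper name AllInlineAsmUsesAreMemory is invented here; the calls it relies on (Value::users, dyn_cast, CallInst::getCalledValue) are standard LLVM APIs of the same era as the code above.

static bool AllInlineAsmUsesAreMemory(Value *Addr, const TargetLowering &TLI) {
  for (User *U : Addr->users()) {
    // Only call instructions whose callee is an InlineAsm blob matter here.
    CallInst *CI = dyn_cast<CallInst>(U);
    if (!CI)
      continue;
    InlineAsm *IA = dyn_cast<InlineAsm>(CI->getCalledValue());
    if (!IA)
      continue;
    // Reject the address if the asm uses it as anything other than an
    // indirect memory operand.
    if (!IsOperandAMemoryOperand(CI, IA, Addr, TLI))
      return false;
  }
  return true;
}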
Example No. 2
/// OptimizeInlineAsmInst - If there are any memory operands, use
/// OptimizeMemoryInst to sink their address computing into the block when
/// possible / profitable.
bool CodeGenPrepare::OptimizeInlineAsmInst(CallInst *CS) {
    bool MadeChange = false;

    TargetLowering::AsmOperandInfoVector TargetConstraints =
        TLI->ParseConstraints(CS);
    unsigned ArgNo = 0;
    for (unsigned i = 0, e = TargetConstraints.size(); i != e; ++i) {
        TargetLowering::AsmOperandInfo &OpInfo = TargetConstraints[i];

        // Compute the constraint code and ConstraintType to use.
        TLI->ComputeConstraintToUse(OpInfo, SDValue());

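        // An indirect memory operand is passed to the asm call as a pointer
        // argument; plain (non-indirect) inputs consume an argument as well,
        // so ArgNo is advanced in both cases to stay in sync with the
        // constraint list.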
        if (OpInfo.ConstraintType == TargetLowering::C_Memory &&
                OpInfo.isIndirect) {
            Value *OpVal = CS->getArgOperand(ArgNo++);
            MadeChange |= OptimizeMemoryInst(CS, OpVal, OpVal->getType());
        } else if (OpInfo.Type == InlineAsm::isInput)
            ArgNo++;
    }

    return MadeChange;
}
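As a sketch of how this routine might be reached (the dispatch below is assumed for illustration and presumes a matching declaration in the CodeGenPrepare class; it is not code from the example): a call instruction whose callee is an InlineAsm value can simply be forwarded to OptimizeInlineAsmInst.

// Hypothetical dispatch helper; the member name OptimizeCallSite is invented.
bool CodeGenPrepare::OptimizeCallSite(Instruction *I) {
    CallInst *CI = dyn_cast<CallInst>(I);
    if (!CI)
        return false;
    // Only calls to inline asm are handled here; everything else is left alone.
    if (isa<InlineAsm>(CI->getCalledValue()))
        return OptimizeInlineAsmInst(CI);
    return false;
}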