Example #1
bool MipsFastISel::emitStore(MVT VT, unsigned SrcReg, Address &Addr,
                             unsigned Alignment) {
  //
  // more cases will be handled here in following patches.
  //
  unsigned Opc;
  switch (VT.SimpleTy) {
  case MVT::i8:
    Opc = Mips::SB;
    break;
  case MVT::i16:
    Opc = Mips::SH;
    break;
  case MVT::i32:
    Opc = Mips::SW;
    break;
  case MVT::f32:
    if (UnsupportedFPMode)
      return false;
    Opc = Mips::SWC1;
    break;
  case MVT::f64:
    if (UnsupportedFPMode)
      return false;
    Opc = Mips::SDC1;
    break;
  default:
    return false;
  }
  emitInstStore(Opc, SrcReg, Addr.getReg(), Addr.getOffset());
  return true;
}
Example #2
/// \brief Function that finds the pointer's path to the root of its local
/// tree.
/// TODO: Optimize function to take advantage of topological ordering.
void OffsetPointer::getPathToRoot() {
  OffsetPointer *current = this;
  int index = 0;
  Offset offset;
  while (true) {
    path_to_root[current] = std::pair<int, Offset>(index, offset);
    if (current->addresses.size() == 1) {
      Address *addr = *(current->addresses.begin());
      current = addr->getBase();
      if (path_to_root.count(current)) {
        // The local tree is actually a lone loop, so the local tree's root
        // will be the pointer with the highest address.
        OffsetPointer *root = nullptr;
        for (auto i : path_to_root)
          if (root < i.first)
            root = i.first;
        local_root = root;
        break;
      }
      index++;
      offset = offset + addr->getOffset();
    } else {
      local_root = current;
      break;
    }
  }
}
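A minimal usage sketch (the helper name below is hypothetical, not part of the original class): because local_root is always inserted into path_to_root before the loop exits, its entry carries the accumulated offset from this pointer to the root.

// Hypothetical helper, shown only to illustrate how the map filled in by
// getPathToRoot() can be consumed; it is not part of the original analysis.
Offset OffsetPointer::getOffsetToLocalRoot() {
  getPathToRoot();
  // path_to_root[local_root] holds (depth, accumulated offset from `this`).
  return path_to_root[local_root].second;
}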
Example #3
void WebAssemblyFastISel::addLoadStoreOperands(const Address &Addr,
                                               const MachineInstrBuilder &MIB,
                                               MachineMemOperand *MMO) {
  if (const GlobalValue *GV = Addr.getGlobalValue())
    MIB.addGlobalAddress(GV, Addr.getOffset());
  else
    MIB.addImm(Addr.getOffset());

  if (Addr.isRegBase())
    MIB.addReg(Addr.getReg());
  else
    MIB.addFrameIndex(Addr.getFI());

  // Set the alignment operand (this is rewritten in SetP2AlignOperands).
  // TODO: Disable SetP2AlignOperands for FastISel and just do it here.
  MIB.addImm(0);

  MIB.addMemOperand(MMO);
}
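The WebAssembly and Mips examples in this listing all traffic in a small Address helper. The sketch below reconstructs the interface they assume purely from the calls made above and below; each backend defines its own variant, so the exact layout here is an assumption, not either backend's definition.

// Sketch of the Address helper these FastISel examples assume, reconstructed
// from usage only (setKind/setReg/setFI/setOffset/setGlobalValue and their
// getters); the field layout is an illustrative assumption.
class Address {
public:
  enum BaseKind { RegBase, FrameIndexBase };

private:
  BaseKind Kind = RegBase;
  union {
    unsigned Reg;
    int FI;
  } Base;
  int64_t Offset = 0;
  const GlobalValue *GV = nullptr; // only used by the WebAssembly example

public:
  Address() { Base.Reg = 0; }
  void setKind(BaseKind K) { Kind = K; }
  bool isRegBase() const { return Kind == RegBase; }
  void setReg(unsigned R) { Base.Reg = R; }
  unsigned getReg() const { return Base.Reg; }
  void setFI(int FI_) { Base.FI = FI_; }
  int getFI() const { return Base.FI; }
  void setOffset(int64_t O) { Offset = O; }
  int64_t getOffset() const { return Offset; }
  void setGlobalValue(const GlobalValue *G) { GV = G; }
  const GlobalValue *getGlobalValue() const { return GV; }
};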
Example #4
bool MipsFastISel::emitLoad(MVT VT, unsigned &ResultReg, Address &Addr,
                            unsigned Alignment) {
  //
  // more cases will be handled here in following patches.
  //
  unsigned Opc;
  switch (VT.SimpleTy) {
  case MVT::i32: {
    ResultReg = createResultReg(&Mips::GPR32RegClass);
    Opc = Mips::LW;
    break;
  }
  case MVT::i16: {
    ResultReg = createResultReg(&Mips::GPR32RegClass);
    Opc = Mips::LHu;
    break;
  }
  case MVT::i8: {
    ResultReg = createResultReg(&Mips::GPR32RegClass);
    Opc = Mips::LBu;
    break;
  }
  case MVT::f32: {
    if (UnsupportedFPMode)
      return false;
    ResultReg = createResultReg(&Mips::FGR32RegClass);
    Opc = Mips::LWC1;
    break;
  }
  case MVT::f64: {
    if (UnsupportedFPMode)
      return false;
    ResultReg = createResultReg(&Mips::AFGR64RegClass);
    Opc = Mips::LDC1;
    break;
  }
  default:
    return false;
  }
  emitInstLoad(Opc, ResultReg, Addr.getReg(), Addr.getOffset());
  return true;
}
Example #5
bool MipsFastISel::processCallArgs(CallLoweringInfo &CLI,
                                   SmallVectorImpl<MVT> &OutVTs,
                                   unsigned &NumBytes) {
  CallingConv::ID CC = CLI.CallConv;
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(CC, false, *FuncInfo.MF, ArgLocs, *Context);
  CCInfo.AnalyzeCallOperands(OutVTs, CLI.OutFlags, CCAssignFnForCall(CC));
  // Get a count of how many bytes are to be pushed on the stack.
  NumBytes = CCInfo.getNextStackOffset();
  // This is the minimum argument area used for A0-A3.
  if (NumBytes < 16)
    NumBytes = 16;

  emitInst(Mips::ADJCALLSTACKDOWN).addImm(16);
  // Process the args.
  MVT firstMVT;
  for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
    CCValAssign &VA = ArgLocs[i];
    const Value *ArgVal = CLI.OutVals[VA.getValNo()];
    MVT ArgVT = OutVTs[VA.getValNo()];

    if (i == 0) {
      firstMVT = ArgVT;
      if (ArgVT == MVT::f32) {
        VA.convertToReg(Mips::F12);
      } else if (ArgVT == MVT::f64) {
        VA.convertToReg(Mips::D6);
      }
    } else if (i == 1) {
      if ((firstMVT == MVT::f32) || (firstMVT == MVT::f64)) {
        if (ArgVT == MVT::f32) {
          VA.convertToReg(Mips::F14);
        } else if (ArgVT == MVT::f64) {
          VA.convertToReg(Mips::D7);
        }
      }
    }
    if (((ArgVT == MVT::i32) || (ArgVT == MVT::f32)) && VA.isMemLoc()) {
      switch (VA.getLocMemOffset()) {
      case 0:
        VA.convertToReg(Mips::A0);
        break;
      case 4:
        VA.convertToReg(Mips::A1);
        break;
      case 8:
        VA.convertToReg(Mips::A2);
        break;
      case 12:
        VA.convertToReg(Mips::A3);
        break;
      default:
        break;
      }
    }
    unsigned ArgReg = getRegForValue(ArgVal);
    if (!ArgReg)
      return false;

    // Handle arg promotion: SExt, ZExt, AExt.
    switch (VA.getLocInfo()) {
    case CCValAssign::Full:
      break;
    case CCValAssign::AExt:
    case CCValAssign::SExt: {
      MVT DestVT = VA.getLocVT();
      MVT SrcVT = ArgVT;
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/false);
      if (!ArgReg)
        return false;
      break;
    }
    case CCValAssign::ZExt: {
      MVT DestVT = VA.getLocVT();
      MVT SrcVT = ArgVT;
      ArgReg = emitIntExt(SrcVT, ArgReg, DestVT, /*isZExt=*/true);
      if (!ArgReg)
        return false;
      break;
    }
    default:
      llvm_unreachable("Unknown arg promotion!");
    }

    // Now copy/store arg to correct locations.
    if (VA.isRegLoc() && !VA.needsCustom()) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), VA.getLocReg()).addReg(ArgReg);
      CLI.OutRegs.push_back(VA.getLocReg());
    } else if (VA.needsCustom()) {
      llvm_unreachable("Mips does not use custom args.");
      return false;
    } else {
      //
      // FIXME: This path will currently return false. It was copied
      // from the AArch64 port and should be essentially fine for Mips too.
      // The work to finish up this path will be done in a follow-on patch.
      //
      assert(VA.isMemLoc() && "Assuming store on stack.");
      // Don't emit stores for undef values.
      if (isa<UndefValue>(ArgVal))
        continue;

      // Need to store on the stack.
      // FIXME: This alignment is incorrect but this path is disabled
      // for now (will return false). We need to determine the right alignment
      // based on the normal alignment for the underlying machine type.
      //
      unsigned ArgSize = RoundUpToAlignment(ArgVT.getSizeInBits(), 4);

      unsigned BEAlign = 0;
      if (ArgSize < 8 && !Subtarget->isLittle())
        BEAlign = 8 - ArgSize;

      Address Addr;
      Addr.setKind(Address::RegBase);
      Addr.setReg(Mips::SP);
      Addr.setOffset(VA.getLocMemOffset() + BEAlign);

      unsigned Alignment = DL.getABITypeAlignment(ArgVal->getType());
      MachineMemOperand *MMO = FuncInfo.MF->getMachineMemOperand(
          MachinePointerInfo::getStack(Addr.getOffset()),
          MachineMemOperand::MOStore, ArgVT.getStoreSize(), Alignment);
      (void)(MMO);
      // if (!emitStore(ArgVT, ArgReg, Addr, MMO))
      return false; // can't store on the stack yet.
    }
  }

  return true;
}
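For context on the special-casing above: under the MIPS O32 convention this code targets, the first four argument words map to A0-A3, which is why stack offsets 0, 4, 8 and 12 are converted back into registers, while a leading f32/f64 argument (and a second one that follows it) travels in F12/F14 or D6/D7 instead. The 16-byte floor on NumBytes reserves the register-save area O32 requires for A0-A3 even when the call passes less data.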
Example #6
bool WebAssemblyFastISel::computeAddress(const Value *Obj, Address &Addr) {

  const User *U = nullptr;
  unsigned Opcode = Instruction::UserOp1;
  if (const Instruction *I = dyn_cast<Instruction>(Obj)) {
    // Don't walk into other basic blocks unless the object is an alloca from
    // another block, otherwise it may not have a virtual register assigned.
    if (FuncInfo.StaticAllocaMap.count(static_cast<const AllocaInst *>(Obj)) ||
        FuncInfo.MBBMap[I->getParent()] == FuncInfo.MBB) {
      Opcode = I->getOpcode();
      U = I;
    }
  } else if (const ConstantExpr *C = dyn_cast<ConstantExpr>(Obj)) {
    Opcode = C->getOpcode();
    U = C;
  }

  if (auto *Ty = dyn_cast<PointerType>(Obj->getType()))
    if (Ty->getAddressSpace() > 255)
      // Fast instruction selection doesn't support the special
      // address spaces.
      return false;

  if (const GlobalValue *GV = dyn_cast<GlobalValue>(Obj)) {
    if (Addr.getGlobalValue())
      return false;
    Addr.setGlobalValue(GV);
    return true;
  }

  switch (Opcode) {
  default:
    break;
  case Instruction::BitCast: {
    // Look through bitcasts.
    return computeAddress(U->getOperand(0), Addr);
  }
  case Instruction::IntToPtr: {
    // Look past no-op inttoptrs.
    if (TLI.getValueType(DL, U->getOperand(0)->getType()) ==
        TLI.getPointerTy(DL))
      return computeAddress(U->getOperand(0), Addr);
    break;
  }
  case Instruction::PtrToInt: {
    // Look past no-op ptrtoints.
    if (TLI.getValueType(DL, U->getType()) == TLI.getPointerTy(DL))
      return computeAddress(U->getOperand(0), Addr);
    break;
  }
  case Instruction::GetElementPtr: {
    Address SavedAddr = Addr;
    uint64_t TmpOffset = Addr.getOffset();
    // Iterate through the GEP folding the constants into offsets where
    // we can.
    for (gep_type_iterator GTI = gep_type_begin(U), E = gep_type_end(U);
         GTI != E; ++GTI) {
      const Value *Op = GTI.getOperand();
      if (StructType *STy = dyn_cast<StructType>(*GTI)) {
        const StructLayout *SL = DL.getStructLayout(STy);
        unsigned Idx = cast<ConstantInt>(Op)->getZExtValue();
        TmpOffset += SL->getElementOffset(Idx);
      } else {
        uint64_t S = DL.getTypeAllocSize(GTI.getIndexedType());
        for (;;) {
          if (const ConstantInt *CI = dyn_cast<ConstantInt>(Op)) {
            // Constant-offset addressing.
            TmpOffset += CI->getSExtValue() * S;
            break;
          }
          if (S == 1 && Addr.isRegBase() && Addr.getReg() == 0) {
            // An unscaled add of a register. Set it as the new base.
            Addr.setReg(getRegForValue(Op));
            break;
          }
          if (canFoldAddIntoGEP(U, Op)) {
            // A compatible add with a constant operand. Fold the constant.
            ConstantInt *CI =
                cast<ConstantInt>(cast<AddOperator>(Op)->getOperand(1));
            TmpOffset += CI->getSExtValue() * S;
            // Iterate on the other operand.
            Op = cast<AddOperator>(Op)->getOperand(0);
            continue;
          }
          // Unsupported
          goto unsupported_gep;
        }
      }
    }
    // Try to grab the base operand now.
    Addr.setOffset(TmpOffset);
    if (computeAddress(U->getOperand(0), Addr))
      return true;
    // We failed, restore everything and try the other options.
    Addr = SavedAddr;
  unsupported_gep:
    break;
  }
  case Instruction::Alloca: {
    const AllocaInst *AI = cast<AllocaInst>(Obj);
    DenseMap<const AllocaInst *, int>::iterator SI =
        FuncInfo.StaticAllocaMap.find(AI);
    if (SI != FuncInfo.StaticAllocaMap.end()) {
      Addr.setKind(Address::FrameIndexBase);
      Addr.setFI(SI->second);
      return true;
    }
    break;
  }
  case Instruction::Add: {
    // Adds of constants are common and easy enough.
    const Value *LHS = U->getOperand(0);
    const Value *RHS = U->getOperand(1);

    if (isa<ConstantInt>(LHS))
      std::swap(LHS, RHS);

    if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
      Addr.setOffset(Addr.getOffset() + CI->getSExtValue());
      return computeAddress(LHS, Addr);
    }

    Address Backup = Addr;
    if (computeAddress(LHS, Addr) && computeAddress(RHS, Addr))
      return true;
    Addr = Backup;

    break;
  }
  case Instruction::Sub: {
    // Subs of constants are common and easy enough.
    const Value *LHS = U->getOperand(0);
    const Value *RHS = U->getOperand(1);

    if (const ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
      Addr.setOffset(Addr.getOffset() - CI->getSExtValue());
      return computeAddress(LHS, Addr);
    }
    break;
  }
  }
  Addr.setReg(getRegForValue(Obj));
  return Addr.getReg() != 0;
}
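A standalone sketch of the arithmetic the GetElementPtr case folds into TmpOffset, with made-up layout numbers standing in for DL.getStructLayout and DL.getTypeAllocSize (the GEP, struct size, and field offset below are illustrative assumptions):

#include <cassert>
#include <cstdint>

// Mirrors the folding loop above for a hypothetical
//   getelementptr {i32, i64}, ptr %p, i32 2, i32 1
// Array-style indices scale by the element type's alloc size; struct indices
// add the field's offset from the struct layout.
int64_t foldGEPOffset() {
  int64_t TmpOffset = 0;
  const uint64_t StructAllocSize = 16; // assumed alloc size of {i32, i64}
  const uint64_t Field1Offset = 8;     // assumed offset of field 1
  TmpOffset += 2 * StructAllocSize;    // outer index: 2 * 16 = 32
  TmpOffset += Field1Offset;           // field 1: +8
  return TmpOffset;                    // 40
}

int main() { assert(foldGEPOffset() == 40); }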
Example #7
// Applies the pending narrowing operations to each address's offset range.
// Each branch follows the same pattern: a comparison `p OP q` lets p's range
// be clipped against q's range shifted by the comparison context; strict
// comparisons tighten the bound by one extra unit, and != only clips a bound
// that was previously widened to infinity.
void RangeBasedPointerAnalysis::applyNarrowing() {
  for (auto i : RangedPointers) {
    for (auto j = i.second->addr_begin(), je = i.second->addr_end();
         j != je; j++) {
      Address *a = *j;
      if (!a->Narrowing_Ops.empty()) {
        Range narrowed =
            Range(a->getOffset()->getLower(), a->getOffset()->getUpper());

        // Remember whether widening pushed either bound to infinity.
        std::pair<bool, bool> growth(false, false);
        if (narrowed.getUpper().isEQ(Expr::getPlusInf(*SI)))
          growth.second = true;
        if (narrowed.getLower().isEQ(Expr::getMinusInf(*SI)))
          growth.first = true;

        for (auto z : a->Narrowing_Ops) {
          if (z.second->cmp_op == CmpInst::ICMP_EQ) {
            // ==: both bounds can be tightened.
            RangedPointer *rp = RangedPointers[z.second->cmp_v];
            for (auto w = rp->addr_begin(), we = rp->addr_end(); w != we; w++) {
              Expr dw =
                  (*w)->getOffset()->getLower() + z.second->context->getUpper();
              if (!narrowed.getUpper().isConstant() && !dw.isConstant())
                narrowed.setUpper(narrowed.getUpper().min(dw));
              else if (gt(narrowed.getUpper(), dw))
                narrowed.setUpper(dw);

              Expr up =
                  (*w)->getOffset()->getUpper() + z.second->context->getLower();
              if (!narrowed.getLower().isConstant() && !up.isConstant())
                narrowed.setLower(narrowed.getLower().max(up));
              else if (lt(narrowed.getLower(), up))
                narrowed.setLower(up);
            }
          } else if (z.second->cmp_op == CmpInst::ICMP_NE) {
            // !=: only meaningful for a bound that was widened to infinity.
            RangedPointer *rp = RangedPointers[z.second->cmp_v];
            for (auto w = rp->addr_begin(), we = rp->addr_end(); w != we; w++) {
              if (a->widened) {
                if (growth.second) {
                  Expr dw = (*w)->getOffset()->getLower() +
                            z.second->context->getUpper();
                  if (!narrowed.getUpper().isConstant() && !dw.isConstant())
                    narrowed.setUpper(narrowed.getUpper().min(dw - 1));
                  else if (ge(narrowed.getUpper(), dw))
                    narrowed.setUpper(dw - 1);
                } else if (growth.first) {
                  Expr up = (*w)->getOffset()->getUpper() +
                            z.second->context->getLower();
                  if (!narrowed.getLower().isConstant() && !up.isConstant())
                    narrowed.setLower(narrowed.getLower().max(up + 1));
                  else if (le(narrowed.getLower(), up))
                    narrowed.setLower(up + 1);
                }
              }
            }
          } else if (z.second->cmp_op == CmpInst::ICMP_SLT) {
            // <: tighten the upper bound strictly.
            RangedPointer *rp = RangedPointers[z.second->cmp_v];
            for (auto w = rp->addr_begin(), we = rp->addr_end(); w != we; w++) {
              Expr dw =
                  (*w)->getOffset()->getLower() + z.second->context->getUpper();
              if (!narrowed.getUpper().isConstant() && !dw.isConstant())
                narrowed.setUpper(narrowed.getUpper().min(dw - 1));
              else if (ge(narrowed.getUpper(), dw))
                narrowed.setUpper(dw - 1);
            }
          } else if (z.second->cmp_op == CmpInst::ICMP_SLE) {
            // <=: tighten the upper bound.
            RangedPointer *rp = RangedPointers[z.second->cmp_v];
            for (auto w = rp->addr_begin(), we = rp->addr_end(); w != we; w++) {
              Expr dw =
                  (*w)->getOffset()->getLower() + z.second->context->getUpper();
              if (!narrowed.getUpper().isConstant() && !dw.isConstant())
                narrowed.setUpper(narrowed.getUpper().min(dw));
              else if (gt(narrowed.getUpper(), dw))
                narrowed.setUpper(dw);
            }
          } else if (z.second->cmp_op == CmpInst::ICMP_SGT) {
            // >: tighten the lower bound strictly.
            RangedPointer *rp = RangedPointers[z.second->cmp_v];
            for (auto w = rp->addr_begin(), we = rp->addr_end(); w != we; w++) {
              Expr up =
                  (*w)->getOffset()->getUpper() + z.second->context->getLower();
              if (!narrowed.getLower().isConstant() && !up.isConstant())
                narrowed.setLower(narrowed.getLower().max(up + 1));
              else if (le(narrowed.getLower(), up))
                narrowed.setLower(up + 1);
            }
          } else if (z.second->cmp_op == CmpInst::ICMP_SGE) {
            // >=: tighten the lower bound.
            RangedPointer *rp = RangedPointers[z.second->cmp_v];
            for (auto w = rp->addr_begin(), we = rp->addr_end(); w != we; w++) {
              Expr up =
                  (*w)->getOffset()->getUpper() + z.second->context->getLower();
              if (!narrowed.getLower().isConstant() && !up.isConstant())
                narrowed.setLower(narrowed.getLower().max(up));
              else if (lt(narrowed.getLower(), up))
                narrowed.setLower(up);
            }
          }
        }
        a->Narrowing_Ops.clear();
        a->setOffset(narrowed.getLower(), narrowed.getUpper());
      }
    }
  }
}