Example no. 1
0
void PandaInstrumentVisitor::visitMemSetInst(MemSetInst &I){
    Function *F = mod->getFunction("log_dynval");
    if (!F) {
        printf("Instrumentation function not found\n");
        assert(1==0);
    }

    int bytes = 0;
    Value *length = I.getLength();
    if (ConstantInt* CI = dyn_cast<ConstantInt>(length)) {
        if (CI->getBitWidth() <= 64) {
            bytes = CI->getSExtValue();
        }
    }

    if (bytes > 100) {
        // This mostly happens in CPU state reset
        printf("Note: dyn log ignoring memset greater than 100 bytes\n");
        return;
    }

    PtrToIntInst *PTII;
    CallInst *CI;
    std::vector<Value*> argValues;
    PTII = static_cast<PtrToIntInst*>(IRB.CreatePtrToInt(
        I.getOperand(0), wordType));
    argValues.push_back(ConstantInt::get(ptrType,
        (uintptr_t)dynval_buffer));
    argValues.push_back(ConstantInt::get(intType, ADDRENTRY));
    argValues.push_back(ConstantInt::get(intType, STORE));
    argValues.push_back(static_cast<Value*>(PTII));
    CI = IRB.CreateCall(F, ArrayRef<Value*>(argValues));
    CI->insertBefore(static_cast<Instruction*>(&I));
    PTII->insertBefore(static_cast<Instruction*>(CI));
}
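
The constant-length test used above can be isolated into a small helper. A minimal sketch (not part of the PANDA instrumentation code), assuming the usual LLVM headers are available:

#include "llvm/IR/Constants.h"
#include "llvm/IR/IntrinsicInst.h"

using namespace llvm;

// Return the memset length if it is a small compile-time constant,
// or -1 when it is unknown or wider than 64 bits.
static int64_t getSmallConstantLength(const MemSetInst &MSI) {
  if (const ConstantInt *CI = dyn_cast<ConstantInt>(MSI.getLength()))
    if (CI->getBitWidth() <= 64)
      return CI->getSExtValue();
  return -1;
}
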
/// tryMergingIntoMemset - When scanning forward over instructions, we look for
/// some other patterns to fold away.  In particular, this looks for stores to
/// neighboring locations of memory.  If it sees enough consecutive ones, it
/// attempts to merge them together into a memcpy/memset.
Instruction *MemCpyOpt::tryMergingIntoMemset(Instruction *StartInst,
        Value *StartPtr, Value *ByteVal) {
    if (TD == 0) return 0;

    // Okay, so we now have a single store that can be splatable.  Scan to find
    // all subsequent stores of the same value to offset from the same pointer.
    // Join these together into ranges, so we can decide whether contiguous blocks
    // are stored.
    MemsetRanges Ranges(*TD);

    BasicBlock::iterator BI = StartInst;
    for (++BI; !isa<TerminatorInst>(BI); ++BI) {
        if (!isa<StoreInst>(BI) && !isa<MemSetInst>(BI)) {
            // If the instruction is readnone, ignore it, otherwise bail out.  We
            // don't even allow readonly here because we don't want something like:
            // A[1] = 2; strlen(A); A[2] = 2; -> memcpy(A, ...); strlen(A).
            if (BI->mayWriteToMemory() || BI->mayReadFromMemory())
                break;
            continue;
        }

        if (StoreInst *NextStore = dyn_cast<StoreInst>(BI)) {
            // If this is a store, see if we can merge it in.
            if (!NextStore->isSimple()) break;

            // Check to see if this stored value is of the same byte-splattable value.
            if (ByteVal != isBytewiseValue(NextStore->getOperand(0)))
                break;

            // Check to see if this store is to a constant offset from the start ptr.
            int64_t Offset;
            if (!IsPointerOffset(StartPtr, NextStore->getPointerOperand(),
                                 Offset, *TD))
                break;

            Ranges.addStore(Offset, NextStore);
        } else {
            MemSetInst *MSI = cast<MemSetInst>(BI);

            if (MSI->isVolatile() || ByteVal != MSI->getValue() ||
                    !isa<ConstantInt>(MSI->getLength()))
                break;

            // Check to see if this store is to a constant offset from the start ptr.
            int64_t Offset;
            if (!IsPointerOffset(StartPtr, MSI->getDest(), Offset, *TD))
                break;

            Ranges.addMemSet(Offset, MSI);
        }
    }

    // If we have no ranges, then we just had a single store with nothing that
    // could be merged in.  This is a very common case of course.
    if (Ranges.empty())
        return 0;

    // If we had at least one store that could be merged in, add the starting
    // store as well.  We try to avoid this unless there is at least something
    // interesting as a small compile-time optimization.
    Ranges.addInst(0, StartInst);

    // If we create any memsets, we put it right before the first instruction that
    // isn't part of the memset block.  This ensures that the memset is dominated
    // by any addressing instruction needed by the start of the block.
    IRBuilder<> Builder(BI);

    // Now that we have full information about ranges, loop over the ranges and
    // emit memset's for anything big enough to be worthwhile.
    Instruction *AMemSet = 0;
    for (MemsetRanges::const_iterator I = Ranges.begin(), E = Ranges.end();
            I != E; ++I) {
        const MemsetRange &Range = *I;

        if (Range.TheStores.size() == 1) continue;

        // If it is profitable to lower this range to memset, do so now.
        if (!Range.isProfitableToUseMemset(*TD))
            continue;

        // Otherwise, we do want to transform this!  Create a new memset.
        // Get the starting pointer of the block.
        StartPtr = Range.StartPtr;

        // Determine alignment
        unsigned Alignment = Range.Alignment;
        if (Alignment == 0) {
            Type *EltType =
                cast<PointerType>(StartPtr->getType())->getElementType();
            Alignment = TD->getABITypeAlignment(EltType);
        }

        AMemSet =
            Builder.CreateMemSet(StartPtr, ByteVal, Range.End-Range.Start, Alignment);

        DEBUG(dbgs() << "Replace stores:\n";
              for (unsigned i = 0, e = Range.TheStores.size(); i != e; ++i)
              dbgs() << *Range.TheStores[i] << '\n';
              dbgs() << "With: " << *AMemSet << '\n');

        if (!Range.TheStores.empty())
            AMemSet->setDebugLoc(Range.TheStores[0]->getDebugLoc());

        // Zap all the stores.
        for (SmallVector<Instruction*, 16>::const_iterator
                SI = Range.TheStores.begin(),
                SE = Range.TheStores.end(); SI != SE; ++SI) {
            MD->removeInstruction(*SI);
            (*SI)->eraseFromParent();
        }
        ++NumMemSetInfer;
    }

    return AMemSet;
}
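
At the source level, the transformation this routine performs amounts to collapsing a run of adjacent splat stores into a single call. A minimal illustration in plain C++ (not LLVM IR, and not taken from the pass itself):

#include <cstring>

void beforeMerge(char *A) {
  // Four neighboring stores of the same byte value ...
  A[0] = 0;
  A[1] = 0;
  A[2] = 0;
  A[3] = 0;
}

void afterMerge(char *A) {
  // ... become one memset covering the contiguous range [0, 4).
  std::memset(A, 0, 4);
}
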
Example no. 3
0
void AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return;

  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector
  Type *AllocaTy = I.getAllocatedType();

  DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I))
    return;

  DEBUG(dbgs() << " alloca is not a candidate for vectorization.\n");

  const Function &ContainingFunction = *I.getParent()->getParent();

  // FIXME: We should also try to get this value from the reqd_work_group_size
  // function attribute if it is available.
  unsigned WorkGroupSize = AMDGPU::getMaximumWorkGroupSize(ContainingFunction);

  int AllocaSize =
      WorkGroupSize * Mod->getDataLayout().getTypeAllocSize(AllocaTy);

  if (AllocaSize > LocalMemAvailable) {
    DEBUG(dbgs() << " Not enough local memory to promote alloca.\n");
    return;
  }

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, WorkList)) {
    DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return;
  }

  DEBUG(dbgs() << "Promoting alloca to local memory\n");
  LocalMemAvailable -= AllocaSize;

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AMDGPUAS::LOCAL_ADDRESS);
  GV->setUnnamedAddr(true);
  GV->setAlignment(I.getAlignment());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

      // The operand's value should be corrected on its own.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);
      continue;
    }

    IntrinsicInst *Intr = dyn_cast<IntrinsicInst>(Call);
    if (!Intr) {
      // FIXME: What is this for? It doesn't make sense to promote arbitrary
      // function calls. If the call is to a defined function that can also be
      // promoted, we should be able to do this once that function is also
      // rewritten.

      std::vector<Type*> ArgTypes;
      for (unsigned ArgIdx = 0, ArgEnd = Call->getNumArgOperands();
                                ArgIdx != ArgEnd; ++ArgIdx) {
        ArgTypes.push_back(Call->getArgOperand(ArgIdx)->getType());
      }
      Function *F = Call->getCalledFunction();
      FunctionType *NewType = FunctionType::get(Call->getType(), ArgTypes,
                                                F->isVarArg());
      Constant *C = Mod->getOrInsertFunction((F->getName() + ".local").str(),
                                             NewType, F->getAttributes());
      Function *NewF = cast<Function>(C);
      Call->setCalledFunction(NewF);
      continue;
    }

    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getRawSource(),
                           MemCpy->getLength(), MemCpy->getAlignment(),
                           MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getRawSource(),
                            MemMove->getLength(), MemMove->getAlignment(),
                            MemMove->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getAlignment(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::invariant_group_barrier:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Type *SrcTy = Src->getType()->getPointerElementType();
      Function *ObjectSize = Intrinsic::getDeclaration(Mod,
        Intrinsic::objectsize,
        { Intr->getType(), PointerType::get(SrcTy, AMDGPUAS::LOCAL_ADDRESS) }
      );

      CallInst *NewCall
        = Builder.CreateCall(ObjectSize, { Src, Intr->getOperand(1) });
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->dump();
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }
}
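
The index arithmetic emitted above linearizes the three work-item coordinates so that each work-item addresses its own copy inside the per-work-group array. A minimal sketch of the formula in plain C++ (hypothetical sizes, not part of the pass):

#include <cassert>

// TID = (TCntY * TCntZ) * TIdX + TCntZ * TIdY + TIdZ, matching the
// CreateMul/CreateAdd sequence emitted by handleAlloca.
static unsigned flatWorkitemId(unsigned TIdX, unsigned TIdY, unsigned TIdZ,
                               unsigned TCntY, unsigned TCntZ) {
  return (TCntY * TCntZ) * TIdX + TCntZ * TIdY + TIdZ;
}

int main() {
  // For a hypothetical 4x2x2 work-group, the 16 work-items map to 16
  // distinct slots of the promoted array.
  const unsigned TCntY = 2, TCntZ = 2;
  assert(flatWorkitemId(0, 0, 0, TCntY, TCntZ) == 0);
  assert(flatWorkitemId(1, 0, 0, TCntY, TCntZ) == 4);
  assert(flatWorkitemId(3, 1, 1, TCntY, TCntZ) == 15);
  return 0;
}
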
Example no. 4
0
// FIXME: Should try to pick the most likely to be profitable allocas first.
bool AMDGPUPromoteAlloca::handleAlloca(AllocaInst &I, bool SufficientLDS) {
  // Array allocations are probably not worth handling, since an allocation of
  // the array type is the canonical form.
  if (!I.isStaticAlloca() || I.isArrayAllocation())
    return false;

  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector
  Type *AllocaTy = I.getAllocatedType();

  DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I, AS))
    return true; // Promoted to vector.

  const Function &ContainingFunction = *I.getParent()->getParent();
  CallingConv::ID CC = ContainingFunction.getCallingConv();

  // Don't promote the alloca to LDS for shader calling conventions as the work
  // item ID intrinsics are not supported for these calling conventions.
  // Furthermore not all LDS is available for some of the stages.
  switch (CC) {
  case CallingConv::AMDGPU_KERNEL:
  case CallingConv::SPIR_KERNEL:
    break;
  default:
    DEBUG(dbgs() << " promote alloca to LDS not supported with calling convention.\n");
    return false;
  }

  // Not likely to have sufficient local memory for promotion.
  if (!SufficientLDS)
    return false;

  const AMDGPUSubtarget &ST =
    TM->getSubtarget<AMDGPUSubtarget>(ContainingFunction);
  unsigned WorkGroupSize = ST.getFlatWorkGroupSizes(ContainingFunction).second;

  const DataLayout &DL = Mod->getDataLayout();

  unsigned Align = I.getAlignment();
  if (Align == 0)
    Align = DL.getABITypeAlignment(I.getAllocatedType());

  // FIXME: This computed padding is likely wrong since it depends on inverse
  // usage order.
  //
  // FIXME: It is also possible that if we're allowed to use all of the memory,
  // we could end up using more than the maximum due to alignment padding.

  uint32_t NewSize = alignTo(CurrentLocalMemUsage, Align);
  uint32_t AllocSize = WorkGroupSize * DL.getTypeAllocSize(AllocaTy);
  NewSize += AllocSize;

  if (NewSize > LocalMemLimit) {
    DEBUG(dbgs() << "  " << AllocSize
          << " bytes of local memory not available to promote\n");
    return false;
  }

  CurrentLocalMemUsage = NewSize;

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, &I, WorkList)) {
    DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return false;
  }

  DEBUG(dbgs() << "Promoting alloca to local memory\n");

  Function *F = I.getParent()->getParent();

  Type *GVTy = ArrayType::get(I.getAllocatedType(), WorkGroupSize);
  GlobalVariable *GV = new GlobalVariable(
      *Mod, GVTy, false, GlobalValue::InternalLinkage,
      UndefValue::get(GVTy),
      Twine(F->getName()) + Twine('.') + I.getName(),
      nullptr,
      GlobalVariable::NotThreadLocal,
      AS.LOCAL_ADDRESS);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(I.getAlignment());

  Value *TCntY, *TCntZ;

  std::tie(TCntY, TCntZ) = getLocalSizeYZ(Builder);
  Value *TIdX = getWorkitemID(Builder, 0);
  Value *TIdY = getWorkitemID(Builder, 1);
  Value *TIdZ = getWorkitemID(Builder, 2);

  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ, "", true, true);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ, "", true, true);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  Value *Indices[] = {
    Constant::getNullValue(Type::getInt32Ty(Mod->getContext())),
    TID
  };

  Value *Offset = Builder.CreateInBoundsGEP(GVTy, GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  for (Value *V : WorkList) {
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      if (ICmpInst *CI = dyn_cast<ICmpInst>(V)) {
        Value *Src0 = CI->getOperand(0);
        Type *EltTy = Src0->getType()->getPointerElementType();
        PointerType *NewTy = PointerType::get(EltTy, AS.LOCAL_ADDRESS);

        if (isa<ConstantPointerNull>(CI->getOperand(0)))
          CI->setOperand(0, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(CI->getOperand(1)))
          CI->setOperand(1, ConstantPointerNull::get(NewTy));

        continue;
      }

      // The operand's value should be corrected on its own and we don't want to
      // touch the users.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AS.LOCAL_ADDRESS);

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);

      // Adjust the types of any constant operands.
      if (SelectInst *SI = dyn_cast<SelectInst>(V)) {
        if (isa<ConstantPointerNull>(SI->getOperand(1)))
          SI->setOperand(1, ConstantPointerNull::get(NewTy));

        if (isa<ConstantPointerNull>(SI->getOperand(2)))
          SI->setOperand(2, ConstantPointerNull::get(NewTy));
      } else if (PHINode *Phi = dyn_cast<PHINode>(V)) {
        for (unsigned I = 0, E = Phi->getNumIncomingValues(); I != E; ++I) {
          if (isa<ConstantPointerNull>(Phi->getIncomingValue(I)))
            Phi->setIncomingValue(I, ConstantPointerNull::get(NewTy));
        }
      }

      continue;
    }

    IntrinsicInst *Intr = cast<IntrinsicInst>(Call);
    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getDestAlignment(),
                           MemCpy->getRawSource(), MemCpy->getSourceAlignment(),
                           MemCpy->getLength(), MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memmove: {
      MemMoveInst *MemMove = cast<MemMoveInst>(Intr);
      Builder.CreateMemMove(MemMove->getRawDest(), MemMove->getDestAlignment(),
                            MemMove->getRawSource(), MemMove->getSourceAlignment(),
                            MemMove->getLength(), MemMove->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getDestAlignment(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::invariant_start:
    case Intrinsic::invariant_end:
    case Intrinsic::invariant_group_barrier:
      Intr->eraseFromParent();
      // FIXME: I think the invariant marker should still theoretically apply,
      // but the intrinsics need to be changed to accept pointers with any
      // address space.
      continue;
    case Intrinsic::objectsize: {
      Value *Src = Intr->getOperand(0);
      Type *SrcTy = Src->getType()->getPointerElementType();
      Function *ObjectSize = Intrinsic::getDeclaration(Mod,
        Intrinsic::objectsize,
        { Intr->getType(), PointerType::get(SrcTy, AS.LOCAL_ADDRESS) }
      );

      CallInst *NewCall = Builder.CreateCall(
          ObjectSize, {Src, Intr->getOperand(1), Intr->getOperand(2)});
      Intr->replaceAllUsesWith(NewCall);
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->print(errs());
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }
  return true;
}
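
The budget check near the top of this version can be read as simple arithmetic: the aligned running LDS usage plus one copy of the alloca per work-item must stay under the limit. A minimal sketch with made-up numbers (the helper and values below are illustrative, not the pass's own):

#include <cstdint>
#include <iostream>

// Round Value up to the next multiple of Align (same role as llvm::alignTo).
static uint32_t roundUpTo(uint32_t Value, uint32_t Align) {
  return (Value + Align - 1) / Align * Align;
}

int main() {
  const uint32_t LocalMemLimit = 65536;       // e.g. 64 KiB of LDS per work-group
  const uint32_t CurrentLocalMemUsage = 100;  // bytes already claimed by earlier allocas
  const uint32_t WorkGroupSize = 256;         // maximum flat work-group size
  const uint32_t TypeAllocSize = 16;          // bytes per work-item for this alloca
  const uint32_t Align = 8;

  const uint32_t NewSize =
      roundUpTo(CurrentLocalMemUsage, Align) + WorkGroupSize * TypeAllocSize;

  std::cout << (NewSize > LocalMemLimit ? "reject" : "promote") << '\n';
  return 0;
}
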
Example no. 5
0
bool NVPTXLowerAggrCopies::runOnFunction(Function &F) {
  SmallVector<LoadInst *, 4> aggrLoads;
  SmallVector<MemTransferInst *, 4> aggrMemcpys;
  SmallVector<MemSetInst *, 4> aggrMemsets;

  DataLayout *TD = &getAnalysis<DataLayout>();
  LLVMContext &Context = F.getParent()->getContext();

  //
  // Collect all the aggrLoads, aggrMemcpys and aggrMemsets.
  //
  //const BasicBlock *firstBB = &F.front();  // first BB in F
  for (Function::iterator BI = F.begin(), BE = F.end(); BI != BE; ++BI) {
    //BasicBlock *bb = BI;
    for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;
        ++II) {
      if (LoadInst * load = dyn_cast<LoadInst>(II)) {

        if (load->hasOneUse() == false) continue;

        if (TD->getTypeStoreSize(load->getType()) < MaxAggrCopySize) continue;

        User *use = *(load->use_begin());
        if (StoreInst * store = dyn_cast<StoreInst>(use)) {
          if (store->getOperand(0) != load) // getValueOperand
            continue;
          aggrLoads.push_back(load);
        }
      } else if (MemTransferInst * intr = dyn_cast<MemTransferInst>(II)) {
        Value *len = intr->getLength();
        // If the number of elements being copied is greater
        // than MaxAggrCopySize, lower it to a loop
        if (ConstantInt *len_int = dyn_cast<ConstantInt>(len)) {
          if (len_int->getZExtValue() >= MaxAggrCopySize) {
            aggrMemcpys.push_back(intr);
          }
        } else {
          // turn variable length memcpy/memmov into loop
          aggrMemcpys.push_back(intr);
        }
      } else if (MemSetInst * memsetintr = dyn_cast<MemSetInst>(II)) {
        Value *len = memsetintr->getLength();
        if (ConstantInt * len_int = dyn_cast<ConstantInt>(len)) {
          if (len_int->getZExtValue() >= MaxAggrCopySize) {
            aggrMemsets.push_back(memsetintr);
          }
        } else {
          // turn variable length memset into loop
          aggrMemsets.push_back(memsetintr);
        }
      }
    }
  }
  if ((aggrLoads.size() == 0) && (aggrMemcpys.size() == 0)
      && (aggrMemsets.size() == 0)) return false;

  //
  // Do the transformation of an aggr load/copy/set to a loop
  //
  for (unsigned i = 0, e = aggrLoads.size(); i != e; ++i) {
    LoadInst *load = aggrLoads[i];
    StoreInst *store = dyn_cast<StoreInst>(*load->use_begin());
    Value *srcAddr = load->getOperand(0);
    Value *dstAddr = store->getOperand(1);
    unsigned numLoads = TD->getTypeStoreSize(load->getType());
    Value *len = ConstantInt::get(Type::getInt32Ty(Context), numLoads);

    convertTransferToLoop(store, srcAddr, dstAddr, len, load->isVolatile(),
                          store->isVolatile(), Context, F);

    store->eraseFromParent();
    load->eraseFromParent();
  }

  for (unsigned i = 0, e = aggrMemcpys.size(); i != e; ++i) {
    MemTransferInst *cpy = aggrMemcpys[i];
    Value *len = cpy->getLength();
    // llvm 2.7 version of memcpy does not have volatile
    // operand yet. So always making it non-volatile
    // optimistically, so that we don't see unnecessary
    // st.volatile in ptx
    convertTransferToLoop(cpy, cpy->getSource(), cpy->getDest(), len, false,
                          false, Context, F);
    cpy->eraseFromParent();
  }

  for (unsigned i = 0, e = aggrMemsets.size(); i != e; ++i) {
    MemSetInst *memsetinst = aggrMemsets[i];
    Value *len = memsetinst->getLength();
    Value *val = memsetinst->getValue();
    convertMemSetToLoop(memsetinst, memsetinst->getDest(), len, val, Context,
                        F);
    memsetinst->eraseFromParent();
  }

  return true;
}
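
What convertTransferToLoop and convertMemSetToLoop produce can be pictured at the source level: instead of one aggregate copy or set intrinsic, the data is moved element by element. A minimal hand-written illustration (not the NVPTX helpers themselves):

#include <cstddef>

// One byte per iteration; the real lowering emits the equivalent loop in IR.
void copyAsLoop(char *dst, const char *src, std::size_t len) {
  for (std::size_t i = 0; i != len; ++i)
    dst[i] = src[i];
}

void setAsLoop(char *dst, char val, std::size_t len) {
  for (std::size_t i = 0; i != len; ++i)
    dst[i] = val;
}
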
void AMDGPUPromoteAlloca::visitAlloca(AllocaInst &I) {
  IRBuilder<> Builder(&I);

  // First try to replace the alloca with a vector
  Type *AllocaTy = I.getAllocatedType();

  DEBUG(dbgs() << "Trying to promote " << I << '\n');

  if (tryPromoteAllocaToVector(&I))
    return;

  DEBUG(dbgs() << " alloca is not a candidate for vectorization.\n");

  // FIXME: This is the maximum work group size.  We should try to get
  // value from the reqd_work_group_size function attribute if it is
  // available.
  unsigned WorkGroupSize = 256;
  int AllocaSize = WorkGroupSize *
      Mod->getDataLayout()->getTypeAllocSize(AllocaTy);

  if (AllocaSize > LocalMemAvailable) {
    DEBUG(dbgs() << " Not enough local memory to promote alloca.\n");
    return;
  }

  std::vector<Value*> WorkList;

  if (!collectUsesWithPtrTypes(&I, WorkList)) {
    DEBUG(dbgs() << " Do not know how to convert all uses\n");
    return;
  }

  DEBUG(dbgs() << "Promoting alloca to local memory\n");
  LocalMemAvailable -= AllocaSize;

  GlobalVariable *GV = new GlobalVariable(
      *Mod, ArrayType::get(I.getAllocatedType(), 256), false,
      GlobalValue::ExternalLinkage, 0, I.getName(), 0,
      GlobalVariable::NotThreadLocal, AMDGPUAS::LOCAL_ADDRESS);

  FunctionType *FTy = FunctionType::get(
      Type::getInt32Ty(Mod->getContext()), false);
  AttributeSet AttrSet;
  AttrSet.addAttribute(Mod->getContext(), 0, Attribute::ReadNone);

  Value *ReadLocalSizeY = Mod->getOrInsertFunction(
      "llvm.r600.read.local.size.y", FTy, AttrSet);
  Value *ReadLocalSizeZ = Mod->getOrInsertFunction(
      "llvm.r600.read.local.size.z", FTy, AttrSet);
  Value *ReadTIDIGX = Mod->getOrInsertFunction(
      "llvm.r600.read.tidig.x", FTy, AttrSet);
  Value *ReadTIDIGY = Mod->getOrInsertFunction(
      "llvm.r600.read.tidig.y", FTy, AttrSet);
  Value *ReadTIDIGZ = Mod->getOrInsertFunction(
      "llvm.r600.read.tidig.z", FTy, AttrSet);


  Value *TCntY = Builder.CreateCall(ReadLocalSizeY);
  Value *TCntZ = Builder.CreateCall(ReadLocalSizeZ);
  Value *TIdX  = Builder.CreateCall(ReadTIDIGX);
  Value *TIdY  = Builder.CreateCall(ReadTIDIGY);
  Value *TIdZ  = Builder.CreateCall(ReadTIDIGZ);

  Value *Tmp0 = Builder.CreateMul(TCntY, TCntZ);
  Tmp0 = Builder.CreateMul(Tmp0, TIdX);
  Value *Tmp1 = Builder.CreateMul(TIdY, TCntZ);
  Value *TID = Builder.CreateAdd(Tmp0, Tmp1);
  TID = Builder.CreateAdd(TID, TIdZ);

  std::vector<Value*> Indices;
  Indices.push_back(Constant::getNullValue(Type::getInt32Ty(Mod->getContext())));
  Indices.push_back(TID);

  Value *Offset = Builder.CreateGEP(GV, Indices);
  I.mutateType(Offset->getType());
  I.replaceAllUsesWith(Offset);
  I.eraseFromParent();

  for (std::vector<Value*>::iterator i = WorkList.begin(),
                                     e = WorkList.end(); i != e; ++i) {
    Value *V = *i;
    CallInst *Call = dyn_cast<CallInst>(V);
    if (!Call) {
      Type *EltTy = V->getType()->getPointerElementType();
      PointerType *NewTy = PointerType::get(EltTy, AMDGPUAS::LOCAL_ADDRESS);

      // The operand's value should be corrected on its own.
      if (isa<AddrSpaceCastInst>(V))
        continue;

      // FIXME: It doesn't really make sense to try to do this for all
      // instructions.
      V->mutateType(NewTy);
      continue;
    }

    IntrinsicInst *Intr = dyn_cast<IntrinsicInst>(Call);
    if (!Intr) {
      std::vector<Type*> ArgTypes;
      for (unsigned ArgIdx = 0, ArgEnd = Call->getNumArgOperands();
                                ArgIdx != ArgEnd; ++ArgIdx) {
        ArgTypes.push_back(Call->getArgOperand(ArgIdx)->getType());
      }
      Function *F = Call->getCalledFunction();
      FunctionType *NewType = FunctionType::get(Call->getType(), ArgTypes,
                                                F->isVarArg());
      Constant *C = Mod->getOrInsertFunction(StringRef(F->getName().str() + ".local"), NewType,
                                             F->getAttributes());
      Function *NewF = cast<Function>(C);
      Call->setCalledFunction(NewF);
      continue;
    }

    Builder.SetInsertPoint(Intr);
    switch (Intr->getIntrinsicID()) {
    case Intrinsic::lifetime_start:
    case Intrinsic::lifetime_end:
      // These intrinsics are for address space 0 only
      Intr->eraseFromParent();
      continue;
    case Intrinsic::memcpy: {
      MemCpyInst *MemCpy = cast<MemCpyInst>(Intr);
      Builder.CreateMemCpy(MemCpy->getRawDest(), MemCpy->getRawSource(),
                           MemCpy->getLength(), MemCpy->getAlignment(),
                           MemCpy->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    case Intrinsic::memset: {
      MemSetInst *MemSet = cast<MemSetInst>(Intr);
      Builder.CreateMemSet(MemSet->getRawDest(), MemSet->getValue(),
                           MemSet->getLength(), MemSet->getAlignment(),
                           MemSet->isVolatile());
      Intr->eraseFromParent();
      continue;
    }
    default:
      Intr->dump();
      llvm_unreachable("Don't know how to promote alloca intrinsic use.");
    }
  }
}
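
The overall effect of the alloca rewrite in this older visitAlloca variant (and in the handleAlloca versions above) can be summarized in plain C++: the per-work-item private array becomes one shared array holding a copy per work-item, selected through the flat thread id. A rough sketch with assumed sizes, not generated code:

constexpr unsigned WorkGroupSize = 256; // assumed maximum work-group size

// Stand-in for the original private alloca: a small scratch array per work-item.
struct Scratch { int Data[4]; };

// Stand-in for the LDS global created by the pass: one copy per work-item.
static Scratch SharedCopies[WorkGroupSize];

// Equivalent of the GEP {0, TID} that replaces the alloca.
int *promotedScratch(unsigned FlatThreadId) {
  return SharedCopies[FlatThreadId].Data;
}
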