Example #1
void Mapper::remapInstruction(Instruction *I) {
  // Remap operands.
  for (Use &Op : I->operands()) {
    Value *V = mapValue(Op);
    // If we aren't ignoring missing entries, assert that something happened.
    if (V)
      Op = V;
    else
      assert((Flags & RF_IgnoreMissingLocals) &&
             "Referenced value not in value map!");
  }

  // Remap phi nodes' incoming blocks.
  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *V = mapValue(PN->getIncomingBlock(i));
      // If we aren't ignoring missing entries, assert that something happened.
      if (V)
        PN->setIncomingBlock(i, cast<BasicBlock>(V));
      else
        assert((Flags & RF_IgnoreMissingLocals) &&
               "Referenced block not in value map!");
    }
  }

  // Remap attached metadata.
  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
  I->getAllMetadata(MDs);
  for (const auto &MI : MDs) {
    MDNode *Old = MI.second;
    MDNode *New = cast_or_null<MDNode>(mapMetadata(Old));
    if (New != Old)
      I->setMetadata(MI.first, New);
  }

  if (!TypeMapper)
    return;

  // If the instruction's type is being remapped, do so now.
  if (auto CS = CallSite(I)) {
    SmallVector<Type *, 3> Tys;
    FunctionType *FTy = CS.getFunctionType();
    Tys.reserve(FTy->getNumParams());
    for (Type *Ty : FTy->params())
      Tys.push_back(TypeMapper->remapType(Ty));
    CS.mutateFunctionType(FunctionType::get(
        TypeMapper->remapType(I->getType()), Tys, FTy->isVarArg()));
    return;
  }
  if (auto *AI = dyn_cast<AllocaInst>(I))
    AI->setAllocatedType(TypeMapper->remapType(AI->getAllocatedType()));
  if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
    GEP->setSourceElementType(
        TypeMapper->remapType(GEP->getSourceElementType()));
    GEP->setResultElementType(
        TypeMapper->remapType(GEP->getResultElementType()));
  }
  I->mutateType(TypeMapper->remapType(I->getType()));
}
Example #2
bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (!TM || skipFunction(F))
    return false;

  FunctionType *FTy = F.getFunctionType();

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemAvailable = 0;
      DEBUG(dbgs() << "Function has local memory argument.  Promoting to "
                      "local memory disabled.\n");
      return false;
    }
  }

  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(F);
  LocalMemAvailable = ST.getLocalMemorySize();
  if (LocalMemAvailable == 0)
    return false;

  // Check how much local memory is being used by global objects
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getType()->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    for (User *U : GV.users()) {
      Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F) {
        LocalMemAvailable -=
          Mod->getDataLayout().getTypeAllocSize(GV.getValueType());
        break;
      }
    }
  }

  LocalMemAvailable = std::max(0, LocalMemAvailable);
  DEBUG(dbgs() << LocalMemAvailable << " bytes free in local memory.\n");

  BasicBlock &EntryBB = *F.begin();
  for (auto I = EntryBB.begin(), E = EntryBB.end(); I != E; ) {
    AllocaInst *AI = dyn_cast<AllocaInst>(I);

    ++I;
    if (AI)
      handleAlloca(*AI);
  }

  return true;
}
Example #3
/// RemapInstruction - Convert the instruction operands from referencing the
/// current values into those specified by VMap.
///
void llvm::RemapInstruction(Instruction *I, ValueToValueMapTy &VMap,
                            RemapFlags Flags, ValueMapTypeRemapper *TypeMapper,
                            ValueMaterializer *Materializer){
  // Remap operands.
  for (User::op_iterator op = I->op_begin(), E = I->op_end(); op != E; ++op) {
    Value *V = MapValue(*op, VMap, Flags, TypeMapper, Materializer);
    // If we aren't ignoring missing entries, assert that something happened.
    if (V)
      *op = V;
    else
      assert((Flags & RF_IgnoreMissingEntries) &&
             "Referenced value not in value map!");
  }

  // Remap phi nodes' incoming blocks.
  if (PHINode *PN = dyn_cast<PHINode>(I)) {
    for (unsigned i = 0, e = PN->getNumIncomingValues(); i != e; ++i) {
      Value *V = MapValue(PN->getIncomingBlock(i), VMap, Flags);
      // If we aren't ignoring missing entries, assert that something happened.
      if (V)
        PN->setIncomingBlock(i, cast<BasicBlock>(V));
      else
        assert((Flags & RF_IgnoreMissingEntries) &&
               "Referenced block not in value map!");
    }
  }

  // Remap attached metadata.
  SmallVector<std::pair<unsigned, MDNode *>, 4> MDs;
  I->getAllMetadata(MDs);
  for (SmallVectorImpl<std::pair<unsigned, MDNode *>>::iterator
           MI = MDs.begin(),
           ME = MDs.end();
       MI != ME; ++MI) {
    MDNode *Old = MI->second;
    MDNode *New = MapMetadata(Old, VMap, Flags, TypeMapper, Materializer);
    if (New != Old)
      I->setMetadata(MI->first, New);
  }
  
  if (!TypeMapper)
    return;

  // If the instruction's type is being remapped, do so now.
  if (auto CS = CallSite(I)) {
    SmallVector<Type *, 3> Tys;
    FunctionType *FTy = CS.getFunctionType();
    Tys.reserve(FTy->getNumParams());
    for (Type *Ty : FTy->params())
      Tys.push_back(TypeMapper->remapType(Ty));
    CS.mutateFunctionType(FunctionType::get(
        TypeMapper->remapType(I->getType()), Tys, FTy->isVarArg()));
  } else
    I->mutateType(TypeMapper->remapType(I->getType()));
}
Example #4
bool AMDGPUPromoteAlloca::runOnFunction(Function &F) {
  if (!TM || F.hasFnAttribute(Attribute::OptimizeNone))
    return false;

  FunctionType *FTy = F.getFunctionType();

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AMDGPUAS::LOCAL_ADDRESS) {
      LocalMemAvailable = 0;
      DEBUG(dbgs() << "Function has local memory argument.  Promoting to "
                      "local memory disabled.\n");
      return false;
    }
  }

  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(F);
  LocalMemAvailable = ST.getLocalMemorySize();
  if (LocalMemAvailable == 0)
    return false;

  // Check how much local memory is being used by global objects
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getType()->getAddressSpace() != AMDGPUAS::LOCAL_ADDRESS)
      continue;

    for (User *U : GV.users()) {
      Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F) {
        LocalMemAvailable -=
          Mod->getDataLayout().getTypeAllocSize(GV.getValueType());
        break;
      }
    }
  }

  LocalMemAvailable = std::max(0, LocalMemAvailable);
  DEBUG(dbgs() << LocalMemAvailable << " bytes free in local memory.\n");

  visit(F);

  return true;
}
Example #5
// Try to find the address of an external function given a Function object.
// Note that the interpreter doesn't know how to assemble a real call in the
// general case (that is the JIT's job), so it assumes that all external
// functions have the same (and fairly "general") signature.
// Typical examples of such functions are the "lle_X_" ones.
static ExFunc lookupFunction(const Function *F) {
  // Function not found, look it up... start by figuring out what the
  // composite function name should be.
  std::string ExtName = "lle_";
  FunctionType *FT = F->getFunctionType();
  ExtName += getTypeID(FT->getReturnType());
  for (Type *T : FT->params())
    ExtName += getTypeID(T);
  ExtName += ("_" + F->getName()).str();

  sys::ScopedLock Writer(*FunctionsLock);
  ExFunc FnPtr = (*FuncNames)[ExtName];
  if (!FnPtr)
    FnPtr = (*FuncNames)[("lle_X_" + F->getName()).str()];
  if (!FnPtr)  // Try calling a generic function... if it exists...
    FnPtr = (ExFunc)(intptr_t)sys::DynamicLibrary::SearchForAddressOfSymbol(
        ("lle_X_" + F->getName()).str());
  if (FnPtr)
    ExportedFunctions->insert(std::make_pair(F, FnPtr));  // Cache for later
  return FnPtr;
}
Example #6
bool CallingConvention_x86_64_systemv::analyzeFunctionType(ParameterRegistry& registry, CallInformation& fillOut, FunctionType& type)
{
	TargetInfo& targetInfo = registry.getTargetInfo();
	// Map the return type onto the available return registers.
	auto iter = begin(returnRegisters);
	auto addReturn = &CallInformation::addReturn<ValueInformation>;
	if (!addEntriesForType(targetInfo, fillOut, addReturn, type.getReturnType(), iter, end(returnRegisters)))
	{
		return false;
	}
	
	// Map each parameter type onto the parameter registers, falling back to
	// stack slots tracked by spOffset when registers run out.
	size_t spOffset = 0;
	iter = begin(parameterRegisters);
	auto addParam = &CallInformation::addParameter<ValueInformation>;
	for (Type* t : type.params())
	{
		if (!addEntriesForType(targetInfo, fillOut, addParam, t, iter, end(parameterRegisters), &spOffset))
		{
			return false;
		}
	}
	
	return true;
}
Example #7
bool AMDGPUPromoteAlloca::hasSufficientLocalMem(const Function &F) {

  FunctionType *FTy = F.getFunctionType();
  const AMDGPUSubtarget &ST = TM->getSubtarget<AMDGPUSubtarget>(F);

  // If the function has any arguments in the local address space, then it's
  // possible these arguments require the entire local memory space, so
  // we cannot use local memory in the pass.
  for (Type *ParamTy : FTy->params()) {
    PointerType *PtrTy = dyn_cast<PointerType>(ParamTy);
    if (PtrTy && PtrTy->getAddressSpace() == AS.LOCAL_ADDRESS) {
      LocalMemLimit = 0;
      DEBUG(dbgs() << "Function has local memory argument. Promoting to "
                      "local memory disabled.\n");
      return false;
    }
  }

  LocalMemLimit = ST.getLocalMemorySize();
  if (LocalMemLimit == 0)
    return false;

  const DataLayout &DL = Mod->getDataLayout();

  // Check how much local memory is being used by global objects
  CurrentLocalMemUsage = 0;
  for (GlobalVariable &GV : Mod->globals()) {
    if (GV.getType()->getAddressSpace() != AS.LOCAL_ADDRESS)
      continue;

    for (const User *U : GV.users()) {
      const Instruction *Use = dyn_cast<Instruction>(U);
      if (!Use)
        continue;

      if (Use->getParent()->getParent() == &F) {
        unsigned Align = GV.getAlignment();
        if (Align == 0)
          Align = DL.getABITypeAlignment(GV.getValueType());

        // FIXME: Try to account for padding here. The padding is currently
        // determined from the inverse order of uses in the function. I'm not
        // sure if the use list order is in any way connected to this, so the
        // total reported size is likely incorrect.
        uint64_t AllocSize = DL.getTypeAllocSize(GV.getValueType());
        CurrentLocalMemUsage = alignTo(CurrentLocalMemUsage, Align);
        CurrentLocalMemUsage += AllocSize;
        break;
      }
    }
  }

  unsigned MaxOccupancy = ST.getOccupancyWithLocalMemSize(CurrentLocalMemUsage,
                                                          F);

  // Restrict local memory usage so that we don't drastically reduce occupancy,
  // unless it is already significantly reduced.

  // TODO: Have some sort of hint or other heuristics to guess occupancy based
  // on other factors..
  unsigned OccupancyHint = ST.getWavesPerEU(F).second;
  if (OccupancyHint == 0)
    OccupancyHint = 7;

  // Clamp to max value.
  OccupancyHint = std::min(OccupancyHint, ST.getMaxWavesPerEU());

  // Check the hint but ignore it if it's obviously wrong from the existing LDS
  // usage.
  MaxOccupancy = std::min(OccupancyHint, MaxOccupancy);


  // Round up to the next tier of usage.
  unsigned MaxSizeWithWaveCount
    = ST.getMaxLocalMemSizeWithWaveCount(MaxOccupancy, F);

  // Program is possibly broken by using more local mem than available.
  if (CurrentLocalMemUsage > MaxSizeWithWaveCount)
    return false;

  LocalMemLimit = MaxSizeWithWaveCount;

  DEBUG(
    dbgs() << F.getName() << " uses " << CurrentLocalMemUsage << " bytes of LDS\n"
    << "  Rounding size to " << MaxSizeWithWaveCount
    << " with a maximum occupancy of " << MaxOccupancy << '\n'
    << " and " << (LocalMemLimit - CurrentLocalMemUsage)
    << " available for promotion\n"
  );

  return true;
}
Example #8
bool CodeGenerator::genFunction(FunctionDefn * fdef) {
  // Don't generate undefined functions.
  if (fdef->isUndefined() || fdef->isAbstract() || fdef->isInterfaceMethod()) {
    return true;
  }

  DASSERT_OBJ(fdef->isSingular(), fdef);
  DASSERT_OBJ(fdef->type(), fdef);
  DASSERT_OBJ(fdef->type()->isSingular(), fdef);

  // Don't generate intrinsic functions.
  if (fdef->isIntrinsic()) {
    return true;
  }

  // Don't generate a function if it has been merged to another function
  if (fdef->mergeTo() != NULL || fdef->isUndefined()) {
    return true;
  }

  // Create the function
  Function * f = genFunctionValue(fdef);

  if (fdef->hasBody() && f->getBasicBlockList().empty()) {
    FunctionType * ftype = fdef->functionType();

    if (fdef->isSynthetic()) {
      f->setLinkage(GlobalValue::LinkOnceODRLinkage);
    }

    if (gcEnabled_) {
      if (SsGC) {
        f->setGC("shadow-stack");
      } else {
        f->setGC("tart-gc");
      }
    }

    if (debug_) {
      dbgContext_ = genDISubprogram(fdef);
      //dbgContext_ = genLexicalBlock(fdef->location());
      dbgInlineContext_ = DIScope();
      setDebugLocation(fdef->location());
    }

    BasicBlock * prologue = BasicBlock::Create(context_, "prologue", f);

    // Create the LLVM Basic Blocks corresponding to each high level BB.
//    BlockList & blocks = fdef->blocks();
//    for (BlockList::iterator b = blocks.begin(); b != blocks.end(); ++b) {
//      Block * blk = *b;
//      blk->setIRBlock(BasicBlock::Create(context_, blk->label(), f));
//    }

    builder_.SetInsertPoint(prologue);

    // Handle the explicit parameters
    unsigned param_index = 0;
    Function::arg_iterator it = f->arg_begin();

    Value * saveStructRet = structRet_;
    if (ftype->isStructReturn()) {
      it->addAttr(llvm::Attribute::StructRet);
      structRet_ = it;
      ++it;
    }

    // Handle the 'self' parameter
    if (ftype->selfParam() != NULL) {
      ParameterDefn * selfParam = ftype->selfParam();
      const Type * selfParamType = selfParam->type().unqualified();
      DASSERT_OBJ(fdef->storageClass() == Storage_Instance ||
          fdef->storageClass() == Storage_Local, fdef);
      DASSERT_OBJ(it != f->arg_end(), ftype);

      // Check if the self param is a root.
      if (selfParamType->isReferenceType()) {
        selfParam->setFlag(ParameterDefn::LValueParam, true);
        Value * selfAlloca = builder_.CreateAlloca(
            selfParam->type()->irEmbeddedType(), 0, "self.alloca");
        builder_.CreateStore(it, selfAlloca);
        selfParam->setIRValue(selfAlloca);
        markGCRoot(selfAlloca, NULL, "self.alloca");
      } else {
        // Since selfParam is always a pointer, we don't need to mark the object pointed
        // to as a root, since the next call frame up is responsible for tracing it.
        ftype->selfParam()->setIRValue(it);
      }

      it->setName("self");
      ++it;
    }

    // If this function needs to make allocations, cache a copy of the
    // allocation context pointer for this thread, since it can on some
    // platforms be expensive to look up.
    if (fdef->flags() & FunctionDefn::MakesAllocs) {
      Function * gcGetAllocContext = genFunctionValue(gc_allocContext);
      gcAllocContext_ = builder_.CreateCall(gcGetAllocContext, "allocCtx");
    }

    for (; it != f->arg_end(); ++it, ++param_index) {

      // Set the name of the Nth parameter
      ParameterDefn * param = ftype->params()[param_index];
      DASSERT_OBJ(param != NULL, fdef);
      DASSERT_OBJ(param->storageClass() == Storage_Local, param);
      QualifiedType paramType = param->internalType();
      it->setName(param->name());
      Value * paramValue = it;

      // If the parameter is a shared reference, then create the shared ref.
      if (param->isSharedRef()) {
        genLocalVar(param, paramValue);
        genGCRoot(param->irValue(), param->sharedRefType(), param->name());
        continue;
      }

      // If the parameter type contains any reference types, then the parameter needs
      // to be a root.
      bool paramIsRoot = false;
      if (paramType->isReferenceType()) {
        param->setFlag(ParameterDefn::LValueParam, true);
        paramIsRoot = true;
      } else if (paramType->containsReferenceType()) {
        // TODO: Handle roots of various shapes
        //param->setFlag(ParameterDefn::LValueParam, true);
      }

      // See if we need to make a local copy of the param.
      if (param->isLValue()) {
        Value * paramAlloca = builder_.CreateAlloca(paramType->irEmbeddedType(), 0, param->name());
        param->setIRValue(paramAlloca);

        if (paramType->typeShape() == Shape_Large_Value) {
          paramValue = builder_.CreateLoad(paramValue);
        }

        builder_.CreateStore(paramValue, paramAlloca);
        if (paramIsRoot) {
          genGCRoot(paramAlloca, paramType.unqualified(), param->name());
        }
      } else {
        param->setIRValue(paramValue);
      }
    }

    // Generate the body
    Function * saveFn = currentFn_;
    currentFn_ = f;
#if 0
    if (fdef->isGenerator()) {
      assert(false);
    } else {
#endif
      genLocalStorage(fdef->localScopes());
      genDISubprogramStart(fdef);
      genLocalRoots(fdef->localScopes());

      BasicBlock * blkEntry = createBlock("entry");
      builder_.SetInsertPoint(blkEntry);
      genExpr(fdef->body());

      if (!atTerminator()) {
        if (fdef->returnType()->isVoidType()) {
          builder_.CreateRetVoid();
        } else {
          // TODO: Use the location from the last statement of the function.
          diag.error(fdef) << "Missing return statement at end of non-void function.";
        }
      }

      gcAllocContext_ = NULL;
#if 0
    }
#endif

    builder_.SetInsertPoint(prologue);
    builder_.CreateBr(blkEntry);

    currentFn_ = saveFn;
    structRet_ = saveStructRet;

    if (!diag.inRecovery()) {
      if (verifyFunction(*f, PrintMessageAction)) {
        f->dump();
        DFAIL("Function failed to verify");
      }
    }

    //if (debug_ && !dbgContext_.isNull() && !dbgContext_.Verify()) {
    //  dbgContext_.Verify();
    //  DFAIL("BAD DBG");
    //}

    dbgContext_ = DIScope();
    dbgInlineContext_ = DIScope();
    builder_.ClearInsertionPoint();
    builder_.SetCurrentDebugLocation(llvm::DebugLoc());
  }

  return true;
}
Example #9
static void parseControlImm(CursorState* cursor,Name& outBranchTargetName,ControlStructureImm& imm)
{
	tryParseName(cursor,outBranchTargetName);
	cursor->functionState->labelDisassemblyNames.push_back(outBranchTargetName.getString());
	
	FunctionType functionType;
	
	// For backward compatibility, handle a naked result type.
	ValueType singleResultType;
	if(tryParseValueType(cursor, singleResultType))
	{
		functionType = FunctionType(TypeTuple(singleResultType));
	}
	else
	{
		// Parse the callee type, as a reference or explicit declaration.
		const Token* firstTypeToken = cursor->nextToken;
		std::vector<std::string> paramDisassemblyNames;
		NameToIndexMap paramNameToIndexMap;
		const UnresolvedFunctionType unresolvedFunctionType = parseFunctionTypeRefAndOrDecl(
			cursor,
			paramNameToIndexMap,
			paramDisassemblyNames);
		
		// Disallow named parameters.
		if(paramNameToIndexMap.size())
		{
			auto paramNameIt = paramNameToIndexMap.begin();
			parseErrorf(
				cursor->parseState,
				firstTypeToken,
				"block type declaration may not declare parameter names ($%s)",
				paramNameIt->key.getString().c_str());
		}

		if(!unresolvedFunctionType.reference)
		{
			// If there wasn't a type reference, just use the inline declared params and results.
			functionType = unresolvedFunctionType.explicitType;
		}
		else
		{
			// If there was a type reference, resolve it. This also verifies that if there were also
			// params and/or results declared inline that they match the resolved type reference.
			const Uptr referencedFunctionTypeIndex = resolveFunctionType(
				cursor->moduleState,
				unresolvedFunctionType
				).index;
			functionType = cursor->moduleState->module.types[referencedFunctionTypeIndex];
		}
	}

	// Translate the function type into an indexed block type.
	if(functionType.params().size() == 0 && functionType.results().size() == 0)
	{
		imm.type.format = IndexedBlockType::noParametersOrResult;
		imm.type.resultType = ValueType::any;
	}
	else if(functionType.params().size() == 0 && functionType.results().size() == 1)
	{
		imm.type.format = IndexedBlockType::oneResult;
		imm.type.resultType = functionType.results()[0];
	}
	else
	{
		imm.type.format = IndexedBlockType::functionType;
		imm.type.index = getUniqueFunctionTypeIndex(cursor->moduleState, functionType).index;
	}
}
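
Most of the examples above share the same core pattern: obtain a FunctionType and iterate its parameter types via params(). A minimal, self-contained sketch of that pattern against the LLVM C++ API follows (the helper name dumpParamTypes is illustrative, not taken from any of the examples):

#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Print the parameter types of a function by walking its FunctionType,
// mirroring the iteration pattern used in the examples above.
static void dumpParamTypes(const Function &F) {
  FunctionType *FTy = F.getFunctionType();
  unsigned Idx = 0;
  for (Type *ParamTy : FTy->params()) {
    errs() << "param " << Idx++ << ": ";
    ParamTy->print(errs());
    errs() << '\n';
  }
  if (FTy->isVarArg())
    errs() << "(varargs)\n";
}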