Example #1
/// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
RValue CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                         llvm::Value *AggLoc, bool isAggVol) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
                             "LLVM IR generation of compound statement ('{}')");
  
  CGDebugInfo *DI = getDebugInfo();
  if (DI) {
    EnsureInsertPoint();
    DI->setLocation(S.getLBracLoc());
    // FIXME: The llvm backend is currently not ready to deal with region_end
    // for block scoping.  In the presence of always_inline functions it gets so
    // confused that it doesn't emit any debug info.  Just disable this for now.
    //DI->EmitRegionStart(CurFn, Builder);
  }

  // Keep track of the current cleanup stack depth.
  size_t CleanupStackDepth = CleanupEntries.size();
  bool OldDidCallStackSave = DidCallStackSave;
  DidCallStackSave = false;
  
  for (CompoundStmt::const_body_iterator I = S.body_begin(),
       E = S.body_end()-GetLast; I != E; ++I)
    EmitStmt(*I);

  if (DI) {
    EnsureInsertPoint();
    DI->setLocation(S.getRBracLoc());
    
    // FIXME: The llvm backend is currently not ready to deal with region_end
    // for block scoping.  In the presence of always_inline functions it gets so
    // confused that it doesn't emit any debug info.  Just disable this for now.
    //DI->EmitRegionEnd(CurFn, Builder);
  }

  RValue RV;
  if (!GetLast) 
    RV = RValue::get(0);
  else {
    // We have to special case labels here.  They are statements, but when put 
    // at the end of a statement expression, they yield the value of their
    // subexpression.  Handle this by walking through all labels we encounter,
    // emitting them before we evaluate the subexpr.
    const Stmt *LastStmt = S.body_back();
    while (const LabelStmt *LS = dyn_cast<LabelStmt>(LastStmt)) {
      EmitLabel(*LS);
      LastStmt = LS->getSubStmt();
    }
  
    EnsureInsertPoint();
    
    RV = EmitAnyExpr(cast<Expr>(LastStmt), AggLoc);
  }

  DidCallStackSave = OldDidCallStackSave;
  
  EmitCleanupBlocks(CleanupStackDepth);
  
  return RV;
}
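For context, GetLast is set when this compound statement is the body of a GNU statement expression, where the last sub-statement's value becomes the value of the whole expression. A minimal sketch of the source construct being lowered (hypothetical user code, not from the project):

// The compound statement's final expression, 'y + 1', is the RValue
// EmitCompoundStmt returns when GetLast is true. A trailing label
// before the final expression (the special case handled above) would
// look like: ({ int y = x; done: y; }).
int f(int x) {
  int r = ({ int y = x * 2; y + 1; });
  return r;
}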
Example #2
File: CGDecl.cpp Project: aaasz/SHP
void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D) {
  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");

  llvm::GlobalVariable *GV =
    CreateStaticBlockVarDecl(D, ".", llvm::GlobalValue::InternalLinkage);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  DMEntry = GV;

  // Make sure to evaluate VLA bounds now so that we have them for later.
  //
  // FIXME: Can this happen?
  if (D.getType()->isVariablyModifiedType())
    EmitVLASize(D.getType());

  // If this value has an initializer, emit it.
  if (D.getInit())
    GV = AddInitializerToGlobalBlockVarDecl(D, GV);

  // FIXME: Merge attribute handling.
  if (const AnnotateAttr *AA = D.getAttr<AnnotateAttr>()) {
    SourceManager &SM = CGM.getContext().getSourceManager();
    llvm::Constant *Ann =
      CGM.EmitAnnotateAttr(GV, AA,
                           SM.getInstantiationLineNumber(D.getLocation()));
    CGM.AddAnnotation(Ann);
  }

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    GV->setSection(SA->getName());

  if (D.hasAttr<UsedAttr>())
    CGM.AddUsedGlobal(GV);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV, uses of this constant will be invalid.
  const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
  const llvm::Type *LPtrTy =
    llvm::PointerType::get(LTy, D.getType().getAddressSpace());
  DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(GV), &D);
  }
}
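For reference, this path handles block-scope variables with static storage duration; a minimal illustration of such user code (hypothetical, not from the project):

// The local static is lowered to an internal-linkage global by
// CreateStaticBlockVarDecl; a constant initializer like this one is
// emitted directly into the global's initializer.
int next_id(void) {
  static int counter = 0;
  return ++counter;
}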
Example #3
void CodeGenFunction::EmitOMPForDirective(const OMPForDirective &S) {
  InlinedOpenMPRegion Region(*this, S.getAssociatedStmt());
  RunCleanupsScope DirectiveScope(*this);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());

  EmitOMPWorksharingLoop(S);

  // Emit an implicit barrier at the end.
  CGM.getOpenMPRuntime().EmitOMPBarrierCall(*this, S.getLocStart(),
                                            /*IsExplicit*/ false);
  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
}
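The construct being lowered is OpenMP's worksharing loop; a minimal sketch of the source side (illustrative only):

// '#pragma omp for' divides the iterations among the threads of the
// enclosing parallel region; the implicit barrier emitted above
// corresponds to the barrier at the end of the worksharing region.
void vadd(float *a, const float *b, const float *c, int n) {
  #pragma omp parallel
  {
    #pragma omp for
    for (int i = 0; i < n; ++i)
      a[i] = b[i] + c[i];
  }
}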
Example #4
/// Pops a cleanup block.  If the block includes a normal cleanup, the
/// current insertion point is threaded through the cleanup, as are
/// any branch fixups on the cleanup.
void CodeGenFunction::PopCleanupBlock(bool FallthroughIsBranchThrough) {
  assert(!EHStack.empty() && "cleanup stack is empty!");
  assert(isa<EHCleanupScope>(*EHStack.begin()) && "top not a cleanup!");
  EHCleanupScope &Scope = cast<EHCleanupScope>(*EHStack.begin());
  assert(Scope.getFixupDepth() <= EHStack.getNumBranchFixups());

  // Remember activation information.
  bool IsActive = Scope.isActive();
  llvm::Value *NormalActiveFlag =
    Scope.shouldTestFlagInNormalCleanup() ? Scope.getActiveFlag() : nullptr;
  llvm::Value *EHActiveFlag = 
    Scope.shouldTestFlagInEHCleanup() ? Scope.getActiveFlag() : nullptr;

  // Check whether we need an EH cleanup.  This is only true if we've
  // generated a lazy EH cleanup block.
  llvm::BasicBlock *EHEntry = Scope.getCachedEHDispatchBlock();
  assert(Scope.hasEHBranches() == (EHEntry != nullptr));
  bool RequiresEHCleanup = (EHEntry != nullptr);
  EHScopeStack::stable_iterator EHParent = Scope.getEnclosingEHScope();

  // Check the three conditions which might require a normal cleanup:

  // - whether there are branch fix-ups through this cleanup
  unsigned FixupDepth = Scope.getFixupDepth();
  bool HasFixups = EHStack.getNumBranchFixups() != FixupDepth;

  // - whether there are branch-throughs or branch-afters
  bool HasExistingBranches = Scope.hasBranches();

  // - whether there's a fallthrough
  llvm::BasicBlock *FallthroughSource = Builder.GetInsertBlock();
  bool HasFallthrough = (FallthroughSource != nullptr && IsActive);

  // Branch-through fall-throughs leave the insertion point set to the
  // end of the last cleanup, which points to the current scope.  The
  // rest of IR gen doesn't need to worry about this; it only happens
  // during the execution of PopCleanupBlocks().
  bool HasPrebranchedFallthrough =
    (FallthroughSource && FallthroughSource->getTerminator());

  // If this is a normal cleanup, then having a prebranched
  // fallthrough implies that the fallthrough source unconditionally
  // jumps here.
  assert(!Scope.isNormalCleanup() || !HasPrebranchedFallthrough ||
         (Scope.getNormalBlock() &&
          FallthroughSource->getTerminator()->getSuccessor(0)
            == Scope.getNormalBlock()));

  bool RequiresNormalCleanup = false;
  if (Scope.isNormalCleanup() &&
      (HasFixups || HasExistingBranches || HasFallthrough)) {
    RequiresNormalCleanup = true;
  }

  // If we have a prebranched fallthrough into an inactive normal
  // cleanup, rewrite it so that it leads to the appropriate place.
  if (Scope.isNormalCleanup() && HasPrebranchedFallthrough && !IsActive) {
    llvm::BasicBlock *prebranchDest;
    
    // If the prebranch is semantically branching through the next
    // cleanup, just forward it to the next block, leaving the
    // insertion point in the prebranched block.
    if (FallthroughIsBranchThrough) {
      EHScope &enclosing = *EHStack.find(Scope.getEnclosingNormalCleanup());
      prebranchDest = CreateNormalEntry(*this, cast<EHCleanupScope>(enclosing));

    // Otherwise, we need to make a new block.  If the normal cleanup
    // isn't being used at all, we could actually reuse the normal
    // entry block, but this is simpler, and it avoids conflicts with
    // dead optimistic fixup branches.
    } else {
      prebranchDest = createBasicBlock("forwarded-prebranch");
      EmitBlock(prebranchDest);
    }

    llvm::BasicBlock *normalEntry = Scope.getNormalBlock();
    assert(normalEntry && !normalEntry->use_empty());

    ForwardPrebranchedFallthrough(FallthroughSource,
                                  normalEntry, prebranchDest);
  }

  // If we don't need the cleanup at all, we're done.
  if (!RequiresNormalCleanup && !RequiresEHCleanup) {
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup(); // safe because there are no fixups
    assert(EHStack.getNumBranchFixups() == 0 ||
           EHStack.hasNormalCleanups());
    return;
  }

  // Copy the cleanup emission data out.  Note that SmallVector
  // guarantees maximal alignment for its buffer regardless of its
  // type parameter.
  SmallVector<char, 8*sizeof(void*)> CleanupBuffer;
  CleanupBuffer.reserve(Scope.getCleanupSize());
  memcpy(CleanupBuffer.data(),
         Scope.getCleanupBuffer(), Scope.getCleanupSize());
  CleanupBuffer.set_size(Scope.getCleanupSize());
  EHScopeStack::Cleanup *Fn =
    reinterpret_cast<EHScopeStack::Cleanup*>(CleanupBuffer.data());

  EHScopeStack::Cleanup::Flags cleanupFlags;
  if (Scope.isNormalCleanup())
    cleanupFlags.setIsNormalCleanupKind();
  if (Scope.isEHCleanup())
    cleanupFlags.setIsEHCleanupKind();

  if (!RequiresNormalCleanup) {
    destroyOptimisticNormalEntry(*this, Scope);
    EHStack.popCleanup();
  } else {
    // If we have a fallthrough and no other need for the cleanup,
    // emit it directly.
    if (HasFallthrough && !HasPrebranchedFallthrough &&
        !HasFixups && !HasExistingBranches) {

      destroyOptimisticNormalEntry(*this, Scope);
      EHStack.popCleanup();

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

    // Otherwise, the best approach is to thread everything through
    // the cleanup block and then try to clean up after ourselves.
    } else {
      // Force the entry block to exist.
      llvm::BasicBlock *NormalEntry = CreateNormalEntry(*this, Scope);

      // I.  Set up the fallthrough edge in.

      CGBuilderTy::InsertPoint savedInactiveFallthroughIP;

      // If there's a fallthrough, we need to store the cleanup
      // destination index.  For fall-throughs this is always zero.
      if (HasFallthrough) {
        if (!HasPrebranchedFallthrough)
          Builder.CreateStore(Builder.getInt32(0), getNormalCleanupDestSlot());

      // Otherwise, save and clear the IP if we don't have fallthrough
      // because the cleanup is inactive.
      } else if (FallthroughSource) {
        assert(!IsActive && "source without fallthrough for active cleanup");
        savedInactiveFallthroughIP = Builder.saveAndClearIP();
      }

      // II.  Emit the entry block.  This implicitly branches to it if
      // we have fallthrough.  All the fixups and existing branches
      // should already be branched to it.
      EmitBlock(NormalEntry);

      // III.  Figure out where we're going and build the cleanup
      // epilogue.

      bool HasEnclosingCleanups =
        (Scope.getEnclosingNormalCleanup() != EHStack.stable_end());

      // Compute the branch-through dest if we need it:
      //   - if there are branch-throughs threaded through the scope
      //   - if fall-through is a branch-through
      //   - if there are fixups that will be optimistically forwarded
      //     to the enclosing cleanup
      llvm::BasicBlock *BranchThroughDest = nullptr;
      if (Scope.hasBranchThroughs() ||
          (FallthroughSource && FallthroughIsBranchThrough) ||
          (HasFixups && HasEnclosingCleanups)) {
        assert(HasEnclosingCleanups);
        EHScope &S = *EHStack.find(Scope.getEnclosingNormalCleanup());
        BranchThroughDest = CreateNormalEntry(*this, cast<EHCleanupScope>(S));
      }

      llvm::BasicBlock *FallthroughDest = nullptr;
      SmallVector<llvm::Instruction*, 2> InstsToAppend;

      // If there's exactly one branch-after and no other threads,
      // we can route it without a switch.
      if (!Scope.hasBranchThroughs() && !HasFixups && !HasFallthrough &&
          Scope.getNumBranchAfters() == 1) {
        assert(!BranchThroughDest || !IsActive);

        // TODO: clean up the possibly dead stores to the cleanup dest slot.
        llvm::BasicBlock *BranchAfter = Scope.getBranchAfterBlock(0);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchAfter));

      // Build a switch-out if we need it:
      //   - if there are branch-afters threaded through the scope
      //   - if fall-through is a branch-after
      //   - if there are fixups that have nowhere left to go and
      //     so must be immediately resolved
      } else if (Scope.getNumBranchAfters() ||
                 (HasFallthrough && !FallthroughIsBranchThrough) ||
                 (HasFixups && !HasEnclosingCleanups)) {

        llvm::BasicBlock *Default =
          (BranchThroughDest ? BranchThroughDest : getUnreachableBlock());

        // TODO: base this on the number of branch-afters and fixups
        const unsigned SwitchCapacity = 10;

        llvm::LoadInst *Load =
          new llvm::LoadInst(getNormalCleanupDestSlot(), "cleanup.dest");
        llvm::SwitchInst *Switch =
          llvm::SwitchInst::Create(Load, Default, SwitchCapacity);

        InstsToAppend.push_back(Load);
        InstsToAppend.push_back(Switch);

        // Branch-after fallthrough.
        if (FallthroughSource && !FallthroughIsBranchThrough) {
          FallthroughDest = createBasicBlock("cleanup.cont");
          if (HasFallthrough)
            Switch->addCase(Builder.getInt32(0), FallthroughDest);
        }

        for (unsigned I = 0, E = Scope.getNumBranchAfters(); I != E; ++I) {
          Switch->addCase(Scope.getBranchAfterIndex(I),
                          Scope.getBranchAfterBlock(I));
        }

        // If there aren't any enclosing cleanups, we can resolve all
        // the fixups now.
        if (HasFixups && !HasEnclosingCleanups)
          ResolveAllBranchFixups(*this, Switch, NormalEntry);
      } else {
        // We should always have a branch-through destination in this case.
        assert(BranchThroughDest);
        InstsToAppend.push_back(llvm::BranchInst::Create(BranchThroughDest));
      }

      // IV.  Pop the cleanup and emit it.
      EHStack.popCleanup();
      assert(EHStack.hasNormalCleanups() == HasEnclosingCleanups);

      EmitCleanup(*this, Fn, cleanupFlags, NormalActiveFlag);

      // Append the prepared cleanup prologue from above.
      llvm::BasicBlock *NormalExit = Builder.GetInsertBlock();
      for (unsigned I = 0, E = InstsToAppend.size(); I != E; ++I)
        NormalExit->getInstList().push_back(InstsToAppend[I]);

      // Optimistically hope that any fixups will continue falling through.
      for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
           I < E; ++I) {
        BranchFixup &Fixup = EHStack.getBranchFixup(I);
        if (!Fixup.Destination) continue;
        if (!Fixup.OptimisticBranchBlock) {
          new llvm::StoreInst(Builder.getInt32(Fixup.DestinationIndex),
                              getNormalCleanupDestSlot(),
                              Fixup.InitialBranch);
          Fixup.InitialBranch->setSuccessor(0, NormalEntry);
        }
        Fixup.OptimisticBranchBlock = NormalExit;
      }

      // V.  Set up the fallthrough edge out.
      
      // Case 1: a fallthrough source exists but doesn't branch to the
      // cleanup because the cleanup is inactive.
      if (!HasFallthrough && FallthroughSource) {
        // Prebranched fallthrough was forwarded earlier.
        // Non-prebranched fallthrough doesn't need to be forwarded.
        // Either way, all we need to do is restore the IP we cleared before.
        assert(!IsActive);
        Builder.restoreIP(savedInactiveFallthroughIP);

      // Case 2: a fallthrough source exists and should branch to the
      // cleanup, but we're not supposed to branch through to the next
      // cleanup.
      } else if (HasFallthrough && FallthroughDest) {
        assert(!FallthroughIsBranchThrough);
        EmitBlock(FallthroughDest);

      // Case 3: a fallthrough source exists and should branch to the
      // cleanup and then through to the next.
      } else if (HasFallthrough) {
        // Everything is already set up for this.

      // Case 4: no fallthrough source exists.
      } else {
        Builder.ClearInsertionPoint();
      }

      // VI.  Assorted cleaning.

      // Check whether we can merge NormalEntry into a single predecessor.
      // This might invalidate (non-IR) pointers to NormalEntry.
      llvm::BasicBlock *NewNormalEntry =
        SimplifyCleanupEntry(*this, NormalEntry);

      // If it did invalidate those pointers, and NormalEntry was the same
      // as NormalExit, go back and patch up the fixups.
      if (NewNormalEntry != NormalEntry && NormalEntry == NormalExit)
        for (unsigned I = FixupDepth, E = EHStack.getNumBranchFixups();
               I < E; ++I)
          EHStack.getBranchFixup(I).OptimisticBranchBlock = NewNormalEntry;
    }
  }

  assert(EHStack.hasNormalCleanups() || EHStack.getNumBranchFixups() == 0);

  // Emit the EH cleanup if required.
  if (RequiresEHCleanup) {
    CGDebugInfo *DI = getDebugInfo();
    SaveAndRestoreLocation AutoRestoreLocation(*this, Builder);
    if (DI)
      DI->EmitLocation(Builder, CurEHLocation);

    CGBuilderTy::InsertPoint SavedIP = Builder.saveAndClearIP();

    EmitBlock(EHEntry);

    // We only actually emit the cleanup code if the cleanup is either
    // active or was used before it was deactivated.
    if (EHActiveFlag || IsActive) {

      cleanupFlags.setIsForEHCleanup();
      EmitCleanup(*this, Fn, cleanupFlags, EHActiveFlag);
    }

    Builder.CreateBr(getEHDispatchBlock(EHParent));

    Builder.restoreIP(SavedIP);

    SimplifyCleanupEntry(*this, EHEntry);
  }
}
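As a rough illustration of why the threading machinery above exists: every exit from a scope with a pending cleanup must run that cleanup first, so fallthrough, branch-afters, and branch-throughs are all routed through one cleanup block and then dispatched via the cleanup destination slot. A hypothetical C++ example:

// Both exits from the inner scope must run ~Lock(): the early 'return'
// is a branch-after threaded through the cleanup, and the normal
// fallthrough reaches the cleanup directly before continuing.
struct Lock { Lock() {} ~Lock() {} };
int f(bool early) {
  {
    Lock l;
    if (early)
      return 1;
  }
  return 0;
}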
Example #5
void CodeGenFunction::EmitOMPSimdDirective(const OMPSimdDirective &S) {
  // Pragma 'simd' code depends on the presence of 'lastprivate'.
  // If present, we have to separate the last iteration of the loop:
  //
  // if (LastIteration != 0) {
  //   for (IV in 0..LastIteration-1) BODY;
  //   BODY with updates of lastprivate vars;
  //   <Final counter/linear vars updates>;
  // }
  //
  // otherwise (when there's no lastprivate):
  //
  //   for (IV in 0..LastIteration) BODY;
  //   <Final counter/linear vars updates>;
  //

  // Walk clauses and process safelen/lastprivate.
  bool SeparateIter = false;
  LoopStack.setParallel();
  LoopStack.setVectorizerEnable(true);
  for (auto C : S.clauses()) {
    switch (C->getClauseKind()) {
    case OMPC_safelen: {
      RValue Len = EmitAnyExpr(cast<OMPSafelenClause>(C)->getSafelen(),
                               AggValueSlot::ignored(), true);
      llvm::ConstantInt *Val = cast<llvm::ConstantInt>(Len.getScalarVal());
      LoopStack.setVectorizerWidth(Val->getZExtValue());
      // In the presence of a finite 'safelen', it may be unsafe to mark all
      // the memory instructions parallel, because loop-carried
      // dependences of 'safelen' iterations are possible.
      LoopStack.setParallel(false);
      break;
    }
    case OMPC_aligned:
      EmitOMPAlignedClause(*this, CGM, cast<OMPAlignedClause>(*C));
      break;
    case OMPC_lastprivate:
      SeparateIter = true;
      break;
    default:
      // Not handled yet.
      break;
    }
  }

  RunCleanupsScope DirectiveScope(*this);

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());

  // Emit the loop iteration variable.
  const Expr *IVExpr = S.getIterationVariable();
  const VarDecl *IVDecl = cast<VarDecl>(cast<DeclRefExpr>(IVExpr)->getDecl());
  EmitVarDecl(*IVDecl);
  EmitIgnoredExpr(S.getInit());

  // Emit the iteration count variable.
  // If it is not a variable, Sema decided to calculate the iteration count
  // on each iteration (e.g., because it is foldable into a constant).
  if (auto LIExpr = dyn_cast<DeclRefExpr>(S.getLastIteration())) {
    EmitVarDecl(*cast<VarDecl>(LIExpr->getDecl()));
    // Emit calculation of the iterations count.
    EmitIgnoredExpr(S.getCalcLastIteration());
  }

  if (SeparateIter) {
    // Emit: if (LastIteration > 0) - begin.
    RegionCounter Cnt = getPGORegionCounter(&S);
    auto ThenBlock = createBasicBlock("simd.if.then");
    auto ContBlock = createBasicBlock("simd.if.end");
    EmitBranchOnBoolExpr(S.getPreCond(), ThenBlock, ContBlock, Cnt.getCount());
    EmitBlock(ThenBlock);
    Cnt.beginRegion(Builder);
    // Emit 'then' code.
    {
      OMPPrivateScope LoopScope(*this);
      EmitPrivateLoopCounters(*this, LoopScope, S.counters());
      EmitOMPInnerLoop(S, LoopScope, /* SeparateIter */ true);
      EmitOMPLoopBody(S, /* SeparateIter */ true);
    }
    EmitOMPSimdFinal(S);
    // Emit: if (LastIteration != 0) - end.
    EmitBranch(ContBlock);
    EmitBlock(ContBlock, true);
  } else {
    {
      OMPPrivateScope LoopScope(*this);
      EmitPrivateLoopCounters(*this, LoopScope, S.counters());
      EmitOMPInnerLoop(S, LoopScope);
    }
    EmitOMPSimdFinal(S);
  }

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());
}
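A sketch of the directive this function lowers, combining the clauses the switch above reacts to: 'safelen' caps the vectorizer width and clears the parallel flag, and 'lastprivate' sets SeparateIter so the final iteration is peeled off (hypothetical user code):

// 'safelen(8)' bounds how many iterations may run in lockstep;
// 'lastprivate(last)' requires the sequentially last value of 'last'
// to be copied out, which is why the last iteration is separated.
void saxpy(float *x, float *y, float a, int n, float *out) {
  float last = 0.0f;
  #pragma omp simd safelen(8) lastprivate(last)
  for (int i = 0; i < n; ++i) {
    y[i] = a * x[i] + y[i];
    last = y[i];
  }
  *out = last;
}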
Example #6
void CodeGenFunction::EmitUPCForAllStmt(const UPCForAllStmt &S) {
  JumpDest LoopExit = getJumpDestInCurrentScope("upc_forall.end");

  RunCleanupsScope ForScope(*this);

  llvm::Value *Depth = 0;
  if (S.getAfnty()) {
    Address DepthAddr = getUPCForAllDepth(CGM);
    Depth = Builder.CreateLoad(DepthAddr);
    Builder.CreateStore(Builder.CreateNUWAdd(Depth,
                                             llvm::ConstantInt::get(IntTy, 1),
                                             "upc_forall.inc_depth"),
                        DepthAddr);
    EHStack.pushCleanup<UPCForAllCleanup>(NormalAndEHCleanup, Depth);
  }

  CGDebugInfo *DI = getDebugInfo();
  if (DI)
    DI->EmitLexicalBlockStart(Builder, S.getSourceRange().getBegin());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest Continue = getJumpDestInCurrentScope("upc_forall.cond");
  llvm::BasicBlock *CondBlock = Continue.getBlock();
  EmitBlock(CondBlock);

  // Create a cleanup scope for the condition variable cleanups.
  RunCleanupsScope ConditionScope(*this);
  
  llvm::Value *BoolCondVal = 0;
  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (S.getConditionVariable()) {
      EmitAutoVarDecl(*S.getConditionVariable());
    }

    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("upcforall.cond.cleanup");
    
    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("upc_forall.filter");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0.  The condition must be a scalar type.
    BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant.  Don't even create a new block for the
    // body, just fall into it.
  }

  // If the for loop doesn't have an increment we can just use the
  // condition as the continue block.  Otherwise we'll need to create
  // a block for it (in the current scope, i.e. in the scope of the
  // condition), and that block will become our continue block.
  if (S.getInc())
    Continue = getJumpDestInCurrentScope("upc_forall.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (const Expr *Afnty = S.getAfnty()) {
    llvm::Value *Affinity = EmitScalarExpr(Afnty);
    if (Afnty->getType()->hasPointerToSharedRepresentation()) {
      // get threadof
      Affinity = EmitUPCPointerGetThread(Affinity);
    } else {
      assert(Affinity->getType()->isIntegerTy());
      llvm::Value *Threads = EmitUPCThreads();
      if (cast<llvm::IntegerType>(Threads->getType())->getBitWidth() <
          cast<llvm::IntegerType>(Affinity->getType())->getBitWidth()) {
        Threads = Builder.CreateIntCast(Threads, Affinity->getType(), false);
      } else {
        Affinity = Builder.CreateIntCast(Affinity, Threads->getType(),
                                         Afnty->getType()->hasSignedIntegerRepresentation());
      }
      if (Afnty->getType()->hasSignedIntegerRepresentation()) {
        Affinity = Builder.CreateSRem(Affinity, Threads);
        llvm::Value *Zero = llvm::ConstantInt::get(Affinity->getType(), 0);
        Affinity = Builder.CreateSelect(Builder.CreateICmpSLT(Affinity, Zero),
                                        Builder.CreateAdd(Affinity, Threads),
                                        Affinity);
      } else {
        Affinity = Builder.CreateURem(Affinity, Threads);
      }
    }
    Affinity = Builder.CreateIntCast(Affinity, IntTy, false);

    llvm::Value *MyThread = EmitUPCMyThread();

    llvm::Value *Test =
      Builder.CreateOr(Builder.CreateICmpUGT(Depth, llvm::ConstantInt::get(IntTy, 0)),
                        Builder.CreateICmpEQ(Affinity, MyThread));

    llvm::BasicBlock *RealBody = createBasicBlock("upc_forall.body");
    Builder.CreateCondBr(Test, RealBody, Continue.getBlock());
    EmitBlock(RealBody);
  }

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  if (DI)
    DI->EmitLexicalBlockEnd(Builder, S.getSourceRange().getEnd());

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}
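For context, upc_forall is UPC's work-distributing loop: it takes a fourth, affinity expression that decides which thread runs each iteration, either an integer compared against MYTHREAD modulo THREADS (the SRem/URem logic above) or a pointer-to-shared compared via its thread component. A hypothetical example:

// With integer affinity 'i', iteration i runs on thread i % THREADS;
// with '&a[i]' it would run on the thread that owns a[i]. Inside a
// nested upc_forall (Depth > 0 above) the affinity test is skipped
// and the body runs unconditionally.
shared int a[100];
void scale(void) {
  upc_forall (int i = 0; i < 100; ++i; i)
    a[i] *= 2;
}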
Example #7
void CodeGenFunction::EmitStaticBlockVarDecl(const VarDecl &D) { 
  llvm::Value *&DMEntry = LocalDeclMap[&D];
  assert(DMEntry == 0 && "Decl already exists in localdeclmap!");
  
  llvm::GlobalVariable *GV = 
    CreateStaticBlockVarDecl(D, ".", llvm::GlobalValue::InternalLinkage);

  // Store into LocalDeclMap before generating initializer to handle
  // circular references.
  DMEntry = GV;

  // Make sure to evaluate VLA bounds now so that we have them for later.
  //
  // FIXME: Can this happen?
  if (D.getType()->isVariablyModifiedType())
    EmitVLASize(D.getType());

  if (D.getInit()) {
    llvm::Constant *Init = CGM.EmitConstantExpr(D.getInit(), D.getType(), this);

    // If constant emission failed, then this should be a C++ static
    // initializer.
    if (!Init) {
      if (!getContext().getLangOptions().CPlusPlus)
        CGM.ErrorUnsupported(D.getInit(), "constant l-value expression");
      else
        EmitStaticCXXBlockVarDeclInit(D, GV);
    } else {
      // The initializer may differ in type from the global. Rewrite
      // the global to match the initializer.  (We have to do this
      // because some types, like unions, can't be completely represented
      // in the LLVM type system.)
      if (GV->getType() != Init->getType()) {
        llvm::GlobalVariable *OldGV = GV;
        
        GV = new llvm::GlobalVariable(CGM.getModule(), Init->getType(),
                                      OldGV->isConstant(),
                                      OldGV->getLinkage(), Init, "",
                                      0, D.isThreadSpecified(),
                                      D.getType().getAddressSpace());

        // Steal the name of the old global
        GV->takeName(OldGV);

        // Replace all uses of the old global with the new global
        llvm::Constant *NewPtrForOldDecl = 
          llvm::ConstantExpr::getBitCast(GV, OldGV->getType());
        OldGV->replaceAllUsesWith(NewPtrForOldDecl);

        // Erase the old global, since it is no longer used.
        OldGV->eraseFromParent();
      } 

      GV->setInitializer(Init);
    }
  }

  // FIXME: Merge attribute handling.
  if (const AnnotateAttr *AA = D.getAttr<AnnotateAttr>()) {
    SourceManager &SM = CGM.getContext().getSourceManager();
    llvm::Constant *Ann =
      CGM.EmitAnnotateAttr(GV, AA, 
                           SM.getInstantiationLineNumber(D.getLocation()));
    CGM.AddAnnotation(Ann);
  }

  if (const SectionAttr *SA = D.getAttr<SectionAttr>())
    GV->setSection(SA->getName());
  
  if (D.hasAttr<UsedAttr>())
    CGM.AddUsedGlobal(GV);

  // We may have to cast the constant because of the initializer
  // mismatch above.
  //
  // FIXME: It is really dangerous to store this in the map; if anyone
  // RAUW's the GV, uses of this constant will be invalid.
  const llvm::Type *LTy = CGM.getTypes().ConvertTypeForMem(D.getType());
  const llvm::Type *LPtrTy =
    llvm::PointerType::get(LTy, D.getType().getAddressSpace());
  DMEntry = llvm::ConstantExpr::getBitCast(GV, LPtrTy);

  // Emit global variable debug descriptor for static vars.
  CGDebugInfo *DI = getDebugInfo();
  if (DI) {
    DI->setLocation(D.getLocation());
    DI->EmitGlobalVariable(static_cast<llvm::GlobalVariable *>(GV), &D);
  }
}
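One case that can exercise the GV rewrite in the middle of this function: the constant emitted for a union initializer takes the type of its active member, which may differ from ConvertTypeForMem's converted union type, so a new global is created around the initializer and the old one is RAUW'd away (hypothetical user code):

// The constant emitted for '.f' may not match the LLVM type chosen for
// 'union U'; when GV->getType() != Init->getType(), the global is
// recreated with the initializer's type as in the branch above.
union U { int i; float f; };
float *get(void) {
  static union U u = { .f = 1.0f };
  return &u.f;
}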