Example 1
/// Check whether all available values are identical to each other.
static bool areIdentical(llvm::DenseMap<SILBasicBlock *, SILValue> &Avails) {
  if (auto *First = dyn_cast<SingleValueInstruction>(Avails.begin()->second)) {
    for (auto Avail : Avails) {
      auto *Inst = dyn_cast<SingleValueInstruction>(Avail.second);
      if (!Inst)
        return false;
      if (!Inst->isIdenticalTo(First))
        return false;
    }
    return true;
  }

  auto *MVIR = dyn_cast<MultipleValueInstructionResult>(Avails.begin()->second);
  if (!MVIR)
    return false;

  for (auto Avail : Avails) {
    auto *Result = dyn_cast<MultipleValueInstructionResult>(Avail.second);
    if (!Result)
      return false;
    if (!Result->getParent()->isIdenticalTo(MVIR->getParent()) ||
        Result->getIndex() != MVIR->getIndex()) {
      return false;
    }
  }
  return true;
}
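The helper above assumes a non-empty map (it dereferences Avails.begin() unconditionally) and compares every entry against the first one. The same all-values-equal walk over a DenseMap, reduced to a self-contained sketch — the areAllEqual name and the int key/value types are illustrative, not from the original:

#include "llvm/ADT/DenseMap.h"

// Compare every mapped value against the first entry, mirroring the
// non-empty-map precondition of areIdentical above.
static bool areAllEqual(const llvm::DenseMap<int, int> &Map) {
  int First = Map.begin()->second;
  for (const auto &Entry : Map)
    if (Entry.second != First)
      return false;
  return true;
}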
Example 2
 // Searches for the given key in the map. Returns the instruction number on
 // success, or 0 if the key is not present.
 int findKey(llvm::DenseMap<llvm::Instruction*, int>& map, llvm::Instruction* key){
     llvm::DenseMap<llvm::Instruction*, int>::iterator iter = map.find(key);
     if (iter == map.end())
         return 0;
     else
         return iter->second;
 }
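DenseMap also provides lookup(), which returns the mapped value or a value-initialized one (0 for int) when the key is absent, so the helper above collapses to a single line — a sketch under the same map type:

 int findKey(llvm::DenseMap<llvm::Instruction*, int>& map, llvm::Instruction* key) {
     return map.lookup(key); // 0 when the key is not present
 }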
Example 3
    bool isDeclCandidate(FunctionDecl * FDecl) {
      if (m_NonNullArgIndexs.count(FDecl))
        return true;

      if (llvm::isa<CXXRecordDecl>(FDecl))
        return true;

      std::bitset<32> ArgIndexs;
      for (specific_attr_iterator<NonNullAttr>
             I = FDecl->specific_attr_begin<NonNullAttr>(),
             E = FDecl->specific_attr_end<NonNullAttr>(); I != E; ++I) {

        NonNullAttr *NonNull = *I;
        for (NonNullAttr::args_iterator i = NonNull->args_begin(),
               e = NonNull->args_end(); i != e; ++i) {
          ArgIndexs.set(*i);
        }
      }

      if (ArgIndexs.any()) {
        m_NonNullArgIndexs.insert(std::make_pair(FDecl, ArgIndexs));
        return true;
      }
      return false;
    }
Example 4
ICInfo* getICInfo(void* rtn_addr) {
    // TODO: load this from the CF instead of tracking it separately
    auto&& it = ics_by_return_addr.find(rtn_addr);
    if (it == ics_by_return_addr.end())
        return NULL;
    return it->second;
}
Example 5
static
llvm::MDNode *myGetType(const Type *type) {
  typedef llvm::DenseMap<const Type*, llvm::MDNode *>::const_iterator TypeNodeIter;
  TypeNodeIter i = myTypeDescriptors.find(type);
  if (i != myTypeDescriptors.end())
    return i->second;
  return NULL;
}
Example 6
static void
updateSSAForUseOfInst(SILSSAUpdater &Updater,
                      SmallVectorImpl<SILArgument*> &InsertedPHIs,
                      const llvm::DenseMap<ValueBase *, SILValue> &ValueMap,
                      SILBasicBlock *Header, SILBasicBlock *EntryCheckBlock,
                      ValueBase *Inst) {
    if (Inst->use_empty())
        return;

    // Find the mapped instruction.
    assert(ValueMap.count(Inst) && "Expected to find value in map!");
    SILValue MappedValue = ValueMap.find(Inst)->second;
    assert(MappedValue);

    // For each use of a specific result value of the instruction.
    if (Inst->hasValue()) {
        SILValue Res(Inst);
        assert(Res->getType() == MappedValue->getType() && "The types must match");

        InsertedPHIs.clear();
        Updater.Initialize(Res->getType());
        Updater.AddAvailableValue(Header, Res);
        Updater.AddAvailableValue(EntryCheckBlock, MappedValue);


        // Because of the way that phi nodes are represented we have to collect
        // all uses before we update SSA. Modifying one phi node can invalidate
        // another, unrelated phi node's operands through the common branch
        // instruction (which has to be modified), and that would invalidate a
        // plain ValueUseIterator. Instead we collect uses, wrapping uses in
        // branches specially, so that we can reconstruct the use even after the
        // branch has been modified.
        SmallVector<UseWrapper, 8> StoredUses;
        for (auto *U : Res->getUses())
            StoredUses.push_back(UseWrapper(U));
        for (auto U : StoredUses) {
            Operand *Use = U;
            SILInstruction *User = Use->getUser();
            assert(User && "Missing user");

            // Ignore uses in the same basic block.
            if (User->getParent() == Header)
                continue;

            assert(User->getParent() != EntryCheckBlock &&
                   "The entry check block should dominate the header");
            Updater.RewriteUse(*Use);
        }
        // Canonicalize inserted phis to avoid extra BB Args.
        for (SILArgument *Arg : InsertedPHIs) {
            if (SILInstruction *Inst = replaceBBArgWithCast(Arg)) {
                Arg->replaceAllUsesWith(Inst);
                // DCE+SimplifyCFG runs as a post-pass cleanup.
                // DCE replaces dead arg values with undef.
                // SimplifyCFG deletes the dead BB arg.
            }
        }
    }
}
Example 7
 // Return a range of scopes for the given closure. The elements of the
 // returned range have type `SILFunction *` and are non-null. Return an empty
 // range for a SILFunction that is not a closure or is a dead closure.
 ScopeRange getClosureScopes(SILFunction *ClosureF) {
   IndexRange indexRange(nullptr, nullptr);
   auto closureScopesPos = closureToScopesMap.find(ClosureF);
   if (closureScopesPos != closureToScopesMap.end()) {
     auto &indexedScopes = closureScopesPos->second;
     indexRange = IndexRange(indexedScopes.begin(), indexedScopes.end());
   }
   return makeOptionalTransformRange(indexRange,
                                     IndexLookupFunc(indexedScopes));
 }
Example 8
  int lookupScopeIndex(SILFunction *scopeFunc) {
    auto indexPos = scopeToIndexMap.find(scopeFunc);
    if (indexPos != scopeToIndexMap.end())
      return indexPos->second;

    int scopeIdx = indexedScopes.size();
    scopeToIndexMap[scopeFunc] = scopeIdx;
    indexedScopes.push_back(scopeFunc);
    return scopeIdx;
  }
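The find-then-insert above probes the map twice on the miss path. Newer LLVM exposes DenseMap::try_emplace, which does the same work in a single probe — a sketch, assuming a C++17 build and the same scopeToIndexMap/indexedScopes members:

  int lookupScopeIndex(SILFunction *scopeFunc) {
    // try_emplace inserts {scopeFunc, size()} only if the key is new and
    // reports whether it did so.
    auto [pos, inserted] =
        scopeToIndexMap.try_emplace(scopeFunc, (int)indexedScopes.size());
    if (inserted)
      indexedScopes.push_back(scopeFunc);
    return pos->second;
  }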
Example 9
 void erase(SILFunction *F) {
   // If this function is a mapped closure scope, remove it, leaving a nullptr
   // sentinel.
   auto indexPos = scopeToIndexMap.find(F);
   if (indexPos != scopeToIndexMap.end()) {
     indexedScopes[indexPos->second] = nullptr;
     scopeToIndexMap.erase(F);
   }
   // If this function is a closure, remove it.
   closureToScopesMap.erase(F);
 }
Example 10
static void mapOperands(SILInstruction *I,
                        const llvm::DenseMap<ValueBase *, SILValue> &ValueMap) {
  for (auto &Opd : I->getAllOperands()) {
    SILValue OrigVal = Opd.get();
    ValueBase *OrigDef = OrigVal;
    auto Found = ValueMap.find(OrigDef);
    if (Found != ValueMap.end()) {
      SILValue MappedVal = Found->second;
      Opd.set(MappedVal);
    }
  }
}
Example 11
  void addDecl(llvm::DenseMap<K, FoundDecl> &Map, K Key, FoundDecl FD) {
    // Add the declaration if we haven't found an equivalent yet, otherwise
    // replace the equivalent if the found decl has a higher access level.
    auto existingDecl = Map.find(Key);

    if ((existingDecl == Map.end()) ||
        (Map[Key].first->getFormalAccess() < FD.first->getFormalAccess())) {
      if (existingDecl != Map.end())
        declsToReport.erase({existingDecl->getSecond().first});
      Map[Key] = FD;
      declsToReport.insert(FD);
    }
  }
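addDecl above hashes Key up to three times on the replace path (find, then two Map[Key] accesses). Reusing the iterator from the first probe avoids that — a sketch with the same types; declsToReport is assumed to behave as in the original:

  void addDecl(llvm::DenseMap<K, FoundDecl> &Map, K Key, FoundDecl FD) {
    auto existingDecl = Map.find(Key);
    if (existingDecl == Map.end()) {
      // No equivalent found yet: record the new declaration.
      Map.insert({Key, FD});
      declsToReport.insert(FD);
    } else if (existingDecl->second.first->getFormalAccess() <
               FD.first->getFormalAccess()) {
      // Replace the equivalent with the higher-access declaration.
      declsToReport.erase({existingDecl->second.first});
      existingDecl->second = FD;
      declsToReport.insert(FD);
    }
  }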
Example 12
int Graph::getTaintedEdges() {
	int countEdges = 0;

	for (llvm::DenseMap<GraphNode*, bool>::iterator it = taintedMap.begin(); it != taintedMap.end(); ++it) {
		std::map<GraphNode*, edgeType> succs = it->first->getSuccessors();
		for (std::map<GraphNode*, edgeType>::iterator succ = succs.begin(), s_end = succs.end(); succ != s_end; ++succ) {
			if (taintedMap.count(succ->first) > 0) {
				countEdges++;
			}
		}
	}
	return countEdges;
}
Example 13
llvm::Function *CGObjCJit::GenerateMethod(const ObjCMethodDecl *OMD,
                                          const ObjCContainerDecl *CD) {

  assert(CD && "Missing container decl in GetNameForMethod");

  llvm::SmallString<256> Name;
  llvm::raw_svector_ostream OS(Name);

  OS << '\01' << (OMD->isInstanceMethod() ? '-' : '+')
     << '[' << CD->getName();
  if (const ObjCCategoryImplDecl *CID =
        dyn_cast<ObjCCategoryImplDecl>(OMD->getDeclContext())) {
    OS << '(' << CID->getName() << ')'; // print the category name, not the pointer value
  }
  OS << ' ' << OMD->getSelector().getAsString() << ']';

  CodeGenTypes &Types = CGM.getTypes();
  llvm::FunctionType *MethodTy =
    Types.GetFunctionType(Types.arrangeObjCMethodDeclaration(OMD));
  llvm::Function *Method =
    llvm::Function::Create(MethodTy,
                           llvm::GlobalValue::InternalLinkage,
                           Name.str(),
                           &CGM.getModule());
  MethodDefinitions.insert(std::make_pair(OMD, Method));

  return Method;
}
Example 14
 /// Insert a block into the worklist and set its stack depth.
 void insert(SILBasicBlock *BB, int StackDepth) {
   auto Iter = Block2StackDepth.find(BB);
   if (Iter != Block2StackDepth.end()) {
     // We already handled the block.
     assert(StackDepth >= 0);
     if (Iter->second < 0) {
       // Update the stack depth if we didn't set it yet for the block.
       Iter->second = StackDepth;
     } else {
       assert(Iter->second == StackDepth &&
                "inconsistent stack depth at a CFG merge point");
     }
   } else {
     Block2StackDepth[BB] = StackDepth;
     ToHandle.push_back(BB);
   }
 }
Example 15
static void mapOperands(SILInstruction *I,
                        const llvm::DenseMap<ValueBase *, SILValue> &ValueMap) {
  for (auto &Opd : I->getAllOperands()) {
    SILValue OrigVal = Opd.get();
    ValueBase *OrigDef = OrigVal.getDef();
    auto Found = ValueMap.find(OrigDef);
    if (Found != ValueMap.end()) {
      SILValue MappedVal = Found->second;
      unsigned ResultIdx = OrigVal.getResultNumber();
      // All mapped instructions have their result number set to zero, except
      // for arguments that we followed along one edge to their incoming value
      // on that edge.
      if (isa<SILArgument>(OrigDef))
        ResultIdx = MappedVal.getResultNumber();
      Opd.set(SILValue(MappedVal.getDef(), ResultIdx));
    }
  }
}
Example 16
void CGObjCJit::AddMethodsToClass(void *theClass) {

  // Methods need to be added at runtime. Method function pointers (IMP)
  // are not available until then.

  CGBuilderTy Builder(JitInitBlock);
  CodeGen::CodeGenFunction CGF(CGM);

  void *theMetaclass = _object_getClass(theClass);

  llvm::DenseMap<const ObjCMethodDecl*, llvm::Function*>::iterator I =
      MethodDefinitions.begin();

  while (I != MethodDefinitions.end()) {
    const ObjCMethodDecl *D = I->first;
    std::string TypeStr;
    CGM.getContext().getObjCEncodingForMethodDecl(const_cast<ObjCMethodDecl*>(D),
        TypeStr);
    // Keep the type strings in a set so the returned C string stays alive.
    const char* TypeCStr =
      MethodTypeStrings.insert(MethodTypeStrings.begin(), TypeStr)->c_str();
    void *ClassObject = D->isClassMethod() ? theMetaclass : theClass;
    llvm::Value *ClassArg =
      llvm::Constant::getIntegerValue(ObjCTypes.ClassPtrTy,
                                      llvm::APInt(sizeof(void*) * 8,
                                          (uint64_t)ClassObject));
    llvm::Value *SelectorArg = GetSelector(CGF, D->getSelector());
    llvm::Value *TypeArg =
      llvm::Constant::getIntegerValue(ObjCTypes.Int8PtrTy,
                                      llvm::APInt(sizeof(void*) * 8,
                                          (uint64_t)TypeCStr));

    llvm::Value *MethodArg = Builder.CreateBitCast(I->second, ImpPtrTy);

    Builder.CreateCall4(fn_class_addMethod,
                        ClassArg,
                        SelectorArg,
                        MethodArg,
                        TypeArg);
    I++;
  }

  // Done with list for this implementation, so clear it
  MethodDefinitions.clear();
}
Example 17
void ICSlotRewrite::commit(CommitHook* hook, std::vector<void*> gc_references) {
    bool still_valid = true;
    for (int i = 0; i < dependencies.size(); i++) {
        int orig_version = dependencies[i].second;
        ICInvalidator* invalidator = dependencies[i].first;
        if (orig_version != invalidator->version()) {
            still_valid = false;
            break;
        }
    }
    if (!still_valid) {
        if (VERBOSITY() >= 3)
            printf("not committing %s icentry since a dependency got updated before commit\n", debug_name);
        return;
    }

    uint8_t* slot_start = getSlotStart();
    uint8_t* continue_point = (uint8_t*)ic->continue_addr;

    bool do_commit = hook->finishAssembly(continue_point - slot_start);

    if (!do_commit)
        return;

    assert(!assembler.hasFailed());

    for (int i = 0; i < dependencies.size(); i++) {
        ICInvalidator* invalidator = dependencies[i].first;
        invalidator->addDependent(ic_entry);
    }

    ic->next_slot_to_try++;

    // if (VERBOSITY()) printf("Commiting to %p-%p\n", start, start + ic->slot_size);
    memcpy(slot_start, buf, ic->getSlotSize());

    for (auto p : ic_entry->gc_references) {
        int& i = ic_gc_references[p];
        if (i == 1)
            ic_gc_references.erase(p);
        else
            --i;
    }
    ic_entry->gc_references = std::move(gc_references);
    for (auto p : ic_entry->gc_references)
        ic_gc_references[p]++;

    ic->times_rewritten++;

    if (ic->times_rewritten == IC_MEGAMORPHIC_THRESHOLD) {
        static StatCounter megamorphic_ics("megamorphic_ics");
        megamorphic_ics.log();
    }

    llvm::sys::Memory::InvalidateInstructionCache(slot_start, ic->getSlotSize());
}
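The gc_references bookkeeping above treats ic_gc_references as a reference-count map: decrement and drop entries that reach zero for the old set, then increment for the new set. The same idiom in isolation — a sketch with void* keys as in the original; the acquire/release names are illustrative:

#include "llvm/ADT/DenseMap.h"

static llvm::DenseMap<void*, int> refcounts;

void acquire(void* p) {
    ++refcounts[p]; // operator[] default-initializes the count to 0
}

void release(void* p) {
    auto it = refcounts.find(p);
    if (it == refcounts.end())
        return;              // not tracked
    if (--it->second == 0)
        refcounts.erase(it); // drop the entry once the count hits zero
}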
Example 18
bool ClangASTImporter::LayoutRecordType(
    const clang::RecordDecl *record_decl, uint64_t &bit_size,
    uint64_t &alignment,
    llvm::DenseMap<const clang::FieldDecl *, uint64_t> &field_offsets,
    llvm::DenseMap<const clang::CXXRecordDecl *, clang::CharUnits>
        &base_offsets,
    llvm::DenseMap<const clang::CXXRecordDecl *, clang::CharUnits>
        &vbase_offsets) {
  RecordDeclToLayoutMap::iterator pos =
      m_record_decl_to_layout_map.find(record_decl);
  bool success = false;
  base_offsets.clear();
  vbase_offsets.clear();
  if (pos != m_record_decl_to_layout_map.end()) {
    bit_size = pos->second.bit_size;
    alignment = pos->second.alignment;
    field_offsets.swap(pos->second.field_offsets);
    base_offsets.swap(pos->second.base_offsets);
    vbase_offsets.swap(pos->second.vbase_offsets);
    m_record_decl_to_layout_map.erase(pos);
    success = true;
  } else {
    bit_size = 0;
    alignment = 0;
    field_offsets.clear();
  }
  return success;
}
Example 19
  ModuleDecl *loadModule(SourceLoc importLoc,
                         ArrayRef<std::pair<Identifier, SourceLoc>> path) {
    // FIXME: Implement submodule support!
    Identifier name = path[0].first;

    auto it = ModuleWrappers.find(name);
    if (it != ModuleWrappers.end())
      return it->second->getParentModule();

    auto *decl = ModuleDecl::create(name, SwiftContext);
    // Silence error messages about testably importing a Clang module.
    decl->setTestingEnabled();
    decl->setHasResolvedImports();
    auto wrapperUnit = new (SwiftContext) DWARFModuleUnit(*decl);
    ModuleWrappers.insert({name, wrapperUnit});
    decl->addFile(*wrapperUnit);

    // Force load adapter modules for all imported modules.
    decl->forAllVisibleModules({}, [](ModuleDecl::ImportedModule import) {});

    return decl;
  }
Example 20
std::unique_ptr<ICInfo> registerCompiledPatchpoint(uint8_t* start_addr, uint8_t* slowpath_start_addr,
                                                   uint8_t* continue_addr, uint8_t* slowpath_rtn_addr,
                                                   const ICSetupInfo* ic, StackInfo stack_info, LiveOutSet live_outs) {
    assert(slowpath_start_addr - start_addr >= ic->num_slots * ic->slot_size);
    assert(slowpath_rtn_addr > slowpath_start_addr);
    assert(slowpath_rtn_addr <= start_addr + ic->totalSize());

    assembler::GenericRegister return_register;
    assert(ic->getCallingConvention() == llvm::CallingConv::C
           || ic->getCallingConvention() == llvm::CallingConv::PreserveAll);

    if (ic->hasReturnValue()) {
        static const int DWARF_RAX = 0;
        // It's possible that the return value doesn't get used, in which case
        // we can avoid copying back into RAX at the end
        live_outs.clear(DWARF_RAX);

        // TODO we only need to do this if 0 was in live_outs, since if it wasn't, that indicates
        // the return value won't be used and we can optimize based on that.
        return_register = assembler::RAX;
    }

    // We could let the user just slide down the nop section, but instead we
    // emit jumps to the end. Not sure whether this is worth it.
    for (int i = 0; i < ic->num_slots; i++) {
        uint8_t* start = start_addr + i * ic->slot_size;
        // std::unique_ptr<MCWriter> writer(createMCWriter(start, ic->slot_size * (ic->num_slots - i), 0));
        // writer->emitNop();
        // writer->emitGuardFalse();

        Assembler writer(start, ic->slot_size);
        writer.nop();
        // writer.trap();
        // writer.jmp(JumpDestination::fromStart(ic->slot_size * (ic->num_slots - i)));
        writer.jmp(JumpDestination::fromStart(slowpath_start_addr - start));
    }

    ICInfo* icinfo = new ICInfo(start_addr, slowpath_rtn_addr, continue_addr, stack_info, ic->num_slots, ic->slot_size,
                                ic->getCallingConvention(), std::move(live_outs), return_register, ic->type_recorder);

    assert(!ics_by_return_addr.count(slowpath_rtn_addr));
    ics_by_return_addr[slowpath_rtn_addr] = icinfo;

    registerGCTrackedICInfo(icinfo);

    return std::unique_ptr<ICInfo>(icinfo);
}
Example 21
 void VisitCallExpr(CallExpr* CE) {
    Visit(CE->getCallee());
    FunctionDecl* FDecl = CE->getDirectCallee();
    if (FDecl && isDeclCandidate(FDecl)) {
      decl_map_t::const_iterator it = m_NonNullArgIndexs.find(FDecl);
      const std::bitset<32>& ArgIndexs = it->second;
      Sema::ContextRAII pushedDC(m_Sema, FDecl);
      for (int index = 0; index < 32; ++index) {
        if (ArgIndexs.test(index)) {
          // Get the argument with the nonnull attribute.
          Expr* Arg = CE->getArg(index);
          CE->setArg(index, SynthesizeCheck(Arg));
        }
      }
    }
  }
Example 22
 bool VisitCallExpr(CallExpr* CE) {
   VisitStmt(CE->getCallee());
   FunctionDecl* FDecl = CE->getDirectCallee();
   if (FDecl && isDeclCandidate(FDecl)) {
     decl_map_t::const_iterator it = m_NonNullArgIndexs.find(FDecl);
     const std::bitset<32>& ArgIndexs = it->second;
     Sema::ContextRAII pushedDC(m_Sema, FDecl);
     for (int index = 0; index < 32; ++index) {
       if (ArgIndexs.test(index)) {
         // Get the argument with the nonnull attribute.
         Expr* Arg = CE->getArg(index);
         if (Arg->getType().getTypePtr()->isPointerType()
             && !llvm::isa<clang::CXXThisExpr>(Arg))
           CE->setArg(index, SynthesizeCheck(Arg));
       }
     }
   }
   return true;
 }
Example 23
/// Process an apply instruction which uses a partial_apply
/// as its callee.
/// Returns true on success.
bool PartialApplyCombiner::processSingleApply(FullApplySite AI) {
    Builder.setInsertionPoint(AI.getInstruction());
    Builder.setCurrentDebugScope(AI.getDebugScope());

    // Prepare the args.
    SmallVector<SILValue, 8> Args;
    // First the ApplyInst args.
    for (auto Op : AI.getArguments())
        Args.push_back(Op);

    SILInstruction *InsertionPoint = &*Builder.getInsertionPoint();
    // Next, the partial apply args.

    // Pre-process partial_apply arguments only once, lazily.
    if (isFirstTime) {
        isFirstTime = false;
        if (!allocateTemporaries())
            return false;
    }

    // Now, copy over the partial apply args.
    for (auto Op : PAI->getArguments()) {
        auto Arg = Op;
        // If there is a new temporary for this argument, use it instead.
        if (isa<AllocStackInst>(Arg)) {
            if (ArgToTmp.count(Arg)) {
                Op = ArgToTmp.lookup(Arg);
            }
        }
        Args.push_back(Op);
    }

    Builder.setInsertionPoint(InsertionPoint);
    Builder.setCurrentDebugScope(AI.getDebugScope());

    // The thunk that implements the partial apply calls the closure function
    // that expects all arguments to be consumed by the function. However, the
    // captured arguments are not arguments of *this* apply, so they are not
    // pre-incremented. When we combine the partial_apply and this apply into
    // a new apply we need to retain all of the closure non-address type
    // arguments.
    auto ParamInfo = PAI->getSubstCalleeType()->getParameters();
    auto PartialApplyArgs = PAI->getArguments();
    // Set of arguments that need to be released after each invocation.
    SmallVector<SILValue, 8> ToBeReleasedArgs;
    for (unsigned i = 0, e = PartialApplyArgs.size(); i < e; ++i) {
        SILValue Arg = PartialApplyArgs[i];
        if (!Arg->getType().isAddress()) {
            // Retain the argument as the callee may consume it.
            Builder.emitRetainValueOperation(PAI->getLoc(), Arg);
            // For non-consumed parameters (e.g. guaranteed), we also need to
            // insert releases after each apply instruction that we create.
            if (!ParamInfo[ParamInfo.size() - PartialApplyArgs.size() + i].
                    isConsumed())
                ToBeReleasedArgs.push_back(Arg);
        }
    }

    auto *F = FRI->getReferencedFunction();
    SILType FnType = F->getLoweredType();
    SILType ResultTy = F->getLoweredFunctionType()->getSILResult();
    ArrayRef<Substitution> Subs = PAI->getSubstitutions();
    if (!Subs.empty()) {
        FnType = FnType.substGenericArgs(PAI->getModule(), Subs);
        ResultTy = FnType.getAs<SILFunctionType>()->getSILResult();
    }

    FullApplySite NAI;
    if (auto *TAI = dyn_cast<TryApplyInst>(AI))
        NAI =
            Builder.createTryApply(AI.getLoc(), FRI, FnType, Subs, Args,
                                   TAI->getNormalBB(), TAI->getErrorBB());
    else
        NAI =
            Builder.createApply(AI.getLoc(), FRI, FnType, ResultTy, Subs, Args,
                                cast<ApplyInst>(AI)->isNonThrowing());

    // We also need to release the partial_apply instruction itself because it
    // is consumed by the apply instruction.
    if (auto *TAI = dyn_cast<TryApplyInst>(AI)) {
        Builder.setInsertionPoint(TAI->getNormalBB()->begin());
        for (auto Arg : ToBeReleasedArgs) {
            Builder.emitReleaseValueOperation(PAI->getLoc(), Arg);
        }
        Builder.createStrongRelease(AI.getLoc(), PAI, Atomicity::Atomic);
        Builder.setInsertionPoint(TAI->getErrorBB()->begin());
        // Release the non-consumed parameters.
        for (auto Arg : ToBeReleasedArgs) {
            Builder.emitReleaseValueOperation(PAI->getLoc(), Arg);
        }
        Builder.createStrongRelease(AI.getLoc(), PAI, Atomicity::Atomic);
        Builder.setInsertionPoint(AI.getInstruction());
    } else {
        // Release the non-consumed parameters.
        for (auto Arg : ToBeReleasedArgs) {
            Builder.emitReleaseValueOperation(PAI->getLoc(), Arg);
        }
        Builder.createStrongRelease(AI.getLoc(), PAI, Atomicity::Atomic);
    }

    SilCombiner->replaceInstUsesWith(*AI.getInstruction(), NAI.getInstruction());
    SilCombiner->eraseInstFromFunction(*AI.getInstruction());
    return true;
}
Example 24
/// Returns true on success.
bool PartialApplyCombiner::allocateTemporaries() {
    // Copy the original arguments of the partial_apply into
    // newly created temporaries and use these temporaries instead of
    // the original arguments afterwards.
    // This is done to "extend" the lifetime of the original partial_apply
    // arguments, as they may be destroyed/deallocated before the last
    // use by one of the apply instructions.
    // TODO:
    // Copy arguments of the partial_apply into new temporaries
    // only if the lifetime of arguments ends before their uses
    // by apply instructions.
    bool needsReleases = false;
    CanSILFunctionType PAITy =
        dyn_cast<SILFunctionType>(PAI->getCallee()->getType().getSwiftType());

    // Emit a destroy value for each captured closure argument.
    ArrayRef<SILParameterInfo> Params = PAITy->getParameters();
    auto Args = PAI->getArguments();
    unsigned Delta = Params.size() - Args.size();

    llvm::SmallVector<std::pair<SILValue, unsigned>, 8> ArgsToHandle;
    for (unsigned AI = 0, AE = Args.size(); AI != AE; ++AI) {
        SILValue Arg = Args[AI];
        SILParameterInfo Param = Params[AI + Delta];
        if (Param.isIndirectMutating())
            continue;
        // Create a temporary and copy the argument into it, if:
        // - the argument stems from an alloc_stack
        // - the argument is consumed by the callee and is indirect
        //   (e.g. it is an @in argument)
        if (isa<AllocStackInst>(Arg) ||
                (Param.isConsumed() && Param.isIndirect())) {
            // If the temporary is non-trivial, we need to release it later.
            if (!Arg->getType().isTrivial(PAI->getModule()))
                needsReleases = true;
            ArgsToHandle.push_back(std::make_pair(Arg, AI));
        }
    }

    if (needsReleases) {
        // Compute the set of endpoints, which will be used to insert releases of
        // temporaries. This may fail if the frontier is located on a critical edge
        // which we may not split (no CFG changes in SILCombine).
        ValueLifetimeAnalysis VLA(PAI);
        if (!VLA.computeFrontier(PAFrontier, ValueLifetimeAnalysis::DontModifyCFG))
            return false;
    }

    for (auto ArgWithIdx : ArgsToHandle) {
        SILValue Arg = ArgWithIdx.first;
        Builder.setInsertionPoint(PAI->getFunction()->begin()->begin());
        // Create a new temporary at the beginning of a function.
        auto *Tmp = Builder.createAllocStack(PAI->getLoc(), Arg->getType(),
        {/*Constant*/ true, ArgWithIdx.second});
        Builder.setInsertionPoint(PAI);
        // Copy argument into this temporary.
        Builder.createCopyAddr(PAI->getLoc(), Arg, Tmp,
                               IsTake_t::IsNotTake,
                               IsInitialization_t::IsInitialization);

        Tmps.push_back(Tmp);
        ArgToTmp.insert(std::make_pair(Arg, Tmp));
    }
    return true;
}
Example 25
namespace pyston {

using namespace pyston::assembler;

#define MAX_RETRY_BACKOFF 1024

// TODO not right place for this...
int64_t ICInvalidator::version() {
    return cur_version;
}

void ICInvalidator::addDependent(ICSlotInfo* entry_info) {
    dependents.insert(entry_info);
}

void ICInvalidator::invalidateAll() {
    cur_version++;
    for (ICSlotInfo* slot : dependents) {
        slot->clear();
    }
    dependents.clear();
}

void ICSlotInfo::clear() {
    ic->clear(this);
}

ICSlotRewrite::ICSlotRewrite(ICInfo* ic, const char* debug_name)
    : ic(ic), debug_name(debug_name), buf((uint8_t*)malloc(ic->getSlotSize())), assembler(buf, ic->getSlotSize()) {
    assembler.nop();

    if (VERBOSITY() >= 4)
        printf("starting %s icentry\n", debug_name);
}

ICSlotRewrite::~ICSlotRewrite() {
    free(buf);
}

void ICSlotRewrite::abort() {
    ic->retry_backoff = std::min(MAX_RETRY_BACKOFF, 2 * ic->retry_backoff);
    ic->retry_in = ic->retry_backoff;
}

ICSlotInfo* ICSlotRewrite::prepareEntry() {
    this->ic_entry = ic->pickEntryForRewrite(debug_name);
    return this->ic_entry;
}

uint8_t* ICSlotRewrite::getSlotStart() {
    assert(ic_entry != NULL);
    return (uint8_t*)ic->start_addr + ic_entry->idx * ic->getSlotSize();
}

// Map of gc pointers -> number of ics that point to them.
static llvm::DenseMap<void*, int> ic_gc_references;

void ICSlotRewrite::commit(CommitHook* hook, std::vector<void*> gc_references) {
    bool still_valid = true;
    for (int i = 0; i < dependencies.size(); i++) {
        int orig_version = dependencies[i].second;
        ICInvalidator* invalidator = dependencies[i].first;
        if (orig_version != invalidator->version()) {
            still_valid = false;
            break;
        }
    }
    if (!still_valid) {
        if (VERBOSITY() >= 3)
            printf("not committing %s icentry since a dependency got updated before commit\n", debug_name);
        return;
    }

    uint8_t* slot_start = getSlotStart();
    uint8_t* continue_point = (uint8_t*)ic->continue_addr;

    bool do_commit = hook->finishAssembly(continue_point - slot_start);

    if (!do_commit)
        return;

    assert(!assembler.hasFailed());

    for (int i = 0; i < dependencies.size(); i++) {
        ICInvalidator* invalidator = dependencies[i].first;
        invalidator->addDependent(ic_entry);
    }

    ic->next_slot_to_try++;

    // if (VERBOSITY()) printf("Commiting to %p-%p\n", start, start + ic->slot_size);
    memcpy(slot_start, buf, ic->getSlotSize());

    for (auto p : ic_entry->gc_references) {
        int& i = ic_gc_references[p];
        if (i == 1)
            ic_gc_references.erase(p);
        else
            --i;
    }
    ic_entry->gc_references = std::move(gc_references);
    for (auto p : ic_entry->gc_references)
        ic_gc_references[p]++;

    ic->times_rewritten++;

    if (ic->times_rewritten == IC_MEGAMORPHIC_THRESHOLD) {
        static StatCounter megamorphic_ics("megamorphic_ics");
        megamorphic_ics.log();
    }

    llvm::sys::Memory::InvalidateInstructionCache(slot_start, ic->getSlotSize());
}

void ICSlotRewrite::gc_visit(GCVisitor* visitor) {
    for (auto& dependency : dependencies) {
        visitor->visitPotentialRedundant(dependency.first);
    }
}

void ICSlotRewrite::addDependenceOn(ICInvalidator& invalidator) {
    dependencies.push_back(std::make_pair(&invalidator, invalidator.version()));
}

int ICSlotRewrite::getSlotSize() {
    return ic->getSlotSize();
}

int ICSlotRewrite::getScratchRspOffset() {
    assert(ic->stack_info.scratch_size);
    return ic->stack_info.scratch_rsp_offset;
}

int ICSlotRewrite::getScratchSize() {
    return ic->stack_info.scratch_size;
}

TypeRecorder* ICSlotRewrite::getTypeRecorder() {
    return ic->type_recorder;
}

assembler::GenericRegister ICSlotRewrite::returnRegister() {
    return ic->return_register;
}



std::unique_ptr<ICSlotRewrite> ICInfo::startRewrite(const char* debug_name) {
    return std::unique_ptr<ICSlotRewrite>(new ICSlotRewrite(this, debug_name));
}

ICSlotInfo* ICInfo::pickEntryForRewrite(const char* debug_name) {
    int num_slots = getNumSlots();
    for (int _i = 0; _i < num_slots; _i++) {
        int i = (_i + next_slot_to_try) % num_slots;

        ICSlotInfo& sinfo = slots[i];
        assert(sinfo.num_inside >= 0);
        if (sinfo.num_inside)
            continue;

        if (VERBOSITY() >= 4) {
            printf("picking %s icentry to in-use slot %d at %p\n", debug_name, i, start_addr);
        }

        next_slot_to_try = i;
        return &sinfo;
    }
    if (VERBOSITY() >= 4)
        printf("not committing %s icentry since there are no available slots\n", debug_name);
    return NULL;
}

// Keep track of all the ICInfo(s) that we create, because they contain pointers to
// Pyston heap objects that we have written into the generated code and may need to scan.
static llvm::DenseSet<ICInfo*> ics_list;
static llvm::DenseMap<void*, ICInfo*> ics_by_return_addr;

void registerGCTrackedICInfo(ICInfo* ic) {
#if MOVING_GC
    assert(ics_list.count(ic) == 0);
    ics_list.insert(ic);
#endif
}

void deregisterGCTrackedICInfo(ICInfo* ic) {
#if MOVING_GC
    assert(ics_list.count(ic) == 1);
    ics_list.erase(ic);
#endif
}

ICInfo::ICInfo(void* start_addr, void* slowpath_rtn_addr, void* continue_addr, StackInfo stack_info, int num_slots,
               int slot_size, llvm::CallingConv::ID calling_conv, LiveOutSet _live_outs,
               assembler::GenericRegister return_register, TypeRecorder* type_recorder)
    : next_slot_to_try(0),
      stack_info(stack_info),
      num_slots(num_slots),
      slot_size(slot_size),
      calling_conv(calling_conv),
      live_outs(std::move(_live_outs)),
      return_register(return_register),
      type_recorder(type_recorder),
      retry_in(0),
      retry_backoff(1),
      times_rewritten(0),
      start_addr(start_addr),
      slowpath_rtn_addr(slowpath_rtn_addr),
      continue_addr(continue_addr) {
    for (int i = 0; i < num_slots; i++) {
        slots.emplace_back(this, i);
    }

#if MOVING_GC
    assert(ics_list.count(this) == 0);
#endif
}

ICInfo::~ICInfo() {
#if MOVING_GC
    assert(ics_list.count(this) == 0);
#endif
}

std::unique_ptr<ICInfo> registerCompiledPatchpoint(uint8_t* start_addr, uint8_t* slowpath_start_addr,
                                                   uint8_t* continue_addr, uint8_t* slowpath_rtn_addr,
                                                   const ICSetupInfo* ic, StackInfo stack_info, LiveOutSet live_outs) {
    assert(slowpath_start_addr - start_addr >= ic->num_slots * ic->slot_size);
    assert(slowpath_rtn_addr > slowpath_start_addr);
    assert(slowpath_rtn_addr <= start_addr + ic->totalSize());

    assembler::GenericRegister return_register;
    assert(ic->getCallingConvention() == llvm::CallingConv::C
           || ic->getCallingConvention() == llvm::CallingConv::PreserveAll);

    if (ic->hasReturnValue()) {
        static const int DWARF_RAX = 0;
        // It's possible that the return value doesn't get used, in which case
        // we can avoid copying back into RAX at the end
        live_outs.clear(DWARF_RAX);

        // TODO we only need to do this if 0 was in live_outs, since if it wasn't, that indicates
        // the return value won't be used and we can optimize based on that.
        return_register = assembler::RAX;
    }

    // We could let the user just slide down the nop section, but instead we
    // emit jumps to the end. Not sure whether this is worth it.
    for (int i = 0; i < ic->num_slots; i++) {
        uint8_t* start = start_addr + i * ic->slot_size;
        // std::unique_ptr<MCWriter> writer(createMCWriter(start, ic->slot_size * (ic->num_slots - i), 0));
        // writer->emitNop();
        // writer->emitGuardFalse();

        Assembler writer(start, ic->slot_size);
        writer.nop();
        // writer.trap();
        // writer.jmp(JumpDestination::fromStart(ic->slot_size * (ic->num_slots - i)));
        writer.jmp(JumpDestination::fromStart(slowpath_start_addr - start));
    }

    ICInfo* icinfo = new ICInfo(start_addr, slowpath_rtn_addr, continue_addr, stack_info, ic->num_slots, ic->slot_size,
                                ic->getCallingConvention(), std::move(live_outs), return_register, ic->type_recorder);

    assert(!ics_by_return_addr.count(slowpath_rtn_addr));
    ics_by_return_addr[slowpath_rtn_addr] = icinfo;

    registerGCTrackedICInfo(icinfo);

    return std::unique_ptr<ICInfo>(icinfo);
}

void deregisterCompiledPatchpoint(ICInfo* ic) {
    assert(ics_by_return_addr.count(ic->slowpath_rtn_addr));
    ics_by_return_addr.erase(ic->slowpath_rtn_addr);

    deregisterGCTrackedICInfo(ic);
}

ICInfo* getICInfo(void* rtn_addr) {
    // TODO: load this from the CF instead of tracking it separately
    auto&& it = ics_by_return_addr.find(rtn_addr);
    if (it == ics_by_return_addr.end())
        return NULL;
    return it->second;
}

void ICInfo::clear(ICSlotInfo* icentry) {
    assert(icentry);

    uint8_t* start = (uint8_t*)start_addr + icentry->idx * getSlotSize();

    if (VERBOSITY() >= 4)
        printf("clearing patchpoint %p, slot at %p\n", start_addr, start);

    Assembler writer(start, getSlotSize());
    writer.nop();
    writer.jmp(JumpDestination::fromStart(getSlotSize()));
    assert(writer.bytesWritten() <= IC_INVALDITION_HEADER_SIZE);

    // std::unique_ptr<MCWriter> writer(createMCWriter(start, getSlotSize(), 0));
    // writer->emitNop();
    // writer->emitGuardFalse();

    // writer->endWithSlowpath();
    llvm::sys::Memory::InvalidateInstructionCache(start, getSlotSize());
}

bool ICInfo::shouldAttempt() {
    if (retry_in) {
        retry_in--;
        return false;
    }
    // Note(kmod): in some pathological deeply-recursive cases, it's important that we set the
    // retry counter even if we attempt it again.  We could probably handle this by setting
    // the backoff to 0 on commit, and then setting the retry to the backoff here.

    return !isMegamorphic();
}

bool ICInfo::isMegamorphic() {
    return times_rewritten >= IC_MEGAMORPHIC_THRESHOLD;
}

void ICInfo::visitGCReferences(gc::GCVisitor* v) {
    for (auto&& p : ic_gc_references) {
        v->visitNonRelocatable(p.first);
    }
#if MOVING_GC
    for (const auto& p : ics_list) {
        for (auto& slot : p->slots) {
            v->visitNonRelocatableRange(&slot.gc_references[0], &slot.gc_references[slot.gc_references.size()]);
        }
    }
#endif
}
}
Example 26
 bool insertAsUnhandled(SILBasicBlock *Pred) {
   return Block2StackDepth.insert({Pred, -2}).second;
 }
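This works because DenseMap::insert returns a pair whose bool second member is true only when the key was actually inserted — a tiny self-contained usage sketch:

#include "llvm/ADT/DenseMap.h"

llvm::DenseMap<int, int> Depths;
bool inserted = Depths.insert({7, -2}).second; // true: key 7 was absent
bool repeat = Depths.insert({7, 99}).second;   // false: key 7 already mapped; its value stays -2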
Example 27
void deregisterCompiledPatchpoint(ICInfo* ic) {
    assert(ics_by_return_addr.count(ic->slowpath_rtn_addr));
    ics_by_return_addr.erase(ic->slowpath_rtn_addr);

    deregisterGCTrackedICInfo(ic);
}
Example 28
 int getStackDepth(SILBasicBlock *BB) {
   assert(Block2StackDepth.find(BB) != Block2StackDepth.end());
   int Depth = Block2StackDepth.lookup(BB);
   assert(Depth >= 0 && "EndBlock not reachable from StartBlock");
   return Depth;
 }
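getStackDepth probes the map twice, once for the find inside the assert and once for the lookup. The iterator from a single find already carries the value — a sketch of the one-probe variant (the assert message is illustrative):

 int getStackDepth(SILBasicBlock *BB) {
   auto Iter = Block2StackDepth.find(BB);
   assert(Iter != Block2StackDepth.end() && "block not yet handled");
   int Depth = Iter->second;
   assert(Depth >= 0 && "EndBlock not reachable from StartBlock");
   return Depth;
 }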
Example 29
 // Iterates over the given map, printing each entry's key and value.
 int printMap(llvm::DenseMap<llvm::Instruction*, int>& map){
     llvm::DenseMap<llvm::Instruction*, int>::iterator i = map.begin();
     for (; i != map.end(); ++i)
         std::cerr << "Key: " << i->first << "\tValue: " << i->second << "\n";
     return 0;
 }
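The same traversal reads more directly as a range-based for, which DenseMap supports; note that DenseMap iteration order is unspecified, so the printed order may vary between runs — a sketch:

 int printMap(llvm::DenseMap<llvm::Instruction*, int>& map) {
     for (const auto &entry : map)
         std::cerr << "Key: " << entry.first << "\tValue: " << entry.second << "\n";
     return 0;
 }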
Example 30
 bool isClosureScope(SILFunction *F) { return scopeToIndexMap.count(F); }
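Recent LLVM (roughly 15 onward) also offers DenseMap::contains, which states the membership test a little more directly than count() — a sketch, assuming such a toolchain:

 bool isClosureScope(SILFunction *F) { return scopeToIndexMap.contains(F); }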