//===----------------------------------------------------------------------===// /// Scop class implement Scop::Scop(TempScop &tempScop, LoopInfo &LI, ScalarEvolution &ScalarEvolution) : SE(&ScalarEvolution), R(tempScop.getMaxRegion()), MaxLoopDepth(tempScop.getMaxLoopDepth()) { isl_ctx *ctx = isl_ctx_alloc(); ParamSetType &Params = tempScop.getParamSet(); Parameters.insert(Parameters.begin(), Params.begin(), Params.end()); isl_dim *dim = isl_dim_set_alloc(ctx, getNumParams(), 0); // TODO: Insert relations between parameters. // TODO: Insert constraints on parameters. Context = isl_set_universe (dim); SmallVector<Loop*, 8> NestLoops; SmallVector<unsigned, 8> Scatter; Scatter.assign(MaxLoopDepth + 1, 0); // Build the iteration domain, access functions and scattering functions // traversing the region tree. buildScop(tempScop, getRegion(), NestLoops, Scatter, LI); Stmts.push_back(new ScopStmt(*this, Scatter)); assert(NestLoops.empty() && "NestLoops not empty at top level!"); }
/// Validate input shapes/layouts for GatherPackedNode.
/// The node gathers columns of SOURCEDATA selected by the scalar time
/// sequence INDEXDATA; the output inherits INDEXDATA's MBLayout and
/// SOURCEDATA's sample layout (minus the trailing dimension when the source
/// has no time dimension of its own).
/*virtual*/ void GatherPackedNode<ElemType>::Validate(bool isFinalValidationPass) /*override*/
{
    ComputationNodeBase::Validate(isFinalValidationPass);

    // inherit MBLayout from indexData
    m_pMBLayout = Input(INDEXDATA)->GetMBLayout();
    // The index input must be sequence data (checked only on the final pass,
    // when layouts are guaranteed to be available).
    if (isFinalValidationPass && (!Input(INDEXDATA)->HasMBLayout()))
        LogicError("%ls requires first argument (index data) to have a time dimension.", NodeDescription().c_str());

    bool sourceHasTimeDimension = Input(SOURCEDATA)->HasMBLayout();

    // Each index must be a scalar (one element per time step).
    if (isFinalValidationPass && Input(INDEXDATA)->GetSampleLayout().GetNumElements() != 1)
        InvalidArgument("%ls requires the first argument (index data) to be a scalar time sequence.", NodeDescription().c_str());

    // inherit tensor dimension from sourceData, minus the last (column or time) dimension. TODO this needs to become simpler...
    if (sourceHasTimeDimension)
        // Source is itself a sequence: keep its full per-sample layout.
        SetDims(Input(SOURCEDATA)->GetSampleLayout(), HasMBLayout());
    else
    {
        // Source is a plain tensor: its last axis plays the role of the
        // "column" axis being indexed, so drop it from the output layout.
        SmallVector<size_t> layout = { 1 }; // Scalar
        if (Input(SOURCEDATA)->GetSampleLayout().GetRank() > 1)
        {
            auto srcLayout = Input(SOURCEDATA)->GetSampleLayout().GetDims();
            layout.assign(srcLayout.begin(), srcLayout.end() - 1);
        }
        SetDims(TensorShape(layout), HasMBLayout());
    }
}
// Helper for AddGlue to clone node operands. static void CloneNodeWithValues(SDNode *N, SelectionDAG *DAG, ArrayRef<EVT> VTs, SDValue ExtraOper = SDValue()) { SmallVector<SDValue, 8> Ops(N->op_begin(), N->op_end()); if (ExtraOper.getNode()) Ops.push_back(ExtraOper); SDVTList VTList = DAG->getVTList(VTs); MachineSDNode *MN = dyn_cast<MachineSDNode>(N); // Store memory references. SmallVector<MachineMemOperand *, 2> MMOs; if (MN) MMOs.assign(MN->memoperands_begin(), MN->memoperands_end()); DAG->MorphNodeTo(N, N->getOpcode(), VTList, Ops); // Reset the memory references if (MN) DAG->setNodeMemRefs(MN, MMOs); }
/// Construct a Scop from an analyzed TempScop using a caller-provided
/// isl context.
///
/// Builds the parameter context, walks the region tree to create statement
/// domains/accesses/scatterings, then realigns all parameter spaces and adds
/// parameter bounds to the context.
Scop::Scop(TempScop &tempScop, LoopInfo &LI, ScalarEvolution &ScalarEvolution,
           isl_ctx *Context)
    : SE(&ScalarEvolution), R(tempScop.getMaxRegion()),
      MaxLoopDepth(tempScop.getMaxLoopDepth()) {
  // The isl context is owned by the caller; we only keep a handle.
  IslCtx = Context;
  buildContext();

  SmallVector<Loop *, 8> NestLoops;
  SmallVector<unsigned, 8> Scatter;

  // One scattering dimension per loop level, plus the outermost position.
  Scatter.assign(MaxLoopDepth + 1, 0);

  // Build the iteration domain, access functions and scattering functions
  // traversing the region tree.
  buildScop(tempScop, getRegion(), NestLoops, Scatter, LI);

  // Must run after buildScop: statements created above may each carry their
  // own parameter spaces that need unifying before bounds are added.
  realignParams();
  addParameterBounds();

  assert(NestLoops.empty() && "NestLoops not empty at top level!");
}
/// parseTypeTupleBody /// type-tuple: /// '(' type-tuple-body? ')' /// type-tuple-body: /// type-tuple-element (',' type-tuple-element)* '...'? /// type-tuple-element: /// identifier ':' type /// type ParserResult<TupleTypeRepr> Parser::parseTypeTupleBody() { Parser::StructureMarkerRAII ParsingTypeTuple(*this, Tok); SourceLoc RPLoc, LPLoc = consumeToken(tok::l_paren); SourceLoc EllipsisLoc; unsigned EllipsisIdx; SmallVector<TypeRepr *, 8> ElementsR; // We keep track of the labels separately, and apply them at the end. SmallVector<std::tuple<Identifier, SourceLoc, Identifier, SourceLoc>, 4> Labels; ParserStatus Status = parseList(tok::r_paren, LPLoc, RPLoc, tok::comma, /*OptionalSep=*/false, /*AllowSepAfterLast=*/false, diag::expected_rparen_tuple_type_list, [&] () -> ParserStatus { // If this is a deprecated use of the inout marker in an argument list, // consume the inout. SourceLoc InOutLoc; bool hasAnyInOut = false; bool hasValidInOut = false; if (consumeIf(tok::kw_inout, InOutLoc)) { hasAnyInOut = true; hasValidInOut = false; } // If the tuple element starts with a potential argument label followed by a // ':' or another potential argument label, then the identifier is an // element tag, and it is followed by a type annotation. if (Tok.canBeArgumentLabel() && (peekToken().is(tok::colon) || peekToken().canBeArgumentLabel())) { // Consume the name Identifier name; if (!Tok.is(tok::kw__)) name = Context.getIdentifier(Tok.getText()); SourceLoc nameLoc = consumeToken(); // If there is a second name, consume it as well. Identifier secondName; SourceLoc secondNameLoc; if (Tok.canBeArgumentLabel()) { if (!Tok.is(tok::kw__)) secondName = Context.getIdentifier(Tok.getText()); secondNameLoc = consumeToken(); } // Consume the ':'. if (!consumeIf(tok::colon)) diagnose(Tok, diag::expected_parameter_colon); SourceLoc postColonLoc = Tok.getLoc(); // Consume 'inout' if present. 
if (!hasAnyInOut && consumeIf(tok::kw_inout, InOutLoc)) { hasValidInOut = true; } SourceLoc extraneousInOutLoc; while (consumeIf(tok::kw_inout, extraneousInOutLoc)) { diagnose(Tok, diag::parameter_inout_var_let_repeated) .fixItRemove(extraneousInOutLoc); } // Parse the type annotation. ParserResult<TypeRepr> type = parseType(diag::expected_type); if (type.hasCodeCompletion()) return makeParserCodeCompletionStatus(); if (type.isNull()) return makeParserError(); if (!hasValidInOut && hasAnyInOut) { diagnose(Tok.getLoc(), diag::inout_as_attr_disallowed) .fixItRemove(InOutLoc) .fixItInsert(postColonLoc, "inout "); } // If an 'inout' marker was specified, build the type. Note that we bury // the inout locator within the named locator. This is weird but required // by sema apparently. if (InOutLoc.isValid()) type = makeParserResult(new (Context) InOutTypeRepr(type.get(), InOutLoc)); // Record the label. We will look at these at the end. if (Labels.empty()) { Labels.assign(ElementsR.size(), std::make_tuple(Identifier(), SourceLoc(), Identifier(), SourceLoc())); } Labels.push_back(std::make_tuple(name, nameLoc, secondName, secondNameLoc)); ElementsR.push_back(type.get()); } else { // Otherwise, this has to be a type. ParserResult<TypeRepr> type = parseType(); if (type.hasCodeCompletion()) return makeParserCodeCompletionStatus(); if (type.isNull()) return makeParserError(); if (InOutLoc.isValid()) type = makeParserResult(new (Context) InOutTypeRepr(type.get(), InOutLoc)); if (!Labels.empty()) { Labels.push_back(std::make_tuple(Identifier(), SourceLoc(), Identifier(), SourceLoc())); } ElementsR.push_back(type.get()); } // Parse '= expr' here so we can complain about it directly, rather // than dying when we see it. 
if (Tok.is(tok::equal)) { SourceLoc equalLoc = consumeToken(tok::equal); auto init = parseExpr(diag::expected_init_value); auto inFlight = diagnose(equalLoc, diag::tuple_type_init); if (init.isNonNull()) inFlight.fixItRemove(SourceRange(equalLoc, init.get()->getEndLoc())); } if (Tok.isEllipsis()) { if (EllipsisLoc.isValid()) { diagnose(Tok, diag::multiple_ellipsis_in_tuple) .highlight(EllipsisLoc) .fixItRemove(Tok.getLoc()); (void)consumeToken(); } else { EllipsisLoc = consumeToken(); EllipsisIdx = ElementsR.size() - 1; } } return makeParserSuccess(); }); if (EllipsisLoc.isValid() && ElementsR.empty()) { EllipsisLoc = SourceLoc(); } if (EllipsisLoc.isInvalid()) EllipsisIdx = ElementsR.size(); // If there were any labels, figure out which labels should go into the type // representation. if (!Labels.empty()) { assert(Labels.size() == ElementsR.size()); bool isFunctionType = Tok.isAny(tok::arrow, tok::kw_throws, tok::kw_rethrows); for (unsigned i : indices(ElementsR)) { auto ¤tLabel = Labels[i]; Identifier firstName = std::get<0>(currentLabel); SourceLoc firstNameLoc = std::get<1>(currentLabel); Identifier secondName = std::get<2>(currentLabel); SourceLoc secondNameLoc = std::get<3>(currentLabel); // True tuples have labels. if (!isFunctionType) { // If there were two names, complain. if (firstNameLoc.isValid() && secondNameLoc.isValid()) { auto diag = diagnose(firstNameLoc, diag::tuple_type_multiple_labels); if (firstName.empty()) { diag.fixItRemoveChars(firstNameLoc, ElementsR[i]->getStartLoc()); } else { diag.fixItRemove( SourceRange(Lexer::getLocForEndOfToken(SourceMgr,firstNameLoc), secondNameLoc)); } } // Form the named type representation. ElementsR[i] = new (Context) NamedTypeRepr(firstName, ElementsR[i], firstNameLoc); continue; } // If there was a first name, complain; arguments in function types are // always unlabeled. 
if (firstNameLoc.isValid() && !firstName.empty()) { auto diag = diagnose(firstNameLoc, diag::function_type_argument_label, firstName); if (secondNameLoc.isInvalid()) diag.fixItInsert(firstNameLoc, "_ "); else if (secondName.empty()) diag.fixItRemoveChars(firstNameLoc, ElementsR[i]->getStartLoc()); else diag.fixItReplace(SourceRange(firstNameLoc), "_"); } if (firstNameLoc.isValid() || secondNameLoc.isValid()) { // Form the named parameter type representation. ElementsR[i] = new (Context) NamedTypeRepr(secondName, ElementsR[i], secondNameLoc, firstNameLoc); } } } return makeParserResult(Status, TupleTypeRepr::create(Context, ElementsR, SourceRange(LPLoc, RPLoc), EllipsisLoc, EllipsisIdx)); }
/// Lower an IR call into target MIR for GlobalISel.
///
/// Emits the call-frame setup/teardown pseudos, marshals the (split)
/// outgoing arguments, builds the CALL instruction with the calling
/// convention's preserved-register mask, and copies any returned value back
/// into its virtual register(s). Returns false to bail out to fallback
/// lowering for unsupported cases.
bool X86CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                CallingConv::ID CallConv,
                                const MachineOperand &Callee,
                                const ArgInfo &OrigRet,
                                ArrayRef<ArgInfo> OrigArgs) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getParent()->getDataLayout();
  const X86Subtarget &STI = MF.getSubtarget<X86Subtarget>();
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  auto TRI = STI.getRegisterInfo();

  // Handle only Linux C, X86_64_SysV calling conventions for now.
  if (!STI.isTargetLinux() ||
      !(CallConv == CallingConv::C || CallConv == CallingConv::X86_64_SysV))
    return false;

  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto CallSeqStart = MIRBuilder.buildInstr(AdjStackDown);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.
  bool Is64Bit = STI.is64Bit();
  unsigned CallOpc = Callee.isReg()
                         ? (Is64Bit ? X86::CALL64r : X86::CALL32r)
                         : (Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32);

  auto MIB = MIRBuilder.buildInstrNoInsert(CallOpc).add(Callee).addRegMask(
      TRI->getCallPreservedMask(MF, CallConv));

  // Split aggregate arguments into legal value types; each part becomes its
  // own ArgInfo, unmerged from the original virtual register.
  SmallVector<ArgInfo, 8> SplitArgs;
  for (const auto &OrigArg : OrigArgs) {

    // TODO: handle not simple cases.
    if (OrigArg.Flags.isByVal())
      return false;

    if (!splitToValueTypes(OrigArg, SplitArgs, DL, MRI,
                           [&](ArrayRef<unsigned> Regs) {
                             MIRBuilder.buildUnmerge(Regs, OrigArg.Reg);
                           }))
      return false;
  }
  // Do the actual argument marshalling.
  OutgoingValueHandler Handler(MIRBuilder, MRI, MIB, CC_X86);
  if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
    return false;

  bool IsFixed = OrigArgs.empty() ? true : OrigArgs.back().IsFixed;
  if (STI.is64Bit() && !IsFixed && !STI.isCallingConvWin64(CallConv)) {
    // From AMD64 ABI document:
    // For calls that may call functions that use varargs or stdargs
    // (prototype-less calls or calls to functions containing ellipsis (...)
    // in the declaration) %al is used as hidden argument to specify the
    // number of SSE registers used. The contents of %al do not need to match
    // exactly the number of registers, but must be an upper bound on the
    // number of SSE registers used and is in the range 0 - 8 inclusive.

    MIRBuilder.buildInstr(X86::MOV8ri)
        .addDef(X86::AL)
        .addImm(Handler.getNumXmmRegs());
    MIB.addUse(X86::AL, RegState::Implicit);
  }

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.
  if (Callee.isReg())
    MIB->getOperand(0).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
        *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Callee, 0));

  // Finally we can copy the returned value back into its virtual-register. In
  // symmetry with the arguments, the physical register must be an
  // implicit-define of the call instruction.
  if (OrigRet.Reg) {
    SplitArgs.clear();
    SmallVector<unsigned, 8> NewRegs;

    if (!splitToValueTypes(OrigRet, SplitArgs, DL, MRI,
                           [&](ArrayRef<unsigned> Regs) {
                             NewRegs.assign(Regs.begin(), Regs.end());
                           }))
      return false;

    CallReturnHandler Handler(MIRBuilder, MRI, RetCC_X86, MIB);
    if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
      return false;

    // Multi-part return: merge the pieces back into the original register.
    if (!NewRegs.empty())
      MIRBuilder.buildMerge(OrigRet.Reg, NewRegs);
  }

  // Patch the stack size into the frame-setup pseudo now that marshalling
  // has computed it.
  CallSeqStart.addImm(Handler.getStackSize())
      .addImm(0 /* see getFrameTotalSize */)
      .addImm(0 /* see getFrameAdjustment */);

  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  MIRBuilder.buildInstr(AdjStackUp)
      .addImm(Handler.getStackSize())
      .addImm(0 /* NumBytesForCalleeToPop */);

  return true;
}