BlockStmt* ForLoop::copyBody(SymbolMap* map) {
  BlockStmt* retval = new BlockStmt();

  retval->astloc   = astloc;
  retval->blockTag = blockTag;

  for_alist(expr, body)
    retval->insertAtTail(expr->copy(map, true));

  update_symbols(retval, map);

  return retval;
}
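// A minimal standalone sketch (not compiler code) of the copy-with-symbol-map
// pattern copyBody() relies on: clone each child, then rewrite symbol
// references through the map, as copy(map, true) plus update_symbols() do.
// The Node/Sym types and copyWithMap() below are hypothetical, illustration only.
#include <map>
#include <vector>

struct Sym  { const char* name; };

struct Node {
  Sym*               sym;       // symbol this node references (may be null)
  std::vector<Node*> children;
};

// Clone 'n' recursively; any symbol found in 'symMap' is replaced by its
// mapping, all others are shared with the original tree.
Node* copyWithMap(const Node* n, const std::map<Sym*, Sym*>& symMap) {
  Node* result = new Node;

  std::map<Sym*, Sym*>::const_iterator it = symMap.find(n->sym);

  result->sym = (it != symMap.end()) ? it->second : n->sym;

  for (size_t i = 0; i < n->children.size(); i++)
    result->children.push_back(copyWithMap(n->children[i], symMap));

  return result;
}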
BlockStmt* ForLoop::buildForLoop(Expr*      indices,
                                 Expr*      iteratorExpr,
                                 BlockStmt* body,
                                 bool       coforall,
                                 bool       zippered) {
  VarSymbol*   index         = newTemp("_indexOfInterest");
  VarSymbol*   iterator      = newTemp("_iterator");
  CallExpr*    iterInit      = 0;
  CallExpr*    iterMove      = 0;
  ForLoop*     loop          = new ForLoop(index, iterator, body, zippered);
  LabelSymbol* continueLabel = new LabelSymbol("_continueLabel");
  LabelSymbol* breakLabel    = new LabelSymbol("_breakLabel");
  BlockStmt*   retval        = new BlockStmt();

  iterator->addFlag(FLAG_EXPR_TEMP);

  // Unzippered loop: treat all objects (including tuples) the same.
  if (zippered == false)
    iterInit = new CallExpr(PRIM_MOVE, iterator,
                            new CallExpr("_getIterator", iteratorExpr));
  // Zippered loop: expand the tuple to a tuple containing appropriate
  // iterators for each value.
  else
    iterInit = new CallExpr(PRIM_MOVE, iterator,
                            new CallExpr("_getIteratorZip", iteratorExpr));

  // Try to optimize anonymous range iteration; replaces iteratorExpr in place.
  optimizeAnonymousRangeIteration(iteratorExpr, zippered);

  index->addFlag(FLAG_INDEX_OF_INTEREST);

  iterMove = new CallExpr(PRIM_MOVE, index,
                          new CallExpr("iteratorIndex", iterator));

  if (indices == 0)
    indices = new UnresolvedSymExpr("chpl__elidedIdx");

  checkIndices(indices);

  destructureIndices(loop, indices, new SymExpr(index), coforall);

  if (coforall)
    index->addFlag(FLAG_COFORALL_INDEX_VAR);

  loop->mContinueLabel = continueLabel;
  loop->mBreakLabel    = breakLabel;

  loop->insertAtTail(new DefExpr(continueLabel));

  retval->insertAtTail(new DefExpr(index));
  retval->insertAtTail(new DefExpr(iterator));
  retval->insertAtTail(iterInit);
  retval->insertAtTail(new BlockStmt(iterMove, BLOCK_TYPE));
  retval->insertAtTail(loop);
  retval->insertAtTail(new DefExpr(breakLabel));
  retval->insertAtTail(new CallExpr("_freeIterator", iterator));

  return retval;
}
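// For reference, an approximate sketch of the block this builder returns for
// an unzippered 'for idx in expr { body }' (the zippered case differs only in
// calling _getIteratorZip). This is a hand-drawn simplification, not actual
// compiler output:
//
//   {
//     def _indexOfInterest;
//     def _iterator;
//     move(_iterator, _getIterator(expr));
//     { move(_indexOfInterest, iteratorIndex(_iterator)); }   // BLOCK_TYPE
//     ForLoop(_indexOfInterest, _iterator) {
//       def _continueLabel;
//       body   // with 'idx' destructured from _indexOfInterest
//     }
//     def _breakLabel;
//     _freeIterator(_iterator);
//   }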
static void addCloneOfDeinitBlock(Expr* aFini, SymbolMap& map,
                                  ShadowVarSymbol* svar) {
  BlockStmt* copyDB = svar->deinitBlock()->copy(&map);

  aFini->insertAfter(copyDB);

  // Let's drop the BlockStmt wrapper, to simplify the AST.
  copyDB->flattenAndRemove();
}
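// A minimal standalone sketch (hypothetical StmtList type, not compiler code)
// of the insert-then-flatten step above: the copied block's statements end up
// spliced directly after the anchor, with no wrapper node left behind.
#include <list>
#include <string>

typedef std::list<std::string> StmtList;

// Mirror of insertAfter() followed by flattenAndRemove(): splice a copied
// block's statements into the parent list right after 'anchor'.
void insertFlattenedAfter(StmtList&          parent,
                          StmtList::iterator anchor,
                          const StmtList&    blockCopy) {
  StmtList::iterator pos = anchor;

  ++pos;                                           // insert after the anchor
  parent.insert(pos, blockCopy.begin(), blockCopy.end());
}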
BlockStmt* ForLoop::buildForLoop(Expr*      indices,
                                 Expr*      iteratorExpr,
                                 BlockStmt* body,
                                 bool       coforall,
                                 bool       zippered) {
  VarSymbol*   index         = newTemp("_indexOfInterest");
  VarSymbol*   iterator      = newTemp("_iterator");
  CallExpr*    iterInit      = 0;
  CallExpr*    iterMove      = 0;
  ForLoop*     loop          = new ForLoop(index, iterator, body, zippered);
  LabelSymbol* continueLabel = new LabelSymbol("_continueLabel");
  LabelSymbol* breakLabel    = new LabelSymbol("_breakLabel");
  BlockStmt*   retval        = new BlockStmt();

  iterator->addFlag(FLAG_EXPR_TEMP);

  // Unzippered loop: treat all objects (including tuples) the same.
  if (zippered == false) {
    iterInit = new CallExpr(PRIM_MOVE, iterator,
                            new CallExpr("_getIterator", iteratorExpr));

    // Try to optimize anonymous range iteration.
    tryToReplaceWithDirectRangeIterator(iteratorExpr);

  // Zippered loop: expand the args to a tuple with an iterator per element.
  } else {
    CallExpr* zipExpr = toCallExpr(iteratorExpr);

    if (zipExpr && zipExpr->isPrimitive(PRIM_ZIP)) {
      // The PRIM_ZIP indicates this is a new-style zip() AST.
      // Expand the arguments to a tuple with appropriate iterators
      // for each value.
      //
      // Specifically, change:
      //   zip(a, b, c, ...)
      // into the tuple:
      //   (_getIterator(a), _getIterator(b), _getIterator(c), ...)
      //
      // (Ultimately, we will probably want to make this style of rewrite
      // into a utility function for the other get*Zip functions as we
      // convert parallel loops over to use PRIM_ZIP.)
      //
      zipExpr->primitive = NULL;  // remove the primitive

      // If there's just one argument...
      if (zipExpr->argList.length == 1) {
        Expr*     zipArg     = zipExpr->argList.only();
        CallExpr* zipArgCall = toCallExpr(zipArg);

        // ...and it is a tuple expansion '(...t)', then remove the tuple
        // expansion primitive and simply pass the tuple itself to
        // _getIteratorZip().  This will not require any more tuples than
        // the user introduced themselves.
        //
        if (zipArgCall && zipArgCall->isPrimitive(PRIM_TUPLE_EXPAND)) {
          zipExpr->baseExpr = new UnresolvedSymExpr("_getIteratorZip");

          Expr* tupleArg = zipArgCall->argList.only();

          tupleArg->remove();
          zipArgCall->replace(tupleArg);

        // ...otherwise, make the expression into a _getIterator() call.
        } else {
          zipExpr->baseExpr = new UnresolvedSymExpr("_getIterator");

          // Try to optimize anonymous range iteration.
          tryToReplaceWithDirectRangeIterator(zipArg);
        }

      // Otherwise, there's more than one argument, so build up the tuple
      // by applying _getIterator() to each element.
      //
      } else {
        zipExpr->baseExpr = new UnresolvedSymExpr("_build_tuple");

        Expr* arg = zipExpr->argList.first();

        while (arg) {
          Expr* next    = arg->next;
          Expr* argCopy = arg->copy();

          arg->replace(new CallExpr("_getIterator", argCopy));

          // Try to optimize anonymous range iteration.
          tryToReplaceWithDirectRangeIterator(argCopy);

          arg = next;
        }
      }

      iterInit = new CallExpr(PRIM_MOVE, iterator, zipExpr);

      assert(zipExpr == iteratorExpr);

    // This is an old-style zippered loop, so handle it in the old style.
    } else {
      iterInit = new CallExpr(PRIM_MOVE, iterator,
                              new CallExpr("_getIteratorZip", iteratorExpr));

      // Try to optimize anonymous range iteration.
      if (CallExpr* call = toCallExpr(iteratorExpr))
        if (call->isNamed("_build_tuple"))
          for_actuals(actual, call)
            tryToReplaceWithDirectRangeIterator(actual);
    }
  }

  index->addFlag(FLAG_INDEX_OF_INTEREST);

  iterMove = new CallExpr(PRIM_MOVE, index,
                          new CallExpr("iteratorIndex", iterator));

  if (indices == 0)
    indices = new UnresolvedSymExpr("chpl__elidedIdx");

  checkIndices(indices);

  destructureIndices(loop, indices, new SymExpr(index), coforall);

  if (coforall)
    index->addFlag(FLAG_COFORALL_INDEX_VAR);

  loop->mContinueLabel = continueLabel;
  loop->mBreakLabel    = breakLabel;

  loop->insertAtTail(new DefExpr(continueLabel));

  retval->insertAtTail(new DefExpr(index));
  retval->insertAtTail(new DefExpr(iterator));
  retval->insertAtTail(iterInit);
  retval->insertAtTail(new BlockStmt(iterMove, BLOCK_TYPE));
  retval->insertAtTail(loop);
  retval->insertAtTail(new DefExpr(breakLabel));
  retval->insertAtTail(new CallExpr("_freeIterator", iterator));

  return retval;
}
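// A minimal standalone sketch (hypothetical Expr type, not compiler code) of
// the save-next-then-replace pattern in the _build_tuple loop above: the
// successor must be captured before the current node is unlinked, because
// unlinking invalidates the current position.
#include <list>
#include <string>

struct Expr {
  std::string text;
  explicit Expr(const std::string& t) : text(t) {}
};

// Replace every argument 'a' in the list with '_getIterator(a)'.
void wrapArgsInGetIterator(std::list<Expr*>& args) {
  std::list<Expr*>::iterator it = args.begin();

  while (it != args.end()) {
    std::list<Expr*>::iterator next = it;

    ++next;                         // save the successor first: erase() below
                                    // invalidates 'it'

    Expr* old = *it;

    args.insert(it, new Expr("_getIterator(" + old->text + ")"));
    args.erase(it);                 // unlink the original node

    delete old;

    it = next;                      // then advance
  }
}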
BlockStmt* ParamForLoop::buildParamForLoop(VarSymbol* indexVar,
                                           Expr*      range,
                                           BlockStmt* stmts) {
  VarSymbol*   lowVar        = newParamVar();
  VarSymbol*   highVar       = newParamVar();
  VarSymbol*   strideVar     = newParamVar();

  LabelSymbol* breakLabel    = new LabelSymbol("_breakLabel");
  LabelSymbol* continueLabel = new LabelSymbol("_unused_continueLabel");

  CallExpr*    call          = toCallExpr(range);
  Expr*        low           = NULL;
  Expr*        high          = NULL;
  Expr*        stride        = NULL;

  BlockStmt*   outer         = new BlockStmt();

  if (call && call->isNamed("chpl_by")) {
    stride = call->get(2)->remove();
    call   = toCallExpr(call->get(1));
  } else {
    stride = new SymExpr(new_IntSymbol(1));
  }

  if (call && call->isNamed("chpl_build_bounded_range")) {
    // Note: after the first remove(), the former second actual
    // becomes get(1), so this pulls out the low and high bounds.
    low  = call->get(1)->remove();
    high = call->get(1)->remove();
  } else {
    USR_FATAL(range,
              "iterators for param-for-loops must be bounded literal ranges");
  }

  outer->insertAtTail(new DefExpr(indexVar, new_IntSymbol((int64_t) 0)));

  outer->insertAtTail(new DefExpr(lowVar));
  outer->insertAtTail(new CallExpr(PRIM_MOVE, lowVar,    low));

  outer->insertAtTail(new DefExpr(highVar));
  outer->insertAtTail(new CallExpr(PRIM_MOVE, highVar,   high));

  outer->insertAtTail(new DefExpr(strideVar));
  outer->insertAtTail(new CallExpr(PRIM_MOVE, strideVar, stride));

  outer->insertAtTail(new ParamForLoop(indexVar, lowVar, highVar, strideVar,
                                       continueLabel, breakLabel, stmts));

  // This continueLabel will be replaced by a per-iteration one.
  outer->insertAtTail(new DefExpr(continueLabel));
  outer->insertAtTail(new DefExpr(breakLabel));

  return buildChapelStmt(outer);
}
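// For reference, an approximate sketch of the block this builder produces for
// 'for param i in 1..8 by 2' (a hand-drawn simplification, not compiler
// output; lowVar/highVar/strideVar are the param temps created above):
//
//   {
//     def i = 0;
//     def lowVar;    move(lowVar,    1);
//     def highVar;   move(highVar,   8);
//     def strideVar; move(strideVar, 2);
//     ParamForLoop(i, lowVar, highVar, strideVar) { stmts }
//     def _unused_continueLabel;   // replaced later, per iteration
//     def _breakLabel;
//   }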
void AutoDestroyScope::variablesDestroy(Expr*                 refStmt,
                                        VarSymbol*            excludeVar,
                                        std::set<VarSymbol*>* ignored) const {
  // Handle the primary locals
  if (mLocalsHandled == false) {
    Expr*  insertBeforeStmt = refStmt;
    Expr*  noop             = NULL;
    size_t count            = mLocalsAndDefers.size();

    // If this is a simple nested block, insert after the final stmt,
    // but always insert the destruction calls in reverse declaration order.
    // Do not get tricked by sequences of unreachable code.
    if (refStmt->next == NULL) {
      if (mParent != NULL && isGotoStmt(refStmt) == false) {
        SET_LINENO(refStmt);

        // Add a PRIM_NOOP to insert before.
        noop = new CallExpr(PRIM_NOOP);

        refStmt->insertAfter(noop);

        insertBeforeStmt = noop;
      }
    }

    for (size_t i = 1; i <= count; i++) {
      BaseAST*   localOrDefer = mLocalsAndDefers[count - i];
      VarSymbol* var          = toVarSymbol(localOrDefer);
      DeferStmt* defer        = toDeferStmt(localOrDefer);

      // This code only handles VarSymbols and DeferStmts.
      // It handles both in one vector because the order
      // of interleaving matters.
      INT_ASSERT(var || defer);

      if (var != NULL &&
          var != excludeVar &&
          (ignored == NULL || ignored->count(var) == 0)) {
        if (FnSymbol* autoDestroyFn = autoDestroyMap.get(var->type)) {
          SET_LINENO(var);

          INT_ASSERT(autoDestroyFn->hasFlag(FLAG_AUTO_DESTROY_FN));

          CallExpr* autoDestroy = new CallExpr(autoDestroyFn, var);

          insertBeforeStmt->insertBefore(autoDestroy);
        }
      }

      if (defer != NULL) {
        SET_LINENO(defer);

        BlockStmt* deferBlockCopy = defer->body()->copy();

        insertBeforeStmt->insertBefore(deferBlockCopy);

        deferBlockCopy->flattenAndRemove();
      }
    }

    // Remove the PRIM_NOOP if we added one.
    if (noop != NULL)
      noop->remove();
  }

  // Handle the formal temps
  if (isReturnStmt(refStmt) == true) {
    size_t count = mFormalTemps.size();

    for (size_t i = 1; i <= count; i++) {
      VarSymbol* var = mFormalTemps[count - i];

      if (FnSymbol* autoDestroyFn = autoDestroyMap.get(var->type)) {
        SET_LINENO(var);

        refStmt->insertBefore(new CallExpr(autoDestroyFn, var));
      }
    }
  }
}
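// A minimal standalone sketch (hypothetical Cleanup type, not compiler code)
// of the ordering contract above: locals and defers live in one vector so
// their interleaving is preserved, and cleanup walks it back to front, so
// the last thing declared is the first thing destroyed.
#include <cstdio>
#include <vector>

struct Cleanup {
  bool        isDefer;  // defer block rather than an auto-destroyed variable
  const char* name;
};

// Walk the declarations in reverse, as variablesDestroy() does.
void runCleanups(const std::vector<Cleanup>& entries) {
  for (size_t i = 1; i <= entries.size(); i++) {
    const Cleanup& c = entries[entries.size() - i];

    std::printf("%s %s\n", c.isDefer ? "run defer" : "auto-destroy", c.name);
  }
}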
void CallStmt::ExpandFunction(FunctionDef *func, Fragment *fragment) {
  size_t argCount = func->GetArgCount();

  // check number of parameters
  if (argCount != fParams.size()) {
    Error(kErr_ParamCount).Raise(&fLocation);
    return;
  }

  /* statement should look like this:
   *
   *    CallStmt
   *       |
   *    InlineStmt
   *       |
   *    ScopeStmt
   *       |
   *    BlockStmt
   *     /   |   \
   *  DeclareStmt...  body of function
   */
  BlockStmt *block = new BlockStmt();

  SetBody(new InlineStmt(new ScopeStmt(block), func));

  Mapping mapping;

  for (size_t i = 0; i < argCount; i++) {
    const Expr* arg = fParams[i];
    int         var = func->GetArgVar(i);
    int         val;

    switch (func->GetArgType(i)) {
      case FunctionDef::kConstantArg:
        // argument must fold to a compile-time constant
        if (!arg->Evaluate(val)) {
          Error(kErr_ParamType, "constant").Raise(&arg->GetLoc());
          return;
        }
        mapping.Add(var, new AtomExpr(kRCX_ConstantType, val, fLocation));
        break;

      case FunctionDef::kIntegerArg:
        // copy the argument's value into a fresh virtual variable
        val = gProgram->NextVirtualVar();
        mapping.Add(var, new AtomExpr(kRCX_VariableType, val, fLocation));
        {
          DeclareStmt *ds = new DeclareStmt(func->GetArgName(i), val,
                                            fLocation, 1, false, true);
          ds->SetInitialValue(arg->Clone(0));
          block->Add(ds);
        }
        break;

      case FunctionDef::kReferenceArg:
        // pass the variable itself so the callee can modify it
        val = arg->GetLValue();
        if (val == kIllegalVar) {
          Error(kErr_ParamType, "variable").Raise(&arg->GetLoc());
          return;
        }
        mapping.Add(var, new AtomExpr(kRCX_VariableType, val, fLocation));
        break;

      case FunctionDef::kConstRefArg:
        mapping.Add(var, arg->Clone(0));
        break;

      case FunctionDef::kSensorArg:
        if (RCX_VALUE_TYPE(arg->GetStaticEA()) != kRCX_InputValueType) {
          Error(kErr_ParamType, "sensor").Raise(&arg->GetLoc());
          return;
        }
        mapping.Add(var, arg->Clone(0));
        break;

      case FunctionDef::kPointerArg:
        if (!arg->LValueIsPointer()) {
          Error(kErr_ParamType, "pointer").Raise(&arg->GetLoc());
          return;
        }
        mapping.Add(var, arg->Clone(0));
        break;

      case FunctionDef::kConstPtrArg:
        if (!arg->LValueIsPointer()) {
          Error(kErr_ParamType, "pointer").Raise(&arg->GetLoc());
          return;
        }
        val = gProgram->NextVirtualVar();
        {
          DeclareStmt *ds = new DeclareStmt(func->GetArgName(i), val,
                                            fLocation, 1, true, true);
          ds->SetInitialValue(arg->Clone(0));
          block->Add(ds);
        }
        mapping.Add(var, new AtomExpr(kRCX_VariableType, val, fLocation,
                                      true));
        break;

      default:
        Error(kErr_ParamType, "???").Raise(&fParams[i]->GetLoc());
        return;
    }
  }

  // add body of inline and then expand
  block->Add(func->GetBody()->Clone(&mapping));

  Expander e(fragment);

  Apply(GetBody(), e);
}
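// A minimal standalone sketch of the formal-to-actual mapping idea above,
// with hypothetical types: each formal's variable id maps to the expression
// that replaces it, and the body is cloned through that map. Illustration
// only; the real Mapping/Clone machinery carries full expression trees.
#include <map>
#include <string>
#include <vector>

typedef std::map<int, std::string> ArgMapping;

// Clone a "body" (here, a flat list of variable ids) while substituting
// formals through the mapping, mirroring func->GetBody()->Clone(&mapping).
std::string cloneWithMapping(const std::vector<int>& bodyVars,
                             const ArgMapping&       mapping) {
  std::string out;

  for (size_t i = 0; i < bodyVars.size(); i++) {
    ArgMapping::const_iterator it = mapping.find(bodyVars[i]);

    out += (it != mapping.end()) ? it->second : "<local>";
    out += ' ';
  }

  return out;
}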