void ExprCompiler::visit(AssignExpr& expr, int dest)
{
  // Compile the value and also make it the result of the expression.
  compile(expr.value(), dest);

  // Now assign it to the left-hand side.
  expr.lvalue()->accept(*this, dest);
}
void Resolver::visit(AssignExpr& expr, int dummy)
{
  resolve(expr.value());
  expr.lvalue()->accept(*this, dummy);
}
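// A minimal, purely illustrative sketch of what could sit on the other side
// of that accept() call.  VariableLValue, locals_, OP_MOVE, and write() are
// hypothetical names, not Magpie's actual classes or API; the point is only
// that the assigned value has already been compiled into `dest`, so the
// lvalue handler merely has to store it into the variable's slot.
void ExprCompiler::visit(VariableLValue& lvalue, int dest)
{
  // The AssignExpr visitor left the value in register `dest`.
  int slot = locals_.find(lvalue.name());

  // Copy it into the slot that backs the variable; `dest` still holds the
  // value, which is what makes assignment usable as an expression.
  write(OP_MOVE, dest, slot);
}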
/** Given an AST node, check to see if it's safe if we happen to run the
    code for that node with the execution mask all off.
 */
static bool lCheckAllOffSafety(ASTNode *node, void *data) {
    bool *okPtr = (bool *)data;

    FunctionCallExpr *fce;
    if ((fce = llvm::dyn_cast<FunctionCallExpr>(node)) != NULL) {
        if (fce->func == NULL)
            return false;

        const Type *type = fce->func->GetType();
        const PointerType *pt = CastType<PointerType>(type);
        if (pt != NULL)
            type = pt->GetBaseType();
        const FunctionType *ftype = CastType<FunctionType>(type);
        Assert(ftype != NULL);

        if (ftype->isSafe == false) {
            *okPtr = false;
            return false;
        }
    }

    if (llvm::dyn_cast<AssertStmt>(node) != NULL) {
        // While it's fine to run the assert for varying tests, it's not
        // desirable to check an assert on a uniform variable if all of the
        // lanes are off.
        *okPtr = false;
        return false;
    }

    if (llvm::dyn_cast<PrintStmt>(node) != NULL) {
        *okPtr = false;
        return false;
    }

    if (llvm::dyn_cast<NewExpr>(node) != NULL || llvm::dyn_cast<DeleteStmt>(node) != NULL) {
        // We definitely don't want to run the uniform variants of these if
        // the mask is all off.  It's also worth skipping the overhead of
        // executing the varying versions of them in the all-off mask case.
        *okPtr = false;
        return false;
    }

    if (llvm::dyn_cast<ForeachStmt>(node) != NULL || llvm::dyn_cast<ForeachActiveStmt>(node) != NULL ||
        llvm::dyn_cast<ForeachUniqueStmt>(node) != NULL || llvm::dyn_cast<UnmaskedStmt>(node) != NULL) {
        // The various foreach statements also shouldn't be run with an
        // all-off mask.  Since they can re-establish an 'all on' mask,
        // this would be pretty unintuitive.  (More generally, it's
        // possibly a little strange to allow foreach in the presence of
        // any non-uniform control flow...)
        //
        // Similarly, the implementation of foreach_unique assumes as a
        // precondition that the mask won't be all off going into it, so
        // we'll enforce that here...
        *okPtr = false;
        return false;
    }

    BinaryExpr *binaryExpr;
    if ((binaryExpr = llvm::dyn_cast<BinaryExpr>(node)) != NULL) {
        if (binaryExpr->op == BinaryExpr::Mod || binaryExpr->op == BinaryExpr::Div) {
            *okPtr = false;
            return false;
        }
    }

    IndexExpr *ie;
    if ((ie = llvm::dyn_cast<IndexExpr>(node)) != NULL && ie->baseExpr != NULL) {
        const Type *type = ie->baseExpr->GetType();
        if (type == NULL)
            return true;
        if (CastType<ReferenceType>(type) != NULL)
            type = type->GetReferenceTarget();

        ConstExpr *ce = llvm::dyn_cast<ConstExpr>(ie->index);
        if (ce == NULL) {
            // indexing with a variable... -> not safe
            *okPtr = false;
            return false;
        }

        const PointerType *pointerType = CastType<PointerType>(type);
        if (pointerType != NULL) {
            // pointer[index] -> can't be sure -> not safe
            *okPtr = false;
            return false;
        }

        const SequentialType *seqType = CastType<SequentialType>(type);
        Assert(seqType != NULL);
        int nElements = seqType->GetElementCount();
        if (nElements == 0) {
            // Unsized array, so we can't be sure -> not safe
            *okPtr = false;
            return false;
        }

        int32_t indices[ISPC_MAX_NVEC];
        int count = ce->GetValues(indices);
        for (int i = 0; i < count; ++i) {
            if (indices[i] < 0 || indices[i] >= nElements) {
                // Index is out of bounds -> not safe
                *okPtr = false;
                return false;
            }
        }

        // All indices are in-bounds
        return true;
    }

    MemberExpr *me;
    if ((me = llvm::dyn_cast<MemberExpr>(node)) != NULL && me->dereferenceExpr) {
        *okPtr = false;
        return false;
    }

    if (llvm::dyn_cast<PtrDerefExpr>(node) != NULL) {
        *okPtr = false;
        return false;
    }

    // Don't allow turning if/else to straight-line code if we assign to
    // a uniform.
    AssignExpr *ae;
    if ((ae = llvm::dyn_cast<AssignExpr>(node)) != NULL) {
        if (ae->GetType()) {
            if (ae->GetType()->IsUniformType()) {
                *okPtr = false;
                return false;
            }
        }
    }

    return true;
}
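// A hedged sketch of how a node-level predicate like lCheckAllOffSafety is
// typically driven: a pre-order walk over a statement's AST that flips a
// shared flag as soon as any unsafe node is found.  WalkAST() and the
// lSafeToRunWithMaskAllOff() wrapper are assumptions about the surrounding
// ispc code, not shown in the snippet above.
static bool lSafeToRunWithMaskAllOff(ASTNode *root) {
    bool safe = true;
    // lCheckAllOffSafety returns false to stop descending into a subtree
    // once it has already decided that the code is unsafe; otherwise the
    // walk continues and `safe` stays true.
    WalkAST(root, lCheckAllOffSafety, NULL, &safe);
    return safe;
}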