Example #1
void
ion::AssertGraphCoherency(MIRGraph &graph)
{
#ifdef DEBUG
    // Assert successor and predecessor list coherency.
    uint32_t count = 0;
    for (MBasicBlockIterator block(graph.begin()); block != graph.end(); block++) {
        count++;

        for (size_t i = 0; i < block->numSuccessors(); i++)
            JS_ASSERT(CheckSuccessorImpliesPredecessor(*block, block->getSuccessor(i)));

        for (size_t i = 0; i < block->numPredecessors(); i++)
            JS_ASSERT(CheckPredecessorImpliesSuccessor(*block, block->getPredecessor(i)));

        for (MInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            for (uint32_t i = 0; i < ins->numOperands(); i++)
                JS_ASSERT(CheckMarkedAsUse(*ins, ins->getOperand(i)));
        }
    }

    JS_ASSERT(graph.numBlocks() == count);

    AssertReversePostOrder(graph);
#endif
}
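The helpers referenced above (CheckSuccessorImpliesPredecessor, CheckPredecessorImpliesSuccessor) are not shown in this example. A minimal, self-contained sketch of the property they verify, using assumed generic types rather than the MIR classes, looks like this:

#include <algorithm>
#include <cassert>
#include <vector>

struct Block {
    std::vector<Block*> successors;
    std::vector<Block*> predecessors;
};

bool
Contains(const std::vector<Block*>& edges, const Block* target)
{
    return std::find(edges.begin(), edges.end(), target) != edges.end();
}

// Every successor edge must be mirrored by a predecessor edge, and vice versa.
void
AssertCfgCoherency(const std::vector<Block*>& blocks)
{
    for (Block* block : blocks) {
        for (Block* succ : block->successors)
            assert(Contains(succ->predecessors, block));
        for (Block* pred : block->predecessors)
            assert(Contains(pred->successors, block));
    }
}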
Example #2
bool
MIRGenerator::usesSimd()
{
    if (usesSimdCached_)
        return usesSimd_;

    usesSimdCached_ = true;
    for (ReversePostorderIterator block = graph_->rpoBegin(),
                                  end   = graph_->rpoEnd();
         block != end;
         block++)
    {
        // It's fine to use MInstructionIterator here because we don't have to
        // worry about Phis, since any reachable phi (or phi cycle) will have at
        // least one instruction as an input.
        for (MInstructionIterator inst = block->begin(); inst != block->end(); inst++) {
            // Instructions that have SIMD inputs but not a SIMD type are fine
            // to ignore, as their inputs are also reached at some point. By
            // induction, at least one instruction with a SIMD type is reached
            // at some point.
            if (IsSimdType(inst->type())) {
                MOZ_ASSERT(SupportsSimd);
                usesSimd_ = true;
                return true;
            }
        }
    }
    usesSimd_ = false;
    return false;
}
Example #3
MInstructionIterator
MBasicBlock::discardAt(MInstructionIterator &iter)
{
    for (size_t i = 0; i < iter->numOperands(); i++)
        iter->replaceOperand(i, NULL);

    return instructions_.removeAt(iter);
}
Example #4
MInstructionIterator
MBasicBlock::discardAt(MInstructionIterator &iter)
{
    AssertSafelyDiscardable(*iter);
    for (size_t i = 0; i < iter->numOperands(); i++)
        iter->discardOperand(i);

    return instructions_.removeAt(iter);
}
Example #5
void
MBasicBlock::discardAllInstructionsStartingAt(MInstructionIterator &iter)
{
    while (iter != end()) {
        for (size_t i = 0, e = iter->numOperands(); i < e; i++)
            iter->discardOperand(i);
        iter = instructions_.removeAt(iter);
    }
}
Example #6
void
MBasicBlock::discardAllInstructions()
{
    for (MInstructionIterator iter = begin(); iter != end(); ) {
        for (size_t i = 0, e = iter->numOperands(); i < e; i++)
            iter->discardOperand(i);
        iter = instructions_.removeAt(iter);
    }
    lastIns_ = NULL;
}
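Examples #3 through #6 all depend on the container's removeAt() returning an iterator to the element after the one removed, so the loop advances through the returned iterator rather than incrementing a now-invalid one. The same shape appears with standard containers; a minimal sketch (std::list stands in for the instruction list, which is an assumption, not the actual MIR container):

#include <list>

void
DiscardAll(std::list<int>& instructions)
{
    for (auto iter = instructions.begin(); iter != instructions.end(); ) {
        // Release whatever *iter holds here, then continue from the
        // iterator returned by erase(); the old iterator is invalid.
        iter = instructions.erase(iter);
    }
}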
Example #7
// This analysis converts patterns of the form:
//   truncate(x + (y << {0,1,2,3}))
//   truncate(x + (y << {0,1,2,3}) + imm32)
// into a single lea instruction, and patterns of the form:
//   asmload(x + imm32)
//   asmload(x << {0,1,2,3})
//   asmload((x << {0,1,2,3}) + imm32)
//   asmload((x << {0,1,2,3}) & mask)            (where mask is redundant with shift)
//   asmload(((x << {0,1,2,3}) + imm32) & mask)  (where mask is redundant with shift + imm32)
// into a single asmload instruction (and for asmstore too).
//
// Additionally, we should consider the general forms:
//   truncate(x + y + imm32)
//   truncate((y << {0,1,2,3}) + imm32)
bool
EffectiveAddressAnalysis::analyze()
{
    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
        for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
            if (i->isLsh())
                AnalyzeLsh(graph_.alloc(), i->toLsh());
        }
    }
    return true;
}
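The shift amounts {0,1,2,3} in the patterns above correspond to the hardware scale factors {1,2,4,8} of an x86-style effective address (base + index*scale + displacement), which is why only those shifts can fold into a single lea. A standalone illustration with assumed names, not jit code:

#include <cstddef>

// base + (index << shift) + disp matches a single addressing mode when
// shift is 0..3, i.e. scale 1, 2, 4 or 8.
char*
ComputeEffectiveAddress(char* base, ptrdiff_t index, unsigned shift, int disp)
{
    return base + (index << shift) + disp;
}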
Example #8
HashNumber
MBasicBlock::BackupPoint::computeInstructionsCheckSum(MBasicBlock* block)
{
    HashNumber h = 0;
    MOZ_ASSERT_IF(lastIns_, lastIns_->block() == block);
    for (MInstructionIterator ins = block->begin(); ins != block->end(); ++ins) {
        h += ins->valueHash();
        h += h << 10;
        h ^= h >> 6;
    }
    return h;
}
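The per-instruction mixing above (add the value, then h += h << 10 and h ^= h >> 6) is the accumulation step of a Jenkins one-at-a-time style hash. A standalone sketch of the same mixing over arbitrary 32-bit inputs, illustrative rather than the SpiderMonkey HashNumber machinery:

#include <cstddef>
#include <cstdint>

uint32_t
MixHashes(const uint32_t* values, size_t count)
{
    uint32_t h = 0;
    for (size_t i = 0; i < count; i++) {
        h += values[i];     // fold in the next value
        h += h << 10;       // diffuse low bits upward
        h ^= h >> 6;        // diffuse high bits downward
    }
    return h;
}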
Example #9
bool
AlignmentMaskAnalysis::analyze()
{
    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
        for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
            if (!graph_.alloc().ensureBallast())
                return false;

            // Note that we don't check for MAsmJSCompareExchangeHeap
            // or MAsmJSAtomicBinopHeap, because the backend and the OOB
            // mechanism don't support non-zero offsets for them yet.
            if (i->isAsmJSLoadHeap())
                AnalyzeAsmHeapAddress(i->toAsmJSLoadHeap()->base(), graph_);
            else if (i->isAsmJSStoreHeap())
                AnalyzeAsmHeapAddress(i->toAsmJSStoreHeap()->base(), graph_);
        }
    }
    return true;
}
Example #10
MInstruction*
MBasicBlock::safeInsertTop(MDefinition* ins, IgnoreTop ignore)
{
    // Beta nodes and interrupt checks are required to be located at the
    // beginnings of basic blocks, so we must insert new instructions after any
    // such instructions.
    MInstructionIterator insertIter = !ins || ins->isPhi()
                                    ? begin()
                                    : begin(ins->toInstruction());
    while (insertIter->isBeta() ||
           insertIter->isInterruptCheck() ||
           insertIter->isConstant() ||
           (!(ignore & IgnoreRecover) && insertIter->isRecoveredOnBailout()))
    {
        insertIter++;
    }

    return *insertIter;
}
Example #11
bool
FoldLinearArithConstants(MIRGenerator* mir, MIRGraph& graph)
{
    for (PostorderIterator block(graph.poBegin()); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Fold Linear Arithmetic Constants (main loop)"))
            return false;

        for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
            if (!graph.alloc().ensureBallast())
                return false;

            if (mir->shouldCancel("Fold Linear Arithmetic Constants (inner loop)"))
                return false;

            if (i->isAdd())
                AnalyzeAdd(graph.alloc(), i->toAdd());
        }
    }
    return true;
}
Example #12
// This analysis converts patterns of the form:
//   truncate(x + (y << {0,1,2,3}))
//   truncate(x + (y << {0,1,2,3}) + imm32)
// into a single lea instruction, and patterns of the form:
//   asmload(x + imm32)
//   asmload(x << {0,1,2,3})
//   asmload((x << {0,1,2,3}) + imm32)
//   asmload((x << {0,1,2,3}) & mask)            (where mask is redundant with shift)
//   asmload(((x << {0,1,2,3}) + imm32) & mask)  (where mask is redundant with shift + imm32)
// into a single asmload instruction (and for asmstore too).
//
// Additionally, we should consider the general forms:
//   truncate(x + y + imm32)
//   truncate((y << {0,1,2,3}) + imm32)
bool
EffectiveAddressAnalysis::analyze()
{
    for (ReversePostorderIterator block(graph_.rpoBegin()); block != graph_.rpoEnd(); block++) {
        for (MInstructionIterator i = block->begin(); i != block->end(); i++) {
            if (!graph_.alloc().ensureBallast())
                return false;

            // Note that we don't check for MAsmJSCompareExchangeHeap
            // or MAsmJSAtomicBinopHeap, because the backend and the OOB
            // mechanism don't support non-zero offsets for them yet
            // (TODO bug 1254935).
            if (i->isLsh())
                AnalyzeLsh(graph_.alloc(), i->toLsh());
            else if (i->isLoadUnboxedScalar())
                AnalyzeLoadUnboxedScalar(graph_.alloc(), i->toLoadUnboxedScalar());
            else if (i->isAsmJSLoadHeap())
                analyzeAsmJSHeapAccess(i->toAsmJSLoadHeap());
            else if (i->isAsmJSStoreHeap())
                analyzeAsmJSHeapAccess(i->toAsmJSStoreHeap());
        }
    }
    return true;
}
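One way the "(where mask is redundant with shift)" cases in the comment above arise: the shift already guarantees the low bits are zero, so an AND that only clears those bits cannot change the value and can be folded away. A small illustration, not jit code:

#include <cassert>
#include <cstdint>

void
MaskIsRedundantWithShift(uint32_t x)
{
    // (x << 2) is always a multiple of 4, so masking off the low two bits
    // is a no-op.
    assert(((x << 2) & ~3u) == (x << 2));
}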
Example #13
MInstruction*
MBasicBlock::safeInsertTop(MDefinition* ins, IgnoreTop ignore)
{
    MOZ_ASSERT(graph().osrBlock() != this,
               "We are not supposed to add any instruction in OSR blocks.");

    // Beta nodes and interrupt checks are required to be located at the
    // beginnings of basic blocks, so we must insert new instructions after any
    // such instructions.
    MInstructionIterator insertIter = !ins || ins->isPhi()
                                    ? begin()
                                    : begin(ins->toInstruction());
    while (insertIter->isBeta() ||
           insertIter->isInterruptCheck() ||
           insertIter->isConstant() ||
           insertIter->isParameter() ||
           (!(ignore & IgnoreRecover) && insertIter->isRecoveredOnBailout()))
    {
        insertIter++;
    }

    return *insertIter;
}
Example #14
bool
jit::ReorderInstructions(MIRGraph& graph)
{
    // Renumber all instructions in the graph as we go.
    size_t nextId = 0;

    // List of the headers of any loops we are in.
    Vector<MBasicBlock*, 4, SystemAllocPolicy> loopHeaders;

    for (ReversePostorderIterator block(graph.rpoBegin()); block != graph.rpoEnd(); block++) {
        // Renumber all definitions inside the basic blocks.
        for (MPhiIterator iter(block->phisBegin()); iter != block->phisEnd(); iter++)
            iter->setId(nextId++);

        for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++)
            iter->setId(nextId++);

        // Don't reorder instructions within entry blocks, which have special requirements.
        if (*block == graph.entryBlock() || *block == graph.osrBlock())
            continue;

        if (block->isLoopHeader()) {
            if (!loopHeaders.append(*block))
                return false;
        }

        MBasicBlock* innerLoop = loopHeaders.empty() ? nullptr : loopHeaders.back();

        MInstruction* top = block->safeInsertTop();
        MInstructionReverseIterator rtop = ++block->rbegin(top);
        for (MInstructionIterator iter(block->begin(top)); iter != block->end(); ) {
            MInstruction* ins = *iter;

            // Filter out some instructions which are never reordered.
            if (ins->isEffectful() ||
                !ins->isMovable() ||
                ins->resumePoint() ||
                ins == block->lastIns())
            {
                iter++;
                continue;
            }

            // Move constants with a single use in the current block to the
            // start of the block. Constants won't be reordered by the logic
            // below, as they have no inputs. Moving them up as high as
            // possible can allow their use to be moved up further, though,
            // and has no cost if the constant is emitted at its use.
            if (ins->isConstant() &&
                ins->hasOneUse() &&
                ins->usesBegin()->consumer()->block() == *block &&
                !IsFloatingPointType(ins->type()))
            {
                iter++;
                MInstructionIterator targetIter = block->begin();
                while (targetIter->isConstant() || targetIter->isInterruptCheck()) {
                    if (*targetIter == ins)
                        break;
                    targetIter++;
                }
                MoveBefore(*block, *targetIter, ins);
                continue;
            }

            // Look for inputs where this instruction is the last use of that
            // input. If we move this instruction up, the input's lifetime will
            // be shortened, modulo resume point uses (which don't need to be
            // stored in a register, and can be handled by the register
            // allocator by just spilling at some point with no reload).
            Vector<MDefinition*, 4, SystemAllocPolicy> lastUsedInputs;
            for (size_t i = 0; i < ins->numOperands(); i++) {
                MDefinition* input = ins->getOperand(i);
                if (!input->isConstant() && IsLastUse(ins, input, innerLoop)) {
                    if (!lastUsedInputs.append(input))
                        return false;
                }
            }

            // Don't try to move instructions which aren't the last use of any
            // of their inputs (we really ought to move these down instead).
            if (lastUsedInputs.length() < 2) {
                iter++;
                continue;
            }

            MInstruction* target = ins;
            for (MInstructionReverseIterator riter = ++block->rbegin(ins); riter != rtop; riter++) {
                MInstruction* prev = *riter;
                if (prev->isInterruptCheck())
                    break;

                // The instruction can't be moved before any of its uses.
                bool isUse = false;
                for (size_t i = 0; i < ins->numOperands(); i++) {
                    if (ins->getOperand(i) == prev) {
                        isUse = true;
                        break;
                    }
                }
                if (isUse)
                    break;

                // The instruction can't be moved before an instruction that
                // stores to a location read by the instruction.
                if (prev->isEffectful() &&
                    (ins->getAliasSet().flags() & prev->getAliasSet().flags()) &&
                    ins->mightAlias(prev) != MDefinition::AliasType::NoAlias)
                {
                    break;
                }

                // Make sure the instruction will still be the last use of one
                // of its inputs when moved up this far.
                for (size_t i = 0; i < lastUsedInputs.length(); ) {
                    bool found = false;
                    for (size_t j = 0; j < prev->numOperands(); j++) {
                        if (prev->getOperand(j) == lastUsedInputs[i]) {
                            found = true;
                            break;
                        }
                    }
                    if (found) {
                        lastUsedInputs[i] = lastUsedInputs.back();
                        lastUsedInputs.popBack();
                    } else {
                        i++;
                    }
                }
                if (lastUsedInputs.length() < 2)
                    break;

                // We can move the instruction before this one.
                target = prev;
            }

            iter++;
            MoveBefore(*block, target, ins);
        }

        if (block->isLoopBackedge())
            loopHeaders.popBack();
    }

    return true;
}
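One detail worth calling out in the code above: lastUsedInputs is pruned with the unordered "overwrite with back, then pop" idiom, which removes an element in O(1) at the cost of element order. A standalone sketch of that idiom:

#include <cstddef>
#include <vector>

void
RemoveUnordered(std::vector<int>& items, size_t index)
{
    // Overwrite the removed slot with the last element, then drop the
    // duplicate at the end. Element order is not preserved.
    items[index] = items.back();
    items.pop_back();
}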
Example #15
// Operands to a resume point which are dead at the point of the resume can be
// replaced with undefined values. This analysis supports limited detection of
// dead operands, pruning those which are defined in the resume point's basic
// block and have no uses outside the block or at points later than the resume
// point.
//
// This is intended to ensure that extra resume points within a basic block
// will not artificially extend the lifetimes of any SSA values. This could
// otherwise occur if the new resume point captured a value which is created
// between the old and new resume point and is dead at the new resume point.
bool
ion::EliminateDeadResumePointOperands(MIRGenerator *mir, MIRGraph &graph)
{
    for (PostorderIterator block = graph.poBegin(); block != graph.poEnd(); block++) {
        if (mir->shouldCancel("Eliminate Dead Resume Point Operands (main loop)"))
            return false;

        // The logic below can get confused on infinite loops.
        if (block->isLoopHeader() && block->backedge() == *block)
            continue;

        for (MInstructionIterator ins = block->begin(); ins != block->end(); ins++) {
            // No benefit to replacing constant operands with other constants.
            if (ins->isConstant())
                continue;

            // Scanning uses does not give us sufficient information to tell
            // where instructions that are involved in box/unbox operations or
            // parameter passing might be live. Rewriting uses of these terms
            // in resume points may affect the interpreter's behavior. Rather
            // than doing a more sophisticated analysis, just ignore these.
            if (ins->isUnbox() || ins->isParameter())
                continue;

            // If the instruction's behavior has been constant folded into a
            // separate instruction, we can't determine precisely where the
            // instruction becomes dead and can't eliminate its uses.
            if (ins->isFolded())
                continue;

            // Check if this instruction's result is only used within the
            // current block, and keep track of its last use in a definition
            // (not resume point). This requires the instructions in the block
            // to be numbered, ensured by running this immediately after alias
            // analysis.
            uint32_t maxDefinition = 0;
            for (MUseDefIterator uses(*ins); uses; uses++) {
                if (uses.def()->block() != *block ||
                    uses.def()->isBox() ||
                    uses.def()->isPassArg() ||
                    uses.def()->isPhi())
                {
                    maxDefinition = UINT32_MAX;
                    break;
                }
                maxDefinition = Max(maxDefinition, uses.def()->id());
            }
            if (maxDefinition == UINT32_MAX)
                continue;

            // Walk the uses a second time, removing any in resume points after
            // the last use in a definition.
            for (MUseIterator uses(ins->usesBegin()); uses != ins->usesEnd(); ) {
                if (uses->node()->isDefinition()) {
                    uses++;
                    continue;
                }
                MResumePoint *mrp = uses->node()->toResumePoint();
                if (mrp->block() != *block ||
                    !mrp->instruction() ||
                    mrp->instruction() == *ins ||
                    mrp->instruction()->id() <= maxDefinition)
                {
                    uses++;
                    continue;
                }

                // Store an undefined value in place of all dead resume point
                // operands. Making any such substitution can in general alter
                // the interpreter's behavior, even though the code is dead, as
                // the interpreter will still execute opcodes whose effects
                // cannot be observed. If the undefined value were to flow to,
                // say, a dead property access the interpreter could throw an
                // exception; we avoid this problem by removing dead operands
                // before removing dead code.
                MConstant *constant = MConstant::New(UndefinedValue());
                block->insertBefore(*(block->begin()), constant);
                uses = mrp->replaceOperand(uses, constant);
            }
        }
    }

    return true;
}
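A minimal sketch of the core bookkeeping above, under assumed generic types rather than the MIR classes: record the largest instruction id among real (definition) uses, and treat any resume-point use positioned after that id as dead.

#include <algorithm>
#include <cstdint>
#include <vector>

struct Use {
    bool isResumePointUse;  // true when the use only captures bailout state
    uint32_t userId;        // block-local position of the using node
};

// Returns the id of the last use in a real definition; resume-point uses
// with a larger id occur after the value is otherwise dead.
uint32_t
LastRealUseId(const std::vector<Use>& uses)
{
    uint32_t maxId = 0;
    for (const Use& use : uses) {
        if (!use.isResumePointUse)
            maxId = std::max(maxId, use.userId);
    }
    return maxId;
}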