Example no. 1
ArenaCellSet*
js::gc::AllocateWholeCellSet(Arena* arena)
{
    Zone* zone = arena->zone;
    JSRuntime* rt = zone->runtimeFromMainThread();
    if (!rt->gc.nursery.isEnabled())
        return nullptr;

    AutoEnterOOMUnsafeRegion oomUnsafe;
    Nursery& nursery = rt->gc.nursery;
    void* data = nursery.allocateBuffer(zone, sizeof(ArenaCellSet));
    if (!data) {
        oomUnsafe.crash("Failed to allocate WholeCellSet");
        return nullptr;
    }

    if (nursery.freeSpace() < ArenaCellSet::NurseryFreeThresholdBytes)
        rt->gc.storeBuffer.setAboutToOverflow();

    auto cells = static_cast<ArenaCellSet*>(data);
    new (cells) ArenaCellSet(arena);
    arena->bufferedCells = cells;
    rt->gc.storeBuffer.addToWholeCellBuffer(cells);
    return cells;
}
Example no. 2
/*
 * Move an IonBuilder for which compilation has either finished, failed, or
 * been cancelled into the global finished compilation list. All off-thread
 * compilations that are started must eventually be finished.
 */
static void
FinishOffThreadIonCompile(jit::IonBuilder* builder)
{
    AutoEnterOOMUnsafeRegion oomUnsafe;
    if (!HelperThreadState().ionFinishedList().append(builder))
        oomUnsafe.crash("FinishOffThreadIonCompile");
}
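
The function above is the pattern at its most compact, and the same shape recurs in every example in this list: declare an AutoEnterOOMUnsafeRegion, attempt the fallible allocation or append, and on failure call crash() with a diagnostic string instead of returning an error. A minimal stand-alone sketch of that idiom follows; AppendOrCrash is a hypothetical helper and the Vector instantiation is an assumption chosen to keep the snippet self-contained, not code from the SpiderMonkey tree.

// Hypothetical helper illustrating the shared idiom, not SpiderMonkey source.
static void
AppendOrCrash(js::Vector<uint32_t, 0, js::SystemAllocPolicy>& vec, uint32_t value)
{
    // Inside this region, OOM is treated as fatal: crash() terminates the
    // process with the given reason string rather than unwinding.
    js::AutoEnterOOMUnsafeRegion oomUnsafe;
    if (!vec.append(value))
        oomUnsafe.crash("AppendOrCrash");
}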
Example no. 3
/* static */ size_t
ArgumentsObject::objectMovedDuringMinorGC(JSTracer* trc, JSObject* dst, JSObject* src)
{
    ArgumentsObject* ndst = &dst->as<ArgumentsObject>();
    ArgumentsObject* nsrc = &src->as<ArgumentsObject>();
    MOZ_ASSERT(ndst->data() == nsrc->data());

    Nursery& nursery = trc->runtime()->gc.nursery;

    if (!nursery.isInside(nsrc->data())) {
        nursery.removeMallocedBuffer(nsrc->data());
        return 0;
    }

    AutoEnterOOMUnsafeRegion oomUnsafe;
    uint32_t nbytes = nsrc->data()->dataBytes;
    uint8_t* data = nsrc->zone()->pod_malloc<uint8_t>(nbytes);
    if (!data)
        oomUnsafe.crash("Failed to allocate ArgumentsObject data while tenuring.");
    ndst->initFixedSlot(DATA_SLOT, PrivateValue(data));

    mozilla::PodCopy(data, reinterpret_cast<uint8_t*>(nsrc->data()), nbytes);

    ArgumentsData* dstData = ndst->data();
    dstData->deletedBits = reinterpret_cast<size_t*>(dstData->args + dstData->numArgs);

    return nbytes;
}
Example no. 4
jit::JitRuntime*
JSRuntime::createJitRuntime(JSContext* cx)
{
    // The shared stubs are created in the atoms compartment, which may be
    // accessed by other threads with an exclusive context.
    AutoLockForExclusiveAccess atomsLock(cx);

    MOZ_ASSERT(!jitRuntime_);

    jit::JitRuntime* jrt = cx->new_<jit::JitRuntime>(cx->runtime());
    if (!jrt)
        return nullptr;

    // Protect jitRuntime_ from being observed (by InterruptRunningJitCode)
    // while it is being initialized. Unfortunately, initialization depends on
    // jitRuntime_ being non-null, so we can't just wait to assign jitRuntime_.
    JitRuntime::AutoPreventBackedgePatching apbp(cx->runtime(), jrt);
    jitRuntime_ = jrt;

    AutoEnterOOMUnsafeRegion noOOM;
    if (!jitRuntime_->initialize(cx, atomsLock)) {
        // Handling OOM here is complicated: if we delete jitRuntime_ now, we
        // will destroy the ExecutableAllocator, even though there may still be
        // JitCode instances holding references to ExecutablePools.
        noOOM.crash("OOM in createJitRuntime");
    }

    return jitRuntime_;
}
Example no. 5
js::detail::MutexImpl::MutexImpl()
{
  AutoEnterOOMUnsafeRegion oom;
  platformData_ = js_new<PlatformData>();
  if (!platformData_)
    oom.crash("js::detail::MutexImpl::MutexImpl");

  pthread_mutexattr_t* attrp = nullptr;

#ifdef DEBUG
  pthread_mutexattr_t attr;

  TRY_CALL_PTHREADS(pthread_mutexattr_init(&attr),
                    "js::detail::MutexImpl::MutexImpl: pthread_mutexattr_init failed");

  TRY_CALL_PTHREADS(pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK),
                    "js::detail::MutexImpl::MutexImpl: pthread_mutexattr_settype failed");

  attrp = &attr;
#endif

  TRY_CALL_PTHREADS(pthread_mutex_init(&platformData()->ptMutex, attrp),
                    "js::detail::MutexImpl::MutexImpl: pthread_mutex_init failed");

#ifdef DEBUG
  TRY_CALL_PTHREADS(pthread_mutexattr_destroy(&attr),
                    "js::detail::MutexImpl::MutexImpl: pthread_mutexattr_destroy failed");
#endif
}
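
TRY_CALL_PTHREADS is used throughout the constructor above but is not defined in this extract. A plausible definition, shown here as an assumption for illustration rather than the verbatim macro from the tree, checks the pthreads return code and escalates any failure into an immediate crash, in the same spirit as oom.crash():

#include <cstdio>   // fprintf
#include <cstring>  // strerror

// Hypothetical sketch: pthreads functions report failure via a nonzero
// return code (not errno), so the result is checked directly and any
// failure is fatal. MOZ_CRASH comes from mozilla/Assertions.h.
#define TRY_CALL_PTHREADS(call, msg)                                \
    do {                                                            \
        int result_ = (call);                                       \
        if (result_ != 0) {                                         \
            fprintf(stderr, "%s: %s\n", msg, strerror(result_));    \
            MOZ_CRASH();                                            \
        }                                                           \
    } while (0)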
Example no. 6
/* static */ bool
ArrayBufferObject::detach(JSContext* cx, Handle<ArrayBufferObject*> buffer,
                          BufferContents newContents)
{
    if (buffer->isWasm()) {
        JS_ReportErrorNumber(cx, GetErrorMessage, nullptr, JSMSG_OUT_OF_MEMORY);
        return false;
    }

    // When detaching buffers where we don't know all views, the new data must
    // match the old data. All missing views are typed objects, which do not
    // expect their data to ever change.
    MOZ_ASSERT_IF(buffer->forInlineTypedObject(),
                  newContents.data() == buffer->dataPointer());

    // When detaching a buffer with typed object views, any jitcode accessing
    // such views must be deoptimized so that detachment checks are performed.
    // This is done by setting a compartment-wide flag indicating that buffers
    // with typed object views have been detached.
    if (buffer->hasTypedObjectViews()) {
        // Make sure the global object's group has been instantiated, so the
        // flag change will be observed.
        AutoEnterOOMUnsafeRegion oomUnsafe;
        if (!cx->global()->getGroup(cx))
            oomUnsafe.crash("ArrayBufferObject::detach");
        MarkObjectGroupFlags(cx, cx->global(), OBJECT_FLAG_TYPED_OBJECT_HAS_DETACHED_BUFFER);
        cx->compartment()->detachedTypedObjects = 1;
    }

    // Update all views of the buffer to account for the buffer having been
    // detached, and clear the buffer's data and list of views.

    if (InnerViewTable::ViewVector* views = cx->compartment()->innerViews.maybeViewsUnbarriered(buffer)) {
        for (size_t i = 0; i < views->length(); i++)
            NoteViewBufferWasDetached((*views)[i], newContents, cx);
        cx->compartment()->innerViews.removeViews(buffer);
    }
    if (buffer->firstView()) {
        if (buffer->forInlineTypedObject()) {
            // The buffer points to inline data in its first view, so to keep
            // this pointer alive we don't clear out the first view.
            MOZ_ASSERT(buffer->firstView()->is<InlineTransparentTypedObject>());
        } else {
            NoteViewBufferWasDetached(buffer->firstView(), newContents, cx);
            buffer->setFirstView(nullptr);
        }
    }

    if (newContents.data() != buffer->dataPointer())
        buffer->setNewOwnedData(cx->runtime()->defaultFreeOp(), newContents);

    buffer->setByteLength(0);
    buffer->setIsDetached();
    return true;
}
Example no. 7
void
MIRGenerator::addAbortedPreliminaryGroup(ObjectGroup* group)
{
    for (size_t i = 0; i < abortedPreliminaryGroups_.length(); i++) {
        if (group == abortedPreliminaryGroups_[i])
            return;
    }
    AutoEnterOOMUnsafeRegion oomUnsafe;
    if (!abortedPreliminaryGroups_.append(group))
        oomUnsafe.crash("addAbortedPreliminaryGroup");
}
Example no. 8
/* static */ bool
ArrayBufferObject::neuter(JSContext* cx, Handle<ArrayBufferObject*> buffer,
                          BufferContents newContents)
{
    if (buffer->isAsmJS() && !OnDetachAsmJSArrayBuffer(cx, buffer))
        return false;

    // When neutering buffers where we don't know all views, the new data must
    // match the old data. All missing views are typed objects, which do not
    // expect their data to ever change.
    MOZ_ASSERT_IF(buffer->forInlineTypedObject(),
                  newContents.data() == buffer->dataPointer());

    // When neutering a buffer with typed object views, any jitcode which
    // accesses such views needs to be deoptimized so that neuter checks are
    // performed. This is done by setting a compartment-wide flag indicating
    // that buffers with typed object views have been neutered.
    if (buffer->hasTypedObjectViews()) {
        // Make sure the global object's group has been instantiated, so the
        // flag change will be observed.
        AutoEnterOOMUnsafeRegion oomUnsafe;
        if (!cx->global()->getGroup(cx))
            oomUnsafe.crash("ArrayBufferObject::neuter");
        MarkObjectGroupFlags(cx, cx->global(), OBJECT_FLAG_TYPED_OBJECT_NEUTERED);
        cx->compartment()->neuteredTypedObjects = 1;
    }

    // Neuter all views on the buffer, clear out the list of views and the
    // buffer's data.

    if (InnerViewTable::ViewVector* views = cx->compartment()->innerViews.maybeViewsUnbarriered(buffer)) {
        for (size_t i = 0; i < views->length(); i++)
            buffer->neuterView(cx, (*views)[i], newContents);
        cx->compartment()->innerViews.removeViews(buffer);
    }
    if (buffer->firstView()) {
        if (buffer->forInlineTypedObject()) {
            // The buffer points to inline data in its first view, so to keep
            // this pointer alive we don't clear out the first view.
            MOZ_ASSERT(buffer->firstView()->is<InlineTransparentTypedObject>());
        } else {
            buffer->neuterView(cx, buffer->firstView(), newContents);
            buffer->setFirstView(nullptr);
        }
    }

    if (newContents.data() != buffer->dataPointer())
        buffer->setNewOwnedData(cx->runtime()->defaultFreeOp(), newContents);

    buffer->setByteLength(0);
    buffer->setIsNeutered();
    return true;
}
Example no. 9
void
HelperThread::handleParseWorkload()
{
    MOZ_ASSERT(HelperThreadState().isLocked());
    MOZ_ASSERT(HelperThreadState().canStartParseTask());
    MOZ_ASSERT(idle());

    currentTask.emplace(HelperThreadState().parseWorklist().popCopy());
    ParseTask* task = parseTask();
    task->cx->setHelperThread(this);

    {
        AutoUnlockHelperThreadState unlock;
        PerThreadData::AutoEnterRuntime enter(threadData.ptr(),
                                              task->exclusiveContextGlobal->runtimeFromAnyThread());
        SourceBufferHolder srcBuf(task->chars, task->length,
                                  SourceBufferHolder::NoOwnership);

        // ! WARNING WARNING WARNING !
        //
        // See comment in Parser::bindLexical about optimizing global lexical
        // bindings. If we start optimizing them, passing in task->cx's
        // global lexical scope would be incorrect!
        //
        // ! WARNING WARNING WARNING !
        ExclusiveContext* parseCx = task->cx;
        Rooted<ClonedBlockObject*> globalLexical(parseCx, &parseCx->global()->lexicalScope());
        Rooted<ScopeObject*> staticScope(parseCx, &globalLexical->staticBlock());
        task->script = frontend::CompileScript(parseCx, &task->alloc,
                                               globalLexical, staticScope, nullptr,
                                               task->options, srcBuf,
                                               /* source_ = */ nullptr,
                                               /* extraSct = */ nullptr,
                                               /* sourceObjectOut = */ &(task->sourceObject));
    }

    // The callback is invoked while we are still off the main thread.
    task->callback(task, task->callbackData);

    // FinishOffThreadScript will need to be called on the script to
    // migrate it into the correct compartment.
    {
        AutoEnterOOMUnsafeRegion oomUnsafe;
        if (!HelperThreadState().parseFinishedList().append(task))
            oomUnsafe.crash("handleParseWorkload");
    }

    currentTask.reset();

    // Notify the main thread in case it is waiting for the parse/emit to finish.
    HelperThreadState().notifyAll(GlobalHelperThreadState::CONSUMER);
}
Example no. 10
JS::AutoRelinquishZoneGroups::AutoRelinquishZoneGroups(JSContext* cx)
  : cx(cx)
{
    MOZ_ASSERT(cx == TlsContext.get());

    AutoEnterOOMUnsafeRegion oomUnsafe;
    for (ZoneGroupsIter group(cx->runtime()); !group.done(); group.next()) {
        while (group->ownerContext().context() == cx) {
            group->leave();
            if (!enterList.append(group))
                oomUnsafe.crash("AutoRelinquishZoneGroups");
        }
    }
}
Example no. 11
/* static */ js::Mutex::MutexVector&
js::Mutex::heldMutexStack()
{
  MOZ_ASSERT(js::IsInitialized());
  auto stack = HeldMutexStack.get();
  if (!stack) {
    AutoEnterOOMUnsafeRegion oomUnsafe;
    stack = js_new<MutexVector>();
    if (!stack)
      oomUnsafe.crash("js::Mutex::heldMutexStack");
    HeldMutexStack.set(stack);
  }
  return *stack;
}
Example no. 12
void
JSCompartment::setNewObjectMetadata(JSContext* cx, HandleObject obj)
{
    assertSameCompartment(cx, this, obj);

    AutoEnterOOMUnsafeRegion oomUnsafe;
    if (JSObject* metadata = allocationMetadataBuilder->build(cx, obj, oomUnsafe)) {
        assertSameCompartment(cx, metadata);
        if (!objectMetadataTable) {
            objectMetadataTable = cx->new_<ObjectWeakMap>(cx);
            if (!objectMetadataTable || !objectMetadataTable->init())
                oomUnsafe.crash("setNewObjectMetadata");
        }
        if (!objectMetadataTable->add(cx, obj, metadata))
            oomUnsafe.crash("setNewObjectMetadata");
    }
}
Example no. 13
void
JSCompartment::setNewObjectMetadata(JSContext* cx, JSObject* obj)
{
    assertSameCompartment(cx, this, obj);

    if (JSObject* metadata = objectMetadataCallback(cx, obj)) {
        AutoEnterOOMUnsafeRegion oomUnsafe;
        assertSameCompartment(cx, metadata);
        if (!objectMetadataTable) {
            objectMetadataTable = cx->new_<ObjectWeakMap>(cx);
            if (!objectMetadataTable || !objectMetadataTable->init())
                oomUnsafe.crash("setNewObjectMetadata");
        }
        if (!objectMetadataTable->add(cx, obj, metadata))
            oomUnsafe.crash("setNewObjectMetadata");
    }
}
Example no. 14
/* static */ size_t
ArgumentsObject::objectMoved(JSObject* dst, JSObject* src)
{
    ArgumentsObject* ndst = &dst->as<ArgumentsObject>();
    const ArgumentsObject* nsrc = &src->as<ArgumentsObject>();
    MOZ_ASSERT(ndst->data() == nsrc->data());

    if (!IsInsideNursery(src))
        return 0;

    Nursery& nursery = dst->zone()->group()->nursery();

    size_t nbytesTotal = 0;
    if (!nursery.isInside(nsrc->data())) {
        nursery.removeMallocedBuffer(nsrc->data());
    } else {
        AutoEnterOOMUnsafeRegion oomUnsafe;
        uint32_t nbytes = ArgumentsData::bytesRequired(nsrc->data()->numArgs);
        uint8_t* data = nsrc->zone()->pod_malloc<uint8_t>(nbytes);
        if (!data)
            oomUnsafe.crash("Failed to allocate ArgumentsObject data while tenuring.");
        ndst->initFixedSlot(DATA_SLOT, PrivateValue(data));

        mozilla::PodCopy(data, reinterpret_cast<uint8_t*>(nsrc->data()), nbytes);
        nbytesTotal += nbytes;
    }

    if (RareArgumentsData* srcRareData = nsrc->maybeRareData()) {
        if (!nursery.isInside(srcRareData)) {
            nursery.removeMallocedBuffer(srcRareData);
        } else {
            AutoEnterOOMUnsafeRegion oomUnsafe;
            uint32_t nbytes = RareArgumentsData::bytesRequired(nsrc->initialLength());
            uint8_t* dstRareData = nsrc->zone()->pod_malloc<uint8_t>(nbytes);
            if (!dstRareData)
                oomUnsafe.crash("Failed to allocate RareArgumentsData data while tenuring.");
            ndst->data()->rareData = (RareArgumentsData*)dstRareData;

            mozilla::PodCopy(dstRareData, reinterpret_cast<uint8_t*>(srcRareData), nbytes);
            nbytesTotal += nbytes;
        }
    }

    return nbytesTotal;
}
Example no. 15
void
TraceLoggerThread::startEvent(uint32_t id)
{
    MOZ_ASSERT(TLTextIdIsTreeEvent(id) || id == TraceLogger_Error);
    MOZ_ASSERT(traceLoggerState);
    if (!traceLoggerState->isTextIdEnabled(id))
       return;

#ifdef DEBUG
    if (enabled_ > 0) {
        AutoEnterOOMUnsafeRegion oomUnsafe;
        if (!graphStack.append(id))
            oomUnsafe.crash("Could not add item to debug stack.");
    }
#endif

    log(id);
}
Example no. 16
/* static */ HashNumber
MovableCellHasher<T>::hash(const Lookup& l)
{
    if (!l)
        return 0;

    // We have to access the zone from-any-thread here: a worker thread may be
    // cloning a self-hosted object from the main-thread-runtime-owned self-
    // hosting zone into the off-main-thread runtime. The zone's uid lock will
    // protect against multiple workers doing this simultaneously.
    MOZ_ASSERT(CurrentThreadCanAccessZone(l->zoneFromAnyThread()) ||
               l->zoneFromAnyThread()->isSelfHostingZone());

    HashNumber hn;
    AutoEnterOOMUnsafeRegion oomUnsafe;
    if (!l->zoneFromAnyThread()->getHashCode(l, &hn))
        oomUnsafe.crash("failed to get a stable hash code");
    return hn;
}
Example no. 17
void
js::Mutex::lock()
{
  auto& stack = heldMutexStack();
  if (!stack.empty()) {
    const Mutex& prev = *stack.back();
    if (id_.order <= prev.id_.order) {
      fprintf(stderr,
              "Attempt to acquire mutex %s with order %d while holding %s with order %d\n",
              id_.name, id_.order, prev.id_.name, prev.id_.order);
      MOZ_CRASH("Mutex ordering violation");
    }
  }

  MutexImpl::lock();

  AutoEnterOOMUnsafeRegion oomUnsafe;
  if (!stack.append(this))
    oomUnsafe.crash("js::Mutex::lock");
}
Example no. 18
void
js::EnqueuePendingParseTasksAfterGC(JSRuntime* rt)
{
    MOZ_ASSERT(!OffThreadParsingMustWaitForGC(rt));

    GlobalHelperThreadState::ParseTaskVector newTasks;
    {
        AutoLockHelperThreadState lock;
        GlobalHelperThreadState::ParseTaskVector& waiting = HelperThreadState().parseWaitingOnGC();

        for (size_t i = 0; i < waiting.length(); i++) {
            ParseTask* task = waiting[i];
            if (task->runtimeMatches(rt)) {
                AutoEnterOOMUnsafeRegion oomUnsafe;
                if (!newTasks.append(task))
                    oomUnsafe.crash("EnqueuePendingParseTasksAfterGC");
                HelperThreadState().remove(waiting, &i);
            }
        }
    }

    if (newTasks.empty())
        return;

    // This logic should mirror the contents of the !activeGCInAtomsZone()
    // branch in StartOffThreadParseScript:

    for (size_t i = 0; i < newTasks.length(); i++)
        newTasks[i]->activate(rt);

    AutoLockHelperThreadState lock;

    {
        AutoEnterOOMUnsafeRegion oomUnsafe;
        if (!HelperThreadState().parseWorklist().appendAll(newTasks))
            oomUnsafe.crash("EnqueuePendingParseTasksAfterGC");
    }

    HelperThreadState().notifyAll(GlobalHelperThreadState::PRODUCER);
}
Example no. 19
UniqueChars
LAllocation::toString() const
{
    AutoEnterOOMUnsafeRegion oomUnsafe;

    char* buf;
    if (isBogus()) {
        buf = JS_smprintf("bogus");
    } else {
        switch (kind()) {
        case LAllocation::CONSTANT_VALUE:
        case LAllocation::CONSTANT_INDEX:
            buf = JS_smprintf("c");
            break;
        case LAllocation::GPR:
            buf = JS_smprintf("%s", toGeneralReg()->reg().name());
            break;
        case LAllocation::FPU:
            buf = JS_smprintf("%s", toFloatReg()->reg().name());
            break;
        case LAllocation::STACK_SLOT:
            buf = JS_smprintf("stack:%d", toStackSlot()->slot());
            break;
        case LAllocation::ARGUMENT_SLOT:
            buf = JS_smprintf("arg:%d", toArgument()->index());
            break;
        case LAllocation::USE:
            buf = PrintUse(toUse());
            break;
        default:
            MOZ_CRASH("what?");
        }
    }

    if (!buf)
        oomUnsafe.crash("LAllocation::toString()");

    return UniqueChars(buf);
}
Example no. 20
void
GraphSpewer::spewPass(const char* pass)
{
    if (!isSpewing())
        return;

    c1Spewer_.spewPass(pass);

    jsonSpewer_.beginPass(pass);
    jsonSpewer_.spewMIR(graph_);
    jsonSpewer_.spewLIR(graph_);
    jsonSpewer_.endPass();

    ionspewer.spewPass(this);

    // As this function is used for debugging, we ignore any of the previous
    // failures and ensure there is enough ballast space, such that we do not
    // exhaust the ballast space before running the next phase.
    AutoEnterOOMUnsafeRegion oomUnsafe;
    if (!graph_->alloc().ensureBallast())
        oomUnsafe.crash("Could not ensure enough ballast space after spewing graph information.");
}
Example no. 21
UniqueChars
LDefinition::toString() const
{
    AutoEnterOOMUnsafeRegion oomUnsafe;

    char* buf;
    if (isBogusTemp()) {
        buf = JS_smprintf("bogus");
    } else {
        buf = JS_smprintf("v%u<%s>", virtualRegister(), TypeChars[type()]);
        if (buf) {
            if (policy() == LDefinition::FIXED)
                buf = JS_sprintf_append(buf, ":%s", output()->toString().get());
            else if (policy() == LDefinition::MUST_REUSE_INPUT)
                buf = JS_sprintf_append(buf, ":tied(%u)", getReusedInput());
        }
    }

    if (!buf)
        oomUnsafe.crash("LDefinition::toString()");

    return UniqueChars(buf);
}
Example no. 22
void
MBasicBlock::addPredecessorSameInputsAs(MBasicBlock* pred, MBasicBlock* existingPred)
{
    MOZ_ASSERT(pred);
    MOZ_ASSERT(predecessors_.length() > 0);

    // Predecessors must be finished, and at the correct stack depth.
    MOZ_ASSERT(pred->hasLastIns());
    MOZ_ASSERT(!pred->successorWithPhis());

    AutoEnterOOMUnsafeRegion oomUnsafe;

    if (!phisEmpty()) {
        size_t existingPosition = indexForPredecessor(existingPred);
        for (MPhiIterator iter = phisBegin(); iter != phisEnd(); iter++) {
            if (!iter->addInputSlow(iter->getOperand(existingPosition)))
                oomUnsafe.crash("MBasicBlock::addPredecessorAdjustPhis");
        }
    }

    if (!predecessors_.append(pred))
        oomUnsafe.crash("MBasicBlock::addPredecessorAdjustPhis");
}
Example no. 23
void
LoopUnroller::go(LoopIterationBound* bound)
{
    // For now we always unroll loops the same number of times.
    static const size_t UnrollCount = 10;

    JitSpew(JitSpew_Unrolling, "Attempting to unroll loop");

    header = bound->header;

    // UCE might have determined this isn't actually a loop.
    if (!header->isLoopHeader())
        return;

    backedge = header->backedge();
    oldPreheader = header->loopPredecessor();

    MOZ_ASSERT(oldPreheader->numSuccessors() == 1);

    // Only unroll loops with two blocks: an initial one ending with the
    // bound's test, and the body ending with the backedge.
    MTest* test = bound->test;
    if (header->lastIns() != test)
        return;
    if (test->ifTrue() == backedge) {
        if (test->ifFalse()->id() <= backedge->id())
            return;
    } else if (test->ifFalse() == backedge) {
        if (test->ifTrue()->id() <= backedge->id())
            return;
    } else {
        return;
    }
    if (backedge->numPredecessors() != 1 || backedge->numSuccessors() != 1)
        return;
    MOZ_ASSERT(backedge->phisEmpty());

    MBasicBlock* bodyBlocks[] = { header, backedge };

    // All instructions in the header and body must be clonable.
    for (size_t i = 0; i < ArrayLength(bodyBlocks); i++) {
        MBasicBlock* block = bodyBlocks[i];
        for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
            MInstruction* ins = *iter;
            if (ins->canClone())
                continue;
            if (ins->isTest() || ins->isGoto() || ins->isInterruptCheck())
                continue;
#ifdef JS_JITSPEW
            JitSpew(JitSpew_Unrolling, "Aborting: can't clone instruction %s", ins->opName());
#endif
            return;
        }
    }

    // Compute the linear inequality we will use for exiting the unrolled loop:
    //
    // iterationBound - iterationCount - UnrollCount >= 0
    //
    LinearSum remainingIterationsInequality(bound->boundSum);
    if (!remainingIterationsInequality.add(bound->currentSum, -1))
        return;
    if (!remainingIterationsInequality.add(-int32_t(UnrollCount)))
        return;

    // Terms in the inequality need to be either loop invariant or phis from
    // the original header.
    for (size_t i = 0; i < remainingIterationsInequality.numTerms(); i++) {
        MDefinition* def = remainingIterationsInequality.term(i).term;
        if (def->isDiscarded())
            return;
        if (def->block()->id() < header->id())
            continue;
        if (def->block() == header && def->isPhi())
            continue;
        return;
    }

    // OK, we've checked everything, now unroll the loop.

    JitSpew(JitSpew_Unrolling, "Unrolling loop");

    // The old preheader will go before the unrolled loop, and the old loop
    // will need a new empty preheader.
    const CompileInfo& info = oldPreheader->info();
    if (header->trackedPc()) {
        unrolledHeader =
            MBasicBlock::New(graph, nullptr, info,
                             oldPreheader, header->trackedSite(), MBasicBlock::LOOP_HEADER);
        unrolledBackedge =
            MBasicBlock::New(graph, nullptr, info,
                             unrolledHeader, backedge->trackedSite(), MBasicBlock::NORMAL);
        newPreheader =
            MBasicBlock::New(graph, nullptr, info,
                             unrolledHeader, oldPreheader->trackedSite(), MBasicBlock::NORMAL);
    } else {
        unrolledHeader = MBasicBlock::NewAsmJS(graph, info, oldPreheader, MBasicBlock::LOOP_HEADER);
        unrolledBackedge = MBasicBlock::NewAsmJS(graph, info, unrolledHeader, MBasicBlock::NORMAL);
        newPreheader = MBasicBlock::NewAsmJS(graph, info, unrolledHeader, MBasicBlock::NORMAL);
    }

    unrolledHeader->discardAllResumePoints();
    unrolledBackedge->discardAllResumePoints();
    newPreheader->discardAllResumePoints();

    // Insert new blocks at their RPO position, and update block ids.
    graph.insertBlockAfter(oldPreheader, unrolledHeader);
    graph.insertBlockAfter(unrolledHeader, unrolledBackedge);
    graph.insertBlockAfter(unrolledBackedge, newPreheader);
    graph.renumberBlocksAfter(oldPreheader);

    // We don't tolerate allocation failure after this point.
    // TODO: This is a bit drastic, is it possible to improve this?
    AutoEnterOOMUnsafeRegion oomUnsafe;

    if (!unrolledDefinitions.init())
        oomUnsafe.crash("LoopUnroller::go");

    // Add phis to the unrolled loop header which correspond to the phis in the
    // original loop header.
    MOZ_ASSERT(header->getPredecessor(0) == oldPreheader);
    for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
        MPhi* old = *iter;
        MOZ_ASSERT(old->numOperands() == 2);
        MPhi* phi = MPhi::New(alloc);
        phi->setResultType(old->type());
        phi->setResultTypeSet(old->resultTypeSet());
        phi->setRange(old->range());

        unrolledHeader->addPhi(phi);

        if (!phi->reserveLength(2))
            oomUnsafe.crash("LoopUnroller::go");

        // Set the first input for the phi for now. We'll set the second after
        // finishing the unroll.
        phi->addInput(old->getOperand(0));

        // The old phi will now take the value produced by the unrolled loop.
        old->replaceOperand(0, phi);

        if (!unrolledDefinitions.putNew(old, phi))
            oomUnsafe.crash("LoopUnroller::go");
    }

    // The loop condition can bail out on e.g. integer overflow, so make a
    // resume point based on the initial resume point of the original header.
    MResumePoint* headerResumePoint = header->entryResumePoint();
    if (headerResumePoint) {
        MResumePoint* rp = makeReplacementResumePoint(unrolledHeader, headerResumePoint);
        if (!rp)
            oomUnsafe.crash("LoopUnroller::makeReplacementResumePoint");
        unrolledHeader->setEntryResumePoint(rp);

        // Perform an interrupt check at the start of the unrolled loop.
        unrolledHeader->add(MInterruptCheck::New(alloc));
    }

    // Generate code for the test in the unrolled loop.
    for (size_t i = 0; i < remainingIterationsInequality.numTerms(); i++) {
        MDefinition* def = remainingIterationsInequality.term(i).term;
        MDefinition* replacement = getReplacementDefinition(def);
        remainingIterationsInequality.replaceTerm(i, replacement);
    }
    MCompare* compare = ConvertLinearInequality(alloc, unrolledHeader, remainingIterationsInequality);
    MTest* unrolledTest = MTest::New(alloc, compare, unrolledBackedge, newPreheader);
    unrolledHeader->end(unrolledTest);

    // Make an entry resume point for the unrolled body. The unrolled header
    // does not have side effects on stack values, even if the original loop
    // header does, so use the same resume point as for the unrolled header.
    if (headerResumePoint) {
        MResumePoint* rp = makeReplacementResumePoint(unrolledBackedge, headerResumePoint);
        if (!rp)
            oomUnsafe.crash("LoopUnroller::makeReplacementResumePoint");
        unrolledBackedge->setEntryResumePoint(rp);
    }

    // Make an entry resume point for the new preheader. There are no
    // instructions which use this but some other stuff wants one to be here.
    if (headerResumePoint) {
        MResumePoint* rp = makeReplacementResumePoint(newPreheader, headerResumePoint);
        if (!rp)
            oomUnsafe.crash("LoopUnroller::makeReplacementResumePoint");
        newPreheader->setEntryResumePoint(rp);
    }

    // Generate the unrolled code.
    MOZ_ASSERT(UnrollCount > 1);
    size_t unrollIndex = 0;
    while (true) {
        // Clone the contents of the original loop into the unrolled loop body.
        for (size_t i = 0; i < ArrayLength(bodyBlocks); i++) {
            MBasicBlock* block = bodyBlocks[i];
            for (MInstructionIterator iter(block->begin()); iter != block->end(); iter++) {
                MInstruction* ins = *iter;
                if (ins->canClone()) {
                    if (!makeReplacementInstruction(*iter))
                        oomUnsafe.crash("LoopUnroller::makeReplacementDefinition");
                } else {
                    // Control instructions are handled separately.
                    MOZ_ASSERT(ins->isTest() || ins->isGoto() || ins->isInterruptCheck());
                }
            }
        }

        // Compute the value of each loop header phi after the execution of
        // this unrolled iteration.
        MDefinitionVector phiValues(alloc);
        MOZ_ASSERT(header->getPredecessor(1) == backedge);
        for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
            MPhi* old = *iter;
            MDefinition* oldInput = old->getOperand(1);
            if (!phiValues.append(getReplacementDefinition(oldInput)))
                oomUnsafe.crash("LoopUnroller::go");
        }

        unrolledDefinitions.clear();

        if (unrollIndex == UnrollCount - 1) {
            // We're at the end of the last unrolled iteration, set the
            // backedge input for the unrolled loop phis.
            size_t phiIndex = 0;
            for (MPhiIterator iter(unrolledHeader->phisBegin()); iter != unrolledHeader->phisEnd(); iter++) {
                MPhi* phi = *iter;
                phi->addInput(phiValues[phiIndex++]);
            }
            MOZ_ASSERT(phiIndex == phiValues.length());
            break;
        }

        // Update the map for the phis in the next iteration.
        size_t phiIndex = 0;
        for (MPhiIterator iter(header->phisBegin()); iter != header->phisEnd(); iter++) {
            MPhi* old = *iter;
            if (!unrolledDefinitions.putNew(old, phiValues[phiIndex++]))
                oomUnsafe.crash("LoopUnroller::go");
        }
        MOZ_ASSERT(phiIndex == phiValues.length());

        unrollIndex++;
    }

    MGoto* backedgeJump = MGoto::New(alloc, unrolledHeader);
    unrolledBackedge->end(backedgeJump);

    // Place the old preheader before the unrolled loop.
    MOZ_ASSERT(oldPreheader->lastIns()->isGoto());
    oldPreheader->discardLastIns();
    oldPreheader->end(MGoto::New(alloc, unrolledHeader));

    // Place the new preheader before the original loop.
    newPreheader->end(MGoto::New(alloc, header));

    // Cleanup the MIR graph.
    if (!unrolledHeader->addPredecessorWithoutPhis(unrolledBackedge))
        oomUnsafe.crash("LoopUnroller::go");
    header->replacePredecessor(oldPreheader, newPreheader);
    oldPreheader->setSuccessorWithPhis(unrolledHeader, 0);
    newPreheader->setSuccessorWithPhis(header, 0);
    unrolledBackedge->setSuccessorWithPhis(unrolledHeader, 1);
}
Example no. 24
uint32_t
jit::ExceptionHandlerBailout(JSContext* cx, const InlineFrameIterator& frame,
                             ResumeFromException* rfe,
                             const ExceptionBailoutInfo& excInfo,
                             bool* overrecursed)
{
    // We can be propagating debug mode exceptions without there being an
    // actual exception pending. For instance, when we return false from an
    // operation callback like a timeout handler.
    MOZ_ASSERT_IF(!excInfo.propagatingIonExceptionForDebugMode(), cx->isExceptionPending());

    cx->runtime()->jitTop = FAKE_JIT_TOP_FOR_BAILOUT;
    gc::AutoSuppressGC suppress(cx);

    JitActivationIterator jitActivations(cx->runtime());
    BailoutFrameInfo bailoutData(jitActivations, frame.frame());
    JitFrameIterator iter(jitActivations);
    CommonFrameLayout* currentFramePtr = iter.current();

    BaselineBailoutInfo* bailoutInfo = nullptr;
    uint32_t retval;

    {
        // Currently we do not tolerate OOM here so as not to complicate the
        // exception handling code further.
        AutoEnterOOMUnsafeRegion oomUnsafe;

        retval = BailoutIonToBaseline(cx, bailoutData.activation(), iter, true,
                                      &bailoutInfo, &excInfo);
        if (retval == BAILOUT_RETURN_FATAL_ERROR && cx->isThrowingOutOfMemory())
            oomUnsafe.crash("ExceptionHandlerBailout");
    }

    if (retval == BAILOUT_RETURN_OK) {
        MOZ_ASSERT(bailoutInfo);

        // Overwrite the kind so HandleException after the bailout returns
        // false, jumping directly to the exception tail.
        if (excInfo.propagatingIonExceptionForDebugMode())
            bailoutInfo->bailoutKind = Bailout_IonExceptionDebugMode;

        rfe->kind = ResumeFromException::RESUME_BAILOUT;
        rfe->target = cx->runtime()->jitRuntime()->getBailoutTail()->raw();
        rfe->bailoutInfo = bailoutInfo;
    } else {
        // Bailout failed. If the overrecursion check failed, clear the
        // exception to turn this into an uncatchable error, continue popping
        // all inline frames and have the caller report the error.
        MOZ_ASSERT(!bailoutInfo);

        if (retval == BAILOUT_RETURN_OVERRECURSED) {
            *overrecursed = true;
            if (!excInfo.propagatingIonExceptionForDebugMode())
                cx->clearPendingException();
        } else {
            MOZ_ASSERT(retval == BAILOUT_RETURN_FATAL_ERROR);

            // Crash for now so as not to complicate the exception handling code
            // further.
            MOZ_CRASH();
        }
    }

    // Make the frame being bailed out the top profiled frame.
    if (cx->runtime()->jitRuntime()->isProfilerInstrumentationEnabled(cx->runtime()))
        cx->runtime()->jitActivation->setLastProfilingFrame(currentFramePtr);

    return retval;
}
Example no. 25
// Given a cross-compartment wrapper |wobj|, update it to point to
// |newTarget|. This recomputes the wrapper with JS_WrapValue, and thus can be
// useful even if the wrapper already points to newTarget.
// This operation crashes on failure rather than leaving the heap in an
// inconsistent state.
void js::RemapWrapper(JSContext* cx, JSObject* wobjArg,
                      JSObject* newTargetArg) {
  MOZ_ASSERT(!IsInsideNursery(wobjArg));
  MOZ_ASSERT(!IsInsideNursery(newTargetArg));

  RootedObject wobj(cx, wobjArg);
  RootedObject newTarget(cx, newTargetArg);
  MOZ_ASSERT(wobj->is<CrossCompartmentWrapperObject>());
  MOZ_ASSERT(!newTarget->is<CrossCompartmentWrapperObject>());
  JSObject* origTarget = Wrapper::wrappedObject(wobj);
  MOZ_ASSERT(origTarget);
  MOZ_ASSERT(!JS_IsDeadWrapper(origTarget),
             "We don't want a dead proxy in the wrapper map");
  Value origv = ObjectValue(*origTarget);
  JS::Compartment* wcompartment = wobj->compartment();
  MOZ_ASSERT(wcompartment != newTarget->compartment());

  AutoDisableProxyCheck adpc;

  // If we're mapping to a different target (as opposed to just recomputing
  // for the same target), we must not have an existing wrapper for the new
  // target, otherwise this will break.
  MOZ_ASSERT_IF(origTarget != newTarget,
                !wcompartment->lookupWrapper(ObjectValue(*newTarget)));

  // The old value should still be in the cross-compartment wrapper map, and
  // the lookup should return wobj.
  WrapperMap::Ptr p = wcompartment->lookupWrapper(origv);
  MOZ_ASSERT(&p->value().unsafeGet()->toObject() == wobj);
  wcompartment->removeWrapper(p);

  // When we remove origv from the wrapper map, its wrapper, wobj, must
  // immediately cease to be a cross-compartment wrapper. Nuke it.
  NukeCrossCompartmentWrapper(cx, wobj);

  // wobj is no longer a cross-compartment wrapper after nuking it, so we can
  // now use nonCCWRealm.
  Realm* wrealm = wobj->nonCCWRealm();

  // First, we wrap it in the new compartment. We try to use the existing
  // wrapper, |wobj|, since it's been nuked anyway. The wrap() function has
  // the choice to reuse |wobj| or not.
  RootedObject tobj(cx, newTarget);
  AutoRealmUnchecked ar(cx, wrealm);
  AutoEnterOOMUnsafeRegion oomUnsafe;
  if (!wcompartment->rewrap(cx, &tobj, wobj)) {
    oomUnsafe.crash("js::RemapWrapper");
  }

  // If wrap() reused |wobj|, it will have overwritten it and returned with
  // |tobj == wobj|. Otherwise, |tobj| will point to a new wrapper and |wobj|
  // will still be nuked. In the latter case, we replace |wobj| with the
  // contents of the new wrapper in |tobj|.
  if (tobj != wobj) {
    // Now, because we need to maintain object identity, we do a brain
    // transplant on the old object so that it contains the contents of the
    // new one.
    JSObject::swap(cx, wobj, tobj);
  }

  // Before swapping, this wrapper came out of wrap(), which enforces the
  // invariant that the wrapper in the map points directly to the key.
  MOZ_ASSERT(Wrapper::wrappedObject(wobj) == newTarget);

  // Update the entry in the compartment's wrapper map to point to the old
  // wrapper, which has now been updated (via reuse or swap).
  MOZ_ASSERT(wobj->is<WrapperObject>());
  if (!wcompartment->putWrapper(cx, CrossCompartmentKey(newTarget),
                                ObjectValue(*wobj))) {
    oomUnsafe.crash("js::RemapWrapper");
  }
}