Example #1
const IDBTransactionInfo& IDBResultData::transactionInfo() const
{
    RELEASE_ASSERT(m_transactionInfo);
    return *m_transactionInfo;
}
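RELEASE_ASSERT differs from a plain assert() in that it is not compiled out of release builds: a failed check terminates the process immediately, which is why it can safely guard the dereference above. As a rough illustration only, a minimal sketch of such a macro could look like this (hypothetical MY_RELEASE_ASSERT; WTF's real macro additionally routes through its own crash-reporting machinery):

#include <cstdio>
#include <cstdlib>

// Hypothetical sketch: stays active in release builds, unlike assert().
#define MY_RELEASE_ASSERT(condition)                              \
    do {                                                          \
        if (!(condition)) {                                       \
            std::fprintf(stderr, "ASSERTION FAILED: %s, %s:%d\n", \
                         #condition, __FILE__, __LINE__);         \
            std::abort();                                         \
        }                                                         \
    } while (0)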
Example #2
void DirectJITCode::initializeCodeRef(JITCode::CodeRef ref, JITCode::CodePtr withArityCheck)
{
    RELEASE_ASSERT(!m_ref);
    m_ref = ref;
    m_withArityCheck = withArityCheck;
}
Example #3
void ShadowChicken::update(VM&, ExecState* exec)
{
    if (verbose) {
        dataLog("Running update on: ", *this, "\n");
        WTFReportBacktrace();
    }
    
    const unsigned logCursorIndex = m_logCursor - m_log;
    
    // We need to figure out how to reconcile the current machine stack with our shadow stack. We do
    // that by figuring out how much of the shadow stack to pop. We apply three different rules. The
    // precise rule relies on the log. The log contains caller frames, which means that we know
    // where we bottomed out after making any call. If we bottomed out but made no calls then 'exec'
    // will tell us. That's why "highestPointSinceLastTime" will go no lower than exec. The third
    // rule, based on comparing to the current real stack, is executed in a later loop.
    CallFrame* highestPointSinceLastTime = exec;
    for (unsigned i = logCursorIndex; i--;) {
        Packet packet = m_log[i];
        if (packet.isPrologue()) {
            CallFrame* watermark;
            if (i && m_log[i - 1].isTail())
                watermark = packet.frame;
            else
                watermark = packet.callerFrame;
            highestPointSinceLastTime = std::max(highestPointSinceLastTime, watermark);
        }
    }
    
    if (verbose)
        dataLog("    Highest point since last time: ", RawPointer(highestPointSinceLastTime), "\n");
    
    while (!m_stack.isEmpty() && m_stack.last().frame < highestPointSinceLastTime)
        m_stack.removeLast();
    
    if (verbose)
        dataLog("    Revised stack: ", listDump(m_stack), "\n");
    
    // It's possible that the top of stack is now tail-deleted. The stack no longer contains any
    // frames below the log's high watermark. That means that we just need to look for the first
    // occurrence of a tail packet for the current stack top.
    if (!m_stack.isEmpty()) {
        ASSERT(!m_stack.last().isTailDeleted);
        for (unsigned i = 0; i < logCursorIndex; ++i) {
            Packet& packet = m_log[i];
            if (packet.isTail() && packet.frame == m_stack.last().frame) {
                m_stack.last().isTailDeleted = true;
                break;
            }
        }
    }
    
    if (verbose)
        dataLog("    Revised stack: ", listDump(m_stack), "\n");
    
    // The log-based and exec-based rules require that ShadowChicken was enabled. The point of
    // ShadowChicken is to give sensible-looking results even if we had not logged. This means that
    // we need to reconcile the shadow stack and the real stack by actually looking at the real
    // stack. This reconciliation allows the shadow stack to have extra tail-deleted frames, but it
    // forbids it from diverging from the real stack on normal frames.
    if (!m_stack.isEmpty()) {
        Vector<Frame> stackRightNow;
        StackVisitor::visit(
            exec, [&] (StackVisitor& visitor) -> StackVisitor::Status {
                if (visitor->isInlinedFrame())
                    return StackVisitor::Continue;
                bool isTailDeleted = false;
                stackRightNow.append(Frame(visitor->callee(), visitor->callFrame(), isTailDeleted));
                return StackVisitor::Continue;
            });
        stackRightNow.reverse();
        
        if (verbose)
            dataLog("    Stack right now: ", listDump(stackRightNow), "\n");
        
        unsigned shadowIndex = 0;
        unsigned rightNowIndex = 0;
        while (shadowIndex < m_stack.size() && rightNowIndex < stackRightNow.size()) {
            if (m_stack[shadowIndex].isTailDeleted) {
                shadowIndex++;
                continue;
            }
            if (m_stack[shadowIndex] == stackRightNow[rightNowIndex]) {
                shadowIndex++;
                rightNowIndex++;
                continue;
            }
            break;
        }
        m_stack.resize(shadowIndex);
        
        if (verbose)
            dataLog("    Revised stack: ", listDump(m_stack), "\n");
    }
    
    // It's possible that the top stack frame is actually lower than highestPointSinceLastTime.
    // Account for that here.
    highestPointSinceLastTime = nullptr;
    for (unsigned i = m_stack.size(); i--;) {
        if (!m_stack[i].isTailDeleted) {
            highestPointSinceLastTime = m_stack[i].frame;
            break;
        }
    }
    
    if (verbose)
        dataLog("    Highest point since last time: ", RawPointer(highestPointSinceLastTime), "\n");
    
    // Set everything up so that we know where the top frame is in the log.
    unsigned indexInLog = logCursorIndex;
    
    auto advanceIndexInLogTo =
        [&] (CallFrame* frame, JSObject* callee, CallFrame* callerFrame) -> bool {
        if (verbose)
            dataLog("    Advancing to frame = ", RawPointer(frame), " from indexInLog = ", indexInLog, "\n");
        if (indexInLog > logCursorIndex) {
            if (verbose)
                dataLog("    Bailing.\n");
            return false;
        }
        
        unsigned oldIndexInLog = indexInLog;
        
        while (indexInLog--) {
            Packet packet = m_log[indexInLog];
            
            // If all callees opt into ShadowChicken, then this search will rapidly terminate when
            // we find our frame. But if our frame's callee didn't emit a prologue packet because it
            // didn't opt in, then we will keep looking backwards until we *might* find a different
            // frame. If we've been given the callee and callerFrame as a filter, then it's unlikely
            // that we will hit the wrong frame. But we don't always have that information.
            //
            // This means it's worth adding other filters. For example, we could track changes in
            // stack size. Once we've seen a frame at some height, we're no longer interested in
            // frames below that height. Also, we can break as soon as we see a frame higher than
            // the one we're looking for.
            // FIXME: Add more filters.
            // https://bugs.webkit.org/show_bug.cgi?id=155685
            
            if (packet.isPrologue() && packet.frame == frame
                && (!callee || packet.callee == callee)
                && (!callerFrame || packet.callerFrame == callerFrame)) {
                if (verbose)
                    dataLog("    Found at indexInLog = ", indexInLog, "\n");
                return true;
            }
        }
        
        // This is an interesting eventuality. We will see this if ShadowChicken was not
        // consistently enabled. We have a choice between:
        //
        // - Leaving the log index at -1, which will prevent the log from being considered. This is
        //   the most conservative. It means that we will not be able to recover tail-deleted frames
        //   from anything that sits above a frame that didn't log a prologue packet. This means
        //   that everyone who creates prologues must log prologue packets.
        //
        // - Restoring the log index to what it was before. This prevents us from considering
        //   whether this frame has tail-deleted frames behind it, but that's about it. The problem
        //   with this approach is that it might recover tail-deleted frames that aren't relevant.
        //   I haven't thought about this too deeply, though.
        //
        // It seems like the latter option is less harmful, so that's what we do.
        indexInLog = oldIndexInLog;
        
        if (verbose)
            dataLog("    Didn't find it.\n");
        return false;
    };
    
    Vector<Frame> toPush;
    StackVisitor::visit(
        exec, [&] (StackVisitor& visitor) -> StackVisitor::Status {
            if (visitor->isInlinedFrame()) {
                // FIXME: Handle inlining.
                // https://bugs.webkit.org/show_bug.cgi?id=155686
                return StackVisitor::Continue;
            }
            CallFrame* callFrame = visitor->callFrame();
            if (verbose)
                dataLog("    Examining ", RawPointer(callFrame), "\n");
            if (!toPush.isEmpty() && indexInLog < logCursorIndex
                // This condition protects us from the case where advanceIndexInLogTo didn't find
                // anything.
                && m_log[indexInLog].frame == toPush.last().frame) {
                if (verbose)
                    dataLog("    Going to loop through things with indexInLog = ", indexInLog, " and push-stack top = ", toPush.last(), "\n");
                for (;;) {
                    ASSERT(m_log[indexInLog].frame == toPush.last().frame);
                    
                    // Right now the index is pointing at a prologue packet of the last frame that
                    // we pushed. Peek behind that packet to see if there is a tail packet. If there
                    // is one then we know that there is a corresponding prologue packet that will
                    // tell us about a tail-deleted frame.
                    
                    if (!indexInLog)
                        break;
                    Packet lastPacket = m_log[indexInLog - 1];
                    if (!lastPacket.isTail()) {
                        // Last frame that we recorded was not the outcome of a tail call. So, there
                        // will not be any more deleted frames.
                        // FIXME: We might want to have a filter here. Consider that this was a tail
                        // marker for a tail call to something that didn't log anything. It should
                        // be sufficient to give the tail marker a copy of the caller frame.
                        // https://bugs.webkit.org/show_bug.cgi?id=155687
                        break;
                    }
                    indexInLog--; // Skip over the tail packet.
                    
                    if (!advanceIndexInLogTo(lastPacket.frame, nullptr, nullptr)) {
                        // We were unable to locate the prologue packet for this tail packet. That's
                        // quite suspect, so give up.
                        break;
                    }
                    Packet packet = m_log[indexInLog];
                    bool isTailDeleted = true;
                    toPush.append(Frame(packet.callee, packet.frame, isTailDeleted));
                }
            }
            if (callFrame == highestPointSinceLastTime) {
                if (verbose)
                    dataLog("    Bailing at ", RawPointer(callFrame), " because it's the highest point since last time.\n");
                return StackVisitor::Done;
            }
            advanceIndexInLogTo(callFrame, callFrame->callee(), callFrame->callerFrame());
            bool isTailDeleted = false;
            toPush.append(Frame(visitor->callee(), callFrame, isTailDeleted));
            return StackVisitor::Continue;
        });
    
    if (verbose)
        dataLog("    Pushing: ", listDump(toPush), "\n");
    
    for (unsigned i = toPush.size(); i--;)
        m_stack.append(toPush[i]);
    
    // We want to reset the log. There is a fun corner-case: there could be a tail marker at the end
    // of this log. We could make that work by setting isTailDeleted on the top of stack, but that
    // would require more corner cases in the complicated reconciliation code above. That code
    // already knows how to handle a tail packet at the beginning, so we just leverage that here.
    if (logCursorIndex && m_log[logCursorIndex - 1].isTail()) {
        m_log[0] = m_log[logCursorIndex - 1];
        m_logCursor = m_log + 1;
    } else
        m_logCursor = m_log;

    if (verbose)
        dataLog("    After pushing: ", *this, "\n");
    
    // Remove tail frames until the stack is small enough again.
    const unsigned stackSizeLimit = Options::shadowChickenStackSizeLimit();
    if (m_stack.size() > stackSizeLimit) {
        unsigned dstIndex = 0;
        unsigned srcIndex = 0;
        unsigned size = m_stack.size();
        while (srcIndex < m_stack.size()) {
            Frame frame = m_stack[srcIndex++];
            if (size > stackSizeLimit && frame.isTailDeleted) {
                size--;
                continue;
            }
            m_stack[dstIndex++] = frame;
        }
        RELEASE_ASSERT(dstIndex == size);
        m_stack.resize(size);
    }

    if (verbose)
        dataLog("    After clean-up: ", *this, "\n");
}
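To make the first reconciliation rule concrete, here is a stand-alone sketch of the watermark scan performed by the initial loop. Packet and the frame pointers are simplified stand-ins for the real ShadowChicken types, so this is illustrative only:

#include <algorithm>
#include <vector>

struct Packet {
    enum Kind { Prologue, Tail } kind;
    void* frame = nullptr;        // the frame being entered
    void* callerFrame = nullptr;  // its caller
};

void* highestPointSince(void* exec, const std::vector<Packet>& log)
{
    void* highest = exec; // never go below the current exec
    for (size_t i = log.size(); i--;) {
        if (log[i].kind != Packet::Prologue)
            continue;
        // A prologue immediately preceded by a tail packet means the caller
        // was tail-deleted, so the watermark is the new frame itself.
        void* watermark = (i && log[i - 1].kind == Packet::Tail)
            ? log[i].frame
            : log[i].callerFrame;
        highest = std::max(highest, watermark);
    }
    return highest;
}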
Example #4
void* JITCodeWithCodeRef::executableAddressAtOffset(size_t offset)
{
    RELEASE_ASSERT(m_ref);
    return reinterpret_cast<char*>(m_ref.code().executableAddress()) + offset;
}
Example #5
size_t JITCodeWithCodeRef::size()
{
    RELEASE_ASSERT(m_ref);
    return m_ref.size();
}
Example #6
Box* interpretFunction(llvm::Function *f, int nargs, Box* arg1, Box* arg2, Box* arg3, Box** args) {
    assert(f);

#ifdef TIME_INTERPRETS
    Timer _t("to interpret", 1000000);
    long this_us = 0;
#endif

    static StatCounter interpreted_runs("interpreted_runs");
    interpreted_runs.log();

    llvm::DataLayout dl(f->getParent());

    //f->dump();
    //assert(nargs == f->getArgumentList().size());

    SymMap symbols;

    void* frame_ptr = __builtin_frame_address(0);
    interpreter_roots[frame_ptr] = &symbols;
    UnregisterHelper helper(frame_ptr);

    int arg_num = -1;
    for (llvm::Argument& arg : f->args()) {
        arg_num++;

        if (arg_num == 0) symbols.insert(std::make_pair(static_cast<llvm::Value*>(&arg), Val(arg1)));
        else if (arg_num == 1) symbols.insert(std::make_pair(static_cast<llvm::Value*>(&arg), Val(arg2)));
        else if (arg_num == 2) symbols.insert(std::make_pair(static_cast<llvm::Value*>(&arg), Val(arg3)));
        else {
            assert(arg_num == 3);
            assert(f->getArgumentList().size() == 4);
            assert(f->getArgumentList().back().getType() == g.llvm_value_type_ptr->getPointerTo());
            symbols.insert(std::make_pair(static_cast<llvm::Value*>(&arg), Val((int64_t)args)));
            //printf("loading %%4 with %p\n", (void*)args);
            break;
        }
    }

    llvm::BasicBlock *prevblock = NULL;
    llvm::BasicBlock *curblock = &f->getEntryBlock();


    while (true) {
        for (llvm::Instruction &_inst : *curblock) {
            llvm::Instruction *inst = &_inst;

            if (VERBOSITY("interpreter") >= 2) {
                printf("executing in %s: ", f->getName().data());
                fflush(stdout);
                inst->dump();
                //f->dump();
            }

#define SET(v) set(symbols, inst, (v))
            if (llvm::LoadInst *li = llvm::dyn_cast<llvm::LoadInst>(inst)) {
                llvm::Value *ptr = li->getOperand(0);
                Val v = fetch(ptr, dl, symbols);
                //printf("loading from %p\n", v.o);

                if (width(li, dl) == 1) {
                    Val r = Val(*(bool*)v.o);
                    SET(r);
                    continue;
                } else if (width(li, dl) == 8) {
                    Val r = Val(*(int64_t*)v.o);
                    SET(r);
                    continue;
                } else {
                    li->dump();
                    RELEASE_ASSERT(0, "");
                }
            } else if (llvm::StoreInst *si = llvm::dyn_cast<llvm::StoreInst>(inst)) {
                llvm::Value *val = si->getOperand(0);
                llvm::Value *ptr = si->getOperand(1);
                Val v = fetch(val, dl, symbols);
                Val p = fetch(ptr, dl, symbols);

                //printf("storing %lx at %lx\n", v.n, p.n);

                if (width(val, dl) == 1) {
                    *(bool*)p.o = v.b;
                    continue;
                } else if (width(val, dl) == 8) {
                    *(int64_t*)p.o = v.n;
                    continue;
                } else {
                    si->dump();
                    RELEASE_ASSERT(0, "");
                }
            } else if (llvm::CmpInst *ci = llvm::dyn_cast<llvm::CmpInst>(inst)) {
                assert(ci->getType() == g.i1);

                Val a0 = fetch(ci->getOperand(0), dl, symbols);
                Val a1 = fetch(ci->getOperand(1), dl, symbols);
                llvm::CmpInst::Predicate pred = ci->getPredicate();
                switch (pred) {
                    case llvm::CmpInst::ICMP_EQ:
                        SET(a0.n == a1.n);
                        continue;
                    case llvm::CmpInst::ICMP_NE:
                        SET(a0.n != a1.n);
                        continue;
                    case llvm::CmpInst::ICMP_SLT:
                        SET(a0.n < a1.n);
                        continue;
                    case llvm::CmpInst::ICMP_SLE:
                        SET(a0.n <= a1.n);
                        continue;
                    case llvm::CmpInst::ICMP_SGT:
                        SET(a0.n > a1.n);
                        continue;
                    case llvm::CmpInst::ICMP_SGE:
                        SET(a0.n >= a1.n);
                        continue;
                    case llvm::CmpInst::FCMP_OEQ:
                        SET(a0.d == a1.d);
                        continue;
                    case llvm::CmpInst::FCMP_UNE:
                        SET(a0.d != a1.d);
                        continue;
                    case llvm::CmpInst::FCMP_OLT:
                        SET(a0.d < a1.d);
                        continue;
                    case llvm::CmpInst::FCMP_OLE:
                        SET(a0.d <= a1.d);
                        continue;
                    case llvm::CmpInst::FCMP_OGT:
                        SET(a0.d > a1.d);
                        continue;
                    case llvm::CmpInst::FCMP_OGE:
                        SET(a0.d >= a1.d);
                        continue;
                    default:
                        ci->dump();
                        RELEASE_ASSERT(0, "");
                }
                continue;
            } else if (llvm::BinaryOperator *bo = llvm::dyn_cast<llvm::BinaryOperator>(inst)) {
                if (bo->getOperand(0)->getType() == g.i64 || bo->getOperand(0)->getType() == g.i1) {
                    //assert(bo->getOperand(0)->getType() == g.i64);
                    //assert(bo->getOperand(1)->getType() == g.i64);

                    Val a0 = fetch(bo->getOperand(0), dl, symbols);
                    Val a1 = fetch(bo->getOperand(1), dl, symbols);
                    llvm::Instruction::BinaryOps opcode = bo->getOpcode();
                    switch (opcode) {
                        case llvm::Instruction::Add:
                            SET(a0.n + a1.n);
                            continue;
                        case llvm::Instruction::And:
                            SET(a0.n & a1.n);
                            continue;
                        case llvm::Instruction::AShr:
                            SET(a0.n >> a1.n);
                            continue;
                        case llvm::Instruction::Mul:
                            SET(a0.n * a1.n);
                            continue;
                        case llvm::Instruction::Or:
                            SET(a0.n | a1.n);
                            continue;
                        case llvm::Instruction::Shl:
                            SET(a0.n << a1.n);
                            continue;
                        case llvm::Instruction::Sub:
                            SET(a0.n - a1.n);
                            continue;
                        case llvm::Instruction::Xor:
                            SET(a0.n ^ a1.n);
                            continue;
                        default:
                            bo->dump();
                            RELEASE_ASSERT(0, "");
                    }
                    continue;
                } else if (bo->getOperand(0)->getType() == g.double_) {
                    //assert(bo->getOperand(0)->getType() == g.i64);
                    //assert(bo->getOperand(1)->getType() == g.i64);

                    double lhs = fetch(bo->getOperand(0), dl, symbols).d;
                    double rhs = fetch(bo->getOperand(1), dl, symbols).d;
                    llvm::Instruction::BinaryOps opcode = bo->getOpcode();
                    switch (opcode) {
                        case llvm::Instruction::FAdd:
                            SET(lhs + rhs);
                            continue;
                        case llvm::Instruction::FMul:
                            SET(lhs * rhs);
                            continue;
                        case llvm::Instruction::FSub:
                            SET(lhs - rhs);
                            continue;
                        default:
                            bo->dump();
                            RELEASE_ASSERT(0, "");
                    }
                    continue;
                } else {
                    bo->dump();
                    RELEASE_ASSERT(0, "");
                }
            } else if (llvm::GetElementPtrInst *gep = llvm::dyn_cast<llvm::GetElementPtrInst>(inst)) {
Example #7
 bool run()
 {
     ASSERT(m_graph.m_form == ThreadedCPS || m_graph.m_form == SSA);
     
     // First reset the counts to 0 for all nodes.
     for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
         BasicBlock* block = m_graph.block(blockIndex);
         if (!block)
             continue;
         for (unsigned indexInBlock = block->size(); indexInBlock--;)
             block->at(indexInBlock)->setRefCount(0);
         for (unsigned phiIndex = block->phis.size(); phiIndex--;)
             block->phis[phiIndex]->setRefCount(0);
     }
 
     // Now find the roots:
     // - Nodes that are must-generate.
     // - Nodes that are reachable from type checks.
     // Set their ref counts to 1 and put them on the worklist.
     for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
         BasicBlock* block = m_graph.block(blockIndex);
         if (!block)
             continue;
         for (unsigned indexInBlock = block->size(); indexInBlock--;) {
             Node* node = block->at(indexInBlock);
             DFG_NODE_DO_TO_CHILDREN(m_graph, node, findTypeCheckRoot);
             if (!(node->flags() & NodeMustGenerate))
                 continue;
             if (!node->postfixRef())
                 m_worklist.append(node);
         }
     }
     
     while (!m_worklist.isEmpty()) {
         while (!m_worklist.isEmpty()) {
             Node* node = m_worklist.last();
             m_worklist.removeLast();
             ASSERT(node->shouldGenerate()); // It should not be on the worklist unless it's ref'ed.
             DFG_NODE_DO_TO_CHILDREN(m_graph, node, countEdge);
         }
         
         if (m_graph.m_form == SSA) {
             // Find Phi->Upsilon edges, which are represented as meta-data in the
             // Upsilon.
             for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
                 BasicBlock* block = m_graph.block(blockIndex);
                 if (!block)
                     continue;
                 for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                     Node* node = block->at(nodeIndex);
                     if (node->op() != Upsilon)
                         continue;
                     if (node->shouldGenerate())
                         continue;
                     if (node->phi()->shouldGenerate())
                         countNode(node);
                 }
             }
         }
     }
     
     if (m_graph.m_form == SSA) {
         // Need to process the graph in reverse DFS order, so that we get to the uses
         // of a node before we get to the node itself.
         Vector<BasicBlock*> depthFirst;
         m_graph.getBlocksInDepthFirstOrder(depthFirst);
         for (unsigned i = depthFirst.size(); i--;)
             fixupBlock(depthFirst[i]);
     } else {
         RELEASE_ASSERT(m_graph.m_form == ThreadedCPS);
         
         for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex)
             fixupBlock(m_graph.block(blockIndex));
         
         cleanVariables(m_graph.m_arguments);
     }
     
     m_graph.m_refCountState = ExactRefCount;
     
     return true;
 }
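The marking phase above is the classic worklist pattern: roots get an initial ref count and go on the worklist, and each edge bumps its target's count, enqueueing a node only on its 0 -> 1 transition (that is what the postfixRef() check does). A minimal generic sketch with a simplified Node type, not the DFG's:

#include <vector>

struct Node {
    unsigned refCount = 0;
    std::vector<Node*> children;
};

// Assumes each root already has refCount == 1, mirroring the setup loop above.
void markLive(std::vector<Node*> worklist)
{
    while (!worklist.empty()) {
        Node* node = worklist.back();
        worklist.pop_back();
        for (Node* child : node->children) {
            // Equivalent of countEdge(): enqueue only on the 0 -> 1 transition.
            if (!child->refCount++)
                worklist.push_back(child);
        }
    }
}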
Example #8
void MediaStreamTrack::registerMediaStream(MediaStream* mediaStream)
{
    RELEASE_ASSERT(!m_isIteratingRegisteredMediaStreams);
    RELEASE_ASSERT(!m_registeredMediaStreams.contains(mediaStream));
    m_registeredMediaStreams.add(mediaStream);
}
Example #9
void AutomaticThread::start(const LockHolder&)
{
    RELEASE_ASSERT(m_isRunning);
    
    RefPtr<AutomaticThread> preserveThisForThread = this;
    
    m_hasUnderlyingThread = true;
    
    ThreadIdentifier thread = createThread(
        "WTF::AutomaticThread",
        [=] () {
            if (verbose)
                dataLog(RawPointer(this), ": Running automatic thread!\n");
            
            RefPtr<AutomaticThread> thread = preserveThisForThread;
            thread->threadDidStart();
            
            if (!ASSERT_DISABLED) {
                LockHolder locker(*m_lock);
                ASSERT(m_condition->contains(locker, this));
            }
            
            auto stopImpl = [&] (const LockHolder& locker) {
                thread->threadIsStopping(locker);
                thread->m_hasUnderlyingThread = false;
            };
            
            auto stopPermanently = [&] (const LockHolder& locker) {
                m_isRunning = false;
                m_isRunningCondition.notifyAll();
                stopImpl(locker);
            };
            
            auto stopForTimeout = [&] (const LockHolder& locker) {
                stopImpl(locker);
            };
            
            for (;;) {
                {
                    LockHolder locker(*m_lock);
                    for (;;) {
                        PollResult result = poll(locker);
                        if (result == PollResult::Work)
                            break;
                        if (result == PollResult::Stop)
                            return stopPermanently(locker);
                        RELEASE_ASSERT(result == PollResult::Wait);
                        // Shut the thread down after one second.
                        m_isWaiting = true;
                        bool awokenByNotify =
                            m_waitCondition.waitFor(*m_lock, 1_s);
                        if (verbose && !awokenByNotify && !m_isWaiting)
                            dataLog(RawPointer(this), ": waitFor timed out, but notified via m_isWaiting flag!\n");
                        if (m_isWaiting) {
                            m_isWaiting = false;
                            if (verbose)
                                dataLog(RawPointer(this), ": Going to sleep!\n");
                            // It's important that we don't release the lock until we have completely
                            // indicated that the thread is kaput. Otherwise we'll have a notify
                            // race that manifests as a deadlock on VM shutdown.
                            return stopForTimeout(locker);
                        }
                    }
                }
                
                WorkResult result = work();
                if (result == WorkResult::Stop) {
                    LockHolder locker(*m_lock);
                    return stopPermanently(locker);
                }
                RELEASE_ASSERT(result == WorkResult::Continue);
            }
        });
    detachThread(thread);
}
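The shutdown-on-timeout protocol above hinges on the m_isWaiting handshake: a notifier must claim the sleeper (clear the flag under the lock) before the sleeper concludes it timed out, otherwise work could be handed to a thread that is already tearing itself down. A rough sketch of the same handshake using standard C++ primitives (simplified; WTF's Condition/LockHolder differ in detail):

#include <chrono>
#include <condition_variable>
#include <mutex>

struct WorkerSketch {
    std::mutex lock;
    std::condition_variable waitCondition;
    bool hasWork = false;
    bool isWaiting = false;

    // Returns true when work arrived, false when the thread should shut down.
    bool waitForWorkOrTimeout()
    {
        std::unique_lock<std::mutex> locker(lock);
        while (!hasWork) {
            isWaiting = true;
            waitCondition.wait_for(locker, std::chrono::seconds(1));
            if (isWaiting) {
                // No notifier claimed us: treat it as a timeout and stop.
                isWaiting = false;
                return false;
            }
        }
        hasWork = false;
        return true;
    }

    void notifyWork()
    {
        std::lock_guard<std::mutex> locker(lock);
        hasWork = true;
        isWaiting = false; // claim the sleeper before it decides to die
        waitCondition.notify_one();
    }
};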
Example #10
void addStatistic(HeapStatistics* stats, GCAllocation* al, int nbytes) {
    stats->total.nallocs++;
    stats->total.nbytes += nbytes;

    if (al->kind_id == GCKind::PYTHON) {
        stats->python.nallocs++;
        stats->python.nbytes += nbytes;

        if (stats->collect_cls_stats) {
            Box* b = (Box*)al->user_data;
            auto& t = stats->by_cls[b->cls];

            t.nallocs++;
            t.nbytes += nbytes;
        }

        if (stats->collect_hcls_stats) {
            Box* b = (Box*)al->user_data;
            if (b->cls->instancesHaveHCAttrs()) {
                HCAttrs* attrs = b->getHCAttrsPtr();
                if (attrs->hcls->attributeArraySize() >= 20) {
                    printf("%s object has %d attributes\n", b->cls->tp_name, attrs->hcls->attributeArraySize());
                }

                stats->hcls_uses[attrs->hcls]++;
            }
        }
    } else if (al->kind_id == GCKind::CONSERVATIVE) {
        stats->conservative.nallocs++;
        stats->conservative.nbytes += nbytes;
    } else if (al->kind_id == GCKind::CONSERVATIVE_PYTHON) {
        stats->conservative_python.nallocs++;
        stats->conservative_python.nbytes += nbytes;

        if (stats->collect_cls_stats) {
            Box* b = (Box*)al->user_data;
            auto& t = stats->by_cls[b->cls];

            t.nallocs++;
            t.nbytes += nbytes;
        }
    } else if (al->kind_id == GCKind::UNTRACKED) {
        stats->untracked.nallocs++;
        stats->untracked.nbytes += nbytes;
    } else if (al->kind_id == GCKind::HIDDEN_CLASS) {
        stats->hcls.nallocs++;
        stats->hcls.nbytes += nbytes;

        if (stats->collect_hcls_stats) {
            HiddenClass* hcls = (HiddenClass*)al->user_data;
            int numattrs = hcls->attributeArraySize();
            if (numattrs <= HCLS_ATTRS_STAT_MAX)
                stats->num_hcls_by_attrs[numattrs]++;
            else
                stats->num_hcls_by_attrs_exceed++;
        }
    } else if (al->kind_id == GCKind::PRECISE) {
        stats->precise.nallocs++;
        stats->precise.nbytes += nbytes;
    } else {
        RELEASE_ASSERT(0, "%d", (int)al->kind_id);
    }
}
Example #11
AccessGenerationResult StructureStubInfo::addAccessCase(
    CodeBlock* codeBlock, const Identifier& ident, std::unique_ptr<AccessCase> accessCase)
{
    VM& vm = *codeBlock->vm();
    
    if (verbose)
        dataLog("Adding access case: ", accessCase, "\n");
    
    if (!accessCase)
        return AccessGenerationResult::GaveUp;
    
    AccessGenerationResult result;
    
    if (cacheType == CacheType::Stub) {
        result = u.stub->addCase(vm, codeBlock, *this, ident, WTFMove(accessCase));
        
        if (verbose)
            dataLog("Had stub, result: ", result, "\n");

        if (!result.buffered()) {
            bufferedStructures.clear();
            return result;
        }
    } else {
        std::unique_ptr<PolymorphicAccess> access = std::make_unique<PolymorphicAccess>();
        
        Vector<std::unique_ptr<AccessCase>, 2> accessCases;
        
        std::unique_ptr<AccessCase> previousCase =
            AccessCase::fromStructureStubInfo(vm, codeBlock, *this);
        if (previousCase)
            accessCases.append(WTFMove(previousCase));
        
        accessCases.append(WTFMove(accessCase));
        
        result = access->addCases(vm, codeBlock, *this, ident, WTFMove(accessCases));
        
        if (verbose)
            dataLog("Created stub, result: ", result, "\n");

        if (!result.buffered()) {
            bufferedStructures.clear();
            return result;
        }
        
        initStub(codeBlock, WTFMove(access));
    }
    
    RELEASE_ASSERT(!result.generatedSomeCode());
    
    // If we didn't buffer any cases then bail. If this made no changes then we'll just try again
    // subject to cool-down.
    if (!result.buffered()) {
        if (verbose)
            dataLog("Didn't buffer anything, bailing.\n");
        bufferedStructures.clear();
        return result;
    }
    
    // The buffering countdown tells us if we should be repatching now.
    if (bufferingCountdown) {
        if (verbose)
            dataLog("Countdown is too high: ", bufferingCountdown, ".\n");
        return result;
    }
    
    // Forget the buffered structures so that all future attempts to cache get fully handled by the
    // PolymorphicAccess.
    bufferedStructures.clear();
    
    result = u.stub->regenerate(vm, codeBlock, *this, ident);
    
    if (verbose)
        dataLog("Regeneration result: ", result, "\n");
    
    RELEASE_ASSERT(!result.buffered());
    
    if (!result.generatedSomeCode())
        return result;
    
    // If we generated some code then we don't want to attempt to repatch in the future until we
    // gather enough cases.
    bufferingCountdown = Options::repatchBufferingCountdown();
    return result;
}
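The buffering countdown implements a cool-down: newly observed cases are buffered cheaply, and the expensive stub regeneration only happens once the countdown has expired, after which it is re-armed so the next batch can accumulate. A minimal sketch of that policy follows; the decrement placement and the countdown constant are simplifying assumptions, not JSC's exact logic:

#include <vector>

struct InlineCacheSketch {
    unsigned bufferingCountdown = 0;
    std::vector<int> bufferedCases; // stand-in for buffered AccessCases

    // Hypothetical tuning knob, mirroring Options::repatchBufferingCountdown().
    static constexpr unsigned countdownAfterRegeneration = 8;

    void addCase(int newCase)
    {
        bufferedCases.push_back(newCase);
        if (bufferingCountdown) {
            bufferingCountdown--; // keep buffering; repatching is expensive
            return;
        }
        regenerate();
        bufferingCountdown = countdownAfterRegeneration;
    }

    void regenerate()
    {
        // ... emit one stub covering all buffered cases ...
        bufferedCases.clear();
    }
};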
Example #12
void FloatingPointEnvironment::propagateMainThreadEnvironment()
{
    RELEASE_ASSERT(m_isInitialized);
    RELEASE_ASSERT(!isUIThread());
    fesetenv(&m_mainThreadEnvironment);
}
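For reference, the underlying <fenv.h> API used here works as follows: capture the floating-point environment (rounding mode, exception state) once on the main thread, then apply it to any other thread. A minimal stand-alone sketch, not WebKit's FloatingPointEnvironment class:

#include <fenv.h>

static fenv_t mainThreadEnvironment;

// Call once on the main thread after configuring the FP state.
void captureMainThreadEnvironment()
{
    fegetenv(&mainThreadEnvironment);
}

// Call on secondary threads to adopt the captured state.
void propagateToCurrentThread()
{
    fesetenv(&mainThreadEnvironment);
}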
Example #13
JSObject* ProgramExecutable::initializeGlobalProperties(VM& vm, CallFrame* callFrame, JSScope* scope)
{
    auto throwScope = DECLARE_THROW_SCOPE(vm);
    RELEASE_ASSERT(scope);
    JSGlobalObject* globalObject = scope->globalObject();
    RELEASE_ASSERT(globalObject);
    ASSERT(&globalObject->vm() == &vm);

    ParserError error;
    JSParserStrictMode strictMode = isStrictMode() ? JSParserStrictMode::Strict : JSParserStrictMode::NotStrict;
    DebuggerMode debuggerMode = globalObject->hasInteractiveDebugger() ? DebuggerOn : DebuggerOff;

    UnlinkedProgramCodeBlock* unlinkedCodeBlock = vm.codeCache()->getUnlinkedProgramCodeBlock(
        vm, this, source(), strictMode, debuggerMode, error);

    if (globalObject->hasDebugger())
        globalObject->debugger()->sourceParsed(callFrame, source().provider(), error.line(), error.message());

    if (error.isValid())
        return error.toErrorObject(globalObject, source());

    JSValue nextPrototype = globalObject->getPrototypeDirect();
    while (nextPrototype && nextPrototype.isObject()) {
        if (UNLIKELY(asObject(nextPrototype)->type() == ProxyObjectType)) {
            ExecState* exec = globalObject->globalExec();
            return createTypeError(exec, ASCIILiteral("Proxy is not allowed in the global prototype chain."));
        }
        nextPrototype = asObject(nextPrototype)->getPrototypeDirect();
    }
    
    JSGlobalLexicalEnvironment* globalLexicalEnvironment = globalObject->globalLexicalEnvironment();
    const VariableEnvironment& variableDeclarations = unlinkedCodeBlock->variableDeclarations();
    const VariableEnvironment& lexicalDeclarations = unlinkedCodeBlock->lexicalDeclarations();
    // The ES6 spec says that no vars/global properties/let/const can be duplicated in the global scope.
    // This carries out section 15.1.8 of the ES6 spec: http://www.ecma-international.org/ecma-262/6.0/index.html#sec-globaldeclarationinstantiation
    {
        ExecState* exec = globalObject->globalExec();
        // Check for intersection of "var" and "let"/"const"/"class"
        for (auto& entry : lexicalDeclarations) {
            if (variableDeclarations.contains(entry.key))
                return createSyntaxError(exec, makeString("Can't create duplicate variable: '", String(entry.key.get()), "'"));
        }

        // Check if any new "let"/"const"/"class" will shadow any pre-existing global property names, or "var"/"let"/"const" variables.
        // It's an error to introduce a shadow.
        for (auto& entry : lexicalDeclarations) {
            bool hasProperty = globalObject->hasProperty(exec, entry.key.get());
            RETURN_IF_EXCEPTION(throwScope, throwScope.exception());
            if (hasProperty) {
                // The ES6 spec says that only a RestrictedGlobalProperty can't be shadowed.
                // This carries out section 8.1.1.4.14 of the ES6 spec: http://www.ecma-international.org/ecma-262/6.0/index.html#sec-hasrestrictedglobalproperty
                PropertyDescriptor descriptor;
                globalObject->getOwnPropertyDescriptor(exec, entry.key.get(), descriptor);
                
                if (descriptor.value() != jsUndefined() && !descriptor.configurable())
                    return createSyntaxError(exec, makeString("Can't create duplicate variable that shadows a global property: '", String(entry.key.get()), "'"));
            }

            hasProperty = globalLexicalEnvironment->hasProperty(exec, entry.key.get());
            RETURN_IF_EXCEPTION(throwScope, throwScope.exception());
            if (hasProperty) {
                if (UNLIKELY(entry.value.isConst() && !vm.globalConstRedeclarationShouldThrow() && !isStrictMode())) {
                    // We only allow "const" duplicate declarations under this setting.
                    // For example, we don't allow "let" variables to be overridden by "const" variables.
                    if (globalLexicalEnvironment->isConstVariable(entry.key.get()))
                        continue;
                }
                return createSyntaxError(exec, makeString("Can't create duplicate variable: '", String(entry.key.get()), "'"));
            }
        }

        // Check if any new "var"s will shadow any previous "let"/"const"/"class" names.
        // It's an error to introduce a shadow.
        if (!globalLexicalEnvironment->isEmpty()) {
            for (auto& entry : variableDeclarations) {
                bool hasProperty = globalLexicalEnvironment->hasProperty(exec, entry.key.get());
                RETURN_IF_EXCEPTION(throwScope, throwScope.exception());
                if (hasProperty)
                    return createSyntaxError(exec, makeString("Can't create duplicate variable: '", String(entry.key.get()), "'"));
            }
        }
    }


    m_unlinkedProgramCodeBlock.set(vm, this, unlinkedCodeBlock);

    BatchedTransitionOptimizer optimizer(vm, globalObject);

    for (size_t i = 0, numberOfFunctions = unlinkedCodeBlock->numberOfFunctionDecls(); i < numberOfFunctions; ++i) {
        UnlinkedFunctionExecutable* unlinkedFunctionExecutable = unlinkedCodeBlock->functionDecl(i);
        ASSERT(!unlinkedFunctionExecutable->name().isEmpty());
        globalObject->addFunction(callFrame, unlinkedFunctionExecutable->name());
        if (vm.typeProfiler() || vm.controlFlowProfiler()) {
            vm.functionHasExecutedCache()->insertUnexecutedRange(sourceID(), 
                unlinkedFunctionExecutable->typeProfilingStartOffset(), 
                unlinkedFunctionExecutable->typeProfilingEndOffset());
        }
    }

    for (auto& entry : variableDeclarations) {
        ASSERT(entry.value.isVar());
        globalObject->addVar(callFrame, Identifier::fromUid(&vm, entry.key.get()));
        ASSERT(!throwScope.exception());
    }

    {
        JSGlobalLexicalEnvironment* globalLexicalEnvironment = jsCast<JSGlobalLexicalEnvironment*>(globalObject->globalScope());
        SymbolTable* symbolTable = globalLexicalEnvironment->symbolTable();
        ConcurrentJSLocker locker(symbolTable->m_lock);
        for (auto& entry : lexicalDeclarations) {
            if (UNLIKELY(entry.value.isConst() && !vm.globalConstRedeclarationShouldThrow() && !isStrictMode())) {
                if (symbolTable->contains(locker, entry.key.get()))
                    continue;
            }
            ScopeOffset offset = symbolTable->takeNextScopeOffset(locker);
            SymbolTableEntry newEntry(VarOffset(offset), entry.value.isConst() ? ReadOnly : 0);
            newEntry.prepareToWatch();
            symbolTable->add(locker, entry.key.get(), newEntry);
            
            ScopeOffset offsetForAssert = globalLexicalEnvironment->addVariables(1, jsTDZValue());
            RELEASE_ASSERT(offsetForAssert == offset);
        }
    }
    return nullptr;
}
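The heart of the duplicate checks above is an intersection test between the "var" names and the lexical ("let"/"const"/"class") names. A stand-alone sketch with plain strings in place of Identifiers:

#include <set>
#include <string>

// Returns the first offending name, or an empty string if the sets are disjoint.
std::string findDuplicateDeclaration(const std::set<std::string>& varNames,
                                     const std::set<std::string>& lexicalNames)
{
    for (const std::string& name : lexicalNames) {
        if (varNames.count(name))
            return name; // would be a SyntaxError at global scope
    }
    return std::string();
}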
Example #14
void WebCryptoResult::completeWithBuffer(const WebArrayBuffer& buffer)
{
    RELEASE_ASSERT(!buffer.isNull());
    m_impl->completeWithBuffer(buffer);
    reset();
}
Example #15
File: code.cpp Project: jmgc/pyston
    static Box* argcount(Box* b, void*) {
        RELEASE_ASSERT(b->cls == code_cls, "");

        return boxInt(static_cast<BoxedCode*>(b)->f->paramspec.num_args);
    }
Example #16
    bool _runOnFunction(llvm::Function& f) {
        Timer _t2("(sum)");
        Timer _t("initializing");
        initialize();
        _t.split("overhead");

        // f.dump();

        llvm::Module* cur_module = f.getParent();

#if LLVMREV < 217548
        llvm::PassManager fake_pm;
#else
        llvm::legacy::PassManager fake_pm;
#endif
        llvm::InlineCostAnalysis* cost_analysis = new llvm::InlineCostAnalysis();
        fake_pm.add(cost_analysis);
        // llvm::errs() << "doing fake run\n";
        fake_pm.run(*fake_module);
        // llvm::errs() << "done with fake run\n";

        bool did_any_inlining = false;

        // TODO I haven't gotten the callgraph-updating part of the inliner to work,
        // so it's not easy to tell what callsites have been inlined into (ie added to)
        // the function.
        // One simple-but-not-great way to handle it is to just iterate over the entire function
        // multiple times and re-inline things until we don't want to inline any more;
        // NPASSES controls the maximum number of times to attempt that.
        // Right now we actually don't need that, since we only inline fully-optimized
        // functions (from the stdlib), and those will already have had inlining
        // applied recursively.
        const int NPASSES = 1;
        for (int passnum = 0; passnum < NPASSES; passnum++) {
            _t.split("collecting calls");

            std::vector<llvm::CallSite> calls;
            for (llvm::inst_iterator I = llvm::inst_begin(f), E = llvm::inst_end(f); I != E; ++I) {
                llvm::CallInst* call = llvm::dyn_cast<llvm::CallInst>(&(*I));
                // From Inliner.cpp:
                if (!call || llvm::isa<llvm::IntrinsicInst>(call))
                    continue;
                // I->dump();
                llvm::CallSite CS(call);

                llvm::Value* v = CS.getCalledValue();
                llvm::ConstantExpr* ce = llvm::dyn_cast<llvm::ConstantExpr>(v);
                if (!ce)
                    continue;

                assert(ce->isCast());
                llvm::ConstantInt* l_addr = llvm::cast<llvm::ConstantInt>(ce->getOperand(0));
                int64_t addr = l_addr->getSExtValue();

                if (addr == (int64_t)printf)
                    continue;
                llvm::Function* f = g.func_addr_registry.getLLVMFuncAtAddress((void*)addr);
                if (f == NULL) {
                    if (VERBOSITY()) {
                        printf("Giving up on inlining %s:\n",
                               g.func_addr_registry.getFuncNameAtAddress((void*)addr, true).c_str());
                        call->dump();
                    }
                    continue;
                }

                // We load the bitcode lazily, so check if we haven't yet fully loaded the function:
                if (f->isMaterializable()) {
#if LLVMREV < 220600
                    f->Materialize();
#else
                    f->materialize();
#endif
                }

                // It could still be a declaration, though I think the code won't generate this case any more:
                if (f->isDeclaration())
                    continue;

                // Keep this section as a release_assert since the code-to-be-inlined, as well as the inlining
                // decisions, can be different in release mode:
                int op_idx = -1;
                for (llvm::Argument& arg : f->args()) {
                    ++op_idx;
                    llvm::Type* op_type = call->getOperand(op_idx)->getType();
                    if (arg.getType() != op_type) {
                        llvm::errs() << f->getName() << " has arg " << op_idx << " mismatched!\n";
                        llvm::errs() << "Given ";
                        op_type->dump();
                        llvm::errs() << " but underlying function expected ";
                        arg.getType()->dump();
                        llvm::errs() << '\n';
                    }
                    RELEASE_ASSERT(arg.getType() == call->getOperand(op_idx)->getType(), "");
                }

                assert(!f->isDeclaration());
                CS.setCalledFunction(f);
                calls.push_back(CS);
            }

            // assert(0 && "TODO");
            // printf("%ld\n", calls.size());

            bool did_inline = false;
            _t.split("doing inlining");
            while (calls.size()) {
                llvm::CallSite cs = calls.back();
                calls.pop_back();

                // if (VERBOSITY("irgen.inlining") >= 1) {
                // llvm::errs() << "Evaluating callsite ";
                // cs->dump();
                //}
                llvm::InlineCost IC = cost_analysis->getInlineCost(cs, threshold);
                bool do_inline = false;
                if (IC.isAlways()) {
                    if (VERBOSITY("irgen.inlining") >= 2)
                        llvm::errs() << "always inline\n";
                    do_inline = true;
                } else if (IC.isNever()) {
                    if (VERBOSITY("irgen.inlining") >= 2)
                        llvm::errs() << "never inline\n";
                    do_inline = false;
                } else {
                    if (VERBOSITY("irgen.inlining") >= 2)
                        llvm::errs() << "Inline cost: " << IC.getCost() << '\n';
                    do_inline = (bool)IC;
                }

                if (VERBOSITY("irgen.inlining") >= 1) {
                    if (!do_inline)
                        llvm::outs() << "not ";
                    llvm::outs() << "inlining ";
                    cs->dump();
                }

                if (do_inline) {
                    static StatCounter num_inlines("num_inlines");
                    num_inlines.log();

                    // llvm::CallGraph cg(*f.getParent());
                    ////cg.addToCallGraph(cs->getCalledFunction());
                    // llvm::InlineFunctionInfo InlineInfo(&cg);

                    llvm::InlineFunctionInfo InlineInfo;
                    bool inlined = llvm::InlineFunction(cs, InlineInfo, false);
                    did_inline = did_inline || inlined;
                    did_any_inlining = did_any_inlining || inlined;

                    // if (inlined)
                    // f.dump();
                }
            }

            if (!did_inline) {
                if (passnum >= NPASSES - 1 && VERBOSITY("irgen.inlining"))
                    printf("quitting after %d passes\n", passnum + 1);
                break;
            }
        }

        // TODO would be nice to break out here and not have to rematerialize the function;
        // I think I have to do that even if no inlining happened from the "setCalledFunction" call above.
        // I thought that'd just change the CS object, but maybe it changes the underlying instruction as well?
        // if (!did_any_inlining)
        // return false;

        _t.split("remapping");

        llvm::ValueToValueMapTy VMap;
        for (llvm::Function::iterator I = f.begin(), E = f.end(); I != E; ++I) {
            VMap[I] = I;
        }
        MyMaterializer materializer(cur_module);
        for (llvm::inst_iterator I = llvm::inst_begin(f), E = llvm::inst_end(f); I != E; ++I) {
            RemapInstruction(&(*I), VMap, llvm::RF_None, NULL, &materializer);
        }

        _t.split("cleaning up");

        std::vector<llvm::GlobalValue*> to_remove;
        for (llvm::Module::global_iterator I = cur_module->global_begin(), E = cur_module->global_end(); I != E; ++I) {
            if (I->use_empty()) {
                to_remove.push_back(I);
                continue;
            }
        }

        for (size_t i = 0; i < to_remove.size(); i++) {
            to_remove[i]->eraseFromParent();
        }

        for (llvm::Module::iterator I = cur_module->begin(), E = cur_module->end(); I != E;) {
            if (!I->isDeclaration()) {
                ++I;
                continue;
            }

            if (I->use_empty()) {
                I = cur_module->getFunctionList().erase(I);
            } else {
                ++I;
            }
        }

        return did_any_inlining;
    }
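The NPASSES loop described in the comments is a capped fixed-point iteration: re-run a transformation while it reports progress, bounded by a maximum pass count. Stripped of the LLVM specifics, the pattern is just this (generic sketch):

// Repeats doOnePass until it makes no progress or the pass cap is hit;
// returns whether any pass changed anything.
bool runToFixedPoint(bool (*doOnePass)(), int maxPasses)
{
    bool changedAnything = false;
    for (int passnum = 0; passnum < maxPasses; passnum++) {
        if (!doOnePass())
            break; // no progress this pass; done early
        changedAnything = true;
    }
    return changedAnything;
}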
Example #17
extern "C" Py_ssize_t PyList_Size(PyObject* self) noexcept {
    RELEASE_ASSERT(self->cls == list_cls, "");
    return static_cast<BoxedList*>(self)->size;
}
Example #18
extern "C" Py_ssize_t PyTuple_Size(PyObject* op) noexcept {
    RELEASE_ASSERT(PyTuple_Check(op), "");
    return static_cast<BoxedTuple*>(op)->size();
}
Example #19
void FramePainter::paintContents(GraphicsContext& context, const GlobalPaintFlags globalPaintFlags, const IntRect& rect)
{
    Document* document = frameView().frame().document();

#ifndef NDEBUG
    bool fillWithRed;
    if (document->printing())
        fillWithRed = false; // Printing, don't fill with red (can't remember why).
    else if (frameView().frame().owner())
        fillWithRed = false; // Subframe, don't fill with red.
    else if (frameView().isTransparent())
        fillWithRed = false; // Transparent, don't fill with red.
    else if (globalPaintFlags & GlobalPaintSelectionOnly)
        fillWithRed = false; // Selections are transparent, don't fill with red.
    else
        fillWithRed = true;

    if (fillWithRed && !LayoutObjectDrawingRecorder::useCachedDrawingIfPossible(context, *frameView().layoutView(), DisplayItem::DebugRedFill, LayoutPoint())) {
        IntRect contentRect(IntPoint(), frameView().contentsSize());
        LayoutObjectDrawingRecorder drawingRecorder(context, *frameView().layoutView(), DisplayItem::DebugRedFill, contentRect, LayoutPoint());
    }
#endif

    LayoutView* layoutView = frameView().layoutView();
    if (!layoutView) {
        WTF_LOG_ERROR("called FramePainter::paint with nil layoutObject");
        return;
    }

    if (!frameView().shouldThrottleRendering()) {
        RELEASE_ASSERT(!frameView().needsLayout());
        ASSERT(document->lifecycle().state() >= DocumentLifecycle::CompositingClean);
    }

    TRACE_EVENT1("devtools.timeline", "Paint", "data", InspectorPaintEvent::data(layoutView, LayoutRect(rect), 0));

    bool isTopLevelPainter = !s_inPaintContents;
    s_inPaintContents = true;

    FontCachePurgePreventer fontCachePurgePreventer;

    // TODO(jchaffraix): GlobalPaintFlags should be const during a paint
    // phase. Thus we should set this flag upfront (crbug.com/510280).
    GlobalPaintFlags localPaintFlags = globalPaintFlags;
    if (document->printing())
        localPaintFlags |= GlobalPaintFlattenCompositingLayers | GlobalPaintPrinting;

    PaintLayer* rootLayer = layoutView->layer();

#if ENABLE(ASSERT)
    if (!frameView().shouldThrottleRendering())
        layoutView->assertSubtreeIsLaidOut();
    LayoutObject::SetLayoutNeededForbiddenScope forbidSetNeedsLayout(*rootLayer->layoutObject());
#endif

    PaintLayerPainter layerPainter(*rootLayer);

    float deviceScaleFactor = blink::deviceScaleFactor(rootLayer->layoutObject()->frame());
    context.setDeviceScaleFactor(deviceScaleFactor);

    layerPainter.paint(context, LayoutRect(rect), localPaintFlags);

    if (rootLayer->containsDirtyOverlayScrollbars())
        layerPainter.paintOverlayScrollbars(context, LayoutRect(rect), localPaintFlags);

    // Regions may have changed as a result of the visibility/z-index of an element changing.
    if (document->annotatedRegionsDirty())
        frameView().updateDocumentAnnotatedRegions();

    if (isTopLevelPainter) {
        // Everything that happens after paintContents completes is considered
        // to be part of the next frame.
        memoryCache()->updateFramePaintTimestamp();
        s_inPaintContents = false;
    }

    InspectorInstrumentation::didPaint(layoutView, 0, context, LayoutRect(rect));
}
Example #20
Box* tupleNonzero(BoxedTuple* self) {
    RELEASE_ASSERT(isSubclass(self->cls, tuple_cls), "");
    return boxBool(self->size() != 0);
}
Example #21
void* prepareOSREntry(ExecState* exec, CodeBlock* codeBlock, unsigned bytecodeIndex)
{
    ASSERT(JITCode::isOptimizingJIT(codeBlock->jitType()));
    ASSERT(codeBlock->alternative());
    ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT);
    ASSERT(!codeBlock->jitCodeMap());

    if (!Options::enableOSREntryToDFG())
        return 0;

    if (Options::verboseOSR()) {
        dataLog(
            "DFG OSR in ", *codeBlock->alternative(), " -> ", *codeBlock,
            " from bc#", bytecodeIndex, "\n");
    }
    
    VM* vm = &exec->vm();

    sanitizeStackForVM(vm);
    
    if (codeBlock->jitType() != JITCode::DFGJIT) {
        RELEASE_ASSERT(codeBlock->jitType() == JITCode::FTLJIT);
        
        // When will this happen? We could have:
        //
        // - An exit from the FTL JIT into the baseline JIT followed by an attempt
        //   to reenter. We're fine with allowing this to fail. If it happens
        //   enough we'll just reoptimize. It basically means that the OSR exit cost
        //   us dearly and so reoptimizing is the right thing to do.
        //
        // - We have recursive code with hot loops. Consider that foo has a hot loop
        //   that calls itself. We have two foo's on the stack, let's call them foo1
        //   and foo2, with foo1 having called foo2 from foo's hot loop. foo2 gets
        //   optimized all the way into the FTL. Then it returns into foo1, and then
        //   foo1 wants to get optimized. It might reach this conclusion from its
        //   hot loop and attempt to OSR enter. And we'll tell it that it can't. It
        //   might be worth addressing this case, but I just think this case will
        //   be super rare. For now, if it does happen, it'll cause some compilation
        //   thrashing.
        
        if (Options::verboseOSR())
            dataLog("    OSR failed because the target code block is not DFG.\n");
        return 0;
    }
    
    JITCode* jitCode = codeBlock->jitCode()->dfg();
    OSREntryData* entry = jitCode->osrEntryDataForBytecodeIndex(bytecodeIndex);
    
    if (!entry) {
        if (Options::verboseOSR())
            dataLogF("    OSR failed because the entrypoint was optimized out.\n");
        return 0;
    }
    
    ASSERT(entry->m_bytecodeIndex == bytecodeIndex);
    
    // The code below checks if it is safe to perform OSR entry. It may find
    // that it is unsafe to do so, for any number of reasons, which are documented
    // below. If the code decides not to OSR then it returns 0, and it's the caller's
    // responsibility to patch up the state in such a way as to ensure that it's
    // both safe and efficient to continue executing baseline code for now. This
    // should almost certainly include calling either codeBlock->optimizeAfterWarmUp()
    // or codeBlock->dontOptimizeAnytimeSoon().
    
    // 1) Verify predictions. If the predictions are inconsistent with the actual
    //    values, then OSR entry is not possible at this time. It's tempting to
    //    assume that we could somehow avoid this case. We can certainly avoid it
    //    for first-time loop OSR - that is, OSR into a CodeBlock that we have just
    //    compiled. Then we are almost guaranteed that all of the predictions will
    //    check out. It would be pretty easy to make that a hard guarantee. But
    //    then there would still be the case where two call frames with the same
    //    baseline CodeBlock are on the stack at the same time. The top one
    //    triggers compilation and OSR. In that case, we may no longer have
    //    accurate value profiles for the one deeper in the stack. Hence, when we
    //    pop into the CodeBlock that is deeper on the stack, we might OSR and
    //    realize that the predictions are wrong. Probably, in most cases, this is
    //    just an anomaly in the sense that the older CodeBlock simply went off
    //    into a less-likely path. So, the wisest course of action is to simply not
    //    OSR at this time.
    
    for (size_t argument = 0; argument < entry->m_expectedValues.numberOfArguments(); ++argument) {
        if (argument >= exec->argumentCountIncludingThis()) {
            if (Options::verboseOSR()) {
                dataLogF("    OSR failed because argument %zu was not passed, expected ", argument);
                entry->m_expectedValues.argument(argument).dump(WTF::dataFile());
                dataLogF(".\n");
            }
            return 0;
        }
        
        JSValue value;
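        // Expected-values slot 0 is 'this'; actual arguments start at
        // slot 1, hence the argument(argument - 1) below.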
        if (!argument)
            value = exec->thisValue();
        else
            value = exec->argument(argument - 1);
        
        if (!entry->m_expectedValues.argument(argument).validate(value)) {
            if (Options::verboseOSR()) {
                dataLog(
                    "    OSR failed because argument ", argument, " is ", value,
                    ", expected ", entry->m_expectedValues.argument(argument), ".\n");
            }
            return 0;
        }
    }
    
    for (size_t local = 0; local < entry->m_expectedValues.numberOfLocals(); ++local) {
        int localOffset = virtualRegisterForLocal(local).offset();
        if (entry->m_localsForcedDouble.get(local)) {
            if (!exec->registers()[localOffset].jsValue().isNumber()) {
                if (Options::verboseOSR()) {
                    dataLog(
                        "    OSR failed because variable ", localOffset, " is ",
                        exec->registers()[localOffset].jsValue(), ", expected number.\n");
                }
                return 0;
            }
            continue;
        }
        if (entry->m_localsForcedMachineInt.get(local)) {
            if (!exec->registers()[localOffset].jsValue().isMachineInt()) {
                if (Options::verboseOSR()) {
                    dataLog(
                        "    OSR failed because variable ", localOffset, " is ",
                        exec->registers()[localOffset].jsValue(), ", expected ",
                        "machine int.\n");
                }
                return 0;
            }
            continue;
        }
        if (!entry->m_expectedValues.local(local).validate(exec->registers()[localOffset].jsValue())) {
            if (Options::verboseOSR()) {
                dataLog(
                    "    OSR failed because variable ", localOffset, " is ",
                    exec->registers()[localOffset].jsValue(), ", expected ",
                    entry->m_expectedValues.local(local), ".\n");
            }
            return 0;
        }
    }

    // 2) Check the stack height. The DFG JIT may require a taller stack than the
    //    baseline JIT, in some cases. If we can't grow the stack, then don't do
    //    OSR right now. That's the only option we have unless we want basic block
    //    boundaries to start throwing RangeErrors. Although that would be possible,
    //    it seems silly: you'd be diverting the program to error handling when it
    //    would have otherwise just kept running, albeit less quickly.
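    //    The capacity probe below passes the lowest address the new DFG
    //    frame could touch, i.e. the deepest required register addressed
    //    relative to the current call frame.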
    
    unsigned frameSizeForCheck = jitCode->common.requiredRegisterCountForExecutionAndExit();
    if (!vm->interpreter->stack().ensureCapacityFor(&exec->registers()[virtualRegisterForLocal(frameSizeForCheck - 1).offset()])) {
        if (Options::verboseOSR())
            dataLogF("    OSR failed because stack growth failed.\n");
        return 0;
    }
    
    if (Options::verboseOSR())
        dataLogF("    OSR should succeed.\n");

    // At this point we're committed to entering. We will do some work to set things up,
    // but we also rely on our caller recognizing that when we return a non-null pointer,
    // that means that we're already past the point of no return and we must succeed at
    // entering.
    
    // 3) Set up the data in the scratch buffer and perform data format conversions.

    unsigned frameSize = jitCode->common.frameRegisterCount;
    unsigned baselineFrameSize = entry->m_expectedValues.numberOfLocals();
    unsigned maxFrameSize = std::max(frameSize, baselineFrameSize);

    Register* scratch = bitwise_cast<Register*>(vm->scratchBufferForSize(sizeof(Register) * (2 + JSStack::CallFrameHeaderSize + maxFrameSize))->dataBuffer());
    
    *bitwise_cast<size_t*>(scratch + 0) = frameSize;
    
    void* targetPC = codeBlock->jitCode()->executableAddressAtOffset(entry->m_machineCodeOffset);
    if (Options::verboseOSR())
        dataLogF("    OSR using target PC %p.\n", targetPC);
    RELEASE_ASSERT(targetPC);
    *bitwise_cast<void**>(scratch + 1) = targetPC;
    
    Register* pivot = scratch + 2 + JSStack::CallFrameHeaderSize;
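    // In the copy loop below, 'index' is an offset from 'pivot': negative
    // indices cover the call frame header and non-negative indices cover the
    // locals. VirtualRegister(-1 - index) recovers each slot's virtual
    // register so forced-format locals can be converted as they are copied.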
    
    for (int index = -JSStack::CallFrameHeaderSize; index < static_cast<int>(baselineFrameSize); ++index) {
        VirtualRegister reg(-1 - index);
        
        if (reg.isLocal()) {
            if (entry->m_localsForcedDouble.get(reg.toLocal())) {
                *bitwise_cast<double*>(pivot + index) = exec->registers()[reg.offset()].jsValue().asNumber();
                continue;
            }
            
            if (entry->m_localsForcedMachineInt.get(reg.toLocal())) {
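                // Int52 format keeps the integer shifted left by
                // int52ShiftAmount, hence the shift below.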
                *bitwise_cast<int64_t*>(pivot + index) = exec->registers()[reg.offset()].jsValue().asMachineInt() << JSValue::int52ShiftAmount;
                continue;
            }
        }
        
        pivot[index] = exec->registers()[reg.offset()].jsValue();
    }
    
    // 4) Reshuffle those registers that need reshuffling.
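    //    Copy every source into a temporary first, then write back, so an
    //    overlapping move cannot clobber a source before it has been read.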
    Vector<JSValue> temporaryLocals(entry->m_reshufflings.size());
    for (unsigned i = entry->m_reshufflings.size(); i--;)
        temporaryLocals[i] = pivot[VirtualRegister(entry->m_reshufflings[i].fromOffset).toLocal()].jsValue();
    for (unsigned i = entry->m_reshufflings.size(); i--;)
        pivot[VirtualRegister(entry->m_reshufflings[i].toOffset).toLocal()] = temporaryLocals[i];
    
    // 5) Clear those parts of the call frame that the DFG ain't using. This helps GC on
    //    some programs by eliminating some stale pointer pathologies.
    for (unsigned i = frameSize; i--;) {
        if (entry->m_machineStackUsed.get(i))
            continue;
        pivot[i] = JSValue();
    }
    
    // 6) Fix the call frame to have the right code block.
    
    *bitwise_cast<CodeBlock**>(pivot - 1 - JSStack::CodeBlock) = codeBlock;
    
    if (Options::verboseOSR())
        dataLogF("    OSR returning data buffer %p.\n", scratch);
    return scratch;
}
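The entire handshake with the OSR entry thunk is the pair of type-punned stores at the top of the scratch buffer: slot 0 carries the frame size, slot 1 the target PC, and the reconstructed frame follows. A minimal self-contained sketch of that layout, assuming a 64-bit Register stand-in and a hypothetical buildEntryBuffer helper:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

using Register = uint64_t; // stand-in for JSC's 64-bit register slot

std::vector<Register> buildEntryBuffer(size_t frameSize, void* targetPC, size_t headerSize)
{
    std::vector<Register> scratch(2 + headerSize + frameSize);
    std::memcpy(&scratch[0], &frameSize, sizeof(frameSize)); // slot 0: frame size
    std::memcpy(&scratch[1], &targetPC, sizeof(targetPC));   // slot 1: target PC
    // Frame registers are then written relative to scratch + 2 + headerSize,
    // which is exactly what the 'pivot' pointer above points at.
    return scratch;
}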
Example No. 22
extern "C" PyObject* PyTuple_New(Py_ssize_t size) noexcept {
    RELEASE_ASSERT(size >= 0, "");

    return BoxedTuple::create(size);
}
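Pyston's two-argument RELEASE_ASSERT carries a message and, unlike assert(), is not compiled out of release builds. A rough sketch of what such a macro might expand to; this is an illustration, not Pyston's actual definition:

#include <cstdio>
#include <cstdlib>

// Illustrative only: a message-carrying assert that survives release builds.
#define RELEASE_ASSERT(cond, msg)                                         \
    do {                                                                  \
        if (!(cond)) {                                                    \
            std::fprintf(stderr, "ASSERT FAILED: %s (%s)\n", #cond, msg); \
            std::abort();                                                 \
        }                                                                 \
    } while (0)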
Example No. 23
void* JITCodeWithCodeRef::dataAddressAtOffset(size_t offset)
{
    RELEASE_ASSERT(m_ref);
    ASSERT(offset <= size()); // use <= instead of < because it is valid to ask for an address at the exclusive end of the code.
    return reinterpret_cast<char*>(m_ref.code().dataLocation()) + offset;
}
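The '<=' in the assertion above follows the usual C++ one-past-the-end rule: the exclusive end address may be formed, just not dereferenced. A tiny standalone analogue:

#include <cassert>
#include <cstddef>

// Standalone analogue of the rule above: forming the one-past-the-end
// address is valid, so the bound check uses '<=' rather than '<'.
char* addressAtOffset(char* base, size_t size, size_t offset)
{
    assert(offset <= size);
    return base + offset;
}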
Example No. 24
void JSLock::setExclusiveThread(std::thread::id threadId)
{
    RELEASE_ASSERT(!m_lockCount && m_ownerThreadID == std::thread::id());
    m_hasExclusiveThread = (threadId != std::thread::id());
    m_ownerThreadID = threadId;
}
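Because a default-constructed std::thread::id compares equal to no running thread, the same setter both installs and clears the exclusive thread. A hypothetical caller, assuming a JSLock with no outstanding locks as the RELEASE_ASSERT above requires:

#include <thread>

void setExclusivity(JSLock& lock, bool exclusive)
{
    // A default-constructed std::thread::id matches no running thread, so
    // it doubles as the "no exclusive thread" sentinel the setter checks.
    lock.setExclusiveThread(exclusive ? std::this_thread::get_id() : std::thread::id());
}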
Example No. 25
bool JITCodeWithCodeRef::contains(void* address)
{
    RELEASE_ASSERT(m_ref);
    return m_ref.executableMemory()->contains(address);
}
Example No. 26
File: code.cpp Project: jmgc/pyston
extern "C" PyCodeObject* PyCode_New(int, int, int, int, PyObject*, PyObject*, PyObject*, PyObject*, PyObject*,
                                    PyObject*, PyObject*, PyObject*, int, PyObject*) noexcept {
    RELEASE_ASSERT(0, "not implemented");
}
Example No. 27
JITCode::CodePtr NativeJITCode::addressForCall(
    VM&, ExecutableBase*, ArityCheckMode, RegisterPreservationMode)
{
    RELEASE_ASSERT(!!m_ref);
    return m_ref.code();
}
Example No. 28
File: code.cpp Project: jmgc/pyston
static Box* filename(Box* b, void*) {
    RELEASE_ASSERT(b->cls == code_cls, "");
    return boxString(static_cast<BoxedCode*>(b)->f->source->fn);
}
Example No. 29
void Heap::collect(SweepToggle sweepToggle)
{
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC starting collection.\n");
#endif
    
    double before = 0;
    if (Options::logGC()) {
        dataLog("[GC", sweepToggle == DoSweep ? " (eager sweep)" : "", ": ");
        before = currentTimeMS();
    }
    
    SamplingRegion samplingRegion("Garbage Collection");
    
    RELEASE_ASSERT(!m_deferralDepth);
    GCPHASE(Collect);
    ASSERT(vm()->currentThreadIsHoldingAPILock());
    RELEASE_ASSERT(vm()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    RELEASE_ASSERT(m_operationInProgress == NoOperation);
    
    m_deferralDepth++; // Make sure that we don't GC in this call.
    m_vm->prepareToDiscardCode();
    m_deferralDepth--; // Decrement the deferral manually, so we don't GC when we do so, since we are already GCing!
    
    m_operationInProgress = Collection;
    m_extraMemoryUsage = 0;

    if (m_activityCallback)
        m_activityCallback->willCollect();

    double lastGCStartTime = WTF::monotonicallyIncreasingTime();
    if (lastGCStartTime - m_lastCodeDiscardTime > minute) {
        deleteAllCompiledCode();
        m_lastCodeDiscardTime = WTF::monotonicallyIncreasingTime();
    }

    {
        GCPHASE(StopAllocation);
        m_objectSpace.stopAllocating();
    }

    markRoots();
    
    {
        GCPHASE(ReapingWeakHandles);
        m_objectSpace.reapWeakSets();
    }

    JAVASCRIPTCORE_GC_MARKED();
    
    {
        GCPHASE(SweepingArrayBuffers);
        m_arrayBuffers.sweep();
    }

    {
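        // Snapshot the marked blocks so the incremental sweeper kicked off
        // below by startSweeping() walks a stable list after collect() returns.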
        m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
        MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
        m_objectSpace.forEachBlock(functor);
    }

    copyBackingStores();

    {
        GCPHASE(FinalizeUnconditionalFinalizers);
        finalizeUnconditionalFinalizers();
    }

    {
        GCPHASE(DeleteCodeBlocks);
        deleteUnmarkedCompiledCode();
    }

    {
        GCPHASE(DeleteSourceProviderCaches);
        m_vm->clearSourceProviderCaches();
    }

    if (sweepToggle == DoSweep) {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        GCPHASE(Sweeping);
        m_objectSpace.sweep();
        m_objectSpace.shrink();
    }

    m_sweeper->startSweeping(m_blockSnapshot);
    m_bytesAbandoned = 0;

    {
        GCPHASE(ResetAllocators);
        m_objectSpace.resetAllocators();
    }
    
    size_t currentHeapSize = sizeAfterCollect();
    if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
        HeapStatistics::exitWithFailure();

    m_sizeAfterLastCollect = currentHeapSize;

    // To avoid pathological GC churn in very small and very large heaps, we set
    // the new allocation limit based on the current size of the heap, with a
    // fixed minimum.
    size_t maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
    m_bytesAllocatedLimit = maxHeapSize - currentHeapSize;

    m_bytesAllocated = 0;
    double lastGCEndTime = WTF::monotonicallyIncreasingTime();
    m_lastGCLength = lastGCEndTime - lastGCStartTime;

    if (Options::recordGCPauseTimes())
        HeapStatistics::recordGCPauseTime(lastGCStartTime, lastGCEndTime);
    RELEASE_ASSERT(m_operationInProgress == Collection);

    m_operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END();

    if (Options::useZombieMode())
        zombifyDeadObjects();

    if (Options::objectsAreImmortal())
        markDeadObjects();

    if (Options::showObjectStatistics())
        HeapStatistics::showObjectStatistics(this);
    
    if (Options::logGC()) {
        double after = currentTimeMS();
        dataLog(after - before, " ms, ", currentHeapSize / 1024, " kb]\n");
    }

#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC finishing collection.\n");
#endif
}
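The sizing policy near the end of collect() turns the post-collection heap size into the next allocation budget. A simplified standalone sketch; the floor and growth factor below are made-up stand-ins for JSC's heap-type- and RAM-based tuning:

#include <algorithm>
#include <cstddef>

// Grow the allocation budget in proportion to the live heap, but never let
// the target dip below a fixed floor, to avoid GC churn at both extremes.
size_t nextAllocationBudget(size_t currentHeapSize)
{
    const size_t minHeapSize = 1 * 1024 * 1024;      // hypothetical floor
    const size_t proportional = currentHeapSize * 2; // hypothetical growth factor
    size_t maxHeapSize = std::max(minHeapSize, proportional);
    return maxHeapSize - currentHeapSize;            // m_bytesAllocatedLimit analogue
}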
Example No. 30
const IDBDatabaseInfo& IDBResultData::databaseInfo() const
{
    RELEASE_ASSERT(m_databaseInfo);
    return *m_databaseInfo;
}
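Both IDB accessors follow the same assert-then-dereference pattern: the caller receives a reference that is either valid or never materializes, because the process has already crashed. A hypothetical caller, for illustration:

// Hypothetical caller: no null check is needed (or even possible) because
// the accessor asserts before dereferencing its member.
void handleOpenSuccess(const IDBResultData& result)
{
    const IDBDatabaseInfo& info = result.databaseInfo(); // crashes if absent
    // ... use info ...
}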