Example #1
File: jsarena.cpp  Project: 3l13/APE_Server
JS_PUBLIC_API(void *)
JS_ArenaAllocate(JSArenaPool *pool, size_t nb)
{
    JSArena **ap, *a, *b;
    jsuword extra, hdrsz, gross;
    void *p;

    /*
     * Search pool from current forward till we find or make enough space.
     *
     * NB: subtract nb from a->limit in the loop condition, instead of adding
     * nb to a->avail, to avoid overflowing a 32-bit address space (possible
     * when running a 32-bit program on a 64-bit system where the kernel maps
     * the heap up against the top of the 32-bit address space).
     *
     * Thanks to Juergen Kreileder <*****@*****.**>, who brought this up in
     * https://bugzilla.mozilla.org/show_bug.cgi?id=279273.
     */
    JS_ASSERT((nb & pool->mask) == 0);
    for (a = pool->current; nb > a->limit || a->avail > a->limit - nb;
         pool->current = a) {
        ap = &a->next;
        if (!*ap) {
            /* Not enough space in pool, so we must malloc. */
            extra = (nb > pool->arenasize) ? HEADER_SIZE(pool) : 0;
            hdrsz = sizeof *a + extra + pool->mask;
            gross = hdrsz + JS_MAX(nb, pool->arenasize);
            if (gross < nb)
                return NULL;
            if (pool->quotap) {
                if (gross > *pool->quotap)
                    return NULL;
                b = (JSArena *) js_malloc(gross);
                if (!b)
                    return NULL;
                *pool->quotap -= gross;
            } else {
                b = (JSArena *) js_malloc(gross);
                if (!b)
                    return NULL;
            }

            b->next = NULL;
            b->limit = (jsuword)b + gross;
            JS_COUNT_ARENA(pool,++);
            COUNT(pool, nmallocs);

            /* If oversized, store ap in the header, just before a->base. */
            *ap = a = b;
            JS_ASSERT(gross <= JS_UPTRDIFF(a->limit, a));
            if (extra) {
                a->base = a->avail =
                    ((jsuword)a + hdrsz) & ~HEADER_BASE_MASK(pool);
                SET_HEADER(pool, a, ap);
            } else {
                a->base = a->avail = JS_ARENA_ALIGN(pool, a + 1);
            }
            continue;
        }
        a = *ap;                                /* move to next arena */
    }

    /* A suitable arena was found or created: carve nb bytes out of it. */
    p = (void *)a->avail;
    a->avail += nb;
    JS_ASSERT(a->avail <= a->limit);
    return p;
}
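
The NB comment above is the crux of the loop condition: in a 32-bit process whose heap is mapped up against the top of the address space, a->avail + nb can wrap around and falsely pass a bounds check, while a->limit - nb cannot wrap once nb <= a->limit. A minimal standalone sketch of the difference (values made up to force the wrap; not part of the source):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uintptr_t limit = UINTPTR_MAX - 8;   /* arena mapped up against the top of memory */
    uintptr_t avail = limit - 4;         /* only 4 bytes left in the arena */
    size_t    nb    = 64;                /* request that clearly does not fit */

    /* Adding wraps around and wrongly reports that the request fits: */
    printf("avail + nb <= limit              -> %d\n", (int)(avail + nb <= limit));
    /* Subtracting, as the loop condition above does, cannot wrap: */
    printf("nb > limit || avail > limit - nb -> %d\n",
           (int)(nb > limit || avail > limit - nb));
    return 0;
}
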
Example #2
int64_t
DSTOffsetCache::getDSTOffsetMilliseconds(int64_t localTimeMilliseconds, JSContext *cx)
{
    sanityCheck();

    int64_t localTimeSeconds = localTimeMilliseconds / MILLISECONDS_PER_SECOND;

    if (localTimeSeconds > MAX_UNIX_TIMET) {
        localTimeSeconds = MAX_UNIX_TIMET;
    } else if (localTimeSeconds < 0) {
        /* Go ahead a day to make localtime work (does not work with 0). */
        localTimeSeconds = SECONDS_PER_DAY;
    }

    /*
     * NB: Be aware of the initial range values when making changes to this
     *     code: the first call to this method, with those initial range
     *     values, must result in a cache miss.
     */

    if (rangeStartSeconds <= localTimeSeconds &&
        localTimeSeconds <= rangeEndSeconds) {
        return offsetMilliseconds;
    }

    if (oldRangeStartSeconds <= localTimeSeconds &&
        localTimeSeconds <= oldRangeEndSeconds) {
        return oldOffsetMilliseconds;
    }

    oldOffsetMilliseconds = offsetMilliseconds;
    oldRangeStartSeconds = rangeStartSeconds;
    oldRangeEndSeconds = rangeEndSeconds;

    if (rangeStartSeconds <= localTimeSeconds) {
        int64_t newEndSeconds = JS_MIN(rangeEndSeconds + RANGE_EXPANSION_AMOUNT, MAX_UNIX_TIMET);
        if (newEndSeconds >= localTimeSeconds) {
            int64_t endOffsetMilliseconds = computeDSTOffsetMilliseconds(newEndSeconds);
            if (endOffsetMilliseconds == offsetMilliseconds) {
                rangeEndSeconds = newEndSeconds;
                return offsetMilliseconds;
            }

            offsetMilliseconds = computeDSTOffsetMilliseconds(localTimeSeconds);
            if (offsetMilliseconds == endOffsetMilliseconds) {
                rangeStartSeconds = localTimeSeconds;
                rangeEndSeconds = newEndSeconds;
            } else {
                rangeEndSeconds = localTimeSeconds;
            }
            return offsetMilliseconds;
        }

        offsetMilliseconds = computeDSTOffsetMilliseconds(localTimeSeconds);
        rangeStartSeconds = rangeEndSeconds = localTimeSeconds;
        return offsetMilliseconds;
    }

    int64_t newStartSeconds = JS_MAX(rangeStartSeconds - RANGE_EXPANSION_AMOUNT, 0);
    if (newStartSeconds <= localTimeSeconds) {
        int64_t startOffsetMilliseconds = computeDSTOffsetMilliseconds(newStartSeconds);
        if (startOffsetMilliseconds == offsetMilliseconds) {
            rangeStartSeconds = newStartSeconds;
            return offsetMilliseconds;
        }

        offsetMilliseconds = computeDSTOffsetMilliseconds(localTimeSeconds);
        if (offsetMilliseconds == startOffsetMilliseconds) {
            rangeStartSeconds = newStartSeconds;
            rangeEndSeconds = localTimeSeconds;
        } else {
            rangeStartSeconds = localTimeSeconds;
        }
        return offsetMilliseconds;
    }

    rangeStartSeconds = rangeEndSeconds = localTimeSeconds;
    offsetMilliseconds = computeDSTOffsetMilliseconds(localTimeSeconds);
    return offsetMilliseconds;
}
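
The subtle part above is the miss handling near the cached range: instead of recomputing for every nearby timestamp, the cache probes the offset a fixed distance past the current range and, when it is unchanged, simply widens the range (splitting it at the transition otherwise). A much-simplified standalone sketch of the widening idea; it only widens forward and omits the transition-splitting and the previous-range fallback, and its names and constants are illustrative rather than the SpiderMonkey values:

#include <stdint.h>

/* Stand-in for computeDSTOffsetMilliseconds(): pretend the offset flips every ~180 days. */
static int64_t expensiveOffsetFor(int64_t seconds)
{
    return ((seconds / (180 * 24 * 60 * 60)) % 2) ? 3600000 : 0;
}

int64_t cachedOffsetFor(int64_t seconds)
{
    static const int64_t expand = 30 * 24 * 60 * 60;  /* widen by ~30 days per probe */
    static int64_t start = 1, end = 0, offset = 0;    /* empty range: first call misses */

    if (start <= seconds && seconds <= end)
        return offset;                                 /* hit: no recomputation */

    if (start <= end && seconds > end && seconds <= end + expand &&
        expensiveOffsetFor(end + expand) == offset) {
        end += expand;                                 /* same offset nearby: just widen */
        return offset;
    }

    offset = expensiveOffsetFor(seconds);              /* genuine miss: recompute */
    start = end = seconds;
    return offset;
}
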
Example #3
int64_t
PRMJ_Now(void)
{
    static int nCalls = 0;
    long double lowresTime, highresTimerValue;
    FILETIME ft;
    LARGE_INTEGER now;
    JSBool calibrated = JS_FALSE;
    JSBool needsCalibration = JS_FALSE;
    int64_t returnedTime;
    long double cachedOffset = 0.0;

    /* To avoid regressing startup time (where high resolution is likely
       not needed), give the old behavior for the first few calls.
       This does not appear to be needed on Vista as the timeBegin/timeEndPeriod
       calls seem to immediately take effect. */
    int thiscall = JS_ATOMIC_INCREMENT(&nCalls);
    if (thiscall <= CALIBRATION_DELAY_COUNT) {
        LowResTime(&ft);
        return (FILETIME2INT64(ft)-win2un)/10L;
    }

    /* For non threadsafe platforms, NowInit is not necessary */
#ifdef JS_THREADSAFE
    PR_CallOnce(&calibrationOnce, NowInit);
#endif
    do {
        if (!calibration.calibrated || needsCalibration) {
            MUTEX_LOCK(&calibration.calibration_lock);
            MUTEX_LOCK(&calibration.data_lock);

            /* Recalibrate only if no one else did before us */
            if(calibration.offset == cachedOffset) {
                /* Since calibration can take a while, make any other
                   threads immediately wait */
                MUTEX_SETSPINCOUNT(&calibration.data_lock, 0);

                NowCalibrate();

                calibrated = JS_TRUE;

                /* Restore spin count */
                MUTEX_SETSPINCOUNT(&calibration.data_lock, DATALOCK_SPINCOUNT);
            }
            MUTEX_UNLOCK(&calibration.data_lock);
            MUTEX_UNLOCK(&calibration.calibration_lock);
        }


        /* Calculate a low resolution time */
        LowResTime(&ft);
        lowresTime = 0.1*(long double)(FILETIME2INT64(ft) - win2un);

        if (calibration.freq > 0.0) {
            long double highresTime, diff;

            DWORD timeAdjustment, timeIncrement;
            BOOL timeAdjustmentDisabled;

            /* Default to 15.625 ms if the syscall fails */
            long double skewThreshold = 15625.25;
            /* Grab high resolution time */
            QueryPerformanceCounter(&now);
            highresTimerValue = (long double)now.QuadPart;

            MUTEX_LOCK(&calibration.data_lock);
            highresTime = calibration.offset + PRMJ_USEC_PER_SEC*
                 (highresTimerValue-calibration.timer_offset)/calibration.freq;
            cachedOffset = calibration.offset;

            /* On some dual processor/core systems, we might get an earlier time
               so we cache the last time that we returned */
            calibration.last = JS_MAX(calibration.last, int64_t(highresTime));
            returnedTime = calibration.last;
            MUTEX_UNLOCK(&calibration.data_lock);

            /* Rather than assume the NT kernel ticks every 15.6ms, ask it */
            if (GetSystemTimeAdjustment(&timeAdjustment,
                                        &timeIncrement,
                                        &timeAdjustmentDisabled)) {
                if (timeAdjustmentDisabled) {
                    /* timeAdjustment is in units of 100ns */
                    skewThreshold = timeAdjustment/10.0;
                } else {
                    /* timeIncrement is in units of 100ns */
                    skewThreshold = timeIncrement/10.0;
                }
            }

            /* Check for clock skew */
            diff = lowresTime - highresTime;

            /* For some reason that I have not determined, the skew can be
               up to twice a kernel tick. This does not seem to happen by
               itself, but I have only seen it triggered by another program
               doing some kind of file I/O. The symptoms are a negative diff
               followed by an equally large positive diff. */
            if (fabs(diff) > 2*skewThreshold) {
                /*fprintf(stderr,"Clock skew detected (diff = %f)!\n", diff);*/

                if (calibrated) {
                    /* If we already calibrated once this instance, and the
                       clock is still skewed, then either the processor(s) are
                       wildly changing clockspeed or the system is so busy that
                       we get switched out for long periods of time. In either
                       case, it would be infeasible to make use of high
                       resolution results for anything, so let's resort to old
                       behavior for this call. It's possible that in the
                       future, the user will want the high resolution timer, so
                       we don't disable it entirely. */
                    returnedTime = int64_t(lowresTime);
                    needsCalibration = JS_FALSE;
                } else {
                    /* It is possible that when we recalibrate, we will return a
                       value less than what we have returned before; this is
                       unavoidable. We cannot tell the difference between a
                       faulty QueryPerformanceCounter implementation and user
                       changes to the operating system time. Since we must
                       respect user changes to the operating system time, we
                       cannot maintain the invariant that Date.now() never
                       decreases; the old implementation has this behavior as
                       well. */
                    needsCalibration = JS_TRUE;
                }
            } else {
                /* No detectable clock skew */
                returnedTime = int64_t(highresTime);
                needsCalibration = JS_FALSE;
            }
        } else {
            /* No high resolution timer is available, so fall back */
            returnedTime = int64_t(lowresTime);
        }
    } while (needsCalibration);

    return returnedTime;
}
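
The early-return path above also fixes the unit convention: FILETIME values are 100 ns ticks counted from 1601, win2un rebases them to the Unix epoch, and dividing by 10 yields the microseconds that PRMJ_Now() reports. A small self-contained sketch of that conversion; the constant is the standard 1601-to-1970 offset, and the source's FILETIME2INT64/win2un are assumed to encode the same thing:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    const int64_t win2un = 0x19DB1DED53E8000LL;    /* 100 ns ticks from 1601-01-01 to 1970-01-01 */
    int64_t fileTimeTicks = win2un + 600000000LL;  /* pretend "now" is 60 s past the Unix epoch */
    int64_t microseconds = (fileTimeTicks - win2un) / 10;   /* 100 ns ticks -> microseconds */
    printf("%lld us since the Unix epoch\n", (long long)microseconds);  /* prints 60000000 */
    return 0;
}
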
Example #4
JSInt64
DSTOffsetCache::getDSTOffsetMilliseconds(JSInt64 localTimeMilliseconds, JSContext *cx)
{
    sanityCheck();
    noteOffsetCalculation();

    JSInt64 localTimeSeconds = localTimeMilliseconds / MILLISECONDS_PER_SECOND;

    if (localTimeSeconds > MAX_UNIX_TIMET) {
        localTimeSeconds = MAX_UNIX_TIMET;
    } else if (localTimeSeconds < 0) {
        /* Go ahead a day to make localtime work (does not work with 0). */
        localTimeSeconds = SECONDS_PER_DAY;
    }

    /*
     * NB: Be aware of the initial range values when making changes to this
     *     code: the first call to this method, with those initial range
     *     values, must result in a cache miss.
     */

    if (rangeStartSeconds <= localTimeSeconds &&
        localTimeSeconds <= rangeEndSeconds) {
        noteCacheHit();
        return offsetMilliseconds;
    }

    if (oldRangeStartSeconds <= localTimeSeconds &&
        localTimeSeconds <= oldRangeEndSeconds) {
        noteCacheHit();
        return oldOffsetMilliseconds;
    }

    oldOffsetMilliseconds = offsetMilliseconds;
    oldRangeStartSeconds = rangeStartSeconds;
    oldRangeEndSeconds = rangeEndSeconds;

    if (rangeStartSeconds <= localTimeSeconds) {
        JSInt64 newEndSeconds = JS_MIN(rangeEndSeconds + RANGE_EXPANSION_AMOUNT, MAX_UNIX_TIMET);
        if (newEndSeconds >= localTimeSeconds) {
            JSInt64 endOffsetMilliseconds = computeDSTOffsetMilliseconds(newEndSeconds);
            if (endOffsetMilliseconds == offsetMilliseconds) {
                noteCacheMissIncrease();
                rangeEndSeconds = newEndSeconds;
                return offsetMilliseconds;
            }

            offsetMilliseconds = computeDSTOffsetMilliseconds(localTimeSeconds);
            if (offsetMilliseconds == endOffsetMilliseconds) {
                noteCacheMissIncreasingOffsetChangeUpper();
                rangeStartSeconds = localTimeSeconds;
                rangeEndSeconds = newEndSeconds;
            } else {
                noteCacheMissIncreasingOffsetChangeExpand();
                rangeEndSeconds = localTimeSeconds;
            }
            return offsetMilliseconds;
        }

        noteCacheMissLargeIncrease();
        offsetMilliseconds = computeDSTOffsetMilliseconds(localTimeSeconds);
        rangeStartSeconds = rangeEndSeconds = localTimeSeconds;
        return offsetMilliseconds;
    }

    JSInt64 newStartSeconds = JS_MAX(rangeStartSeconds - RANGE_EXPANSION_AMOUNT, 0);
    if (newStartSeconds <= localTimeSeconds) {
        JSInt64 startOffsetMilliseconds = computeDSTOffsetMilliseconds(newStartSeconds);
        if (startOffsetMilliseconds == offsetMilliseconds) {
            noteCacheMissDecrease();
            rangeStartSeconds = newStartSeconds;
            return offsetMilliseconds;
        }

        offsetMilliseconds = computeDSTOffsetMilliseconds(localTimeSeconds);
        if (offsetMilliseconds == startOffsetMilliseconds) {
            noteCacheMissDecreasingOffsetChangeLower();
            rangeStartSeconds = newStartSeconds;
            rangeEndSeconds = localTimeSeconds;
        } else {
            noteCacheMissDecreasingOffsetChangeExpand();
            rangeStartSeconds = localTimeSeconds;
        }
        return offsetMilliseconds;
    }

    noteCacheMissLargeDecrease();
    rangeStartSeconds = rangeEndSeconds = localTimeSeconds;
    offsetMilliseconds = computeDSTOffsetMilliseconds(localTimeSeconds);
    return offsetMilliseconds;
}
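
This variant is the same algorithm as example #2, instrumented with noteCacheHit()/noteCacheMiss*() hooks that tag every outcome of the lookup. A hypothetical sketch of the kind of counters such hooks usually feed; the struct and field names below are illustrative, not SpiderMonkey's metering code, and in a non-metered build hooks like these would typically compile to nothing:

#include <stdio.h>

/* Illustrative per-outcome counters for the hooks above (names hypothetical). */
struct DSTCacheStats {
    unsigned calculations;   /* noteOffsetCalculation() */
    unsigned hits;           /* noteCacheHit() */
    unsigned misses;         /* the various noteCacheMiss*() outcomes */
};

static void reportStats(const struct DSTCacheStats *stats)
{
    printf("DST cache: %u calls, %u hits, %u misses\n",
           stats->calculations, stats->hits, stats->misses);
}
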
Example #5
File: jsarena.c  Project: Kitiara/UOX3
JS_PUBLIC_API(void *)
JS_ArenaAllocate(JSArenaPool *pool, size_t nb)
{
    JSArena **ap, **bp, *a, *b;
    jsuword extra, hdrsz, gross, sz;
    void *p;

    /*
     * Search pool from current forward till we find or make enough space.
     *
     * NB: subtract nb from a->limit in the loop condition, instead of adding
     * nb to a->avail, to avoid overflowing a 32-bit address space (possible
     * when running a 32-bit program on a 64-bit system where the kernel maps
     * the heap up against the top of the 32-bit address space).
     *
     * Thanks to Juergen Kreileder <*****@*****.**>, who brought this up in
     * https://bugzilla.mozilla.org/show_bug.cgi?id=279273.
     */
    JS_ASSERT((nb & pool->mask) == 0);
    for (a = pool->current; nb > a->limit || a->avail > a->limit - nb;
         pool->current = a) {
        ap = &a->next;
        if (!*ap) {
            /* Not enough space in pool -- try to reclaim a free arena. */
            extra = (nb > pool->arenasize) ? HEADER_SIZE(pool) : 0;
            hdrsz = sizeof *a + extra + pool->mask;
            gross = hdrsz + JS_MAX(nb, pool->arenasize);
            if (gross < nb)
                return NULL;

            bp = &arena_freelist;
            JS_ACQUIRE_LOCK(arena_freelist_lock);
            while ((b = *bp) != NULL) {
                /*
                 * Insist on exact arenasize match to avoid leaving alloc'able
                 * space after an oversized allocation as it grows.
                 */
                sz = JS_UPTRDIFF(b->limit, b);
                if (sz == gross) {
                    *bp = b->next;
                    JS_RELEASE_LOCK(arena_freelist_lock);
                    b->next = NULL;
                    COUNT(pool, nreclaims);
                    goto claim;
                }
                bp = &b->next;
            }

            /* Nothing big enough on the freelist, so we must malloc. */
            JS_RELEASE_LOCK(arena_freelist_lock);
            b = (JSArena *) malloc(gross);
            if (!b)
                return NULL;
            b->next = NULL;
            b->limit = (jsuword)b + gross;
            JS_COUNT_ARENA(pool,++);
            COUNT(pool, nmallocs);

        claim:
            /* If oversized, store ap in the header, just before a->base. */
            *ap = a = b;
            JS_ASSERT(gross <= JS_UPTRDIFF(a->limit, a));
            if (extra) {
                a->base = a->avail =
                    ((jsuword)a + hdrsz) & ~HEADER_BASE_MASK(pool);
                SET_HEADER(pool, a, ap);
            } else {
                a->base = a->avail = JS_ARENA_ALIGN(pool, a + 1);
            }
            continue;
        }
        a = *ap;                                /* move to next arena */
    }

    /* A suitable arena was found, reclaimed, or created: carve nb bytes out of it. */
    p = (void *)a->avail;
    a->avail += nb;
    JS_ASSERT(a->avail <= a->limit);
    return p;
}
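
What sets this older jsarena.c variant apart from example #1 is the reclaim pass: before falling back to malloc it walks the global arena_freelist, under arena_freelist_lock, looking for an arena whose gross size matches exactly. A standalone sketch of that walk; the types and names are illustrative and the locking is omitted:

#include <stddef.h>

struct FreeBlock {
    struct FreeBlock *next;
    size_t size;             /* gross size recorded when the block was freed */
};

/*
 * Walk the list through a pointer-to-pointer so the match can be spliced out in
 * place, and insist on an exact size so an oversized block leaves no unusable slack.
 */
struct FreeBlock *reclaimExact(struct FreeBlock **freelist, size_t wanted)
{
    struct FreeBlock **bp, *b;

    for (bp = freelist; (b = *bp) != NULL; bp = &b->next) {
        if (b->size == wanted) {
            *bp = b->next;   /* unlink without tracking a separate "prev" pointer */
            b->next = NULL;
            return b;
        }
    }
    return NULL;             /* nothing suitable: the caller falls back to malloc */
}
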
Example #6
File: jsiter.cpp  Project: ahadzi/celtx
/*
 * Called from the JSOP_GENERATOR case in the interpreter, with fp referring
 * to the frame by which the generator function was activated.  Create a new
 * JSGenerator object, which contains its own JSStackFrame that we populate
 * from *fp.  We know that upon return, the JSOP_GENERATOR opcode will return
 * from the activation in fp, so we can steal away fp->callobj and fp->argsobj
 * if they are non-null.
 */
JSObject *
js_NewGenerator(JSContext *cx, JSStackFrame *fp)
{
    JSObject *obj;
    uintN argc, nargs, nvars, nslots;
    JSGenerator *gen;
    jsval *slots;

    /* After the following return, failing control flow must goto bad. */
    obj = js_NewObject(cx, &js_GeneratorClass, NULL, NULL, 0);
    if (!obj)
        return NULL;

    /* Load and compute stack slot counts. */
    argc = fp->argc;
    nargs = JS_MAX(argc, fp->fun->nargs);
    nvars = fp->fun->u.i.nvars;
    nslots = 2 + nargs + fp->script->nslots;

    /* Allocate obj's private data struct. */
    gen = (JSGenerator *)
          JS_malloc(cx, sizeof(JSGenerator) + (nslots - 1) * sizeof(jsval));
    if (!gen)
        goto bad;

    gen->obj = obj;

    /* Steal away objects reflecting fp and point them at gen->frame. */
    gen->frame.callobj = fp->callobj;
    if (fp->callobj) {
        JS_SetPrivate(cx, fp->callobj, &gen->frame);
        fp->callobj = NULL;
    }
    gen->frame.argsobj = fp->argsobj;
    if (fp->argsobj) {
        JS_SetPrivate(cx, fp->argsobj, &gen->frame);
        fp->argsobj = NULL;
    }

    /* These two references can be shared with fp until it goes away. */
    gen->frame.varobj = fp->varobj;
    gen->frame.thisp = fp->thisp;

    /* Copy call-invariant script and function references. */
    gen->frame.script = fp->script;
    gen->frame.callee = fp->callee;
    gen->frame.fun = fp->fun;

    /* Use slots to carve space out of gen->slots. */
    slots = gen->slots;
    gen->arena.next = NULL;
    gen->arena.base = (jsuword) slots;
    gen->arena.limit = gen->arena.avail = (jsuword) (slots + nslots);

    /* Copy rval, argv and vars. */
    gen->frame.rval = fp->rval;
    memcpy(slots, fp->argv - 2, (2 + nargs) * sizeof(jsval));
    gen->frame.argc = nargs;
    gen->frame.argv = slots + 2;
    slots += 2 + nargs;
    memcpy(slots, fp->slots, fp->script->nfixed * sizeof(jsval));

    /* Initialize or copy virtual machine state. */
    gen->frame.down = NULL;
    gen->frame.annotation = NULL;
    gen->frame.scopeChain = fp->scopeChain;

    gen->frame.slots = slots;
    JS_ASSERT(StackBase(fp) == fp->regs->sp);
    gen->savedRegs.sp = slots + fp->script->nfixed;
    gen->savedRegs.pc = fp->regs->pc;
    gen->frame.regs = &gen->savedRegs;

    /* Copy remaining state (XXX sharp* and xml* should be local vars). */
    gen->frame.sharpDepth = 0;
    gen->frame.sharpArray = NULL;
    gen->frame.flags = (fp->flags & ~JSFRAME_ROOTED_ARGV) | JSFRAME_GENERATOR;
    gen->frame.dormantNext = NULL;
    gen->frame.xmlNamespace = NULL;
    gen->frame.blockChain = NULL;

    /* Note that gen is newborn. */
    gen->state = JSGEN_NEWBORN;

    if (!JS_SetPrivate(cx, obj, gen)) {
        JS_free(cx, gen);
        goto bad;
    }
    return obj;

  bad:
    cx->weakRoots.newborn[GCX_OBJECT] = NULL;
    return NULL;
}
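
Reconstructed from the copies above (an editorial illustration, not text from the source), the single heap block behind gen->slots ends up holding the whole activation:

    slots[0]                  callee        (copied from fp->argv[-2])
    slots[1]                  this          (copied from fp->argv[-1])
    slots[2 .. 2+nargs)       arguments     (gen->frame.argv = slots + 2)
    slots[2+nargs .. nslots)  fixed vars, then operand-stack space
                              (gen->frame.slots points here;
                               gen->savedRegs.sp = gen->frame.slots + fp->script->nfixed)

with nslots = 2 + nargs + fp->script->nslots, exactly the count used to size the JS_malloc call.
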
Example #7
JS_REQUIRES_STACK AbortableRecordingStatus
TraceRecorder::slurpDownFrames(jsbytecode* return_pc)
{
    /* Missing - no go */
    if (cx->fp->argc != cx->fp->fun->nargs)
        RETURN_STOP_A("argc != nargs");

    LIns* argv_ins;
    unsigned frameDepth;
    unsigned downPostSlots;

    JSStackFrame* fp = cx->fp;
    LIns* fp_ins = addName(lir->insLoad(LIR_ldp, cx_ins, offsetof(JSContext, fp)), "fp");

    /*
     * When first emitting slurp code, do so against the down frame. After
     * popping the interpreter frame, it is illegal to resume here, as the
     * down frame has been moved up. So all this code should be skipped if
     * anchoring off such an exit.
     */
    if (!anchor || anchor->exitType != RECURSIVE_SLURP_FAIL_EXIT) {
        fp_ins = addName(lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, down)), "downFp");
        fp = fp->down;

        argv_ins = addName(lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, argv)), "argv");

        /* If recovering from a SLURP_MISMATCH, all of this is unnecessary. */
        if (!anchor || anchor->exitType != RECURSIVE_SLURP_MISMATCH_EXIT) {
            /* fp->down should not be NULL. */
            guard(false, lir->ins_peq0(fp_ins), RECURSIVE_LOOP_EXIT);

            /* fp->down->argv should not be NULL. */
            guard(false, lir->ins_peq0(argv_ins), RECURSIVE_LOOP_EXIT);

            /*
             * Guard on the script being the same. This might seem unnecessary,
             * but it lets the recursive loop end cleanly if it doesn't match.
             * With only the pc check, it is harder to differentiate between
             * end-of-recursion and recursion-returns-to-different-pc.
             */
            guard(true,
                  lir->ins2(LIR_peq,
                            addName(lir->insLoad(LIR_ldp,
                                                 fp_ins,
                                                 offsetof(JSStackFrame, script)),
                                    "script"),
                            INS_CONSTPTR(cx->fp->down->script)),
                  RECURSIVE_LOOP_EXIT);
        }

        /* fp->down->regs->pc should be == pc. */
        guard(true,
              lir->ins2(LIR_peq,
                        lir->insLoad(LIR_ldp,
                                     addName(lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, regs)),
                                             "regs"),
                                     offsetof(JSFrameRegs, pc)),
                        INS_CONSTPTR(return_pc)),
              RECURSIVE_SLURP_MISMATCH_EXIT);

        /* fp->down->argc should be == argc. */
        guard(true,
              lir->ins2(LIR_eq,
                        addName(lir->insLoad(LIR_ld, fp_ins, offsetof(JSStackFrame, argc)),
                                "argc"),
                        INS_CONST(cx->fp->argc)),
              MISMATCH_EXIT);

        /* Pop the interpreter frame. */
        LIns* args[] = { lirbuf->state, cx_ins };
        guard(false, lir->ins_eq0(lir->insCall(&js_PopInterpFrame_ci, args)), MISMATCH_EXIT);

        /* Compute slots for the down frame. */
        downPostSlots = NativeStackSlots(cx, 1) - NativeStackSlots(cx, 0);
        frameDepth = 1;
    } else {
        /* Note: loading argv from fp, not fp->down. */
        argv_ins = addName(lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, argv)), "argv");

        /* Slots for this frame, minus the return value. */
        downPostSlots = NativeStackSlots(cx, 0) - 1;
        frameDepth = 0;
    }

    /*
     * This is a special exit used as a template for the stack-slurping code.
     * LeaveTree will ignore all but the final slot, which contains the return
     * value. The slurpSlot variable keeps track of the last slot that has been
     * unboxed, so as to avoid re-unboxing when taking a SLURP_FAIL exit.
     */
    unsigned numGlobalSlots = tree->globalSlots->length();
    unsigned safeSlots = NativeStackSlots(cx, frameDepth) + 1 + numGlobalSlots;
    jsbytecode* recursive_pc = return_pc + JSOP_CALL_LENGTH;
    VMSideExit* exit = (VMSideExit*)
        traceMonitor->traceAlloc->alloc(sizeof(VMSideExit) + sizeof(TraceType) * safeSlots);
    memset(exit, 0, sizeof(VMSideExit));
    exit->pc = (jsbytecode*)recursive_pc;
    exit->from = fragment;
    exit->exitType = RECURSIVE_SLURP_FAIL_EXIT;
    exit->numStackSlots = downPostSlots + 1;
    exit->numGlobalSlots = numGlobalSlots;
    exit->sp_adj = ((downPostSlots + 1) * sizeof(double)) - tree->nativeStackBase;
    exit->recursive_pc = recursive_pc;

    /*
     * Build the exit typemap. This may capture extra types, but they are
     * thrown away.
     */
    TraceType* typeMap = exit->stackTypeMap();
    jsbytecode* oldpc = cx->fp->regs->pc;
    cx->fp->regs->pc = exit->pc;
    CaptureStackTypes(cx, frameDepth, typeMap);
    cx->fp->regs->pc = oldpc;
    if (!anchor || anchor->exitType != RECURSIVE_SLURP_FAIL_EXIT)
        typeMap[downPostSlots] = determineSlotType(&stackval(-1));
    else
        typeMap[downPostSlots] = anchor->stackTypeMap()[anchor->numStackSlots - 1];
    determineGlobalTypes(&typeMap[exit->numStackSlots]);
#if defined JS_JIT_SPEW
    TreevisLogExit(cx, exit);
#endif

    /*
     * Return values are tricky because there are two cases. Anchoring off a
     * slurp failure (the second case) means the return value has already been
     * moved. However it can still be promoted to link trees together, so we
     * load it from the new location.
     *
     * In all other cases, the return value lives in the tracker and it can be
     * grabbed safely.
     */
    LIns* rval_ins;
    TraceType returnType = exit->stackTypeMap()[downPostSlots];
    if (!anchor || anchor->exitType != RECURSIVE_SLURP_FAIL_EXIT) {
        rval_ins = get(&stackval(-1));
        if (returnType == TT_INT32) {
            JS_ASSERT(determineSlotType(&stackval(-1)) == TT_INT32);
            JS_ASSERT(isPromoteInt(rval_ins));
            rval_ins = demote(lir, rval_ins);
        }
        /*
         * The return value must be written out early, before slurping can fail,
         * otherwise it will not be available when there's a type mismatch.
         */
        lir->insStorei(rval_ins, lirbuf->sp, exit->sp_adj - sizeof(double));
    } else {
        switch (returnType)
        {
          case TT_PSEUDOBOOLEAN:
          case TT_INT32:
            rval_ins = lir->insLoad(LIR_ld, lirbuf->sp, exit->sp_adj - sizeof(double));
            break;
          case TT_DOUBLE:
            rval_ins = lir->insLoad(LIR_ldf, lirbuf->sp, exit->sp_adj - sizeof(double));
            break;
          case TT_FUNCTION:
          case TT_OBJECT:
          case TT_STRING:
          case TT_NULL:
            rval_ins = lir->insLoad(LIR_ldp, lirbuf->sp, exit->sp_adj - sizeof(double));
            break;
          default:
            JS_NOT_REACHED("unknown type");
            RETURN_STOP_A("unknown type"); 
        }
    }

    /* Slurp */
    SlurpInfo info;
    info.curSlot = 0;
    info.exit = exit;
    info.typeMap = typeMap;
    info.slurpFailSlot = (anchor && anchor->exitType == RECURSIVE_SLURP_FAIL_EXIT) ?
                         anchor->slurpFailSlot : 0;

    /* callee */
    slurpSlot(lir->insLoad(LIR_ldp, argv_ins, -2 * ptrdiff_t(sizeof(jsval))),
              &fp->argv[-2],
              &info);
    /* this */
    slurpSlot(lir->insLoad(LIR_ldp, argv_ins, -1 * ptrdiff_t(sizeof(jsval))),
              &fp->argv[-1],
              &info);
    /* args[0..n] */
    for (unsigned i = 0; i < JS_MAX(fp->argc, fp->fun->nargs); i++)
        slurpSlot(lir->insLoad(LIR_ldp, argv_ins, i * sizeof(jsval)), &fp->argv[i], &info);
    /* argsobj */
    slurpSlot(addName(lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, argsobj)), "argsobj"),
              &fp->argsobj,
              &info);
    /* scopeChain */
    slurpSlot(addName(lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, scopeChain)), "scopeChain"),
              (jsval*) &fp->scopeChain,
              &info);
    /* vars */
    LIns* slots_ins = addName(lir->insLoad(LIR_ldp, fp_ins, offsetof(JSStackFrame, slots)),
                              "slots");
    for (unsigned i = 0; i < fp->script->nfixed; i++)
        slurpSlot(lir->insLoad(LIR_ldp, slots_ins, i * sizeof(jsval)), &fp->slots[i], &info);
    /* stack vals */
    unsigned nfixed = fp->script->nfixed;
    jsval* stack = StackBase(fp);
    LIns* stack_ins = addName(lir->ins2(LIR_piadd,
                                        slots_ins,
                                        INS_CONSTWORD(nfixed * sizeof(jsval))),
                              "stackBase");
    size_t limit = size_t(fp->regs->sp - StackBase(fp));
    if (anchor && anchor->exitType == RECURSIVE_SLURP_FAIL_EXIT)
        limit--;
    else
        limit -= fp->fun->nargs + 2;
    for (size_t i = 0; i < limit; i++)
        slurpSlot(lir->insLoad(LIR_ldp, stack_ins, i * sizeof(jsval)), &stack[i], &info);

    JS_ASSERT(info.curSlot == downPostSlots);

    /* Jump back to the start */
    exit = copy(exit);
    exit->exitType = UNSTABLE_LOOP_EXIT;
#if defined JS_JIT_SPEW
    TreevisLogExit(cx, exit);
#endif

    RecursiveSlotMap slotMap(*this, downPostSlots, rval_ins);
    for (unsigned i = 0; i < downPostSlots; i++)
        slotMap.addSlot(typeMap[i]);
    slotMap.addSlot(&stackval(-1), typeMap[downPostSlots]);
    VisitGlobalSlots(slotMap, cx, *tree->globalSlots);
    debug_only_print0(LC_TMTracer, "Compiling up-recursive slurp...\n");
    exit = copy(exit);
    if (exit->recursive_pc == fragment->root->ip)
        exit->exitType = UNSTABLE_LOOP_EXIT;
    else
        exit->exitType = RECURSIVE_UNLINKED_EXIT;
    debug_only_printf(LC_TMTreeVis, "TREEVIS CHANGEEXIT EXIT=%p TYPE=%s\n", (void*)exit,
                      getExitName(exit->exitType));
    JS_ASSERT(tree->recursion >= Recursion_Unwinds);
    return closeLoop(slotMap, exit);
}