static void
FilterSetter(JSContext *cx, JSObject *wrapper, jsid id, js::PropertyDescriptor *desc)
{
    JSErrorReporter reporter = JS_SetErrorReporter(cx, NULL);
    Permission perm = DenyAccess;
    bool setAllowed = Policy::check(cx, wrapper, id, Wrapper::SET, perm);
    JS_ASSERT_IF(setAllowed, perm != DenyAccess);
    if (!setAllowed || JS_IsExceptionPending(cx)) {
        // On branch, we don't have a good way to differentiate between exceptions
        // we want to throw and exceptions we want to squash. Squash them all.
        JS_ClearPendingException(cx);
        desc->setter = nullptr;
    }
    JS_SetErrorReporter(cx, reporter);
}
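Every example on this page relies on the same contract: JS_ASSERT_IF(cond, expr) evaluates expr only when cond is true, i.e. it asserts the implication cond => expr, and it compiles away in non-DEBUG builds. A minimal stand-alone sketch of that behaviour (MY_ASSERT_IF is a made-up name used here so it does not collide with the real macro):

#include <assert.h>

/* Sketch only: assert expr whenever cond holds; otherwise do nothing. */
#define MY_ASSERT_IF(cond, expr)                                              \
    do {                                                                      \
        if (cond)                                                             \
            assert(expr);                                                     \
    } while (0)

In FilterSetter above this reads: whenever Policy::check reported success, the permission must not still be DenyAccess.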
Example #2
/* void push (in JSContext cx); */
NS_IMETHODIMP
XPCJSContextStack::Push(JSContext * cx)
{
    JS_ASSERT_IF(cx, JS_GetContextThread(cx));
    if(!mStack.AppendElement(cx))
        return NS_ERROR_OUT_OF_MEMORY;
    if(mStack.Length() > 1)
    {
        XPCJSContextInfo & e = mStack[mStack.Length() - 2];
        if(e.cx)
        {
            if(e.cx == cx)
            {
                nsIScriptSecurityManager* ssm = XPCWrapper::GetSecurityManager();
                if(ssm)
                {
                    nsIPrincipal* globalObjectPrincipal =
                        GetPrincipalFromCx(cx);
                    if(globalObjectPrincipal)
                    {
                        nsIPrincipal* subjectPrincipal = ssm->GetCxSubjectPrincipal(cx);
                        PRBool equals = PR_FALSE;
                        globalObjectPrincipal->Equals(subjectPrincipal, &equals);
                        if(equals)
                        {
                            return NS_OK;
                        }
                    }
                }
            }

            {
                // Push() can be called outside any request for e.cx.
                JSAutoRequest ar(e.cx);
                e.frame = JS_SaveFrameChain(e.cx);
            }

            if(!cx)
                e.suspendDepth = JS_SuspendRequest(e.cx);
        }
    }
    return NS_OK;
}
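For context, what Push() saves for the previous entry (e.frame via JS_SaveFrameChain, e.suspendDepth via JS_SuspendRequest) has to be undone when that context becomes the top of the stack again. A rough sketch of that inverse bookkeeping, assuming the same XPCJSContextInfo fields; this is not the actual XPCJSContextStack::Pop:

// Sketch only: restore what Push() saved for the previous stack entry.
static void
RestorePreviousEntrySketch(XPCJSContextInfo &e)
{
    if (!e.cx)
        return;
    if (e.suspendDepth) {
        JS_ResumeRequest(e.cx, e.suspendDepth);   // undo JS_SuspendRequest
        e.suspendDepth = 0;
    }
    if (e.frame) {
        JSAutoRequest ar(e.cx);
        JS_RestoreFrameChain(e.cx, e.frame);      // undo JS_SaveFrameChain
        e.frame = NULL;
    }
}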
Example #3
static void *
MapPages(void *addr, size_t size)
{
    vm_address_t p;
    int flags;
    if (addr) {
        p = (vm_address_t) addr;
        flags = 0;
    } else {
        p = 0;  /* no placement hint; let vm_allocate choose the address */
        flags = VM_FLAGS_ANYWHERE;
    }

    kern_return_t err = vm_allocate((vm_map_t) mach_task_self(),
                                    &p, (vm_size_t) size, flags);
    if (err != KERN_SUCCESS)
        return NULL;

    JS_ASSERT(p);
    JS_ASSERT_IF(addr, p == (vm_address_t) addr);
    return (void *) p;
}
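For completeness, pages obtained this way are returned to the kernel with vm_deallocate. A small sketch mirroring MapPages (UnmapPagesSketch is a made-up name; the real unmapping helper may differ):

static void
UnmapPagesSketch(void *addr, size_t size)
{
    kern_return_t err = vm_deallocate((vm_map_t) mach_task_self(),
                                      (vm_address_t) addr, (vm_size_t) size);
    JS_ASSERT(err == KERN_SUCCESS);
    (void) err;   /* JS_ASSERT compiles away in release builds */
}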
Example #4
static void
generator_trace(JSTracer *trc, JSObject *obj)
{
    JSGenerator *gen;

    gen = (JSGenerator *) JS_GetPrivate(trc->context, obj);
    if (!gen)
        return;

    /*
     * js_TraceStackFrame does not recursively trace the down-linked frame
     * chain, so we insist that gen->frame has no parent to trace when the
     * generator is not running.
     */
    JS_ASSERT_IF(gen->state != JSGEN_RUNNING && gen->state != JSGEN_CLOSING,
                 !gen->frame.down);

    /*
     * FIXME: bug 390950. The generator's frame is part of the JS stack when
     * the generator is running or closing, so tracing the frame here
     * duplicates the work done in js_TraceContext.
     */
    js_TraceStackFrame(trc, &gen->frame);
}
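The JS_ASSERT_IF above is a compact spelling of the invariant "if the generator is neither running nor closing, its frame must have no down link". Written out as a hypothetical predicate (GeneratorFrameDetachedSketch is not part of the real code):

/* Sketch only: the invariant checked by the assertion in generator_trace. */
static JSBool
GeneratorFrameDetachedSketch(JSGenerator *gen)
{
    if (gen->state == JSGEN_RUNNING || gen->state == JSGEN_CLOSING)
        return JS_TRUE;              /* frame is live on the JS stack */
    return gen->frame.down == NULL;  /* suspended: frame must be detached */
}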
Example #5
JSAtom *
js_AtomizeString(JSContext *cx, JSString *str, uintN flags)
{
    jsval v;
    JSAtomState *state;
    JSDHashTable *table;
    JSAtomHashEntry *entry;
    JSString *key;
    uint32 gen;

    JS_ASSERT(!(flags & ~(ATOM_PINNED|ATOM_INTERNED|ATOM_TMPSTR|ATOM_NOCOPY)));
    JS_ASSERT_IF(flags & ATOM_NOCOPY, flags & ATOM_TMPSTR);

    if (str->isAtomized())
        return (JSAtom *) STRING_TO_JSVAL(str);

    size_t length = str->length();
    if (length == 1) {
        jschar c = str->chars()[0];
        if (c < UNIT_STRING_LIMIT)
            return (JSAtom *) STRING_TO_JSVAL(JSString::unitString(c));
    }

    /*
     * Here we know that JSString::intStringTable covers only 256 (or at any
     * rate fewer than 1000) values. We rely on the order of these checks to
     * resolve the unit vs. int string atom identity issue by giving priority
     * to unit strings for '0' through '9' (see JSString::intString in
     * jsstrinlines.h).
     */
    JS_STATIC_ASSERT(INT_STRING_LIMIT <= 999);
    if (2 <= length && length <= 3) {
        const jschar *chars = str->chars();

        if ('1' <= chars[0] && chars[0] <= '9' &&
            '0' <= chars[1] && chars[1] <= '9' &&
            (length == 2 || ('0' <= chars[2] && chars[2] <= '9'))) {
            jsint i = (chars[0] - '0') * 10 + chars[1] - '0';

            if (length == 3)
                i = i * 10 + chars[2] - '0'; 
            if (jsuint(i) < INT_STRING_LIMIT)
                return (JSAtom *) STRING_TO_JSVAL(JSString::intString(i));
        }
    }

    state = &cx->runtime->atomState;
    table = &state->stringAtoms;

    JS_LOCK(cx, &state->lock);
    entry = TO_ATOM_ENTRY(JS_DHashTableOperate(table, str, JS_DHASH_ADD));
    if (!entry)
        goto failed_hash_add;
    if (entry->keyAndFlags != 0) {
        key = (JSString *)ATOM_ENTRY_KEY(entry);
    } else {
        /*
         * We created a new hashtable entry. Unless str is already allocated
         * from the GC heap and flat, we have to release state->lock as
         * string construction is a complex operation. For example, it can
         * trigger GC which may rehash the table and make the entry invalid.
         */
        ++table->generation;
        if (!(flags & ATOM_TMPSTR) && str->isFlat()) {
            str->flatClearMutable();
            key = str;
        } else {
            gen = table->generation;
            JS_UNLOCK(cx, &state->lock);

            if (flags & ATOM_TMPSTR) {
                if (flags & ATOM_NOCOPY) {
                    key = js_NewString(cx, str->flatChars(), str->flatLength());
                    if (!key)
                        return NULL;

                    /* Finish handing off chars to the GC'ed key string. */
                    str->mChars = NULL;
                } else {
                    key = js_NewStringCopyN(cx, str->flatChars(), str->flatLength());
                    if (!key)
                        return NULL;
                }
            } else {
                JS_ASSERT(str->isDependent());
                if (!js_UndependString(cx, str))
                    return NULL;
                key = str;
            }

            JS_LOCK(cx, &state->lock);
            if (table->generation == gen) {
                JS_ASSERT(entry->keyAndFlags == 0);
            } else {
                entry = TO_ATOM_ENTRY(JS_DHashTableOperate(table, key,
                                                           JS_DHASH_ADD));
                if (!entry)
                    goto failed_hash_add;
                if (entry->keyAndFlags != 0) {
                    key = (JSString *)ATOM_ENTRY_KEY(entry);
                    goto finish;
                }
                ++table->generation;
            }
        }
        INIT_ATOM_ENTRY(entry, key);
        key->flatSetAtomized();
    }

  finish:
    ADD_ATOM_ENTRY_FLAGS(entry, flags & (ATOM_PINNED | ATOM_INTERNED));
    JS_ASSERT(key->isAtomized());
    v = STRING_TO_JSVAL(key);
    cx->weakRoots.lastAtom = v;
    JS_UNLOCK(cx, &state->lock);
    return (JSAtom *)v;

  failed_hash_add:
    JS_UNLOCK(cx, &state->lock);
    JS_ReportOutOfMemory(cx);
    return NULL;
}
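The 2-3 character fast path above is the subtle part of the early returns: digits only, no leading zero, and the resulting index must stay below INT_STRING_LIMIT. A stand-alone sketch of just that check, using plain char instead of jschar and a made-up limit constant:

#include <stddef.h>

enum { INT_STRING_LIMIT_SKETCH = 256 };   /* stands in for INT_STRING_LIMIT */

/* Sketch only: return the int-string index encoded by a short decimal
 * string, or -1 if the string does not qualify for an int atom. */
static int
ShortDecimalIndexSketch(const char *chars, size_t length)
{
    if (length < 2 || length > 3)
        return -1;
    if (chars[0] < '1' || chars[0] > '9')
        return -1;
    int i = chars[0] - '0';
    for (size_t k = 1; k < length; k++) {
        if (chars[k] < '0' || chars[k] > '9')
            return -1;
        i = i * 10 + (chars[k] - '0');
    }
    return i < INT_STRING_LIMIT_SKETCH ? i : -1;
}

The other subtlety in this example is the generation check: the lock is dropped while the key string is built (construction can trigger a GC that rehashes the table), so the new entry is only used directly if table->generation is unchanged after re-locking; otherwise the lookup is redone.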
Example #6
void
ForkJoinNursery::pjsCollection(int op)
{
    JS_ASSERT((op & Collect) != (op & Evacuate));

    bool evacuate = op & Evacuate;
    bool recreate = op & Recreate;

    JS_ASSERT(!isEvacuating_);
    JS_ASSERT(!evacuationZone_);
    JS_ASSERT(!head_);
    JS_ASSERT(tail_ == &head_);

    JSRuntime *const rt = shared_->runtime();
    const unsigned currentNumActiveChunks_ = numActiveChunks_;
    const char *msg = "";

    JS_ASSERT(!rt->needsBarrier());

    TIME_START(pjsCollection);

    rt->incFJMinorCollecting();
    if (evacuate) {
        isEvacuating_ = true;
        evacuationZone_ = shared_->zone();
    }

    flip();
    if (recreate) {
        initNewspace();
        // Newspace must be at least as large as fromspace.
        numActiveChunks_ = currentNumActiveChunks_;
    }
    ForkJoinNurseryCollectionTracer trc(rt, this);
    forwardFromRoots(&trc);
    collectToFixedPoint(&trc);
#ifdef JS_ION
    jit::UpdateJitActivationsForMinorGC(TlsPerThreadData.get(), &trc);
#endif
    freeFromspace();

    size_t live = movedSize_;
    computeNurserySizeAfterGC(live, &msg);

    sweepHugeSlots();
    JS_ASSERT(hugeSlots[hugeSlotsFrom].empty());
    JS_ASSERT_IF(isEvacuating_, hugeSlots[hugeSlotsNew].empty());

    isEvacuating_ = false;
    evacuationZone_ = nullptr;
    head_ = nullptr;
    tail_ = &head_;
    movedSize_ = 0;

    rt->decFJMinorCollecting();

    TIME_END(pjsCollection);

    // Note, the spew is awk-friendly, non-underlined words serve as markers:
    //   FJGC _tag_ us _value_ copied _value_ size _value_ _message-word_ ...
    shared_->spewGC("FJGC %s us %5" PRId64 "  copied %7" PRIu64 "  size %" PRIu64 "  %s",
                    (evacuate ? "evacuate " : "collect  "),
                    TIME_TOTAL(pjsCollection),
                    (uint64_t)live,
                    (uint64_t)numActiveChunks_*1024*1024,
                    msg);
}
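How the op bitmask is composed is not shown above; the flags suggest callers request either a collection or an evacuation, optionally asking for newspace to be recreated afterwards. A hypothetical caller, purely as an assumed usage sketch (the scoping of the flags and the accessibility of pjsCollection are assumptions, not taken from the real file):

// Assumed usage sketch: pick collection vs. evacuation, recreate newspace.
static void
RunNurseryGCSketch(ForkJoinNursery &nursery, bool evacuate)
{
    int op = (evacuate ? ForkJoinNursery::Evacuate : ForkJoinNursery::Collect) |
             ForkJoinNursery::Recreate;
    nursery.pjsCollection(op);
}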