void
ImmutableSync::syncCopy(FrameEntry *fe)
{
    JS_ASSERT(fe >= bottom);

    FrameEntry *backing = fe->copyOf();
    SyncEntry &e = entryFor(backing);

    JS_ASSERT(!backing->isConstant());

    Address addr = frame.addressOf(fe);

    if (fe->isTypeKnown() && !e.learnedType) {
        e.learnedType = true;
        e.type = fe->getKnownType();
    }

    if (!fe->data.synced())
        masm->storePayload(ensureDataReg(backing, e), addr);

    if (!fe->type.synced()) {
        if (e.learnedType)
            masm->storeTypeTag(ImmType(e.type), addr);
        else
            masm->storeTypeTag(ensureTypeReg(backing, e), addr);
    }
}
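The SyncEntry above memoizes a type learned from any copy (e.learnedType), so subsequent copies of the same backing entry can store the tag as an immediate rather than going through a register. A toy standalone model of that memoization (the names here are hypothetical, not the real SyncEntry type):

#include <cassert>

enum ValueType { TYPE_UNKNOWN, TYPE_INT32, TYPE_OBJECT };

struct SyncEntryModel {
    bool learnedType = false;
    ValueType type = TYPE_UNKNOWN;
};

/* Record a known type the first time any copy reveals it. */
static void
learn(SyncEntryModel &e, ValueType known)
{
    if (known != TYPE_UNKNOWN && !e.learnedType) {
        e.learnedType = true;
        e.type = known;
    }
}

int
main()
{
    SyncEntryModel e;
    learn(e, TYPE_INT32);   /* first copy with a known type teaches us */
    learn(e, TYPE_UNKNOWN); /* later copies cannot unlearn it          */
    assert(e.learnedType && e.type == TYPE_INT32);
    return 0;
}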
void
FrameState::pushCopyOf(uint32 index)
{
    FrameEntry *backing = entryFor(index);
    FrameEntry *fe = rawPush();
    fe->resetUnsynced();
    if (backing->isConstant()) {
        fe->setConstant(Jsvalify(backing->getValue()));
    } else {
        if (backing->isTypeKnown())
            fe->setType(backing->getKnownType());
        else
            fe->type.invalidate();
        fe->isNumber = backing->isNumber;
        fe->data.invalidate();
        if (backing->isCopy()) {
            backing = backing->copyOf();
            fe->setCopyOf(backing);
        } else {
            fe->setCopyOf(backing);
            backing->setCopied();
        }

        /* Maintain tracker ordering guarantees for copies. */
        JS_ASSERT(backing->isCopied());
        if (fe->trackerIndex() < backing->trackerIndex())
            swapInTracker(fe, backing);
    }
}
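The swapInTracker() fixup above preserves the invariant that a backing entry always precedes its copies in the tracker, so a forward walk visits the backing store first. A minimal standalone model of that invariant (the Entry struct and these names are hypothetical simplifications, not the real FrameState types):

#include <cassert>
#include <utility>
#include <vector>

struct Entry {
    Entry *copyOf = nullptr;   /* non-null if this entry mirrors another */
    size_t trackerIndex = 0;   /* position in the tracker below          */
};

/* Swap two entries' tracker positions, mirroring FrameState::swapInTracker(). */
static void
swapInTracker(std::vector<Entry *> &tracker, Entry *a, Entry *b)
{
    std::swap(tracker[a->trackerIndex], tracker[b->trackerIndex]);
    std::swap(a->trackerIndex, b->trackerIndex);
}

int
main()
{
    Entry backing, copy;
    copy.copyOf = &backing;

    /* Suppose the copy was tracked first, violating the invariant. */
    std::vector<Entry *> tracker = {&copy, &backing};
    copy.trackerIndex = 0;
    backing.trackerIndex = 1;

    /* The fixup from pushCopyOf(): swap so the backing precedes its copy. */
    if (copy.trackerIndex < backing.trackerIndex)
        swapInTracker(tracker, &copy, &backing);

    /* Forward walks now visit the backing store before any copy of it. */
    assert(backing.trackerIndex < copy.trackerIndex);
    assert(tracker[0] == &backing && tracker[1] == &copy);
    return 0;
}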
void
FrameState::syncAndKill(Registers kill, Uses uses, Uses ignore)
{
    /* Backwards, so we can allocate registers to backing slots better. */
    FrameEntry *tos = tosFe();
    FrameEntry *bottom = tos - uses.nuses;

    tos -= ignore.nuses;

    if (inTryBlock)
        bottom = NULL;

    for (uint32 i = tracker.nentries - 1; i < tracker.nentries; i--) {
        FrameEntry *fe = tracker[i];
        if (fe >= tos)
            continue;

        Address address = addressOf(fe);
        FrameEntry *backing = fe;
        if (fe->isCopy()) {
            if (!inTryBlock && fe < bottom)
                continue;
            backing = fe->copyOf();
        }

        JS_ASSERT_IF(i == 0, !fe->isCopy());

        bool killData = fe->data.inRegister() && kill.hasReg(fe->data.reg());
        if (!fe->data.synced() && (killData || fe >= bottom)) {
            if (backing != fe && backing->data.inMemory())
                tempRegForData(backing);
            syncData(backing, address, masm);
            fe->data.sync();
            if (fe->isConstant() && !fe->type.synced())
                fe->type.sync();
        }
        if (killData) {
            JS_ASSERT(backing == fe);
            JS_ASSERT(fe->data.synced());
            if (regstate[fe->data.reg()].fe)
                forgetReg(fe->data.reg());
            fe->data.setMemory();
        }

        bool killType = fe->type.inRegister() && kill.hasReg(fe->type.reg());
        if (!fe->type.synced() && (killType || fe >= bottom)) {
            if (backing != fe && backing->type.inMemory())
                tempRegForType(backing);
            syncType(backing, address, masm);
            fe->type.sync();
        }
        if (killType) {
            JS_ASSERT(backing == fe);
            JS_ASSERT(fe->type.synced());
            if (regstate[fe->type.reg()].fe)
                forgetReg(fe->type.reg());
            fe->type.setMemory();
        }
    }
}
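The loop header here and in sync() below, for (uint32 i = tracker.nentries - 1; i < tracker.nentries; i--), is the standard unsigned trick for walking an array backwards: decrementing past zero wraps to UINT32_MAX, so the i < nentries test doubles as the termination check, and it also handles nentries == 0, where the initial i is already out of range. A standalone sketch of the idiom:

#include <cstdint>
#include <cstdio>

int
main()
{
    const uint32_t nentries = 4;
    const int items[nentries] = {10, 20, 30, 40};

    /* Visits i = 3, 2, 1, 0; after 0-- wraps to UINT32_MAX, i < nentries fails. */
    for (uint32_t i = nentries - 1; i < nentries; i--)
        printf("items[%u] = %d\n", i, items[i]);
    return 0;
}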
CompileStatus
mjit::Compiler::compileArrayWithLength(uint32_t argc)
{
    /* Match Array() or Array(n) for constant n. */
    JS_ASSERT(argc == 0 || argc == 1);

    int32_t length = 0;
    if (argc == 1) {
        FrameEntry *arg = frame.peek(-1);
        if (!arg->isConstant() || !arg->getValue().isInt32())
            return Compile_InlineAbort;
        length = arg->getValue().toInt32();
        if (length < 0)
            return Compile_InlineAbort;
    }

    RootedScript script(cx, script_);
    types::TypeObject *type = types::TypeScript::InitObject(cx, script, PC, JSProto_Array);
    if (!type)
        return Compile_Error;

    JSObject *templateObject = NewDenseUnallocatedArray(cx, length, type->proto);
    if (!templateObject)
        return Compile_Error;
    templateObject->setType(type);

    RegisterID result = frame.allocReg();
    Jump emptyFreeList = getNewObject(cx, result, templateObject);

    stubcc.linkExit(emptyFreeList, Uses(0));
    stubcc.leave();
    stubcc.masm.move(Imm32(argc), Registers::ArgReg1);
    OOL_STUBCALL(stubs::SlowCall, REJOIN_FALLTHROUGH);

    frame.popn(argc + 2);
    frame.pushTypedPayload(JSVAL_TYPE_OBJECT, result);

    stubcc.rejoin(Changes(1));
    return Compile_Okay;
}
CompileStatus
mjit::Compiler::inlineNativeFunction(uint32_t argc, bool callingNew)
{
    if (!cx->typeInferenceEnabled())
        return Compile_InlineAbort;

    if (applyTricks == LazyArgsObj)
        return Compile_InlineAbort;

    FrameEntry *origCallee = frame.peek(-((int)argc + 2));
    FrameEntry *thisValue = frame.peek(-((int)argc + 1));
    types::TypeSet *thisTypes = analysis->poppedTypes(PC, argc);

    if (!origCallee->isConstant() || !origCallee->isType(JSVAL_TYPE_OBJECT))
        return Compile_InlineAbort;

    JSObject *callee = &origCallee->getValue().toObject();
    if (!callee->isFunction())
        return Compile_InlineAbort;

    /*
     * The callee must have the same parent as the script's global, otherwise
     * inference may not have accounted for any side effects correctly.
     */
    if (!globalObj || globalObj != &callee->global())
        return Compile_InlineAbort;

    Native native = callee->toFunction()->maybeNative();
    if (!native)
        return Compile_InlineAbort;

    JSValueType type = knownPushedType(0);
    JSValueType thisType = thisValue->isTypeKnown()
                           ? thisValue->getKnownType()
                           : JSVAL_TYPE_UNKNOWN;

    /*
     * Note: when adding new natives which operate on properties, add relevant
     * constraint generation to the behavior of TypeConstraintCall.
     */

    /* Handle natives that can be called either with or without 'new'. */

    if (native == js_Array && type == JSVAL_TYPE_OBJECT && globalObj) {
        if (argc == 0 || argc == 1)
            return compileArrayWithLength(argc);
        return compileArrayWithArgs(argc);
    }

    /* Remaining natives must not be called with 'new'. */
    if (callingNew)
        return Compile_InlineAbort;

    if (native == js::num_parseInt && argc >= 1) {
        FrameEntry *arg = frame.peek(-(int32_t)argc);
        JSValueType argType = arg->isTypeKnown()
                              ? arg->getKnownType()
                              : JSVAL_TYPE_UNKNOWN;

        if ((argType == JSVAL_TYPE_DOUBLE || argType == JSVAL_TYPE_INT32) &&
            type == JSVAL_TYPE_INT32) {
            return compileParseInt(argType, argc);
        }
    }

    if (argc == 0) {
        if ((native == js::array_pop || native == js::array_shift) &&
            thisType == JSVAL_TYPE_OBJECT) {
            /*
             * Only handle pop/shift on dense arrays which have never been used
             * in an iterator --- when popping elements we don't account for
             * suppressing deleted properties in active iterators.
             *
             * Constraints propagating properties directly into the result
             * type set are generated by TypeConstraintCall during inference.
             */
            if (!thisTypes->hasObjectFlags(cx, types::OBJECT_FLAG_NON_DENSE_ARRAY |
                                               types::OBJECT_FLAG_ITERATED) &&
                !types::ArrayPrototypeHasIndexedProperty(cx, outerScript)) {
                bool packed = !thisTypes->hasObjectFlags(cx, types::OBJECT_FLAG_NON_PACKED_ARRAY);
                return compileArrayPopShift(thisValue, packed, native == js::array_pop);
            }
        }
    } else if (argc == 1) {
        FrameEntry *arg = frame.peek(-1);
        types::TypeSet *argTypes = frame.extra(arg).types;
        if (!argTypes)
            return Compile_InlineAbort;
        JSValueType argType = arg->isTypeKnown()
                              ? arg->getKnownType()
                              : JSVAL_TYPE_UNKNOWN;

        if (native == js_math_abs) {
            if (argType == JSVAL_TYPE_INT32 && type == JSVAL_TYPE_INT32)
                return compileMathAbsInt(arg);
            if (argType == JSVAL_TYPE_DOUBLE && type == JSVAL_TYPE_DOUBLE)
                return compileMathAbsDouble(arg);
        }
        if (native == js_math_floor && argType == JSVAL_TYPE_DOUBLE &&
            type == JSVAL_TYPE_INT32) {
            return compileRound(arg, Floor);
        }
        if (native == js_math_round && argType == JSVAL_TYPE_DOUBLE &&
            type == JSVAL_TYPE_INT32) {
            return compileRound(arg, Round);
        }
        if (native == js_math_sqrt && type == JSVAL_TYPE_DOUBLE &&
            masm.supportsFloatingPointSqrt() &&
            (argType == JSVAL_TYPE_INT32 || argType == JSVAL_TYPE_DOUBLE)) {
            return compileMathSqrt(arg);
        }
        if (native == js_str_charCodeAt && argType == JSVAL_TYPE_INT32 &&
            thisType == JSVAL_TYPE_STRING && type == JSVAL_TYPE_INT32) {
            return compileGetChar(thisValue, arg, GetCharCode);
        }
        if (native == js_str_charAt && argType == JSVAL_TYPE_INT32 &&
            thisType == JSVAL_TYPE_STRING && type == JSVAL_TYPE_STRING) {
            return compileGetChar(thisValue, arg, GetChar);
        }
        if (native == js::str_fromCharCode && argType == JSVAL_TYPE_INT32 &&
            type == JSVAL_TYPE_STRING) {
            return compileStringFromCode(arg);
        }
        if (native == js::array_push && thisType == JSVAL_TYPE_OBJECT &&
            type == JSVAL_TYPE_INT32) {
            /*
             * Constraints propagating properties into the 'this' object are
             * generated by TypeConstraintCall during inference.
             */
            if (!thisTypes->hasObjectFlags(cx, types::OBJECT_FLAG_NON_DENSE_ARRAY) &&
                !types::ArrayPrototypeHasIndexedProperty(cx, outerScript)) {
                return compileArrayPush(thisValue, arg);
            }
        }
        if (native == js::array_concat && argType == JSVAL_TYPE_OBJECT &&
            thisType == JSVAL_TYPE_OBJECT && type == JSVAL_TYPE_OBJECT &&
            !thisTypes->hasObjectFlags(cx, types::OBJECT_FLAG_NON_DENSE_ARRAY) &&
            !argTypes->hasObjectFlags(cx, types::OBJECT_FLAG_NON_DENSE_ARRAY)) {
            return compileArrayConcat(thisTypes, argTypes, thisValue, arg);
        }
    } else if (argc == 2) {
        FrameEntry *arg1 = frame.peek(-2);
        FrameEntry *arg2 = frame.peek(-1);
        JSValueType arg1Type = arg1->isTypeKnown()
                               ? arg1->getKnownType()
                               : JSVAL_TYPE_UNKNOWN;
        JSValueType arg2Type = arg2->isTypeKnown()
                               ? arg2->getKnownType()
                               : JSVAL_TYPE_UNKNOWN;

        if (native == js_math_pow && type == JSVAL_TYPE_DOUBLE &&
            masm.supportsFloatingPointSqrt() &&
            (arg1Type == JSVAL_TYPE_DOUBLE || arg1Type == JSVAL_TYPE_INT32) &&
            arg2Type == JSVAL_TYPE_DOUBLE && arg2->isConstant()) {
            Value arg2Value = arg2->getValue();
            if (arg2Value.toDouble() == -0.5 || arg2Value.toDouble() == 0.5)
                return compileMathPowSimple(arg1, arg2);
        }
        if (native == js_math_min || native == js_math_max) {
            if (arg1Type == JSVAL_TYPE_INT32 && arg2Type == JSVAL_TYPE_INT32 &&
                type == JSVAL_TYPE_INT32) {
                return compileMathMinMaxInt(arg1, arg2,
                        native == js_math_min ? Assembler::LessThan : Assembler::GreaterThan);
            }
            if ((arg1Type == JSVAL_TYPE_INT32 || arg1Type == JSVAL_TYPE_DOUBLE) &&
                (arg2Type == JSVAL_TYPE_INT32 || arg2Type == JSVAL_TYPE_DOUBLE) &&
                type == JSVAL_TYPE_DOUBLE) {
                return compileMathMinMaxDouble(arg1, arg2,
                        (native == js_math_min)
                        ? Assembler::DoubleLessThan
                        : Assembler::DoubleGreaterThan);
            }
        }
    }
    return Compile_InlineAbort;
}
CompileStatus
mjit::Compiler::compileParseInt(JSValueType argType, uint32_t argc)
{
    bool needStubCall = false;

    if (argc > 1) {
        FrameEntry *arg = frame.peek(-(int32_t)argc + 1);

        if (!arg->isTypeKnown() || arg->getKnownType() != JSVAL_TYPE_INT32)
            return Compile_InlineAbort;

        if (arg->isConstant()) {
            int32_t base = arg->getValue().toInt32();
            if (base != 0 && base != 10)
                return Compile_InlineAbort;
        } else {
            RegisterID baseReg = frame.tempRegForData(arg);
            needStubCall = true;

            Jump isTen = masm.branch32(Assembler::Equal, baseReg, Imm32(10));
            Jump isNotZero = masm.branch32(Assembler::NotEqual, baseReg, Imm32(0));
            stubcc.linkExit(isNotZero, Uses(2 + argc));

            isTen.linkTo(masm.label(), &masm);
        }
    }

    if (argType == JSVAL_TYPE_INT32) {
        if (needStubCall) {
            stubcc.leave();
            stubcc.masm.move(Imm32(argc), Registers::ArgReg1);
            OOL_STUBCALL(stubs::SlowCall, REJOIN_FALLTHROUGH);
        }

        /*
         * Stack looks like callee, this, arg1, arg2, argN.
         * First pop all args other than arg1.
         */
        frame.popn(argc - 1);

        /* "Shimmy" arg1 to the callee slot and pop this + arg1. */
        frame.shimmy(2);

        if (needStubCall)
            stubcc.rejoin(Changes(1));
    } else {
        FrameEntry *arg = frame.peek(-(int32_t)argc);
        FPRegisterID fpScratchReg = frame.allocFPReg();
        FPRegisterID fpReg;
        bool allocate;

        DebugOnly<MaybeJump> notNumber = loadDouble(arg, &fpReg, &allocate);
        JS_ASSERT(!((MaybeJump)notNumber).isSet());

        masm.slowLoadConstantDouble(1, fpScratchReg);

        /* Slow path for NaN and numbers < 1. */
        Jump lessThanOneOrNan = masm.branchDouble(Assembler::DoubleLessThanOrUnordered,
                                                  fpReg, fpScratchReg);
        stubcc.linkExit(lessThanOneOrNan, Uses(2 + argc));

        frame.freeReg(fpScratchReg);

        /* Truncate to integer, slow path if this overflows. */
        RegisterID reg = frame.allocReg();
        Jump overflow = masm.branchTruncateDoubleToInt32(fpReg, reg);
        stubcc.linkExit(overflow, Uses(2 + argc));

        if (allocate)
            frame.freeReg(fpReg);

        stubcc.leave();
        stubcc.masm.move(Imm32(argc), Registers::ArgReg1);
        OOL_STUBCALL(stubs::SlowCall, REJOIN_FALLTHROUGH);

        frame.popn(2 + argc);
        frame.pushTypedPayload(JSVAL_TYPE_INT32, reg);

        stubcc.rejoin(Changes(1));
    }

    return Compile_Okay;
}
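Why guard with DoubleLessThanOrUnordered against 1.0? parseInt stringifies its argument first, and for doubles below 1 the string can come out in exponent form (String(1e-7) is "1e-7", so parseInt(1e-7) is 1), which disagrees with plain truncation; the guard conservatively sends everything below 1, including all negatives and NaN (the unordered case), to the stub, while out-of-range truncation is caught separately by branchTruncateDoubleToInt32. A standalone sketch of the predicate (a simplified model, not the emitted code):

#include <cmath>
#include <cstdio>

/*
 * Mirrors the inline guard: only doubles that are >= 1 (and not NaN)
 * are truncated in place; everything else exits to the stub call.
 */
static bool
takesFastPath(double d)
{
    return !(std::isnan(d) || d < 1.0);
}

int
main()
{
    printf("%d\n", takesFastPath(42.7)); /* 1: truncate to 42 inline    */
    printf("%d\n", takesFastPath(1e-7)); /* 0: parseInt(1e-7) is 1, but */
                                         /*    truncation would give 0  */
    printf("%d\n", takesFastPath(NAN));  /* 0: unordered compare        */
    return 0;
}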
void
FrameState::storeLocal(uint32 n, bool popGuaranteed, bool typeChange)
{
    FrameEntry *localFe = getLocal(n);
    bool cacheable = !eval && !escaping[n];

    if (!popGuaranteed && !cacheable) {
        JS_ASSERT_IF(base[localIndex(n)] && (!eval || n < script->nfixed),
                     entries[localIndex(n)].type.inMemory() &&
                     entries[localIndex(n)].data.inMemory());
        Address local(JSFrameReg, sizeof(JSStackFrame) + n * sizeof(Value));
        storeTo(peek(-1), local, false);
        forgetAllRegs(getLocal(n));
        localFe->resetSynced();
        return;
    }

    bool wasSynced = localFe->type.synced();

    /* Detect something like (x = x) which is a no-op. */
    FrameEntry *top = peek(-1);
    if (top->isCopy() && top->copyOf() == localFe) {
        JS_ASSERT(localFe->isCopied());
        return;
    }

    /* Completely invalidate the local variable. */
    if (localFe->isCopied()) {
        uncopy(localFe);
        if (!localFe->isCopied())
            forgetAllRegs(localFe);
    } else {
        forgetAllRegs(localFe);
    }

    localFe->resetUnsynced();

    /* Constants are easy to propagate. */
    if (top->isConstant()) {
        localFe->setCopyOf(NULL);
        localFe->setNotCopied();
        localFe->setConstant(Jsvalify(top->getValue()));
        return;
    }

    /*
     * When dealing with copies, there are two important invariants:
     *
     * 1) The backing store precedes all copies in the tracker.
     * 2) The backing store of a local is never a stack slot, UNLESS the local
     *    variable itself is a stack slot (blocks) that precedes the stack
     *    slot.
     *
     * If the top is a copy, and the second condition holds true, the local
     * can be rewritten as a copy of the original backing slot. If the first
     * condition does not hold, force it to hold by swapping in-place.
     */
    FrameEntry *backing = top;
    if (top->isCopy()) {
        backing = top->copyOf();
        JS_ASSERT(backing->trackerIndex() < top->trackerIndex());

        uint32 backingIndex = indexOfFe(backing);
        uint32 tol = uint32(spBase - base);
        if (backingIndex < tol || backingIndex < localIndex(n)) {
            /* local.idx < backing.idx means local cannot be a copy yet */
            if (localFe->trackerIndex() < backing->trackerIndex())
                swapInTracker(backing, localFe);
            localFe->setNotCopied();
            localFe->setCopyOf(backing);
            if (backing->isTypeKnown())
                localFe->setType(backing->getKnownType());
            else
                localFe->type.invalidate();
            localFe->data.invalidate();
            localFe->isNumber = backing->isNumber;
            return;
        }

        /*
         * If control flow lands here, then there was a bytecode sequence like
         *
         *  ENTERBLOCK 2
         *  GETLOCAL 1
         *  SETLOCAL 0
         *
         * The problem is slot N can't be backed by M if M could be popped
         * before N. We want a guarantee that when we pop M, even if it was
         * copied, it has no outstanding copies.
         *
         * Because of |let| expressions, it's kind of hard to really know
         * whether a region on the stack will be popped all at once. Bleh!
         *
         * This should be rare except in browser code (and maybe even then),
         * but even so there's a quick workaround. We take all copies of the
         * backing fe, and redirect them to be copies of the destination.
         */
        FrameEntry *tos = tosFe();
        for (uint32 i = backing->trackerIndex() + 1; i < tracker.nentries; i++) {
            FrameEntry *fe = tracker[i];
            if (fe >= tos)
                continue;
            if (fe->isCopy() && fe->copyOf() == backing)
                fe->setCopyOf(localFe);
        }
    }
    backing->setNotCopied();

    /*
     * This is valid from the top->isCopy() path because we're guaranteed a
     * consistent ordering - all copies of |backing| are tracked after
     * |backing|. Transitively, only one swap is needed.
     */
    if (backing->trackerIndex() < localFe->trackerIndex())
        swapInTracker(backing, localFe);

    /*
     * Move the backing store down - we spill registers here, but we could be
     * smarter and re-use the type reg.
     */
    RegisterID reg = tempRegForData(backing);
    localFe->data.setRegister(reg);
    moveOwnership(reg, localFe);

    if (typeChange) {
        if (backing->isTypeKnown()) {
            localFe->setType(backing->getKnownType());
        } else {
            RegisterID reg = tempRegForType(backing);
            localFe->type.setRegister(reg);
            moveOwnership(reg, localFe);
        }
    } else {
        if (!wasSynced)
            masm.storeTypeTag(ImmType(backing->getKnownType()), addressOf(localFe));
        localFe->type.setMemory();
    }

    if (!backing->isTypeKnown())
        backing->type.invalidate();
    backing->data.invalidate();
    backing->setCopyOf(localFe);
    backing->isNumber = localFe->isNumber;
    localFe->setCopied();

    if (!cacheable) {
        /* TODO: x64 optimization */
        if (!localFe->type.synced())
            syncType(localFe, addressOf(localFe), masm);
        if (!localFe->data.synced())
            syncData(localFe, addressOf(localFe), masm);
        forgetAllRegs(localFe);
        localFe->type.setMemory();
        localFe->data.setMemory();
    }

    JS_ASSERT(top->copyOf() == localFe);
}
void
FrameState::sync(Assembler &masm, Uses uses) const
{
    /*
     * Keep track of free registers using a bitmask. If we have to drop into
     * syncFancy(), then this mask will help avoid eviction.
     */
    Registers avail(freeRegs);
    Registers temp(Registers::TempRegs);

    FrameEntry *tos = tosFe();
    FrameEntry *bottom = tos - uses.nuses;

    if (inTryBlock)
        bottom = NULL;

    for (uint32 i = tracker.nentries - 1; i < tracker.nentries; i--) {
        FrameEntry *fe = tracker[i];
        if (fe >= tos)
            continue;

        Address address = addressOf(fe);

        if (!fe->isCopy()) {
            /* Keep track of registers that can be clobbered. */
            if (fe->data.inRegister())
                avail.putReg(fe->data.reg());
            if (fe->type.inRegister())
                avail.putReg(fe->type.reg());

            /* Sync. */
            if (!fe->data.synced() && (fe->data.inRegister() || fe >= bottom)) {
                syncData(fe, address, masm);
                if (fe->isConstant())
                    continue;
            }
            if (!fe->type.synced() && (fe->type.inRegister() || fe >= bottom))
                syncType(fe, addressOf(fe), masm);
        } else if (fe >= bottom) {
            FrameEntry *backing = fe->copyOf();
            JS_ASSERT(backing != fe);
            JS_ASSERT(!backing->isConstant() && !fe->isConstant());

            /*
             * If the copy is backed by something not in a register, fall back
             * to a slower sync algorithm.
             */
            if ((!fe->type.synced() && !backing->type.inRegister()) ||
                (!fe->data.synced() && !backing->data.inRegister())) {
                syncFancy(masm, avail, i, bottom);
                return;
            }

            if (!fe->type.synced()) {
                /* :TODO: we can do better, the type is learned for all copies. */
                if (fe->isTypeKnown()) {
                    //JS_ASSERT(fe->getTypeTag() == backing->getTypeTag());
                    masm.storeTypeTag(ImmType(fe->getKnownType()), address);
                } else {
                    masm.storeTypeTag(backing->type.reg(), address);
                }
            }
            if (!fe->data.synced())
                masm.storePayload(backing->data.reg(), address);
        }
    }
}
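The Registers avail(freeRegs) bookkeeping above accumulates registers whose values are already synced to memory, so that if the walk has to drop into syncFancy(), it can clobber those registers without evicting live state. A minimal sketch of this kind of bitmask register set (a hypothetical standalone type, not the real Registers class):

#include <cassert>
#include <cstdint>

struct RegMask {
    uint32_t bits;

    explicit RegMask(uint32_t init) : bits(init) {}

    void putReg(unsigned r)  { bits |= 1u << r; }    /* mark clobberable */
    void takeReg(unsigned r) { bits &= ~(1u << r); } /* mark in use      */
    bool hasReg(unsigned r) const { return (bits >> r) & 1u; }
};

int
main()
{
    RegMask avail(0);   /* start from the free set, here empty            */
    avail.putReg(3);    /* a synced value's register becomes clobberable  */
    assert(avail.hasReg(3) && !avail.hasReg(4));
    avail.takeReg(3);   /* claiming it removes it from the available set  */
    assert(!avail.hasReg(3));
    return 0;
}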