Example #1
StupidAllocator::RegisterIndex
StupidAllocator::allocateRegister(LInstruction *ins, uint32_t vreg)
{
    // Pick a register for vreg, evicting an existing register if necessary.
    // Spill code will be placed before ins, and no existing allocated input
    // for ins will be touched.
    JS_ASSERT(ins);

    LDefinition *def = virtualRegisters[vreg];
    JS_ASSERT(def);

    RegisterIndex best = UINT32_MAX;

    for (size_t i = 0; i < registerCount; i++) {
        AnyRegister reg = registers[i].reg;

        if (reg.isFloat() != (def->type() == LDefinition::DOUBLE))
            continue;

        // Skip the register if it is in use for an allocated input or output.
        if (registerIsReserved(ins, reg))
            continue;

        if (registers[i].vreg == MISSING_ALLOCATION ||
            best == UINT32_MAX ||
            registers[best].age > registers[i].age)
        {
            best = i;
        }
    }

    evictRegister(ins, best);
    return best;
}
void
GreedyAllocator::killReg(VirtualRegister *vr)
{
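    // Release the physical register currently assigned to vr, if any, making
    // it available to the allocator again.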
    if (vr->hasRegister()) {
        AnyRegister reg = vr->reg();
        JS_ASSERT(state[reg] == vr);

        IonSpew(IonSpew_RegAlloc, "    kill vr%d (%s)",
                vr->def->virtualRegister(), reg.name());
        freeReg(reg);
    }
}
template<typename T>
void
MacroAssemblerX86Shared::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
                                                       Register value, Register temp, AnyRegister output)
{
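    // Perform an atomic exchange on a typed-array element, sign- or
    // zero-extending the old value into output; uint32 results are returned
    // as a double through temp.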
    switch (arrayType) {
      case Scalar::Int8:
        atomicExchange8SignExtend(mem, value, output.gpr());
        break;
      case Scalar::Uint8:
        atomicExchange8ZeroExtend(mem, value, output.gpr());
        break;
      case Scalar::Uint8Clamped:
        atomicExchange8ZeroExtend(mem, value, output.gpr());
        break;
      case Scalar::Int16:
        atomicExchange16SignExtend(mem, value, output.gpr());
        break;
      case Scalar::Uint16:
        atomicExchange16ZeroExtend(mem, value, output.gpr());
        break;
      case Scalar::Int32:
        atomicExchange32(mem, value, output.gpr());
        break;
      case Scalar::Uint32:
        // At the moment, the code in MCallOptimize.cpp requires the output
        // type to be double for uint32 arrays.  See bug 1077305.
        MOZ_ASSERT(output.isFloat());
        atomicExchange32(mem, value, temp);
        asMasm().convertUInt32ToDouble(temp, output.fpu());
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}
Example #4
bool
StupidAllocator::allocationRequiresRegister(const LAllocation* alloc, AnyRegister reg)
{
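    // An allocation pins reg if it is that register itself, or a use with a
    // FIXED policy whose fixed register aliases reg.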
    if (alloc->isRegister() && alloc->toRegister() == reg)
        return true;
    if (alloc->isUse()) {
        const LUse* use = alloc->toUse();
        if (use->policy() == LUse::FIXED) {
            AnyRegister usedReg = GetFixedRegister(virtualRegisters[use->virtualRegister()], use);
            if (usedReg.aliases(reg))
                return true;
        }
    }
    return false;
}
Example #5
static void
SetRegisterToCoercedUndefined(CONTEXT *context, bool isFloat32, AnyRegister reg)
{
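    // Patch the saved machine context so that reg holds the result of
    // coercing undefined: NaN for float registers, zero for general-purpose
    // registers.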
    if (reg.isFloat()) {
        switch (reg.fpu().code()) {
          case JSC::X86Registers::xmm0:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 0)); break;
          case JSC::X86Registers::xmm1:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 1)); break;
          case JSC::X86Registers::xmm2:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 2)); break;
          case JSC::X86Registers::xmm3:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 3)); break;
          case JSC::X86Registers::xmm4:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 4)); break;
          case JSC::X86Registers::xmm5:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 5)); break;
          case JSC::X86Registers::xmm6:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 6)); break;
          case JSC::X86Registers::xmm7:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 7)); break;
          case JSC::X86Registers::xmm8:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 8)); break;
          case JSC::X86Registers::xmm9:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 9)); break;
          case JSC::X86Registers::xmm10: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 10)); break;
          case JSC::X86Registers::xmm11: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 11)); break;
          case JSC::X86Registers::xmm12: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 12)); break;
          case JSC::X86Registers::xmm13: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 13)); break;
          case JSC::X86Registers::xmm14: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 14)); break;
          case JSC::X86Registers::xmm15: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 15)); break;
          default: MOZ_CRASH();
        }
    } else {
        switch (reg.gpr().code()) {
          case JSC::X86Registers::eax: RAX_sig(context) = 0; break;
          case JSC::X86Registers::ecx: RCX_sig(context) = 0; break;
          case JSC::X86Registers::edx: RDX_sig(context) = 0; break;
          case JSC::X86Registers::ebx: RBX_sig(context) = 0; break;
          case JSC::X86Registers::esp: RSP_sig(context) = 0; break;
          case JSC::X86Registers::ebp: RBP_sig(context) = 0; break;
          case JSC::X86Registers::esi: RSI_sig(context) = 0; break;
          case JSC::X86Registers::edi: RDI_sig(context) = 0; break;
          case JSC::X86Registers::r8:  R8_sig(context)  = 0; break;
          case JSC::X86Registers::r9:  R9_sig(context)  = 0; break;
          case JSC::X86Registers::r10: R10_sig(context) = 0; break;
          case JSC::X86Registers::r11: R11_sig(context) = 0; break;
          case JSC::X86Registers::r12: R12_sig(context) = 0; break;
          case JSC::X86Registers::r13: R13_sig(context) = 0; break;
          case JSC::X86Registers::r14: R14_sig(context) = 0; break;
          case JSC::X86Registers::r15: R15_sig(context) = 0; break;
          default: MOZ_CRASH();
        }
    }
}
void
GreedyAllocator::assign(VirtualRegister *vr, AnyRegister reg)
{
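    // Bind the currently free register reg to vr and remove it from the
    // allocator's free set.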
    JS_ASSERT(!state[reg]);
    IonSpew(IonSpew_RegAlloc, "    assign vr%d := %s", vr->def->virtualRegister(), reg.name());
    state[reg] = vr;
    vr->setRegister(reg);
    state.free.take(reg);
}
static void
AddRegisterToSafepoint(LSafepoint *safepoint, AnyRegister reg, const LDefinition &def)
{
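    // Mark reg as live across the safepoint; if the definition holds an
    // object pointer, also record it as a GC register.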
    safepoint->addLiveRegister(reg);

    JS_ASSERT(def.type() == LDefinition::GENERAL ||
              def.type() == LDefinition::DOUBLE ||
              def.type() == LDefinition::OBJECT);

    if (def.type() == LDefinition::OBJECT)
        safepoint->addGcRegister(reg.gpr());
}
Example #8
void
CodeGeneratorX86::visitLoadTypedArrayElementStatic(LLoadTypedArrayElementStatic* ins)
{
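    // Load an element of a typed array whose base address is statically
    // known. When a bounds check is needed, a fallible load bails out on an
    // out-of-bounds index, while an infallible one jumps to out-of-line code.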
    const MLoadTypedArrayElementStatic* mir = ins->mir();
    Scalar::Type accessType = mir->accessType();
    MOZ_ASSERT_IF(accessType == Scalar::Float32, mir->type() == MIRType::Float32);

    Register ptr = ToRegister(ins->ptr());
    AnyRegister out = ToAnyRegister(ins->output());
    OutOfLineLoadTypedArrayOutOfBounds* ool = nullptr;
    uint32_t offset = mir->offset();

    if (mir->needsBoundsCheck()) {
        MOZ_ASSERT(offset == 0);
        if (!mir->fallible()) {
            ool = new(alloc()) OutOfLineLoadTypedArrayOutOfBounds(out, accessType);
            addOutOfLineCode(ool, ins->mir());
        }

        masm.cmpPtr(ptr, ImmWord(mir->length()));
        if (ool)
            masm.j(Assembler::AboveOrEqual, ool->entry());
        else
            bailoutIf(Assembler::AboveOrEqual, ins->snapshot());
    }

    Operand srcAddr(ptr, int32_t(mir->base().asValue()) + int32_t(offset));
    switch (accessType) {
      case Scalar::Int8:         masm.movsblWithPatch(srcAddr, out.gpr()); break;
      case Scalar::Uint8Clamped:
      case Scalar::Uint8:        masm.movzblWithPatch(srcAddr, out.gpr()); break;
      case Scalar::Int16:        masm.movswlWithPatch(srcAddr, out.gpr()); break;
      case Scalar::Uint16:       masm.movzwlWithPatch(srcAddr, out.gpr()); break;
      case Scalar::Int32:
      case Scalar::Uint32:       masm.movlWithPatch(srcAddr, out.gpr()); break;
      case Scalar::Float32:      masm.vmovssWithPatch(srcAddr, out.fpu()); break;
      case Scalar::Float64:      masm.vmovsdWithPatch(srcAddr, out.fpu()); break;
      default:                   MOZ_CRASH("Unexpected type");
    }

    if (accessType == Scalar::Float64)
        masm.canonicalizeDouble(out.fpu());
    if (accessType == Scalar::Float32)
        masm.canonicalizeFloat(out.fpu());

    if (ool)
        masm.bind(ool->rejoin());
}
template<typename T>
void
MacroAssembler::loadFromTypedArray(int arrayType, const T &src, AnyRegister dest, Register temp,
                                   Label *fail)
{
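    // Load a typed-array element into dest, sign- or zero-extending integer
    // types. A uint32 value is converted to double through temp when dest is
    // a float register, and otherwise branches to fail if it does not fit in
    // int32; float results have NaN canonicalized.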
    switch (arrayType) {
      case TypedArray::TYPE_INT8:
        load8SignExtend(src, dest.gpr());
        break;
      case TypedArray::TYPE_UINT8:
      case TypedArray::TYPE_UINT8_CLAMPED:
        load8ZeroExtend(src, dest.gpr());
        break;
      case TypedArray::TYPE_INT16:
        load16SignExtend(src, dest.gpr());
        break;
      case TypedArray::TYPE_UINT16:
        load16ZeroExtend(src, dest.gpr());
        break;
      case TypedArray::TYPE_INT32:
        load32(src, dest.gpr());
        break;
      case TypedArray::TYPE_UINT32:
        if (dest.isFloat()) {
            load32(src, temp);
            convertUInt32ToDouble(temp, dest.fpu());
        } else {
            load32(src, dest.gpr());
            test32(dest.gpr(), dest.gpr());
            j(Assembler::Signed, fail);
        }
        break;
      case TypedArray::TYPE_FLOAT32:
      case TypedArray::TYPE_FLOAT64:
      {
        if (arrayType == js::TypedArray::TYPE_FLOAT32)
            loadFloatAsDouble(src, dest.fpu());
        else
            loadDouble(src, dest.fpu());

        // Make sure NaN gets canonicalized.
        Label notNaN;
        branchDouble(DoubleOrdered, dest.fpu(), dest.fpu(), &notNaN);
        {
            loadStaticDouble(&js_NaN, dest.fpu());
        }
        bind(&notNaN);
        break;
      }
      default:
        JS_NOT_REACHED("Invalid typed array type");
        break;
    }
}
bool
GreedyAllocator::allocateDefinition(LInstruction *ins, LDefinition *def)
{
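    // Pick an output allocation for def according to its policy (passthrough,
    // reuse-input, default, or preset) and record it on the definition.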
    VirtualRegister *vr = getVirtualRegister(def);

    LAllocation output;
    switch (def->policy()) {
      case LDefinition::PASSTHROUGH:
        // This is purely passthru, so ignore it.
        return true;

      case LDefinition::DEFAULT:
      case LDefinition::MUST_REUSE_INPUT:
      {
        AnyRegister reg;
        // Either take the register requested, or allocate a new one.
        if (def->policy() == LDefinition::MUST_REUSE_INPUT &&
            ins->getOperand(def->getReusedInput())->toUse()->isFixedRegister())
        {
            LAllocation *a = ins->getOperand(def->getReusedInput());
            VirtualRegister *vuse = getVirtualRegister(a->toUse());
            reg = GetFixedRegister(vuse->def, a->toUse());
        } else if (vr->hasRegister()) {
            reg = vr->reg();
        } else {
            if (!allocate(vr->type(), DISALLOW, &reg))
                return false;
        }

        if (def->policy() == LDefinition::MUST_REUSE_INPUT) {
            LUse *use = ins->getOperand(def->getReusedInput())->toUse();
            VirtualRegister *vuse = getVirtualRegister(use);
            // If the use already has the given register, we need to evict.
            if (vuse->hasRegister() && vuse->reg() == reg) {
                if (!evict(reg))
                    return false;
            }

            // Make sure our input is using a fixed register.
            if (reg.isFloat())
                *use = LUse(reg.fpu(), use->virtualRegister());
            else
                *use = LUse(reg.gpr(), use->virtualRegister());
        }
        output = LAllocation(reg);
        break;
      }

      case LDefinition::PRESET:
      {
        // Eviction and disallowing occurred during the definition
        // pre-scan pass.
        output = *def->output();
        break;
      }
    }

    if (output.isRegister()) {
        JS_ASSERT_IF(output.isFloatReg(), disallowed.has(output.toFloatReg()->reg()));
        JS_ASSERT_IF(output.isGeneralReg(), disallowed.has(output.toGeneralReg()->reg()));
    }

    // Finally, set the output.
    def->setOutput(output);
    return true;
}
Example #11
void
MacroAssembler::wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value, Operand dstAddr)
{
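    // Store value to dstAddr with the width implied by the access type,
    // surrounded by any required memory barriers; the code offset of the
    // store instruction is recorded for the access.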
    memoryBarrier(access.barrierBefore());

    size_t storeOffset = size();
    switch (access.type()) {
      case Scalar::Int8:
      case Scalar::Uint8:
        movb(value.gpr(), dstAddr);
        break;
      case Scalar::Int16:
      case Scalar::Uint16:
        movw(value.gpr(), dstAddr);
        break;
      case Scalar::Int32:
      case Scalar::Uint32:
        movl(value.gpr(), dstAddr);
        break;
      case Scalar::Int64:
        movq(value.gpr(), dstAddr);
        break;
      case Scalar::Float32:
        storeUncanonicalizedFloat32(value.fpu(), dstAddr);
        break;
      case Scalar::Float64:
        storeUncanonicalizedDouble(value.fpu(), dstAddr);
        break;
      case Scalar::Float32x4:
        switch (access.numSimdElems()) {
          // In memory-to-register mode, movss zeroes out the high lanes.
          case 1: storeUncanonicalizedFloat32(value.fpu(), dstAddr); break;
          // See comment above, which also applies to movsd.
          case 2: storeUncanonicalizedDouble(value.fpu(), dstAddr); break;
          case 4: storeUnalignedSimd128Float(value.fpu(), dstAddr); break;
          default: MOZ_CRASH("unexpected size for partial load");
        }
        break;
      case Scalar::Int32x4:
        switch (access.numSimdElems()) {
          // In memory-to-register mode, movd zeroes out the high lanes.
          case 1: vmovd(value.fpu(), dstAddr); break;
          // See comment above, which also applies to movq.
          case 2: vmovq(value.fpu(), dstAddr); break;
          case 4: storeUnalignedSimd128Int(value.fpu(), dstAddr); break;
          default: MOZ_CRASH("unexpected size for partial load");
        }
        break;
      case Scalar::Int8x16:
        MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial store");
        storeUnalignedSimd128Int(value.fpu(), dstAddr);
        break;
      case Scalar::Int16x8:
        MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial store");
        storeUnalignedSimd128Int(value.fpu(), dstAddr);
        break;
      case Scalar::Uint8Clamped:
      case Scalar::MaxTypedArrayViewType:
        MOZ_CRASH("unexpected array type");
    }
    append(access, storeOffset, framePushed());

    memoryBarrier(access.barrierAfter());
}
Example #12
void
MacroAssembler::wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr, AnyRegister out)
{
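    // Load from srcAddr into out with the width implied by the access type,
    // surrounded by any required memory barriers; the code offset of the
    // load instruction is recorded for the access.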
    memoryBarrier(access.barrierBefore());

    size_t loadOffset = size();
    switch (access.type()) {
      case Scalar::Int8:
        movsbl(srcAddr, out.gpr());
        break;
      case Scalar::Uint8:
        movzbl(srcAddr, out.gpr());
        break;
      case Scalar::Int16:
        movswl(srcAddr, out.gpr());
        break;
      case Scalar::Uint16:
        movzwl(srcAddr, out.gpr());
        break;
      case Scalar::Int32:
      case Scalar::Uint32:
        movl(srcAddr, out.gpr());
        break;
      case Scalar::Float32:
        loadFloat32(srcAddr, out.fpu());
        break;
      case Scalar::Float64:
        loadDouble(srcAddr, out.fpu());
        break;
      case Scalar::Float32x4:
        switch (access.numSimdElems()) {
          // In memory-to-register mode, movss zeroes out the high lanes.
          case 1: loadFloat32(srcAddr, out.fpu()); break;
          // See comment above, which also applies to movsd.
          case 2: loadDouble(srcAddr, out.fpu()); break;
          case 4: loadUnalignedSimd128Float(srcAddr, out.fpu()); break;
          default: MOZ_CRASH("unexpected size for partial load");
        }
        break;
      case Scalar::Int32x4:
        switch (access.numSimdElems()) {
          // In memory-to-register mode, movd zeroes out the high lanes.
          case 1: vmovd(srcAddr, out.fpu()); break;
          // See comment above, which also applies to movq.
          case 2: vmovq(srcAddr, out.fpu()); break;
          case 4: loadUnalignedSimd128Int(srcAddr, out.fpu()); break;
          default: MOZ_CRASH("unexpected size for partial load");
        }
        break;
      case Scalar::Int8x16:
        MOZ_ASSERT(access.numSimdElems() == 16, "unexpected partial load");
        loadUnalignedSimd128Int(srcAddr, out.fpu());
        break;
      case Scalar::Int16x8:
        MOZ_ASSERT(access.numSimdElems() == 8, "unexpected partial load");
        loadUnalignedSimd128Int(srcAddr, out.fpu());
        break;
      case Scalar::Int64:
        MOZ_CRASH("int64 loads must use load64");
      case Scalar::Uint8Clamped:
      case Scalar::MaxTypedArrayViewType:
        MOZ_CRASH("unexpected array type");
    }
    append(access, loadOffset, framePushed());

    memoryBarrier(access.barrierAfter());
}
Example #13
template<typename T>
void
MacroAssembler::loadFromTypedArray(int arrayType, const T &src, AnyRegister dest, Register temp,
                                   Label *fail)
{
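    // Load a typed-array element into dest, sign- or zero-extending integer
    // types; uint32 values either go through temp as doubles or branch to
    // fail, and float results are canonicalized.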
    switch (arrayType) {
    case TypedArrayObject::TYPE_INT8:
        load8SignExtend(src, dest.gpr());
        break;
    case TypedArrayObject::TYPE_UINT8:
    case TypedArrayObject::TYPE_UINT8_CLAMPED:
        load8ZeroExtend(src, dest.gpr());
        break;
    case TypedArrayObject::TYPE_INT16:
        load16SignExtend(src, dest.gpr());
        break;
    case TypedArrayObject::TYPE_UINT16:
        load16ZeroExtend(src, dest.gpr());
        break;
    case TypedArrayObject::TYPE_INT32:
        load32(src, dest.gpr());
        break;
    case TypedArrayObject::TYPE_UINT32:
        if (dest.isFloat()) {
            load32(src, temp);
            convertUInt32ToDouble(temp, dest.fpu());
        } else {
            load32(src, dest.gpr());
            test32(dest.gpr(), dest.gpr());
            j(Assembler::Signed, fail);
        }
        break;
    case TypedArrayObject::TYPE_FLOAT32:
    case TypedArrayObject::TYPE_FLOAT64:
        if (arrayType == TypedArrayObject::TYPE_FLOAT32)
            loadFloatAsDouble(src, dest.fpu());
        else
            loadDouble(src, dest.fpu());
        canonicalizeDouble(dest.fpu());
        break;
    default:
        MOZ_ASSUME_UNREACHABLE("Invalid typed array type");
    }
}