Example #1
0
bool
ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm)
{
    Label failure;
    // by wangqing, 2013-11-26
    masm.movl(ImmTag(JSVAL_TAG_INT32), cmpTempRegister);
    masm.bne(R0.typeReg(), cmpTempRegister, &failure);
    masm.nop();

    switch (op) {
      case JSOP_BITNOT:
        masm.notl(R0.payloadReg());
        break;
      case JSOP_NEG:
        // Guard against 0 and MIN_INT, both of which result in a double.
        // Rewritten from testl(reg, imm) to avoid using cmpTemp2Register.
        // by wangqing, 2013-11-22
        masm.movl(R0.payloadReg(), cmpTempRegister);
        masm.andl(Imm32(0x7fffffff), cmpTempRegister);
        masm.beq(cmpTempRegister, zero, &failure);
        masm.nop();
        masm.negl(R0.payloadReg());
        break;
      default:
        JS_NOT_REACHED("Unexpected op");
        return false;
    }

    EmitReturnFromIC(masm);

    masm.bindBranch(&failure);
    EmitStubGuardFailure(masm);
    return true;
}
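// A standalone sketch (plain C++, not engine code) of why the JSOP_NEG
// guard above masks with 0x7fffffff: the masked value is zero for exactly
// the two int32 inputs whose negation is not representable as an int32,
// namely 0 (which negates to the double -0.0) and INT32_MIN (whose
// negation overflows), so a single branch-if-zero covers both cases.
#include <cstdint>
static bool negationStaysInt32(int32_t x)
{
    return (x & 0x7fffffff) != 0; // false only for 0 and INT32_MIN
}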
void ScratchRegisterAllocator::restoreUsedRegistersFromScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR)
{
    RegisterSet usedRegisters = usedRegistersForCall();
    if (!usedRegisters.numberOfSetRegisters())
        return;
    
    if (scratchGPR == InvalidGPRReg) {
        // Find a scratch register.
        for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
            if (m_lockedRegisters.getGPRByIndex(i) || m_scratchRegisters.getGPRByIndex(i))
                continue;
            scratchGPR = GPRInfo::toRegister(i);
            break;
        }
    }
    RELEASE_ASSERT(scratchGPR != InvalidGPRReg);
    
    jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratchGPR);
    jit.storePtr(MacroAssembler::TrustedImmPtr(0), scratchGPR);

    // Restore double registers first. The preserve step stores GPRs at
    // buffer indices [0, numberOfSetGPRs()) and FPRs after them, so the
    // FPR restore starts its buffer index at numberOfSetGPRs().
    unsigned count = usedRegisters.numberOfSetGPRs();
    for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
        if (usedRegisters.get(reg)) {
            jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)), scratchGPR);
            jit.loadDouble(scratchGPR, reg);
        }
    }
        
    count = 0;
    for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
        if (usedRegisters.get(reg))
            jit.loadPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++), reg);
    }
}
Example #3
0
bool
ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler &masm)
{
    Label failure;
    masm.branchTestInt32(Assembler::NotEqual, R0, &failure);

    switch (op) {
      case JSOP_BITNOT:
        masm.not32(R0.payloadReg());
        break;
      case JSOP_NEG:
        // Guard against 0 and MIN_INT, both of which result in a double.
        masm.branchTest32(Assembler::Zero, R0.payloadReg(), Imm32(INT32_MAX), &failure);

        masm.neg32(R0.payloadReg());
        break;
      default:
        MOZ_ASSUME_UNREACHABLE("Unexpected op");
        return false;
    }

    EmitReturnFromIC(masm);

    masm.bind(&failure);
    EmitStubGuardFailure(masm);
    return true;
}
void ScratchRegisterAllocator::restoreRegistersFromStackForCall(MacroAssembler& jit, const RegisterSet& usedRegisters, const RegisterSet& ignore, unsigned numberOfStackBytesUsedForRegisterPreservation, unsigned extraBytesAtTopOfStack)
{
    RELEASE_ASSERT(extraBytesAtTopOfStack % sizeof(void*) == 0);
    if (!usedRegisters.numberOfSetRegisters()) {
        RELEASE_ASSERT(numberOfStackBytesUsedForRegisterPreservation == 0);
        return;
    }

    unsigned count = 0;
    for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
        if (usedRegisters.get(reg)) {
            if (!ignore.get(reg))
                jit.loadPtr(MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (sizeof(EncodedJSValue) * count)), reg);
            count++;
        }
    }
    for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
        if (usedRegisters.get(reg)) {
            if (!ignore.get(reg))
                jit.loadDouble(MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (sizeof(EncodedJSValue) * count)), reg);
            count++;
        }
    }

    unsigned stackOffset = (usedRegisters.numberOfSetRegisters()) * sizeof(EncodedJSValue);
    stackOffset += extraBytesAtTopOfStack;
    stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), stackOffset);

    RELEASE_ASSERT(count == usedRegisters.numberOfSetRegisters());
    RELEASE_ASSERT(stackOffset == numberOfStackBytesUsedForRegisterPreservation);

    jit.addPtr(
        MacroAssembler::TrustedImm32(stackOffset),
        MacroAssembler::stackPointerRegister);
}
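// Worked example of the stackOffset computation above, with assumed
// illustrative values: on a 64-bit target with sizeof(EncodedJSValue) == 8,
// three used registers, extraBytesAtTopOfStack == 8, and 16-byte stack
// alignment:
//   3 * 8 = 24;  24 + 8 = 32;  roundUpToMultipleOf(16, 32) = 32,
// so the final addPtr pops 32 bytes, matching what
// preserveRegistersToStackForCall subtracted.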
Example #5
0
// Generate a stub that is called immediately after the prologue when there is a
// stack overflow. This stub calls a C++ function to report the error and then
// jumps to the throw stub to pop the activation.
static Offsets
GenerateStackOverflow(MacroAssembler& masm)
{
    masm.haltingAlign(CodeAlignment);

    Offsets offsets;
    offsets.begin = masm.currentOffset();

    // If we reach here via the non-profiling prologue, WasmActivation::fp has
    // not been updated. To enable stack unwinding from C++, store to it now. If
    // we reached here via the profiling prologue, we'll just store the same
    // value again. Do not update AsmJSFrame::callerFP as it is not necessary in
    // the non-profiling case (there is no return path from this point) and, in
    // the profiling case, it is already correct.
    Register activation = ABIArgGenerator::NonArgReturnReg0;
    masm.loadWasmActivation(activation);
    masm.storePtr(masm.getStackPointer(), Address(activation, WasmActivation::offsetOfFP()));

    // Prepare the stack for calling C++.
    if (uint32_t d = StackDecrementForCall(ABIStackAlignment, sizeof(AsmJSFrame), ShadowStackSpace))
        masm.subFromStackPtr(Imm32(d));

    // No need to restore the stack; the throw stub pops everything.
    masm.assertStackAlignment(ABIStackAlignment);
    masm.call(SymbolicAddress::ReportOverRecursed);
    masm.jump(JumpTarget::Throw);

    offsets.end = masm.currentOffset();
    return offsets;
}
void ScratchRegisterAllocator::preserveUsedRegistersToScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR)
{
    RegisterSet usedRegisters = usedRegistersForCall();
    if (!usedRegisters.numberOfSetRegisters())
        return;
    
    unsigned count = 0;
    for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
        if (usedRegisters.get(reg)) {
            jit.storePtr(reg, static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + count);
            count++;
        }
        if (GPRInfo::toIndex(reg) != GPRInfo::InvalidIndex
            && scratchGPR == InvalidGPRReg
            && !m_lockedRegisters.get(reg) && !m_scratchRegisters.get(reg))
            scratchGPR = reg;
    }
    RELEASE_ASSERT(scratchGPR != InvalidGPRReg);
    for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
        if (usedRegisters.get(reg)) {
            jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + count), scratchGPR);
            count++;
            jit.storeDouble(reg, scratchGPR);
        }
    }
    RELEASE_ASSERT(count * sizeof(JSValue) == desiredScratchBufferSizeForCall());
    
    jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratchGPR);
    jit.storePtr(MacroAssembler::TrustedImmPtr(static_cast<size_t>(count * sizeof(JSValue))), scratchGPR);
}
unsigned ScratchRegisterAllocator::preserveRegistersToStackForCall(MacroAssembler& jit, const RegisterSet& usedRegisters, unsigned extraBytesAtTopOfStack)
{
    RELEASE_ASSERT(extraBytesAtTopOfStack % sizeof(void*) == 0);
    if (!usedRegisters.numberOfSetRegisters())
        return 0;
    
    unsigned stackOffset = (usedRegisters.numberOfSetRegisters()) * sizeof(EncodedJSValue);
    stackOffset += extraBytesAtTopOfStack;
    stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), stackOffset);
    jit.subPtr(
        MacroAssembler::TrustedImm32(stackOffset),
        MacroAssembler::stackPointerRegister);

    unsigned count = 0;
    for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
        if (usedRegisters.get(reg)) {
            jit.storePtr(reg, MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (count * sizeof(EncodedJSValue))));
            count++;
        }
    }
    for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
        if (usedRegisters.get(reg)) {
            jit.storeDouble(reg, MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (count * sizeof(EncodedJSValue))));
            count++;
        }
    }

    RELEASE_ASSERT(count == usedRegisters.numberOfSetRegisters());

    return stackOffset;
}
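// A sketch of how the preserve/restore pair above is typically used around
// a call ('allocator', 'jit', and 'usedRegisters' are assumed to exist in
// the caller; this is illustrative, not a fixed API):
//
//   unsigned bytes = allocator.preserveRegistersToStackForCall(jit, usedRegisters, 0);
//   // ... set up arguments and emit the call ...
//   allocator.restoreRegistersFromStackForCall(jit, usedRegisters, RegisterSet(), bytes, 0);
//
// The byte count returned by the preserve step must be passed back to the
// restore step, which RELEASE_ASSERTs that both sides computed the same
// stack layout.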
void
wasm::GenerateExitPrologue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
                           ProfilingOffsets* offsets)
{
    masm.haltingAlign(CodeAlignment);
    GenerateProfilingPrologue(masm, framePushed, reason, offsets);
    masm.setFramePushed(framePushed);
}
Example #9
0
void
js::GenerateAsmJSExitPrologue(MacroAssembler &masm, unsigned framePushed, AsmJSExit::Reason reason,
                              Label *begin)
{
    masm.align(CodeAlignment);
    GenerateProfilingPrologue(masm, framePushed, reason, begin);
    masm.setFramePushed(framePushed);
}
Example #10
0
void
js::GenerateAsmJSExitEpilogue(MacroAssembler &masm, unsigned framePushed, AsmJSExit::Reason reason,
                              Label *profilingReturn)
{
    // Inverse of GenerateAsmJSExitPrologue:
    JS_ASSERT(masm.framePushed() == framePushed);
    GenerateProfilingEpilogue(masm, framePushed, reason, profilingReturn);
    masm.setFramePushed(0);
}
void
wasm::GenerateExitEpilogue(MacroAssembler& masm, unsigned framePushed, ExitReason reason,
                           ProfilingOffsets* offsets)
{
    // Inverse of GenerateExitPrologue:
    MOZ_ASSERT(masm.framePushed() == framePushed);
    GenerateProfilingEpilogue(masm, framePushed, reason, offsets);
    masm.setFramePushed(0);
}
Example #12
0
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  const int code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

  assert(VtableStub::receiver_location() == R0->as_VMReg(), "receiver expected in R0");

  const Register tmp = Rtemp; // Rtemp OK, should be free at call sites

  address npe_addr = __ pc();
  __ load_klass(tmp, R0);

  {
  int entry_offset = in_bytes(Klass::vtable_start_offset()) + vtable_index * vtableEntry::size_in_bytes();
  int method_offset = vtableEntry::method_offset_in_bytes() + entry_offset;

  assert((method_offset & (wordSize - 1)) == 0, "offset should be aligned");
  int offset_mask = AARCH64_ONLY(0xfff << LogBytesPerWord) NOT_AARCH64(0xfff);
  if (method_offset & ~offset_mask) {
    __ add(tmp, tmp, method_offset & ~offset_mask);
  }
  __ ldr(Rmethod, Address(tmp, method_offset & offset_mask));
  }
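  // Note on the masking above: the ldr immediate offset field is limited
  // (12 bits here, scaled by the word size on AARCH64), so any high bits
  // of method_offset are first folded into the base register with an add,
  // and only the low bits are encoded in the load itself.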

  address ame_addr = __ pc();
#ifdef AARCH64
  __ ldr(tmp, Address(Rmethod, Method::from_compiled_offset()));
  __ br(tmp);
#else
  __ ldr(PC, Address(Rmethod, Method::from_compiled_offset()));
#endif // AARCH64

  masm->flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("vtable #%d at " PTR_FORMAT "[%d] left over: %d",
                  vtable_index, p2i(s->entry_point()),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // FIXME ARM: need correct 'slop' - below is x86 code
  // shut the door on sizing bugs
  //int slop = 8;  // 32-bit offset is this much larger than a 13-bit one
  //assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
void JITByIdGenerator::generateFastPathChecks(MacroAssembler& jit, GPRReg butterfly)
{
    m_structureCheck = jit.patchableBranchPtrWithPatch(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(m_base.payloadGPR(), JSCell::structureOffset()),
        m_structureImm, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer)));
    
    m_propertyStorageLoad = jit.convertibleLoadPtr(
        MacroAssembler::Address(m_base.payloadGPR(), JSObject::butterflyOffset()), butterfly);
}
Example #14
0
bool
ICCompare_Double::Compiler::generateStubCode(MacroAssembler &masm)
{
    Label failure, notNaN;
    masm.ensureDouble(R0, FloatReg0, &failure);
    masm.ensureDouble(R1, FloatReg1, &failure);

    Register dest = R0.scratchReg();

    Assembler::DoubleCondition cond = JSOpToDoubleCondition(op);
    masm.xorl(dest, dest);
    masm.compareDouble(cond, FloatReg0, FloatReg1);
    masm.setCC(Assembler::ConditionFromDoubleCondition(cond), dest);

    // Check for NaN, if needed.
    Assembler::NaNCond nanCond = Assembler::NaNCondFromDoubleCondition(cond);
    if (nanCond != Assembler::NaN_HandledByCond) {
      masm.j(Assembler::NoParity, &notNaN);
      masm.mov(Imm32(nanCond == Assembler::NaN_IsTrue), dest);
      masm.bind(&notNaN);
    }

    masm.tagValue(JSVAL_TYPE_BOOLEAN, dest, R0);
    EmitReturnFromIC(masm);

    // Failure case - jump to next stub
    masm.bind(&failure);
    EmitStubGuardFailure(masm);
    return true;
}
void JITByIdGenerator::generateFastCommon(MacroAssembler& jit, size_t inlineICSize)
{
    m_start = jit.label();
    size_t startSize = jit.m_assembler.buffer().codeSize();
    m_slowPathJump = jit.jump();
    size_t jumpSize = jit.m_assembler.buffer().codeSize() - startSize;
    size_t nopsToEmitInBytes = inlineICSize - jumpSize;
    jit.emitNops(nopsToEmitInBytes);
    ASSERT(jit.m_assembler.buffer().codeSize() - startSize == inlineICSize);
    m_done = jit.label();
}
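// The nop padding above gives every inline IC a fixed footprint: the fast
// path initially consists of a single jump to the slow path, padded with
// nops out to inlineICSize bytes (the ASSERT checks this), so the region
// between m_start and m_done has a known size and can later be repatched
// in place.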
Example #16
0
static void
PushRetAddr(MacroAssembler &masm)
{
#if defined(JS_CODEGEN_ARM)
    masm.push(lr);
#elif defined(JS_CODEGEN_MIPS)
    masm.push(ra);
#else
    // The x86/x64 call instruction pushes the return address.
#endif
}
void InlineCacheBuffer::assemble_ic_buffer_code(address code_begin, oop cached_oop, address entry_point) {
  ResourceMark rm;
  CodeBuffer      code(code_begin, ic_stub_code_size());
  MacroAssembler* masm            = new MacroAssembler(&code);
  // note: even though the code contains an embedded oop, we do not need reloc info
  // because
  // (1) the oop is old (i.e., doesn't matter for scavenges)
  // (2) these ICStubs are removed *before* a GC happens, so the roots disappear
  assert(cached_oop == NULL || cached_oop->is_perm(), "must be perm oop");
  masm->lea(rax, OopAddress((address) cached_oop));
  masm->jump(ExternalAddress(entry_point));
}
Example #18
0
void
AsmJSPerfSpewer::noteBlocksOffsets(MacroAssembler &masm)
{
    if (!PerfBlockEnabled() || !fp_)
        return;

    for (uint32_t i = 0; i < basicBlocks_.length(); i++) {
        Record &r = basicBlocks_[i];
        r.startOffset = masm.actualOffset(r.start.offset());
        r.endOffset = masm.actualOffset(r.end.offset());
    }
}
// In profiling mode, we need to maintain fp so that we can unwind the stack at
// any pc. In non-profiling mode, the only way to observe WasmActivation::fp is
// to call out to C++ so, as an optimization, we don't update fp. To avoid
// recompilation when the profiling mode is toggled, we generate both prologues
// a priori and switch between prologues when the profiling mode is toggled.
// Specifically, Module::setProfilingEnabled patches all callsites to
// either call the profiling or non-profiling entry point.
void
wasm::GenerateFunctionPrologue(MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets)
{
#if defined(JS_CODEGEN_ARM)
    // Flush pending pools so they do not get dumped between the 'begin' and
    // 'entry' offsets since the difference must be less than UINT8_MAX.
    masm.flushBuffer();
#endif

    masm.haltingAlign(CodeAlignment);

    GenerateProfilingPrologue(masm, framePushed, ExitReason::None, offsets);
    Label body;
    masm.jump(&body);

    // Generate normal prologue:
    masm.haltingAlign(CodeAlignment);
    offsets->nonProfilingEntry = masm.currentOffset();
    PushRetAddr(masm);
    masm.subFromStackPtr(Imm32(framePushed + AsmJSFrameBytesAfterReturnAddress));

    // Prologue join point, body begin:
    masm.bind(&body);
    masm.setFramePushed(framePushed);
}
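// Shape of the code emitted by GenerateFunctionPrologue above (sketch):
//
//   profiling entry:     GenerateProfilingPrologue(...)
//                        jump body
//   nonProfilingEntry:   PushRetAddr
//                        sub sp, framePushed + AsmJSFrameBytesAfterReturnAddress
//   body:                function body, with framePushed bytes on the stack
//
// Module::setProfilingEnabled (see the comment above) patches each
// callsite to target one entry point or the other.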
Example #20
0
void restoreAllRegisters(MacroAssembler& jit, char* scratchMemory)
{
    // Give ourselves a pointer to the scratch memory.
    jit.move(MacroAssembler::TrustedImmPtr(scratchMemory), MacroAssembler::firstRealRegister());
    
    // Restore all FPR's.
    for (MacroAssembler::FPRegisterID reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg))
        jit.loadDouble(MacroAssembler::Address(MacroAssembler::firstRealRegister(), offsetOfFPR(reg)), reg);
    
    for (MacroAssembler::RegisterID reg = MacroAssembler::secondRealRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg))
        jit.load64(MacroAssembler::Address(MacroAssembler::firstRealRegister(), offsetOfGPR(reg)), reg);
    
    jit.load64(MacroAssembler::Address(MacroAssembler::firstRealRegister(), offsetOfGPR(MacroAssembler::firstRealRegister())), MacroAssembler::firstRealRegister());
}
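// The restore order above matters: firstRealRegister() doubles as the
// pointer into the scratch memory, so all FPRs and the remaining GPRs are
// reloaded through it first, and its own saved value is loaded last,
// overwriting the pointer only once it is no longer needed.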
Example #21
0
bool NativeInstruction::is_ic_miss_trap() {
  if (ic_miss_trap_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(Assembler::notEqual, Assembler::ptr_cc, G0, ST_RESERVED_FOR_USER_0 + 2);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    ic_miss_trap_bits = bits;
    assert(ic_miss_trap_bits != 0, "oops");
  }
  return long_at(0) == ic_miss_trap_bits;
}
Example #22
0
int NativeInstruction::illegal_instruction() {
  if (illegal_instruction_bits == 0) {
    ResourceMark rm;
    char buf[40];
    CodeBuffer cbuf((address)&buf[0], 20);
    MacroAssembler* a = new MacroAssembler(&cbuf);
    address ia = a->pc();
    a->trap(ST_RESERVED_FOR_USER_0 + 1);
    int bits = *(int*)ia;
    assert(is_op3(bits, Assembler::trap_op3, Assembler::arith_op), "bad instruction");
    illegal_instruction_bits = bits;
    assert(illegal_instruction_bits != 0, "oops");
  }
  return illegal_instruction_bits;
}
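// Both routines above rely on the same idiom: assemble the instruction of
// interest once into a small on-stack buffer, read its 32-bit encoding
// back, and cache the bits for later comparison against live code. A
// minimal sketch of that idiom ('emit' is an illustrative callback, not
// HotSpot API):
static int assemble_once(void (*emit)(MacroAssembler*)) {
  ResourceMark rm;
  char buf[40];
  CodeBuffer cbuf((address)&buf[0], 20);
  MacroAssembler* a = new MacroAssembler(&cbuf);
  address ia = a->pc();
  emit(a);              // emit exactly one instruction
  return *(int*)ia;     // read back its encoding
}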
MoveEmitterX86::MoveEmitterX86(MacroAssembler& masm)
  : inCycle_(false),
    masm(masm),
    pushedAtCycle_(-1)
{
    pushedAtStart_ = masm.framePushed();
}
void JITByIdGenerator::generateFastPathChecks(MacroAssembler& jit)
{
    m_structureCheck = jit.patchableBranch32WithPatch(
        MacroAssembler::NotEqual,
        MacroAssembler::Address(m_base.payloadGPR(), JSCell::structureIDOffset()),
        m_structureImm, MacroAssembler::TrustedImm32(0));
}
void JITPutByIdGenerator::generateFastPath(MacroAssembler& jit)
{
    generateFastPathChecks(jit);
    
#if USE(JSVALUE64)
    m_loadOrStore = jit.store64WithAddressOffsetPatch(
        m_value.payloadGPR(), MacroAssembler::Address(m_base.payloadGPR(), 0)).label();
#else
    m_tagLoadOrStore = jit.store32WithAddressOffsetPatch(
        m_value.tagGPR(), MacroAssembler::Address(m_base.payloadGPR(), 0)).label();
    m_loadOrStore = jit.store32WithAddressOffsetPatch(
        m_value.payloadGPR(), MacroAssembler::Address(m_base.payloadGPR(), 0)).label();
#endif
    
    m_done = jit.label();
}
void JITGetByIdGenerator::generateFastPath(MacroAssembler& jit)
{
    generateFastPathChecks(jit, m_value.payloadGPR());
    
#if USE(JSVALUE64)
    m_loadOrStore = jit.load64WithCompactAddressOffsetPatch(
        MacroAssembler::Address(m_value.payloadGPR(), 0), m_value.payloadGPR()).label();
#else
    m_tagLoadOrStore = jit.load32WithCompactAddressOffsetPatch(
        MacroAssembler::Address(m_value.payloadGPR(), 0), m_value.tagGPR()).label();
    m_loadOrStore = jit.load32WithCompactAddressOffsetPatch(
        MacroAssembler::Address(m_value.payloadGPR(), 0), m_value.payloadGPR()).label();
#endif
    
    m_done = jit.label();
}
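// In the two generators above, the Address(..., 0) operands are
// placeholders: the *WithAddressOffsetPatch loads and stores record a
// patchable offset field that is filled in later, when the IC is linked
// against a concrete property offset.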
Example #27
0
MoveOperand::MoveOperand(MacroAssembler& masm, const ABIArg& arg)
{
    switch (arg.kind()) {
      case ABIArg::GPR:
        kind_ = REG;
        code_ = arg.gpr().code();
        break;
#ifdef JS_CODEGEN_REGISTER_PAIR
      case ABIArg::GPR_PAIR:
        kind_ = REG_PAIR;
        code_ = arg.evenGpr().code();
        MOZ_ASSERT(code_ % 2 == 0);
        MOZ_ASSERT(code_ + 1 == arg.oddGpr().code());
        break;
#endif
      case ABIArg::FPU:
        kind_ = FLOAT_REG;
        code_ = arg.fpu().code();
        break;
      case ABIArg::Stack:
        kind_ = MEMORY;
        code_ = masm.getStackPointer().code();
        disp_ = arg.offsetFromArgBase();
        break;
    }
}
static bool Prepare(MacroAssembler& masm)
{
    AllocatableRegisterSet regs(RegisterSet::Volatile());
    LiveRegisterSet save(regs.asLiveSet());
    masm.PushRegsInMask(save);
    return true;
}
Example #29
0
bool
ICUnaryArith_Int32::Compiler::generateStubCode(MacroAssembler& masm)
{
    Label failure;
    masm.branchTestInt32(Assembler::NotEqual, R0, &failure);

    switch (op) {
      case JSOP_BITNOT:
        masm.Mvn(ARMRegister(R1.valueReg(), 32), ARMRegister(R0.valueReg(), 32));
        masm.movePayload(R1.valueReg(), R0.valueReg());
        break;
      case JSOP_NEG:
        // Guard against 0 and MIN_INT, both of which result in a double.
        masm.branchTest32(Assembler::Zero, R0.valueReg(), Imm32(0x7fffffff), &failure);

        // Compile -x as 0 - x.
        masm.Sub(ARMRegister(R1.valueReg(), 32), wzr, ARMRegister(R0.valueReg(), 32));
        masm.movePayload(R1.valueReg(), R0.valueReg());
        break;
      default:
        MOZ_CRASH("Unexpected op");
    }

    EmitReturnFromIC(masm);

    masm.bind(&failure);
    EmitStubGuardFailure(masm);
    return true;
}
// Similar to GenerateFunctionPrologue (see comment), we generate both a
// profiling and non-profiling epilogue a priori. When the profiling mode is
// toggled, ToggleProfiling patches the 'profiling jump' to either be a nop
// (falling through to the normal prologue) or a jump (jumping to the profiling
// epilogue).
void
wasm::GenerateFunctionEpilogue(MacroAssembler& masm, unsigned framePushed, FuncOffsets* offsets)
{
    MOZ_ASSERT(masm.framePushed() == framePushed);

#if defined(JS_CODEGEN_ARM)
    // Flush pending pools so they do not get dumped between the profilingReturn
    // and profilingJump/profilingEpilogue offsets since the difference must be
    // less than UINT8_MAX.
    masm.flushBuffer();
#endif

    // Generate a nop that is overwritten by a jump to the profiling epilogue
    // when profiling is enabled.
    offsets->profilingJump = masm.nopPatchableToNearJump().offset();

    // Normal epilogue:
    masm.addToStackPtr(Imm32(framePushed + AsmJSFrameBytesAfterReturnAddress));
    masm.ret();
    masm.setFramePushed(0);

    // Profiling epilogue:
    offsets->profilingEpilogue = masm.currentOffset();
    GenerateProfilingEpilogue(masm, framePushed, ExitReason::None, offsets);
}
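// Shape of the code emitted by GenerateFunctionEpilogue above (sketch,
// mirroring GenerateFunctionPrologue):
//
//   profilingJump:       nop        <- patched to a near jump when
//                                      profiling is toggled on
//   normal epilogue:     add sp, framePushed + AsmJSFrameBytesAfterReturnAddress
//                        ret
//   profilingEpilogue:   GenerateProfilingEpilogue(...)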