// Perform an atomic exchange of |value| with the typed-array element at
// |mem|, leaving the previous element value in |output|.
//
// arrayType - Scalar::* code selecting element width and signedness.
// mem       - memory operand addressing the element.
// value     - register holding the new value to store.
// temp      - scratch GPR, used only on the Uint32 path.
// output    - receives the old value; a GPR for every integer type except
//             Uint32, which is delivered as a double in an FPU register.
void MacroAssemblerX86Shared::atomicExchangeToTypedIntArray(Scalar::Type arrayType, const T& mem,
                                                            Register value, Register temp,
                                                            AnyRegister output)
{
    switch (arrayType) {
      case Scalar::Int8:
        atomicExchange8SignExtend(mem, value, output.gpr());
        break;
      case Scalar::Uint8:
        atomicExchange8ZeroExtend(mem, value, output.gpr());
        break;
      case Scalar::Int16:
        atomicExchange16SignExtend(mem, value, output.gpr());
        break;
      case Scalar::Uint16:
        atomicExchange16ZeroExtend(mem, value, output.gpr());
        break;
      case Scalar::Int32:
        atomicExchange32(mem, value, output.gpr());
        break;
      case Scalar::Uint32:
        // At the moment, the code in MCallOptimize.cpp requires the output
        // type to be double for uint32 arrays. See bug 1077305. Exchange
        // through the scratch GPR, then widen to double.
        MOZ_ASSERT(output.isFloat());
        atomicExchange32(mem, value, temp);
        asMasm().convertUInt32ToDouble(temp, output.fpu());
        break;
      default:
        MOZ_CRASH("Invalid typed array type");
    }
}
// Pick a register for |vreg|, evicting an existing register if necessary.
// Spill code will be placed before |ins|, and no existing allocated input
// for |ins| will be touched.
//
// Preference order: any register whose current allocation is missing wins
// outright; otherwise the oldest (least recently used) eligible register
// is chosen.
StupidAllocator::RegisterIndex
StupidAllocator::allocateRegister(LInstruction *ins, uint32_t vreg)
{
    JS_ASSERT(ins);

    LDefinition *def = virtualRegisters[vreg];
    JS_ASSERT(def);

    // The register class must match the definition: float registers for
    // DOUBLE definitions, general registers for everything else.
    bool wantFloat = (def->type() == LDefinition::DOUBLE);

    RegisterIndex chosen = UINT32_MAX;
    for (size_t index = 0; index < registerCount; index++) {
        AnyRegister candidate = registers[index].reg;

        if (candidate.isFloat() != wantFloat)
            continue;

        // Skip the register if it is in use for an allocated input or output.
        if (registerIsReserved(ins, candidate))
            continue;

        bool take = false;
        if (registers[index].vreg == MISSING_ALLOCATION) {
            // A free register always beats the current candidate.
            take = true;
        } else if (chosen == UINT32_MAX) {
            // Nothing picked yet; take anything eligible.
            take = true;
        } else if (registers[index].age < registers[chosen].age) {
            // Prefer the register whose contents are stalest.
            take = true;
        }
        if (take)
            chosen = index;
    }

    evictRegister(ins, chosen);
    return chosen;
}
void MacroAssembler::loadFromTypedArray(int arrayType, const T &src, AnyRegister dest, Register temp, Label *fail) { switch (arrayType) { case TypedArray::TYPE_INT8: load8SignExtend(src, dest.gpr()); break; case TypedArray::TYPE_UINT8: case TypedArray::TYPE_UINT8_CLAMPED: load8ZeroExtend(src, dest.gpr()); break; case TypedArray::TYPE_INT16: load16SignExtend(src, dest.gpr()); break; case TypedArray::TYPE_UINT16: load16ZeroExtend(src, dest.gpr()); break; case TypedArray::TYPE_INT32: load32(src, dest.gpr()); break; case TypedArray::TYPE_UINT32: if (dest.isFloat()) { load32(src, temp); convertUInt32ToDouble(temp, dest.fpu()); } else { load32(src, dest.gpr()); test32(dest.gpr(), dest.gpr()); j(Assembler::Signed, fail); } break; case TypedArray::TYPE_FLOAT32: case TypedArray::TYPE_FLOAT64: { if (arrayType == js::TypedArray::TYPE_FLOAT32) loadFloatAsDouble(src, dest.fpu()); else loadDouble(src, dest.fpu()); // Make sure NaN gets canonicalized. Label notNaN; branchDouble(DoubleOrdered, dest.fpu(), dest.fpu(), ¬NaN); { loadStaticDouble(&js_NaN, dest.fpu()); } bind(¬NaN); break; } default: JS_NOT_REACHED("Invalid typed array type"); break; } }
// Coerce the faulting register in a signal/exception |context| to the value
// JS "undefined" converts to: NaN for float registers (float32 or double
// NaN depending on |isFloat32|), zero for general-purpose registers.
// The switch maps each machine register code to the matching slot accessor
// macro (XMM_sig / R??_sig) for the platform's CONTEXT layout; the macros
// take the context plus a literal index, so the mapping must stay explicit.
static void
SetRegisterToCoercedUndefined(CONTEXT *context, bool isFloat32, AnyRegister reg)
{
    if (reg.isFloat()) {
        // Float register: write a (possibly float32) NaN into the saved
        // XMM slot corresponding to the register's code.
        switch (reg.fpu().code()) {
          case JSC::X86Registers::xmm0:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 0)); break;
          case JSC::X86Registers::xmm1:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 1)); break;
          case JSC::X86Registers::xmm2:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 2)); break;
          case JSC::X86Registers::xmm3:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 3)); break;
          case JSC::X86Registers::xmm4:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 4)); break;
          case JSC::X86Registers::xmm5:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 5)); break;
          case JSC::X86Registers::xmm6:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 6)); break;
          case JSC::X86Registers::xmm7:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 7)); break;
          case JSC::X86Registers::xmm8:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 8)); break;
          case JSC::X86Registers::xmm9:  SetXMMRegToNaN(isFloat32, &XMM_sig(context, 9)); break;
          case JSC::X86Registers::xmm10: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 10)); break;
          case JSC::X86Registers::xmm11: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 11)); break;
          case JSC::X86Registers::xmm12: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 12)); break;
          case JSC::X86Registers::xmm13: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 13)); break;
          case JSC::X86Registers::xmm14: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 14)); break;
          case JSC::X86Registers::xmm15: SetXMMRegToNaN(isFloat32, &XMM_sig(context, 15)); break;
          default: MOZ_CRASH();
        }
    } else {
        // General-purpose register: zero the saved 64-bit slot.
        switch (reg.gpr().code()) {
          case JSC::X86Registers::eax: RAX_sig(context) = 0; break;
          case JSC::X86Registers::ecx: RCX_sig(context) = 0; break;
          case JSC::X86Registers::edx: RDX_sig(context) = 0; break;
          case JSC::X86Registers::ebx: RBX_sig(context) = 0; break;
          case JSC::X86Registers::esp: RSP_sig(context) = 0; break;
          case JSC::X86Registers::ebp: RBP_sig(context) = 0; break;
          case JSC::X86Registers::esi: RSI_sig(context) = 0; break;
          case JSC::X86Registers::edi: RDI_sig(context) = 0; break;
          case JSC::X86Registers::r8:  R8_sig(context)  = 0; break;
          case JSC::X86Registers::r9:  R9_sig(context)  = 0; break;
          case JSC::X86Registers::r10: R10_sig(context) = 0; break;
          case JSC::X86Registers::r11: R11_sig(context) = 0; break;
          case JSC::X86Registers::r12: R12_sig(context) = 0; break;
          case JSC::X86Registers::r13: R13_sig(context) = 0; break;
          case JSC::X86Registers::r14: R14_sig(context) = 0; break;
          case JSC::X86Registers::r15: R15_sig(context) = 0; break;
          default: MOZ_CRASH();
        }
    }
}
void MacroAssembler::loadFromTypedArray(int arrayType, const T &src, AnyRegister dest, Register temp, Label *fail) { switch (arrayType) { case TypedArrayObject::TYPE_INT8: load8SignExtend(src, dest.gpr()); break; case TypedArrayObject::TYPE_UINT8: case TypedArrayObject::TYPE_UINT8_CLAMPED: load8ZeroExtend(src, dest.gpr()); break; case TypedArrayObject::TYPE_INT16: load16SignExtend(src, dest.gpr()); break; case TypedArrayObject::TYPE_UINT16: load16ZeroExtend(src, dest.gpr()); break; case TypedArrayObject::TYPE_INT32: load32(src, dest.gpr()); break; case TypedArrayObject::TYPE_UINT32: if (dest.isFloat()) { load32(src, temp); convertUInt32ToDouble(temp, dest.fpu()); } else { load32(src, dest.gpr()); test32(dest.gpr(), dest.gpr()); j(Assembler::Signed, fail); } break; case TypedArrayObject::TYPE_FLOAT32: case TypedArrayObject::TYPE_FLOAT64: if (arrayType == TypedArrayObject::TYPE_FLOAT32) loadFloatAsDouble(src, dest.fpu()); else loadDouble(src, dest.fpu()); canonicalizeDouble(dest.fpu()); break; default: MOZ_ASSUME_UNREACHABLE("Invalid typed array type"); } }
bool GreedyAllocator::allocateDefinition(LInstruction *ins, LDefinition *def) { VirtualRegister *vr = getVirtualRegister(def); LAllocation output; switch (def->policy()) { case LDefinition::PASSTHROUGH: // This is purely passthru, so ignore it. return true; case LDefinition::DEFAULT: case LDefinition::MUST_REUSE_INPUT: { AnyRegister reg; // Either take the register requested, or allocate a new one. if (def->policy() == LDefinition::MUST_REUSE_INPUT && ins->getOperand(def->getReusedInput())->toUse()->isFixedRegister()) { LAllocation *a = ins->getOperand(def->getReusedInput()); VirtualRegister *vuse = getVirtualRegister(a->toUse()); reg = GetFixedRegister(vuse->def, a->toUse()); } else if (vr->hasRegister()) { reg = vr->reg(); } else { if (!allocate(vr->type(), DISALLOW, ®)) return false; } if (def->policy() == LDefinition::MUST_REUSE_INPUT) { LUse *use = ins->getOperand(def->getReusedInput())->toUse(); VirtualRegister *vuse = getVirtualRegister(use); // If the use already has the given register, we need to evict. if (vuse->hasRegister() && vuse->reg() == reg) { if (!evict(reg)) return false; } // Make sure our input is using a fixed register. if (reg.isFloat()) *use = LUse(reg.fpu(), use->virtualRegister()); else *use = LUse(reg.gpr(), use->virtualRegister()); } output = LAllocation(reg); break; } case LDefinition::PRESET: { // Eviction and disallowing occurred during the definition // pre-scan pass. output = *def->output(); break; } } if (output.isRegister()) { JS_ASSERT_IF(output.isFloatReg(), disallowed.has(output.toFloatReg()->reg())); JS_ASSERT_IF(output.isGeneralReg(), disallowed.has(output.toGeneralReg()->reg())); } // Finally, set the output. def->setOutput(output); return true; }