// If we can emit optimized code for the cycle in moves starting at position i,
// do so, and return true.
bool MoveEmitterX86::maybeEmitOptimizedCycle(const MoveResolver& moves, size_t i,
                                             bool allGeneralRegs, bool allFloatRegs,
                                             size_t swapCount)
{
    if (allGeneralRegs && swapCount <= 2) {
        // Use x86's swap-integer-registers instruction if we only have a few
        // swaps. (x86 also has a swap between registers and memory but it's
        // slow.)
        for (size_t k = 0; k < swapCount; k++)
            masm.xchg(moves.getMove(i + k).to().reg(),
                      moves.getMove(i + k + 1).to().reg());
        return true;
    }

    if (allFloatRegs && swapCount == 1) {
        // There's no xchg for xmm registers, but if we only need a single
        // swap, it's cheap to do an XOR swap.
        FloatRegister a = moves.getMove(i).to().floatReg();
        FloatRegister b = moves.getMove(i + 1).to().floatReg();
        masm.vxorpd(a, b, b);
        masm.vxorpd(b, a, a);
        masm.vxorpd(a, b, b);
        return true;
    }

    return false;
}
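// For reference, the same XOR-swap identity in scalar C++, on integer bit
// patterns instead of xmm registers. This is a standalone illustration, not
// emitter code; xorSwap is a hypothetical helper, and each line mirrors one
// of the three vxorpd instructions above (each computing dest = src1 ^ src2).
#include <cstdint>

static void xorSwap(uint64_t& a, uint64_t& b)
{
    b ^= a;  // mirrors vxorpd(a, b, b): b = a0 ^ b0
    a ^= b;  // mirrors vxorpd(b, a, a): a = a0 ^ (a0 ^ b0) = b0
    b ^= a;  // mirrors vxorpd(a, b, b): b = (a0 ^ b0) ^ b0 = a0
}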
// Examine the cycle in moves starting at position i. Determine if it's a
// simple cycle consisting of all register-to-register moves in a single class,
// and whether it can be implemented entirely by swaps.
size_t MoveEmitterX86::characterizeCycle(const MoveResolver& moves, size_t i,
                                         bool* allGeneralRegs, bool* allFloatRegs)
{
    size_t swapCount = 0;

    for (size_t j = i; ; j++) {
        const Move& move = moves.getMove(j);

        // If it isn't a cycle of registers of the same kind, we won't be able
        // to optimize it.
        if (!move.to().isGeneralReg())
            *allGeneralRegs = false;
        if (!move.to().isFloatReg())
            *allFloatRegs = false;
        if (!*allGeneralRegs && !*allFloatRegs)
            return -1;

        // The first and last move of the cycle are marked with inCycle(). Stop
        // iterating when we see the last one.
        if (j != i && move.inCycle())
            break;

        // Check that this move is actually part of the cycle. This is
        // over-conservative when there are multiple reads from the same
        // source, but that's expected to be rare.
        if (move.from() != moves.getMove(j + 1).to()) {
            *allGeneralRegs = false;
            *allFloatRegs = false;
            return -1;
        }

        swapCount++;
    }

    // Check that the last move cycles back to the first move.
    const Move& move = moves.getMove(i + swapCount);
    if (move.from() != moves.getMove(i).to()) {
        *allGeneralRegs = false;
        *allFloatRegs = false;
        return -1;
    }

    return swapCount;
}
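// Two notes on characterizeCycle(), plus a standalone sketch. First, because
// the return type is size_t, the -1 failure value wraps to SIZE_MAX, which
// the swapCount <= 2 and swapCount == 1 tests in maybeEmitOptimizedCycle()
// safely reject. Second, a successful swapCount is N-1 for an N-move cycle:
// rotating N values needs only N-1 adjacent swaps, as this illustrative
// (hypothetical, non-emitter) function shows.
#include <algorithm>
#include <cstddef>

static void rotateByAdjacentSwaps(int* vals, size_t n)
{
    // Afterwards each vals[k] holds the old vals[k + 1], and vals[n - 1]
    // holds the old vals[0] -- the same effect the xchg sequence in
    // maybeEmitOptimizedCycle() has on the cycle's destination registers.
    for (size_t k = 0; k + 1 < n; k++)
        std::swap(vals[k], vals[k + 1]);
}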
void MoveEmitterARM64::emit(const MoveResolver& moves)
{
    if (moves.numCycles()) {
        masm.reserveStack(sizeof(void*));
        pushedAtCycle_ = masm.framePushed();
    }

    for (size_t i = 0; i < moves.numMoves(); i++)
        emitMove(moves.getMove(i));
}
void MoveEmitterMIPS::emit(const MoveResolver& moves)
{
    if (moves.hasCycles()) {
        // Reserve stack for cycle resolution.
        masm.reserveStack(sizeof(double));
        pushedAtCycle_ = masm.framePushed();
    }

    for (size_t i = 0; i < moves.numMoves(); i++)
        emit(moves.getMove(i));
}
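// The slot reserved above is what the cycle-breaking code spills through. A
// minimal sketch of how such a slot is usually addressed, assuming a
// hypothetical cycleSlot() helper (the real helper may take a slot index and
// varies per backend): the stack-pointer offset is recomputed from
// framePushed() so it remains valid even after subsequent pushes.
Address MoveEmitterMIPS::cycleSlot() const
{
    int32_t offset = masm.framePushed() - pushedAtCycle_;
    return Address(StackPointer, offset);
}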
void MoveEmitterX86::emit(const MoveResolver& moves)
{
    for (size_t i = 0; i < moves.numMoves(); i++) {
        const MoveOp& move = moves.getMove(i);
        const MoveOperand& from = move.from();
        const MoveOperand& to = move.to();

        if (move.isCycleEnd()) {
            JS_ASSERT(inCycle_);
            completeCycle(to, move.type());
            inCycle_ = false;
            continue;
        }

        if (move.isCycleBegin()) {
            JS_ASSERT(!inCycle_);

            // Characterize the cycle.
            bool allGeneralRegs = true, allFloatRegs = true;
            size_t swapCount = characterizeCycle(moves, i, &allGeneralRegs, &allFloatRegs);

            // Attempt to optimize it to avoid using the stack.
            if (maybeEmitOptimizedCycle(moves, i, allGeneralRegs, allFloatRegs, swapCount)) {
                i += swapCount;
                continue;
            }

            // Otherwise use the stack.
            breakCycle(to, move.endCycleType());
            inCycle_ = true;
        }

        // A normal move which is not part of a cycle.
        switch (move.type()) {
          case MoveOp::FLOAT32:
            emitFloat32Move(from, to);
            break;
          case MoveOp::DOUBLE:
            emitDoubleMove(from, to);
            break;
          case MoveOp::INT32:
            emitInt32Move(from, to);
            break;
          case MoveOp::GENERAL:
            emitGeneralMove(from, to);
            break;
          default:
            MOZ_ASSUME_UNREACHABLE("Unexpected move type");
        }
    }
}
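// Worked example (hypothetical registers) for the loop above: given the
// two-move general-register cycle (eax -> ebx), (ebx -> eax),
// characterizeCycle() returns swapCount == 1, maybeEmitOptimizedCycle() emits
// a single xchg of the two destinations, and i += swapCount advances to the
// cycle-end move so the loop's own i++ consumes it instead of re-emitting it.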
void MoveEmitterX86::emit(const MoveResolver& moves)
{
    for (size_t i = 0; i < moves.numMoves(); i++) {
        const Move& move = moves.getMove(i);
        const MoveOperand& from = move.from();
        const MoveOperand& to = move.to();

        if (move.inCycle()) {
            // If this is the end of a cycle for which we're using the stack,
            // handle the end.
            if (inCycle_) {
                completeCycle(to, move.kind());
                inCycle_ = false;
                continue;
            }

            // Characterize the cycle.
            bool allGeneralRegs = true, allFloatRegs = true;
            size_t swapCount = characterizeCycle(moves, i, &allGeneralRegs, &allFloatRegs);

            // Attempt to optimize it to avoid using the stack.
            if (maybeEmitOptimizedCycle(moves, i, allGeneralRegs, allFloatRegs, swapCount)) {
                i += swapCount;
                continue;
            }

            // Otherwise use the stack.
            breakCycle(to, move.kind());
            inCycle_ = true;
        }

        // A normal move which is not part of a cycle.
        if (move.kind() == Move::DOUBLE)
            emitDoubleMove(from, to);
        else
            emitGeneralMove(from, to);
    }
}
void MoveEmitterX86::emit(const MoveResolver& moves)
{
#if defined(JS_CODEGEN_X86) && defined(DEBUG)
    // Clobber any scratch register we have, to make regalloc bugs more visible.
    if (hasScratchRegister())
        masm.mov(ImmWord(0xdeadbeef), scratchRegister());
#endif

    for (size_t i = 0; i < moves.numMoves(); i++) {
        const MoveOp& move = moves.getMove(i);
        const MoveOperand& from = move.from();
        const MoveOperand& to = move.to();

        if (move.isCycleEnd()) {
            MOZ_ASSERT(inCycle_);
            completeCycle(to, move.type());
            inCycle_ = false;
            continue;
        }

        if (move.isCycleBegin()) {
            MOZ_ASSERT(!inCycle_);

            // Characterize the cycle.
            bool allGeneralRegs = true, allFloatRegs = true;
            size_t swapCount = characterizeCycle(moves, i, &allGeneralRegs, &allFloatRegs);

            // Attempt to optimize it to avoid using the stack.
            if (maybeEmitOptimizedCycle(moves, i, allGeneralRegs, allFloatRegs, swapCount)) {
                i += swapCount;
                continue;
            }

            // Otherwise use the stack.
            breakCycle(to, move.endCycleType());
            inCycle_ = true;
        }

        // A normal move which is not part of a cycle.
        switch (move.type()) {
          case MoveOp::FLOAT32:
            emitFloat32Move(from, to);
            break;
          case MoveOp::DOUBLE:
            emitDoubleMove(from, to);
            break;
          case MoveOp::INT32:
            emitInt32Move(from, to);
            break;
          case MoveOp::GENERAL:
            emitGeneralMove(from, to);
            break;
          case MoveOp::INT32X4:
            emitInt32X4Move(from, to);
            break;
          case MoveOp::FLOAT32X4:
            emitFloat32X4Move(from, to);
            break;
          default:
            MOZ_CRASH("Unexpected move type");
        }
    }
}
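// breakCycle() and completeCycle() are not shown in these excerpts; here is a
// minimal sketch of the pair for the general-purpose case. The helpers
// toOperand() and toPopOperand(), which convert a MoveOperand into an
// assembler operand, are assumed rather than confirmed by the code above.
void MoveEmitterX86::breakCycle(const MoveOperand& to, MoveOp::Type type)
{
    // For a cycle (A -> B), (B -> A): save B on the stack, then let the
    // normal move code overwrite it.
    switch (type) {
      case MoveOp::INT32:
      case MoveOp::GENERAL:
        masm.Push(toOperand(to));
        break;
      default:
        MOZ_CRASH("sketch covers only general-purpose moves");
    }
}

void MoveEmitterX86::completeCycle(const MoveOperand& to, MoveOp::Type type)
{
    // At the cycle's end, the saved copy of B is on top of the stack; pop it
    // into the cycle-ending move's destination.
    switch (type) {
      case MoveOp::INT32:
      case MoveOp::GENERAL:
        masm.Pop(toPopOperand(to));
        break;
      default:
        MOZ_CRASH("sketch covers only general-purpose moves");
    }
}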