/*
 * TUNING: On some implementations, it is quicker to pass addresses
 * to the handlers rather than load the operands into core registers
 * and then move the values to FP regs in the handlers. Other implementations
 * may prefer passing data in registers (and the latter approach would
 * yield cleaner register handling - avoiding the requirement that operands
 * be flushed to memory prior to the call).
 */
static bool genArithOpFloat(CompilationUnit *cUnit, MIR *mir,
                            RegLocation rlDest, RegLocation rlSrc1,
                            RegLocation rlSrc2)
{
    TemplateOpCode handlerTemplate;

    /*
     * No attempt is made to optimize register usage: the selected template
     * is an out-of-line handler, so the operands have to be flushed to
     * memory regardless.
     */
    switch (mir->dalvikInsn.opCode) {
        case OP_ADD_FLOAT:
        case OP_ADD_FLOAT_2ADDR:
            handlerTemplate = TEMPLATE_ADD_FLOAT_VFP;
            break;
        case OP_SUB_FLOAT:
        case OP_SUB_FLOAT_2ADDR:
            handlerTemplate = TEMPLATE_SUB_FLOAT_VFP;
            break;
        case OP_DIV_FLOAT:
        case OP_DIV_FLOAT_2ADDR:
            handlerTemplate = TEMPLATE_DIV_FLOAT_VFP;
            break;
        case OP_MUL_FLOAT:
        case OP_MUL_FLOAT_2ADDR:
            handlerTemplate = TEMPLATE_MUL_FLOAT_VFP;
            break;
        case OP_REM_FLOAT:
        case OP_REM_FLOAT_2ADDR:
        case OP_NEG_FLOAT:
            /* No VFP template for these - defer to the portable path */
            return genArithOpFloatPortable(cUnit, mir, rlDest, rlSrc1,
                                           rlSrc2);
        default:
            return true;    /* opcode not handled here */
    }

    /* Pass the three operand addresses in r0-r2 and invoke the handler */
    loadValueAddressDirect(cUnit, rlDest, r0);
    loadValueAddressDirect(cUnit, rlSrc1, r1);
    loadValueAddressDirect(cUnit, rlSrc2, r2);
    genDispatchToHandler(cUnit, handlerTemplate);

    /* The handler wrote the result to memory; any cached copy is stale */
    rlDest = dvmCompilerUpdateLoc(cUnit, rlDest);
    if (rlDest.location == kLocPhysReg) {
        dvmCompilerClobber(cUnit, rlDest.lowReg);
    }
    return false;
}
/*
 * Emit an inline single-precision VFP arithmetic instruction for the
 * current MIR. REM and NEG have no single-instruction VFP form and are
 * delegated to the portable implementation. Returns true if the opcode
 * is not handled here at all.
 */
static bool genArithOpFloat(CompilationUnit *cUnit, MIR *mir,
                            RegLocation rlDest, RegLocation rlSrc1,
                            RegLocation rlSrc2)
{
    RegLocation rlResult;
    /* Breakpoint placeholder; overwritten below or we bail out early */
    int vfpOp = kThumbBkpt;

    /*
     * Don't attempt to optimize register usage since these opcodes call
     * out to the handlers.
     */
    switch (mir->dalvikInsn.opcode) {
        case OP_ADD_FLOAT:
        case OP_ADD_FLOAT_2ADDR:
            vfpOp = kThumb2Vadds;
            break;
        case OP_SUB_FLOAT:
        case OP_SUB_FLOAT_2ADDR:
            vfpOp = kThumb2Vsubs;
            break;
        case OP_DIV_FLOAT:
        case OP_DIV_FLOAT_2ADDR:
            vfpOp = kThumb2Vdivs;
            break;
        case OP_MUL_FLOAT:
        case OP_MUL_FLOAT_2ADDR:
            vfpOp = kThumb2Vmuls;
            break;
        case OP_REM_FLOAT:
        case OP_REM_FLOAT_2ADDR:
        case OP_NEG_FLOAT:
            /* No direct VFP encoding - use the portable fallback */
            return genArithOpFloatPortable(cUnit, mir, rlDest, rlSrc1,
                                           rlSrc2);
        default:
            return true;    /* opcode not handled here */
    }

    /* Materialize both sources in FP registers, then emit the operation */
    rlSrc1 = loadValue(cUnit, rlSrc1, kFPReg);
    rlSrc2 = loadValue(cUnit, rlSrc2, kFPReg);
    rlResult = dvmCompilerEvalLoc(cUnit, rlDest, kFPReg, true);
    newLIR3(cUnit, (ArmOpcode)vfpOp, rlResult.lowReg, rlSrc1.lowReg,
            rlSrc2.lowReg);
    storeValue(cUnit, rlDest, rlResult);
    return false;
}
/*
 * Emit an inline single-precision VFP arithmetic op on Dalvik virtual
 * registers, using the fixed FP registers fr0 (result), fr2 and fr4
 * (operands). REM and NEG are delegated to the portable implementation.
 * Returns true if the opcode is not handled here.
 */
static bool genArithOpFloat(CompilationUnit *cUnit, MIR *mir, int vDest,
                            int vSrc1, int vSrc2)
{
    /* Breakpoint placeholder; overwritten below or we bail out early */
    int vfpOp = THUMB_BKPT;

    /*
     * Don't attempt to optimize register usage since these opcodes call
     * out to the handlers.
     */
    switch (mir->dalvikInsn.opCode) {
        case OP_ADD_FLOAT:
        case OP_ADD_FLOAT_2ADDR:
            vfpOp = THUMB2_VADDS;
            break;
        case OP_SUB_FLOAT:
        case OP_SUB_FLOAT_2ADDR:
            vfpOp = THUMB2_VSUBS;
            break;
        case OP_DIV_FLOAT:
        case OP_DIV_FLOAT_2ADDR:
            vfpOp = THUMB2_VDIVS;
            break;
        case OP_MUL_FLOAT:
        case OP_MUL_FLOAT_2ADDR:
            vfpOp = THUMB2_VMULS;
            break;
        case OP_REM_FLOAT:
        case OP_REM_FLOAT_2ADDR:
        case OP_NEG_FLOAT:
            /* No direct VFP encoding - use the portable fallback */
            return genArithOpFloatPortable(cUnit, mir, vDest, vSrc1, vSrc2);
        default:
            return true;    /* opcode not handled here */
    }

    /* Load both sources into fixed FP regs, compute into fr0, store back */
    loadFloat(cUnit, vSrc1, fr2);
    loadFloat(cUnit, vSrc2, fr4);
    newLIR3(cUnit, vfpOp, fr0, fr2, fr4);
    storeFloat(cUnit, fr0, vDest, 0);
    return false;
}