/*
 * Materialize a class pointer constant into rDest via the per-translation
 * class-pointer literal pool.  The target register is clobbered and marked
 * in use (when it is an allocatable temp).
 */
static ArmLIR *loadClassPointer(CompilationUnit *cUnit, int rDest, int value)
{
    cUnit->hasClassLiterals = true;
    if (dvmCompilerIsTemp(cUnit, rDest)) {
        dvmCompilerClobber(cUnit, rDest);
        dvmCompilerMarkInUse(cUnit, rDest);
    }

    /* Reuse an existing pool entry only on an exact match (max delta 0) */
    ArmLIR *dataTarget = scanLiteralPool(cUnit->classPointerList, value, 0);
    if (dataTarget == NULL) {
        dataTarget = addWordData(cUnit, &cUnit->classPointerList, value);
        /* Counts the number of class pointers in this translation */
        cUnit->numClassPointers++;
    }

    /* PC-relative load from the literal pool entry */
    ArmLIR *load = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    load->opcode = kThumbLdrPcRel;
    load->generic.target = (LIR *) dataTarget;
    load->operands[0] = rDest;
    setupResourceMasks(load);
    setMemRefType(load, true, kLiteral);
    load->aliasInfo = dataTarget->operands[0];
    dvmCompilerAppendLIR(cUnit, (LIR *) load);
    return load;
}
/*
 * The following are building blocks to construct low-level IRs with 0 - 4
 * operands.
 */
static ArmLIR *newLIR0(CompilationUnit *cUnit, ArmOpcode opcode)
{
    ArmLIR *lir = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    assert(isPseudoOpcode(opcode) ||
           (getEncoding(opcode)->flags & NO_OPERAND));
    lir->opcode = opcode;
    setupResourceMasks(lir);
    dvmCompilerAppendLIR(cUnit, (LIR *) lir);
    return lir;
}
/* Build and append a unary (one-operand) LIR instruction */
static ArmLIR *newLIR1(CompilationUnit *cUnit, ArmOpcode opcode, int dest)
{
    ArmLIR *lir = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    assert(isPseudoOpcode(opcode) ||
           (getEncoding(opcode)->flags & IS_UNARY_OP));
    lir->opcode = opcode;
    lir->operands[0] = dest;
    setupResourceMasks(lir);
    dvmCompilerAppendLIR(cUnit, (LIR *) lir);
    return lir;
}
/*
 * Build and append a binary (two-operand) LIR instruction.
 *
 * Fix: the assert previously indexed EncodingMap[opcode] directly, unlike
 * the sibling builders (newLIR0/newLIR1/newLIR4) which go through
 * getEncoding().  Use the accessor for consistency and to avoid a raw
 * table lookup with a pseudo opcode when asserts are enabled.
 */
static ArmLIR *newLIR2(CompilationUnit *cUnit, ArmOpcode opcode,
                       int dest, int src1)
{
    ArmLIR *insn = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    assert(isPseudoOpcode(opcode) ||
           (getEncoding(opcode)->flags & IS_BINARY_OP));
    insn->opcode = opcode;
    insn->operands[0] = dest;
    insn->operands[1] = src1;
    setupResourceMasks(insn);
    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
    return insn;
}
/*
 * Load an immediate using a shortcut if possible; otherwise
 * grab from the per-translation literal pool.  If target is
 * a high register, build constant into a low register and copy.
 *
 * No additional register clobbering operation performed. Use this version when
 * 1) rDest is freshly returned from dvmCompilerAllocTemp or
 * 2) The codegen is under fixed register usage
 */
static ArmLIR *loadConstantNoClobber(CompilationUnit *cUnit, int rDest,
                                     int value)
{
    ArmLIR *res;
    /* Thumb immediate forms only reach low registers; stage in a temp */
    int tDest = LOWREG(rDest) ? rDest : dvmCompilerAllocTemp(cUnit);

    /* See if the value can be constructed cheaply */
    if ((value >= 0) && (value <= 255)) {
        /* Fits directly in an 8-bit MOV immediate */
        res = newLIR2(cUnit, kThumbMovImm, tDest, value);
    } else if ((value & 0xFFFFFF00) == 0xFFFFFF00) {
        /* Small negative value: materialize the complement, then invert */
        res = newLIR2(cUnit, kThumbMovImm, tDest, ~value);
        newLIR2(cUnit, kThumbMvn, tDest, tDest);
    } else {
        /* No shortcut - go ahead and use literal pool */
        ArmLIR *dataTarget = scanLiteralPool(cUnit->literalList, value, 255);
        if (dataTarget == NULL) {
            dataTarget = addWordData(cUnit, &cUnit->literalList, value);
        }
        ArmLIR *loadPcRel = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
        loadPcRel->opcode = kThumbLdrPcRel;
        loadPcRel->generic.target = (LIR *) dataTarget;
        loadPcRel->operands[0] = tDest;
        setupResourceMasks(loadPcRel);
        setMemRefType(loadPcRel, true, kLiteral);
        loadPcRel->aliasInfo = dataTarget->operands[0];
        dvmCompilerAppendLIR(cUnit, (LIR *) loadPcRel);
        res = loadPcRel;
        /*
         * To save space in the constant pool, we use the ADD_RRI8
         * instruction to add up to 255 to an existing constant value.
         */
        if (dataTarget->operands[0] != value) {
            newLIR2(cUnit, kThumbAddRI8, tDest,
                    value - dataTarget->operands[0]);
        }
    }

    /* Copy into the real (high) destination and release the scratch temp */
    if (rDest != tDest) {
        opRegReg(cUnit, kOpMov, rDest, tDest);
        dvmCompilerFreeTemp(cUnit, tDest);
    }
    return res;
}
/* Build and append a quad (four-operand) LIR instruction */
static ArmLIR *newLIR4(CompilationUnit *cUnit, ArmOpcode opcode,
                       int dest, int src1, int src2, int info)
{
    ArmLIR *lir = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    assert(isPseudoOpcode(opcode) ||
           (getEncoding(opcode)->flags & IS_QUAD_OP));
    lir->opcode = opcode;
    lir->operands[0] = dest;
    lir->operands[1] = src1;
    lir->operands[2] = src2;
    lir->operands[3] = info;
    setupResourceMasks(lir);
    dvmCompilerAppendLIR(cUnit, (LIR *) lir);
    return lir;
}
/*
 * Build and append a tertiary (three-operand) LIR instruction.
 *
 * Fix: the diagnostic check was not guarded by isPseudoOpcode(), unlike the
 * assert immediately below it which explicitly admits pseudo opcodes.  The
 * unguarded form logged a spurious "Bad LIR3" for every pseudo opcode and
 * looked up an encoding entry for an opcode that has no real encoding.
 * Also use getEncoding() rather than raw EncodingMap indexing, consistent
 * with the other newLIRn builders.
 */
static ArmLIR *newLIR3(CompilationUnit *cUnit, ArmOpcode opcode,
                       int dest, int src1, int src2)
{
    ArmLIR *insn = (ArmLIR *) dvmCompilerNew(sizeof(ArmLIR), true);
    if (!isPseudoOpcode(opcode) &&
        !(getEncoding(opcode)->flags & IS_TERTIARY_OP)) {
        ALOGE("Bad LIR3: %s[%d]", getEncoding(opcode)->name, opcode);
    }
    assert(isPseudoOpcode(opcode) ||
           (getEncoding(opcode)->flags & IS_TERTIARY_OP));
    insn->opcode = opcode;
    insn->operands[0] = dest;
    insn->operands[1] = src1;
    insn->operands[2] = src2;
    setupResourceMasks(insn);
    dvmCompilerAppendLIR(cUnit, (LIR *) insn);
    return insn;
}
/*
 * Handle the content in each basic block: emit the block label, emit
 * entry/exit prologue/epilogue code where applicable, then lower each MIR
 * in the block by dispatching on its Dalvik instruction format.
 *
 * Returns false (the walker's "no change" result).
 */
static bool methodBlockCodeGen(CompilationUnit *cUnit, BasicBlock *bb)
{
    MIR *mir;
    ArmLIR *labelList = (ArmLIR *) cUnit->blockLabelList;
    int blockId = bb->id;

    cUnit->curBlock = bb;
    labelList[blockId].operands[0] = bb->startOffset;

    /* Insert the block label */
    labelList[blockId].opcode = kArmPseudoNormalBlockLabel;
    dvmCompilerAppendLIR(cUnit, (LIR *) &labelList[blockId]);

    /* Start each block with a clean register-tracking state */
    dvmCompilerClobberAllRegs(cUnit);
    dvmCompilerResetNullCheck(cUnit);

    /* First LIR emitted for this block; doubles as a scheduling barrier */
    ArmLIR *headLIR = NULL;

    if (bb->blockType == kEntryBlock) {
        /* r0 = callsitePC */
        opImm(cUnit, kOpPush, (1 << r0 | 1 << r1 | 1 << r5FP | 1 << r14lr));
        /* Carve out the new frame below the saved area */
        opRegImm(cUnit, kOpSub, r5FP,
                 sizeof(StackSaveArea) + cUnit->method->registersSize * 4);
    } else if (bb->blockType == kExitBlock) {
        /* No need to pop r0 and r1 */
        opRegImm(cUnit, kOpAdd, r13sp, 8);
        opImm(cUnit, kOpPop, (1 << r5FP | 1 << r15pc));
    }

    for (mir = bb->firstMIRInsn; mir; mir = mir->next) {
        /* Fresh temp pool per bytecode instruction */
        dvmCompilerResetRegPool(cUnit);
        if (gDvmJit.disableOpt & (1 << kTrackLiveTemps)) {
            dvmCompilerClobberAllRegs(cUnit);
        }

        if (gDvmJit.disableOpt & (1 << kSuppressLoads)) {
            dvmCompilerResetDefTracking(cUnit);
        }

        Opcode dalvikOpcode = mir->dalvikInsn.opcode;
        InstructionFormat dalvikFormat = dexGetFormatFromOpcode(dalvikOpcode);

        ArmLIR *boundaryLIR;

        /*
         * Don't generate the boundary LIR unless we are debugging this
         * trace or we need a scheduling barrier.
         */
        if (headLIR == NULL || cUnit->printMe == true) {
            boundaryLIR =
                newLIR2(cUnit, kArmPseudoDalvikByteCodeBoundary,
                        mir->offset,
                        (int) dvmCompilerGetDalvikDisassembly(
                            &mir->dalvikInsn, ""));
            /* Remember the first LIR for this block */
            if (headLIR == NULL) {
                headLIR = boundaryLIR;
                /* Set the first boundaryLIR as a scheduling barrier */
                headLIR->defMask = ENCODE_ALL;
            }
        }

        /* Don't generate the SSA annotation unless verbose mode is on */
        if (cUnit->printMe && mir->ssaRep) {
            char *ssaString = dvmCompilerGetSSAString(cUnit, mir->ssaRep);
            newLIR1(cUnit, kArmPseudoSSARep, (int) ssaString);
        }

        /* Dispatch to the per-format lowering handler */
        bool notHandled;
        switch (dalvikFormat) {
            case kFmt10t:
            case kFmt20t:
            case kFmt30t:
                notHandled =
                    handleMethodFmt10t_Fmt20t_Fmt30t(cUnit, mir, bb,
                                                     labelList);
                break;
            case kFmt10x:
                notHandled = handleMethodFmt10x(cUnit, mir);
                break;
            case kFmt11n:
            case kFmt31i:
                notHandled = handleMethodFmt11n_Fmt31i(cUnit, mir);
                break;
            case kFmt11x:
                notHandled = handleMethodFmt11x(cUnit, mir, bb, labelList);
                break;
            case kFmt12x:
                notHandled = handleMethodFmt12x(cUnit, mir);
                break;
            case kFmt20bc:
                notHandled = handleMethodFmt20bc(cUnit, mir);
                break;
            case kFmt21c:
            case kFmt31c:
                notHandled = handleMethodFmt21c_Fmt31c(cUnit, mir);
                break;
            case kFmt21h:
                notHandled = handleMethodFmt21h(cUnit, mir);
                break;
            case kFmt21s:
                notHandled = handleMethodFmt21s(cUnit, mir);
                break;
            case kFmt21t:
                notHandled = handleMethodFmt21t(cUnit, mir, bb, labelList);
                break;
            case kFmt22b:
            case kFmt22s:
                notHandled = handleMethodFmt22b_Fmt22s(cUnit, mir);
                break;
            case kFmt22c:
                notHandled = handleMethodFmt22c(cUnit, mir);
                break;
            case kFmt22cs:
                notHandled = handleMethodFmt22cs(cUnit, mir);
                break;
            case kFmt22t:
                notHandled = handleMethodFmt22t(cUnit, mir, bb, labelList);
                break;
            case kFmt22x:
            case kFmt32x:
                notHandled = handleMethodFmt22x_Fmt32x(cUnit, mir);
                break;
            case kFmt23x:
                notHandled = handleMethodFmt23x(cUnit, mir);
                break;
            case kFmt31t:
                notHandled = handleMethodFmt31t(cUnit, mir);
                break;
            case kFmt3rc:
            case kFmt35c:
                notHandled =
                    handleMethodFmt35c_3rc(cUnit, mir, bb, labelList);
                break;
            case kFmt3rms:
            case kFmt35ms:
                notHandled =
                    handleMethodFmt35ms_3rms(cUnit, mir, bb, labelList);
                break;
            case kFmt35mi:
            case kFmt3rmi:
                notHandled = handleMethodExecuteInline(cUnit, mir);
                break;
            case kFmt51l:
                notHandled = handleMethodFmt51l(cUnit, mir);
                break;
            default:
                notHandled = true;
                break;
        }

        /* FIXME - to be implemented */
        /* Quietly skip extended (out-of-range) opcodes for now */
        if (notHandled == true && dalvikOpcode >= kNumPackedOpcodes) {
            notHandled = false;
        }

        /* Any regular opcode left unhandled is a codegen bug - abort */
        if (notHandled) {
            ALOGE("%#06x: Opcode %#x (%s) / Fmt %d not handled",
                  mir->offset,
                  dalvikOpcode, dexGetOpcodeName(dalvikOpcode),
                  dalvikFormat);
            dvmCompilerAbort(cUnit);
            break;
        }
    }

    if (headLIR) {
        /*
         * Eliminate redundant loads/stores and delay stores into later
         * slots
         */
        dvmCompilerApplyLocalOptimizations(cUnit, (LIR *) headLIR,
                                           cUnit->lastLIRInsn);

        /*
         * Generate an unconditional branch to the fallthrough block.
         */
        if (bb->fallThrough) {
            genUnconditionalBranch(cUnit, &labelList[bb->fallThrough->id]);
        }
    }
    return false;
}