/*
 * Put { move r10, lr; jal ftrace_caller } in a bundle; this lets the
 * dynamic tracer add only one cycle of overhead to every kernel
 * function when tracing is disabled.
 */
static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
				       bool link)
{
	tilegx_bundle_bits opcode_x0, opcode_x1;
	long pcrel_by_instr = (addr - pc) >> TILEGX_LOG2_BUNDLE_SIZE_IN_BYTES;

	if (link) {
		/* opcode: jal addr */
		opcode_x1 = create_Opcode_X1(JUMP_OPCODE_X1) |
			create_JumpOpcodeExtension_X1(JAL_JUMP_OPCODE_X1) |
			create_JumpOff_X1(pcrel_by_instr);
	} else {
		/* opcode: j addr */
		opcode_x1 = create_Opcode_X1(JUMP_OPCODE_X1) |
			create_JumpOpcodeExtension_X1(J_JUMP_OPCODE_X1) |
			create_JumpOff_X1(pcrel_by_instr);
	}

	/*
	 * Also put { move r10, lr; jal ftrace_stub } in a bundle, which
	 * is used to replace the instruction at address ftrace_call.
	 */
	if (addr == FTRACE_ADDR || addr == (unsigned long)ftrace_stub) {
		/* opcode: or r10, lr, zero */
		opcode_x0 = create_Dest_X0(10) |
			create_SrcA_X0(TREG_LR) |
			create_SrcB_X0(TREG_ZERO) |
			create_RRROpcodeExtension_X0(OR_RRR_0_OPCODE_X0) |
			create_Opcode_X0(RRR_0_OPCODE_X0);
	} else {
		/* opcode: fnop */
		opcode_x0 = create_UnaryOpcodeExtension_X0(FNOP_UNARY_OPCODE_X0) |
			create_RRROpcodeExtension_X0(UNARY_RRR_0_OPCODE_X0) |
			create_Opcode_X0(RRR_0_OPCODE_X0);
	}

	return opcode_x1 | opcode_x0;
}
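/*
 * Hedged usage sketch (not part of the source above): ftrace_gen_branch()
 * would typically feed the dynamic-ftrace hooks, which compute the old and
 * new bundles for a call site and then patch the text. ftrace_modify_code()
 * and ftrace_nop_replace() are hypothetical helpers standing in for the
 * arch's real text-patching and nop-encoding routines.
 */
static int ftrace_modify_code(unsigned long pc, unsigned long old,
			      unsigned long new);		/* assumed helper */
static unsigned long ftrace_nop_replace(struct dyn_ftrace *rec);	/* assumed helper */

int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	unsigned long pc = rec->ip;
	/* New bundle: { move r10, lr ; jal addr } -- link == true. */
	unsigned long new = ftrace_gen_branch(pc, addr, true);
	/* Old bundle: the nop left at the call site when tracing is off. */
	unsigned long old = ftrace_nop_replace(rec);

	return ftrace_modify_code(pc, old, new);
}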
static inline tilepro_bundle_bits addi_X1(
	tilepro_bundle_bits n, int dest, int src, int imm)
{
	n &= ~TILE_X1_MASK;

	n |= (create_SrcA_X1(src) |
	      create_Dest_X1(dest) |
	      create_Imm8_X1(imm) |
	      create_S_X1(0) |
	      create_Opcode_X1(IMM_0_OPCODE_X1) |
	      create_ImmOpcodeExtension_X1(ADDI_IMM_0_OPCODE_X1));

	return n;
}
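/*
 * Hedged example (illustrative, not from the source): addi_X1() rewrites
 * only the X1 slot of an existing bundle, leaving the other pipes intact.
 * A single-step rewriter could use it to emulate a pointer update with a
 * plain addi; the wrapper below and its immediate are example values only.
 */
static inline tilepro_bundle_bits bump_sp_in_X1(tilepro_bundle_bits bundle)
{
	/* Encode "addi sp, sp, 8" into the bundle's X1 slot. */
	return addi_X1(bundle, TREG_SP, TREG_SP, 8);
}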
static inline tile_bundle_bits move_X1(tile_bundle_bits n, int dest, int src)
{
	tile_bundle_bits result;
	tile_bundle_bits op;

	result = n & (~TILE_X1_MASK);

	op = create_Opcode_X1(SPECIAL_0_OPCODE_X1) |
		create_RRROpcodeExtension_X1(OR_SPECIAL_0_OPCODE_X1) |
		create_Dest_X1(dest) |
		create_SrcB_X1(TREG_ZERO) |
		create_SrcA_X1(src);

	result |= op;

	return result;
}
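/*
 * Hedged example (illustrative, not from the source): move_X1() encodes
 * "move dest, src" as "or dest, src, zero" in the X1 pipe. The wrapper
 * below mirrors the { move r10, lr } half of the ftrace bundle built
 * above, applied as a standalone rewrite of a caller-supplied bundle.
 */
static inline tile_bundle_bits make_move_r10_lr(tile_bundle_bits bundle)
{
	/* Overwrite the X1 slot with "move r10, lr". */
	return move_X1(bundle, 10, TREG_LR);
}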