BufferOffset Assembler::Logical(const Register& rd, const Register& rn,
                                const Operand& operand, LogicalOp op) {
  VIXL_ASSERT(rd.size() == rn.size());
  if (operand.IsImmediate()) {
    int64_t immediate = operand.immediate();
    unsigned reg_size = rd.size();

    VIXL_ASSERT(immediate != 0);
    VIXL_ASSERT(immediate != -1);
    VIXL_ASSERT(rd.Is64Bits() || is_uint32(immediate));

    // If the operation is NOT, invert the operation and immediate.
    if ((op & NOT) == NOT) {
      op = static_cast<LogicalOp>(op & ~NOT);
      immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
    }

    unsigned n, imm_s, imm_r;
    if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
      // Immediate can be encoded in the instruction.
      return LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
    } else {
      // This case is handled in the macro assembler.
      VIXL_UNREACHABLE();
    }
  } else {
    VIXL_ASSERT(operand.IsShiftedRegister());
    VIXL_ASSERT(operand.reg().size() == rd.size());
    Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
    return DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
  }

  // Unreachable; quiets the missing-return warning in release builds,
  // where VIXL_UNREACHABLE() expands to a no-op.
  return BufferOffset();
}
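// Usage sketch (not from this file; modeled on the upstream VIXL mnemonic
// wrappers, which may differ in this port): the public logical instructions
// are thin forwarders that pick the LogicalOp and delegate to Logical(), e.g.:
//
//   BufferOffset Assembler::and_(const Register& rd, const Register& rn,
//                                const Operand& operand) {
//     return Logical(rd, rn, operand, AND);
//   }
//
//   BufferOffset Assembler::bic(const Register& rd, const Register& rn,
//                               const Operand& operand) {
//     return Logical(rd, rn, operand, BIC);  // BIC carries the NOT bit,
//   }                                        // stripped and folded above.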
BufferOffset Assembler::DataProcShiftedRegister(const Register& rd, const Register& rn,
                                                const Operand& operand, FlagsUpdate S,
                                                Instr op) {
  VIXL_ASSERT(operand.IsShiftedRegister());
  VIXL_ASSERT(rn.Is64Bits() || (rn.Is32Bits() && is_uint5(operand.shift_amount())));
  return Emit(SF(rd) | op | Flags(S) | ShiftDP(operand.shift()) |
              ImmDPShift(operand.shift_amount()) | Rm(operand.reg()) |
              Rn(rn) | Rd(rd));
}
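// Flow sketch (register choices are illustrative, not from this file): a
// shifted-register operand fails the IsImmediate() test in Logical() and is
// encoded here, with every field OR-ed into a single 32-bit instruction word:
//
//   masm.Logical(x0, x1, Operand(x2, LSL, 12), ORR);
//   // -> Emit(SF(x0) | ORR | LogicalShiftedFixed | ShiftDP(LSL) |
//   //         ImmDPShift(12) | Rm(x2) | Rn(x1) | Rd(x0))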
BufferOffset Assembler::adr(const Register& rd, int imm21) {
  VIXL_ASSERT(rd.Is64Bits());
  return EmitBranch(ADR | ImmPCRelAddress(imm21) | Rd(rd));
}
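// Range note and usage sketch (the offset is illustrative): imm21 is a signed
// byte offset from the ADR instruction itself, so the reach is +/-1 MiB.
//
//   masm.adr(x0, 16);  // x0 = address_of_this_adr + 16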
void Assembler::adrp(Instruction* at, const Register& rd, int imm21) {
  VIXL_ASSERT(rd.Is64Bits());
  EmitBranch(at, ADRP | ImmPCRelAddress(imm21) | Rd(rd));
}
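// Patch-in-place sketch (assumes `at` points at a previously reserved,
// writable instruction slot): ADRP forms a page address,
// (pc & ~0xfff) + imm21 * 4096, for a reach of +/-4 GiB in 4 KiB pages.
//
//   Instruction* at = ...;           // earlier-emitted ADRP slot
//   masm.adrp(at, x16, page_delta);  // x16 = page(pc) + page_delta * 4096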
void Assembler::tbnz(Instruction* at, const Register& rt, unsigned bit_pos, int imm14) {
  VIXL_ASSERT(rt.Is64Bits() || (rt.Is32Bits() && (bit_pos < kWRegSize)));
  EmitBranch(at, TBNZ | ImmTestBranchBit(bit_pos) | ImmTestBranch(imm14) | Rt(rt));
}
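// Usage sketch (operands are illustrative): imm14 is a signed offset in
// instructions (4-byte units), giving a +/-32 KiB branch range; bit_pos
// selects the tested bit, and this overload rewrites the instruction at `at`
// instead of appending to the buffer:
//
//   masm.tbnz(at, x3, 7, offset_in_instrs);  // branch if bit 7 of x3 is set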