/*
 * Patch the immediate field of the instruction at @place with bits
 * [lsb + len - 1 : lsb] of the relocated value, then check that the
 * value actually fit.
 *
 * @op:       how to combine @val with the address of @place (ABS/PREL/...).
 * @place:    location of the 32-bit instruction to patch (little-endian).
 * @val:      symbol value the relocation refers to.
 * @lsb:      bit position in the value where the immediate starts.
 * @len:      width of the immediate field, in bits.
 * @imm_type: which instruction field layout to encode the immediate into.
 *
 * Returns 0 on success or -ERANGE if the (signed) value overflows the
 * @len-bit field.
 *
 * NOTE(review): assumes lsb + len < 64, otherwise BIT(lsb + len) is an
 * undefined shift — TODO confirm all callers satisfy this.
 */
static int reloc_insn_imm(enum aarch64_reloc_op op, void *place, u64 val,
			  int lsb, int len, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, imm_mask;
	s64 sval;
	u32 insn = le32_to_cpu(*(u32 *)place);

	/* Calculate the relocation value. */
	sval = do_reloc(op, place, val);
	sval >>= lsb;

	/* Extract the value bits and shift them to bit 0. */
	imm_mask = (BIT(lsb + len) - 1) >> lsb;
	imm = sval & imm_mask;

	/* Update the instruction's immediate field. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*(u32 *)place = cpu_to_le32(insn);

	/*
	 * Extract the upper value bits (including the sign bit) and
	 * shift them to bit 0.
	 *
	 * Clearing all field bits below the sign bit (~(imm_mask >> 1))
	 * and then arithmetic-shifting right by (len - 1) leaves 0 or -1
	 * iff every bit above the field replicated the field's sign bit.
	 */
	sval = (s64)(sval & ~(imm_mask >> 1)) >> (len - 1);

	/*
	 * Overflow has occurred if the upper bits are not all equal to
	 * the sign bit of the value: (u64)(sval + 1) maps the two legal
	 * results {-1, 0} onto {0, 1}; anything >= 2 is out of range.
	 */
	if ((u64)(sval + 1) >= 2)
		return -ERANGE;

	return 0;
}
/*
 * Handle a MOVW-class relocation: patch the 16-bit immediate of the
 * MOVZ/MOVN/MOVK instruction at @place with bits [lsb + 15 : lsb] of the
 * relocated value, and check the remaining bits for overflow.
 *
 * @op:       how to combine @val with the address of @place (ABS/PREL/...).
 * @place:    location of the 32-bit instruction to patch (little-endian).
 * @val:      symbol value the relocation refers to.
 * @lsb:      bit position in the value where the 16-bit chunk starts.
 * @imm_type: AARCH64_INSN_IMM_MOVNZ for signed MOVW relocations (may flip
 *            the opcode between MOVZ and MOVN), otherwise the plain field
 *            type (e.g. AARCH64_INSN_IMM_16 for unsigned, _MOVK for MOVK).
 *
 * Returns 0 on success or -ERANGE if the value does not fit.
 */
static int reloc_insn_movw(enum aarch64_reloc_op op, void *place, u64 val,
			   int lsb, enum aarch64_insn_imm_type imm_type)
{
	u64 imm, limit = 0;
	s64 sval;
	u32 insn = le32_to_cpu(*(u32 *)place);

	sval = do_reloc(op, place, val);
	sval >>= lsb;		/* arithmetic shift: sign is preserved */
	imm = sval & 0xffff;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 *
		 * The sign must be tested on the full shifted value
		 * (sval), NOT on the masked 16-bit field: imm is a u64
		 * masked with 0xffff, so (s64)imm can never be negative,
		 * which would make the MOVN path unreachable and
		 * mis-encode every negative value as MOVZ.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 * Since we've masked the opcode already, we
			 * don't need to do anything other than
			 * inverting the new immediate field (the encoder
			 * truncates it back to 16 bits).
			 */
			imm = ~imm;
		}
		imm_type = AARCH64_INSN_IMM_MOVK;
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
	*(u32 *)place = cpu_to_le32(insn);

	/* Shift out the immediate field. */
	sval >>= 16;

	/*
	 * For unsigned immediates, the overflow check is straightforward:
	 * the residue must be zero.
	 * For signed immediates, the sign bit is actually the bit past the
	 * most significant bit of the field, so a residue of -1 (all sign
	 * bits) is also acceptable; biasing by one folds {-1, 0} into
	 * {0, 1} for a single unsigned compare.
	 * The AARCH64_INSN_IMM_16 immediate type is unsigned.
	 */
	if (imm_type != AARCH64_INSN_IMM_16) {
		sval++;
		limit++;
	}

	/* Check the upper bits depending on the sign of the immediate. */
	if ((u64)sval > limit)
		return -ERANGE;

	return 0;
}
/*
 * Handle a MOVW-class relocation: patch the 16-bit immediate of the
 * MOVZ/MOVN/MOVK instruction at @place with bits [lsb + 15 : lsb] of the
 * relocated value, and check that the value fits.
 *
 * @op:       how to combine @val with the address of @place (ABS/PREL/...).
 * @place:    location of the 32-bit instruction to patch (little-endian).
 * @val:      symbol value the relocation refers to.
 * @lsb:      bit position in the value where the 16-bit chunk starts.
 * @imm_type: AARCH64_INSN_IMM_MOVNZ for signed MOVW relocations (may flip
 *            the opcode between MOVZ and MOVN); other values leave the
 *            opcode alone.
 *
 * Returns 0 on success or -ERANGE if the value does not fit in 16 bits.
 */
static int reloc_insn_movw(enum aarch64_reloc_op op, __le32 *place, u64 val,
			   int lsb, enum aarch64_insn_movw_imm_type imm_type)
{
	u64 imm;
	s64 sval;
	u32 insn = le32_to_cpu(*place);

	sval = do_reloc(op, place, val);
	/* Arithmetic shift of the signed value: the sign is preserved. */
	imm = sval >> lsb;

	if (imm_type == AARCH64_INSN_IMM_MOVNZ) {
		/*
		 * For signed MOVW relocations, we have to manipulate the
		 * instruction encoding depending on whether or not the
		 * immediate is less than zero.
		 */
		insn &= ~(3 << 29);
		if (sval >= 0) {
			/* >=0: Set the instruction to MOVZ (opcode 10b). */
			insn |= 2 << 29;
		} else {
			/*
			 * <0: Set the instruction to MOVN (opcode 00b).
			 * Since we've masked the opcode already, we
			 * don't need to do anything other than
			 * inverting the new immediate field (this also
			 * brings a negative, in-range imm back below
			 * U16_MAX for the overflow check below; the
			 * encoder truncates it to 16 bits).
			 */
			imm = ~imm;
		}
	}

	/* Update the instruction with the new encoding. */
	insn = aarch64_insn_encode_immediate(AARCH64_INSN_IMM_16, insn, imm);
	*place = cpu_to_le32(insn);

	/* Any bits left above the 16-bit field mean the value overflowed. */
	if (imm > U16_MAX)
		return -ERANGE;

	return 0;
}