Example #1
ErrorOr<void> X86TargetRelocationHandler::applyRelocation(
    ELFWriter &writer, llvm::FileOutputBuffer &buf, const lld::AtomLayout &atom,
    const Reference &ref) const {
  uint8_t *atomContent = buf.getBufferStart() + atom._fileOffset;
  uint8_t *location = atomContent + ref.offsetInAtom();
  uint64_t targetVAddress = writer.addressOfAtom(ref.target());
  uint64_t relocVAddress = atom._virtualAddr + ref.offsetInAtom();

  switch (ref.kind()) {
  case R_386_32:
    reloc32(location, relocVAddress, targetVAddress, ref.addend());
    break;
  case R_386_PC32:
    relocPC32(location, relocVAddress, targetVAddress, ref.addend());
    break;
  case lld::Reference::kindLayoutAfter:
  case lld::Reference::kindLayoutBefore:
  case lld::Reference::kindInGroup:
    break;
  default: {
    std::string str;
    llvm::raw_string_ostream s(str);
    auto name = _targetInfo.stringFromRelocKind(ref.kind());
    s << "Unhandled relocation: "
      << (name ? *name : "<unknown>") << " (" << ref.kind() << ")";
    s.flush();
    llvm_unreachable(str.c_str());
  }
  }

  return error_code::success();
}
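Note: the reloc32/relocPC32 helpers are not part of this excerpt. Assuming they follow the standard i386 ELF formulas (R_386_32 = S + A, R_386_PC32 = S + A - P, with S the target address, A the addend, and P the place being patched), a minimal sketch could look like this; it is not the original lld implementation.

#include "llvm/Support/Endian.h"
#include <cstdint>

// Sketch only, not lld's helpers.
static void reloc32(uint8_t *loc, uint64_t P, uint64_t S, int64_t A) {
  // R_386_32: word32 = S + A.  (A REL-style implicit addend would also need
  // the bytes already at loc; the excerpt passes the addend explicitly.)
  llvm::support::endian::write32le(loc, uint32_t(S + A));
  (void)P; // the place is not used for absolute relocations
}

static void relocPC32(uint8_t *loc, uint64_t P, uint64_t S, int64_t A) {
  // R_386_PC32: word32 = S + A - P (PC-relative).
  llvm::support::endian::write32le(loc, uint32_t(S + A - P));
}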
Example #2
std::error_code X86TargetRelocationHandler::applyRelocation(
    ELFWriter &writer, llvm::FileOutputBuffer &buf, const AtomLayout &atom,
    const Reference &ref) const {
  uint8_t *atomContent = buf.getBufferStart() + atom._fileOffset;
  uint8_t *loc = atomContent + ref.offsetInAtom();
  uint64_t target = writer.addressOfAtom(ref.target());
  uint64_t reloc = atom._virtualAddr + ref.offsetInAtom();

  if (ref.kindNamespace() != Reference::KindNamespace::ELF)
    return std::error_code();
  assert(ref.kindArch() == Reference::KindArch::x86);
  switch (ref.kindValue()) {
  case R_386_32:
    reloc32(loc, reloc, target, ref.addend());
    break;
  case R_386_PC32:
    relocPC32(loc, reloc, target, ref.addend());
    break;
  default:
    return make_unhandled_reloc_error();
  }
  return std::error_code();
}
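For context, a hypothetical caller would walk each laid-out atom's references and hand them to applyRelocation one at a time. The loop below is illustrative: the applyAllRelocations name and the layout->_atom access are assumptions, not code from lld; only applyRelocation, ELFWriter, AtomLayout, and the reference iteration pattern come from the excerpts.

// Illustrative driver loop; applyAllRelocations is a made-up name.
static std::error_code applyAllRelocations(
    ELFWriter &writer, llvm::FileOutputBuffer &buf,
    llvm::ArrayRef<const AtomLayout *> layouts,
    const X86TargetRelocationHandler &handler) {
  for (const AtomLayout *layout : layouts) {
    const auto *atom = llvm::cast<DefinedAtom>(layout->_atom); // assumed member
    for (const Reference *ref : *atom)
      if (std::error_code ec =
              handler.applyRelocation(writer, buf, *layout, *ref))
        return ec;
  }
  return std::error_code();
}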
Example #3
void ArchHandler_arm64::appendSectionRelocations(
    const DefinedAtom &atom, uint64_t atomSectionOffset, const Reference &ref,
    FindSymbolIndexForAtom symbolIndexForAtom,
    FindSectionIndexForAtom sectionIndexForAtom,
    FindAddressForAtom addressForAtom, normalized::Relocations &relocs) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::AArch64);
  uint32_t sectionOffset = atomSectionOffset + ref.offsetInAtom();
  switch (static_cast<Arm64Kind>(ref.kindValue())) {
  case branch26:
    if (ref.addend()) {
      appendReloc(relocs, sectionOffset, ref.addend(), 0,
                  ARM64_RELOC_ADDEND | rLength4);
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_BRANCH26 | rPcRel | rExtern | rLength4);
    } else {
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_BRANCH26 | rPcRel | rExtern | rLength4);
    }
    return;
  case page21:
    if (ref.addend()) {
      appendReloc(relocs, sectionOffset, ref.addend(), 0,
                  ARM64_RELOC_ADDEND | rLength4);
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_PAGE21 | rPcRel | rExtern | rLength4);
    } else {
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_PAGE21 | rPcRel | rExtern | rLength4);
    }
    return;
  case offset12:
  case offset12scale2:
  case offset12scale4:
  case offset12scale8:
  case offset12scale16:
    if (ref.addend()) {
      appendReloc(relocs, sectionOffset, ref.addend(), 0,
                  ARM64_RELOC_ADDEND | rLength4);
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_PAGEOFF12 | rExtern | rLength4);
    } else {
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_PAGEOFF12 | rExtern | rLength4);
    }
    return;
  case gotPage21:
    assert(ref.addend() == 0);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                ARM64_RELOC_GOT_LOAD_PAGE21 | rPcRel | rExtern | rLength4);
    return;
  case gotOffset12:
    assert(ref.addend() == 0);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                ARM64_RELOC_GOT_LOAD_PAGEOFF12 | rExtern | rLength4);
    return;
  case tlvPage21:
    assert(ref.addend() == 0);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                ARM64_RELOC_TLVP_LOAD_PAGE21 | rPcRel | rExtern | rLength4);
    return;
  case tlvOffset12:
    assert(ref.addend() == 0);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                ARM64_RELOC_TLVP_LOAD_PAGEOFF12 | rExtern | rLength4);
    return;
  case pointer64:
    if (ref.target()->name().empty())
      appendReloc(relocs, sectionOffset, sectionIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_UNSIGNED           | rLength8);
    else
      appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                  ARM64_RELOC_UNSIGNED | rExtern | rLength8);
    return;
  case delta64:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                ARM64_RELOC_SUBTRACTOR | rExtern | rLength8);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                ARM64_RELOC_UNSIGNED  | rExtern | rLength8);
    return;
  case delta32:
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(atom), 0,
                ARM64_RELOC_SUBTRACTOR | rExtern | rLength4);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                ARM64_RELOC_UNSIGNED   | rExtern | rLength4);
    return;
  case pointer64ToGOT:
    assert(ref.addend() == 0);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                ARM64_RELOC_POINTER_TO_GOT | rExtern | rLength8);
    return;
  case delta32ToGOT:
    assert(ref.addend() == 0);
    appendReloc(relocs, sectionOffset, symbolIndexForAtom(*ref.target()), 0,
                ARM64_RELOC_POINTER_TO_GOT | rPcRel | rExtern | rLength4);
    return;
  case addOffset12:
    llvm_unreachable("lazy reference kind implies GOT pass was run");
  case lazyPointer:
  case lazyImmediateLocation:
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
  case imageOffset:
  case imageOffsetGot:
    llvm_unreachable("deltas from mach_header can only be in final images");
  case unwindCIEToPersonalityFunction:
  case unwindFDEToFunction:
  case unwindInfoToEhFrame:
  case negDelta32:
    // Do nothing.
    return;
  case invalid:
    // Fall into llvm_unreachable().
    break;
  }
  llvm_unreachable("unknown arm64 Reference Kind");
}
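The appendReloc helper and the rPcRel/rExtern/rLengthN flags are defined elsewhere in lld; from the calls above one can infer the convention, sketched here with made-up names. Note in particular that a non-zero addend is emitted as a separate ARM64_RELOC_ADDEND record immediately preceding the relocation it modifies, rather than being folded into the instruction word.

// Inferred shape of one emitted record (illustrative struct, not lld's type).
struct RelocRecordSketch {
  uint32_t sectionOffset; // where in the section the fixup lives
  uint32_t value;         // symbol index (rExtern), section index, or addend
  uint32_t kindAndFlags;  // e.g. ARM64_RELOC_BRANCH26 | rPcRel | rExtern | rLength4
};

// For "bl _foo + 8" (branch26 with addend 8) the code above appends two records:
//   { off, 8,           ARM64_RELOC_ADDEND   | rLength4 }
//   { off, idx(_foo),   ARM64_RELOC_BRANCH26 | rPcRel | rExtern | rLength4 }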
Example #4
void ArchHandler_arm64::applyFixupRelocatable(const Reference &ref,
                                              uint8_t *loc,
                                              uint64_t fixupAddress,
                                              uint64_t targetAddress,
                                              uint64_t inAtomAddress,
                                              bool targetUnnamed) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::AArch64);
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
  ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
  switch (static_cast<Arm64Kind>(ref.kindValue())) {
  case branch26:
    *loc32 = setDisplacementInBranch26(*loc32, 0);
    return;
  case page21:
  case gotPage21:
  case tlvPage21:
    *loc32 = setDisplacementInADRP(*loc32, 0);
    return;
  case offset12:
  case offset12scale2:
  case offset12scale4:
  case offset12scale8:
  case offset12scale16:
  case gotOffset12:
  case tlvOffset12:
    *loc32 = setImm12(*loc32, 0);
    return;
  case pointer64:
    if (targetUnnamed)
      *loc64 = targetAddress + ref.addend();
    else
      *loc64 = ref.addend();
    return;
  case delta64:
    *loc64 = ref.addend() + inAtomAddress - fixupAddress;
    return;
  case unwindFDEToFunction:
    // We don't emit unwindFDEToFunction references in -r mode, as they are
    // implicitly generated from the data in the __eh_frame section. So here we
    // need to use the targetAddress so that we can generate the full
    // relocation when we parse again later.
    *loc64 = targetAddress - fixupAddress;
    return;
  case delta32:
    *loc32 = ref.addend() + inAtomAddress - fixupAddress;
    return;
  case negDelta32:
    // We don't emit negDelta32 references in -r mode, as they are implicitly
    // generated from the data in the __eh_frame section. So here we need to
    // use the targetAddress so that we can generate the full relocation when
    // we parse again later.
    *loc32 = fixupAddress - targetAddress + ref.addend();
    return;
  case pointer64ToGOT:
    *loc64 = 0;
    return;
  case delta32ToGOT:
    *loc32 = inAtomAddress - fixupAddress;
    return;
  case unwindCIEToPersonalityFunction:
    // We don't emit unwindCIEToPersonalityFunction references in -r mode, as
    // they are implicitly generated from the data in the __eh_frame section.
    // So here we need to use the targetAddress so that we can generate the
    // full relocation when we parse again later.
    *loc32 = targetAddress - fixupAddress;
    return;
  case addOffset12:
    llvm_unreachable("lazy reference kind implies GOT pass was run");
  case lazyPointer:
  case lazyImmediateLocation:
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
  case imageOffset:
  case imageOffsetGot:
  case unwindInfoToEhFrame:
    llvm_unreachable("fixup implies __unwind_info");
    return;
  case invalid:
    // Fall into llvm_unreachable().
    break;
  }
  llvm_unreachable("unknown arm64 Reference Kind");
}
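The instruction-patching helpers used above are not shown. Based on the AArch64 encoding (B/BL carries a signed 26-bit word offset in bits [25:0]; the load/store "unsigned offset" forms carry imm12 in bits [21:10]), plausible sketches are given below; the real setDisplacementInBranch26/setImm12 live elsewhere in lld.

#include <cassert>
#include <cstdint>

// Sketches only, following the AArch64 encoding rules.
static uint32_t setDisplacementInBranch26(uint32_t instr, int32_t disp) {
  assert((disp & 0x3) == 0 && "branch displacement must be 4-byte aligned");
  return (instr & 0xFC000000) | ((uint32_t(disp) >> 2) & 0x03FFFFFF);
}

static uint32_t setImm12(uint32_t instr, uint32_t imm) {
  assert(imm < 0x1000 && "imm12 out of range");
  return (instr & 0xFFC003FF) | (imm << 10);
}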
Example #5
void ArchHandler_arm64::applyFixupFinal(const Reference &ref, uint8_t *loc,
                                        uint64_t fixupAddress,
                                        uint64_t targetAddress,
                                        uint64_t inAtomAddress,
                                        uint64_t imageBaseAddress,
                                        FindAddressForAtom findSectionAddress) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::AArch64);
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
  ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
  int32_t displacement;
  uint32_t instruction;
  uint32_t value32;
  uint64_t value64;
  switch (static_cast<Arm64Kind>(ref.kindValue())) {
  case branch26:
    displacement = (targetAddress - fixupAddress) + ref.addend();
    *loc32 = setDisplacementInBranch26(*loc32, displacement);
    return;
  case page21:
  case gotPage21:
  case tlvPage21:
    displacement =
        ((targetAddress + ref.addend()) & (-4096)) - (fixupAddress & (-4096));
    *loc32 = setDisplacementInADRP(*loc32, displacement);
    return;
  case offset12:
  case gotOffset12:
  case tlvOffset12:
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
    *loc32 = setImm12(*loc32, displacement);
    return;
  case offset12scale2:
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
    assert(((displacement & 0x1) == 0) &&
           "scaled imm12 not accessing 2-byte aligneds");
    *loc32 = setImm12(*loc32, displacement >> 1);
    return;
  case offset12scale4:
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
    assert(((displacement & 0x3) == 0) &&
           "scaled imm12 not accessing 4-byte aligned");
    *loc32 = setImm12(*loc32, displacement >> 2);
    return;
  case offset12scale8:
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
    assert(((displacement & 0x7) == 0) &&
           "scaled imm12 not accessing 8-byte aligned");
    *loc32 = setImm12(*loc32, displacement >> 3);
    return;
  case offset12scale16:
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
    assert(((displacement & 0xF) == 0) &&
           "scaled imm12 not accessing 16-byte aligned");
    *loc32 = setImm12(*loc32, displacement >> 4);
    return;
  case addOffset12:
    instruction = *loc32;
    assert(((instruction & 0xFFC00000) == 0xF9400000) &&
           "GOT reloc is not an LDR instruction");
    displacement = (targetAddress + ref.addend()) & 0x00000FFF;
    value32 = 0x91000000 | (instruction & 0x000003FF);
    instruction = setImm12(value32, displacement);
    *loc32 = instruction;
    return;
  case pointer64:
  case pointer64ToGOT:
    *loc64 = targetAddress + ref.addend();
    return;
  case delta64:
  case unwindFDEToFunction:
    *loc64 = (targetAddress - fixupAddress) + ref.addend();
    return;
  case delta32:
  case delta32ToGOT:
  case unwindCIEToPersonalityFunction:
    *loc32 = (targetAddress - fixupAddress) + ref.addend();
    return;
  case negDelta32:
    *loc32 = fixupAddress - targetAddress + ref.addend();
    return;
  case lazyPointer:
    // Do nothing
    return;
  case lazyImmediateLocation:
    *loc32 = ref.addend();
    return;
  case imageOffset:
    *loc32 = (targetAddress - imageBaseAddress) + ref.addend();
    return;
  case imageOffsetGot:
    llvm_unreachable("imageOffsetGot should have been changed to imageOffset");
    break;
  case unwindInfoToEhFrame:
    value64 = targetAddress - findSectionAddress(*ref.target()) + ref.addend();
    assert(value64 < 0xffffffU && "offset in __eh_frame too large");
    *loc32 = (*loc32 & 0xff000000U) | value64;
    return;
  case invalid:
    // Fall into llvm_unreachable().
    break;
  }
  llvm_unreachable("invalid arm64 Reference Kind");
}
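Similarly, setDisplacementInADRP is not part of the excerpt. The computation above hands it a page delta (both addresses masked to 4 KiB pages), which under the AArch64 ADRP encoding splits into immlo (bits 30:29) and immhi (bits 23:5); a hedged sketch:

#include <cassert>
#include <cstdint>

// Sketch only, not lld's implementation.
static uint32_t setDisplacementInADRP(uint32_t adrp, int64_t pageDelta) {
  assert((pageDelta & 0xFFF) == 0 && "ADRP displacement must be page-aligned");
  uint32_t pages = uint32_t(pageDelta >> 12) & 0x1FFFFF; // signed 21-bit page count
  uint32_t immlo = pages & 0x3;
  uint32_t immhi = pages >> 2;
  return (adrp & 0x9F00001F) | (immlo << 29) | (immhi << 5);
}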
Example #6
std::error_code X86_64TargetRelocationHandler::applyRelocation(
    ELFWriter &writer, llvm::FileOutputBuffer &buf, const AtomLayout &atom,
    const Reference &ref) const {
  uint8_t *atomContent = buf.getBufferStart() + atom._fileOffset;
  uint8_t *loc = atomContent + ref.offsetInAtom();
  uint64_t target = writer.addressOfAtom(ref.target());
  uint64_t reloc = atom._virtualAddr + ref.offsetInAtom();

  if (ref.kindNamespace() != Reference::KindNamespace::ELF)
    return std::error_code();
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  switch (ref.kindValue()) {
  case R_X86_64_NONE:
    break;
  case R_X86_64_64:
    reloc64(loc, reloc, target, ref.addend());
    break;
  case R_X86_64_PC32:
  case R_X86_64_GOTPCREL:
    relocPC32(loc, reloc, target, ref.addend());
    break;
  case R_X86_64_32:
    reloc32(loc, reloc, target, ref.addend());
    break;
  case R_X86_64_32S:
    reloc32S(loc, reloc, target, ref.addend());
    break;
  case R_X86_64_16:
    reloc16(loc, reloc, target, ref.addend());
    break;
  case R_X86_64_PC16:
    relocPC16(loc, reloc, target, ref.addend());
    break;
  case R_X86_64_TPOFF64:
  case R_X86_64_DTPOFF32:
  case R_X86_64_TPOFF32: {
    _tlsSize = _layout.getTLSSize();
    if (ref.kindValue() == R_X86_64_TPOFF32 ||
        ref.kindValue() == R_X86_64_DTPOFF32) {
      write32le(loc, target - _tlsSize);
    } else {
      write64le(loc, target - _tlsSize);
    }
    break;
  }
  case R_X86_64_TLSGD: {
    relocPC32(loc, reloc, target, ref.addend());
    break;
  }
  case R_X86_64_TLSLD: {
    // Rewrite to move %fs:0 into %rax. Technically we should verify that the
    // next relocation is a PC32 to __tls_get_addr...
    static uint8_t instr[] = { 0x66, 0x66, 0x66, 0x64, 0x48, 0x8b, 0x04, 0x25,
                               0x00, 0x00, 0x00, 0x00 };
    std::memcpy(loc - 3, instr, sizeof(instr));
    break;
  }
  case R_X86_64_PC64:
    relocPC64(loc, reloc, target, ref.addend());
    break;
  case LLD_R_X86_64_GOTRELINDEX: {
    const DefinedAtom *target = cast<const DefinedAtom>(ref.target());
    for (const Reference *r : *target) {
      if (r->kindValue() == R_X86_64_JUMP_SLOT) {
        uint32_t index;
        if (!_layout.getPLTRelocationTable()->getRelocationIndex(*r, index))
          llvm_unreachable("Relocation doesn't exist");
        reloc32(loc, 0, index, 0);
        break;
      }
    }
    break;
  }
  // Runtime-only relocations. Ignore here.
  case R_X86_64_RELATIVE:
  case R_X86_64_IRELATIVE:
  case R_X86_64_JUMP_SLOT:
  case R_X86_64_GLOB_DAT:
  case R_X86_64_DTPMOD64:
  case R_X86_64_DTPOFF64:
    break;
  default:
    return make_unhandled_reloc_error();
  }

  return std::error_code();
}
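As in example #1, the reloc* helpers are defined elsewhere. Assuming they follow the x86-64 psABI formulas (S = target, A = addend, P = place), two of them might look like the sketch below; reloc32S is the interesting one, since R_X86_64_32S requires the result to fit when sign-extended back to 64 bits. This is not the original lld code.

#include "llvm/Support/Endian.h"
#include <cassert>
#include <cstdint>

// Sketches only.
static void reloc32S(uint8_t *loc, uint64_t P, uint64_t S, int64_t A) {
  // R_X86_64_32S: word32 = S + A, must be representable as a sign-extended int32.
  int64_t result = int64_t(S) + A;
  assert(result >= INT32_MIN && result <= INT32_MAX && "R_X86_64_32S overflow");
  llvm::support::endian::write32le(loc, uint32_t(result));
  (void)P;
}

static void relocPC64(uint8_t *loc, uint64_t P, uint64_t S, int64_t A) {
  // R_X86_64_PC64: word64 = S + A - P.
  llvm::support::endian::write64le(loc, S + A - P);
}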
Example #7
void ArchHandler_x86_64::applyFixupRelocatable(const Reference &ref,
                                               uint8_t *loc,
                                               uint64_t fixupAddress,
                                               uint64_t targetAddress,
                                               uint64_t inAtomAddress)  {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
  ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
  switch (static_cast<X86_64Kind>(ref.kindValue())) {
  case branch32:
  case ripRel32:
  case ripRel32Got:
  case ripRel32GotLoad:
  case ripRel32Tlv:
    *loc32 = ref.addend();
    return;
  case ripRel32Anon:
    *loc32 = (targetAddress - (fixupAddress + 4)) + ref.addend();
    return;
  case tlvInitSectionOffset:
  case pointer64:
    *loc64 = ref.addend();
    return;
  case pointer64Anon:
    *loc64 = targetAddress + ref.addend();
    return;
  case ripRel32Minus1:
    *loc32 = ref.addend() - 1;
    return;
  case ripRel32Minus1Anon:
    *loc32 = (targetAddress - (fixupAddress + 5)) + ref.addend();
    return;
  case ripRel32Minus2:
    *loc32 = ref.addend() - 2;
    return;
  case ripRel32Minus2Anon:
    *loc32 = (targetAddress - (fixupAddress + 6)) + ref.addend();
    return;
  case ripRel32Minus4:
    *loc32 = ref.addend() - 4;
    return;
  case ripRel32Minus4Anon:
    *loc32 = (targetAddress - (fixupAddress + 8)) + ref.addend();
    return;
  case delta32:
    *loc32 = ref.addend() + inAtomAddress - fixupAddress;
    return;
  case delta32Anon:
    // The value we write here should be the delta to the target after taking
    // into account the difference from the fixup back to the last defined
    // label, i.e., if we have:
    // _base: ...
    // Lfixup: .quad Ltarget - .
    // ...
    // Ltarget:
    //
    // Then we want to encode the value (Ltarget + addend) - (Lfixup - _base)
    *loc32 = (targetAddress + ref.addend()) - (fixupAddress - inAtomAddress);
    return;
  case delta64:
    *loc64 = ref.addend() + inAtomAddress - fixupAddress;
    return;
  case delta64Anon:
    // The value we write here should be the delta to the target after taking
    // into account the difference from the fixup back to the last defined
    // label, i.e., if we have:
    // _base: ...
    // Lfixup: .quad Ltarget - .
    // ...
    // Ltarget:
    //
    // Then we want to encode the value (Ltarget + addend) - (Lfixup - _base)
    *loc64 = (targetAddress + ref.addend()) - (fixupAddress - inAtomAddress);
    return;
  case negDelta64:
    *loc64 = ref.addend() + fixupAddress - inAtomAddress;
    return;
  case negDelta32:
    *loc32 = ref.addend() + fixupAddress - inAtomAddress;
    return;
  case ripRel32GotLoadNowLea:
    llvm_unreachable("ripRel32GotLoadNowLea implies GOT pass was run");
    return;
  case lazyPointer:
  case lazyImmediateLocation:
    llvm_unreachable("lazy reference kind implies Stubs pass was run");
    return;
  case imageOffset:
  case imageOffsetGot:
  case unwindInfoToEhFrame:
    llvm_unreachable("fixup implies __unwind_info");
    return;
  case unwindFDEToFunction:
    // Do nothing for now
    return;
  case invalid:
    // Fall into llvm_unreachable().
    break;
  }
  llvm_unreachable("unknown x86_64 Reference Kind");
}
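A note on the +5/+6/+8 constants in the ripRel32MinusN cases: a rip-relative displacement is measured from the end of the instruction, and the "MinusN" kinds cover instructions where N immediate bytes follow the 32-bit displacement field. That reading is inferred from the arithmetic above; expressed as a small helper with an illustrative name:

#include <cstdint>

// Illustrative only: "end of instruction" is the 4-byte displacement field
// plus any trailing immediate bytes, so the PC used for the subtraction is
// fixupAddress + 4 + trailingImmBytes.
static uint32_t ripRel32MinusN(uint64_t fixupAddress, uint64_t targetAddress,
                               int64_t addend, unsigned trailingImmBytes) {
  return uint32_t(targetAddress - (fixupAddress + 4 + trailingImmBytes) + addend);
}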
Example #8
void ArchHandler_x86_64::applyFixupFinal(
    const Reference &ref, uint8_t *loc, uint64_t fixupAddress,
    uint64_t targetAddress, uint64_t inAtomAddress, uint64_t imageBaseAddress,
    FindAddressForAtom findSectionAddress) {
  if (ref.kindNamespace() != Reference::KindNamespace::mach_o)
    return;
  assert(ref.kindArch() == Reference::KindArch::x86_64);
  ulittle32_t *loc32 = reinterpret_cast<ulittle32_t *>(loc);
  ulittle64_t *loc64 = reinterpret_cast<ulittle64_t *>(loc);
  switch (static_cast<X86_64Kind>(ref.kindValue())) {
  case branch32:
  case ripRel32:
  case ripRel32Anon:
  case ripRel32Got:
  case ripRel32GotLoad:
  case ripRel32Tlv:
    *loc32 = targetAddress - (fixupAddress + 4) + ref.addend();
    return;
  case pointer64:
  case pointer64Anon:
    *loc64 = targetAddress + ref.addend();
    return;
  case tlvInitSectionOffset:
    *loc64 = targetAddress - findSectionAddress(*ref.target()) + ref.addend();
    return;
  case ripRel32Minus1:
  case ripRel32Minus1Anon:
    *loc32 = targetAddress - (fixupAddress + 5) + ref.addend();
    return;
  case ripRel32Minus2:
  case ripRel32Minus2Anon:
    *loc32 = targetAddress - (fixupAddress + 6) + ref.addend();
    return;
  case ripRel32Minus4:
  case ripRel32Minus4Anon:
    *loc32 = targetAddress - (fixupAddress + 8) + ref.addend();
    return;
  case delta32:
  case delta32Anon:
    *loc32 = targetAddress - fixupAddress + ref.addend();
    return;
  case delta64:
  case delta64Anon:
  case unwindFDEToFunction:
    *loc64 = targetAddress - fixupAddress + ref.addend();
    return;
  case ripRel32GotLoadNowLea:
    // Change MOVQ to LEA
    assert(loc[-2] == 0x8B);
    loc[-2] = 0x8D;
    *loc32 = targetAddress - (fixupAddress + 4) + ref.addend();
    return;
  case negDelta64:
    *loc64 = fixupAddress - targetAddress + ref.addend();
    return;
  case negDelta32:
    *loc32 = fixupAddress - targetAddress + ref.addend();
    return;
  case lazyPointer:
    // Do nothing
    return;
  case lazyImmediateLocation:
    *loc32 = ref.addend();
    return;
  case imageOffset:
  case imageOffsetGot:
    *loc32 = (targetAddress - imageBaseAddress) + ref.addend();
    return;
  case unwindInfoToEhFrame: {
    uint64_t val = targetAddress - findSectionAddress(*ref.target()) + ref.addend();
    assert(val < 0xffffffU && "offset in __eh_frame too large");
    *loc32 = (*loc32 & 0xff000000U) | val;
    return;
  }
  case invalid:
    // Fall into llvm_unreachable().
    break;
  }
  llvm_unreachable("invalid x86_64 Reference Kind");
}
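The ripRel32GotLoadNowLea case works because "movq foo@GOTPCREL(%rip), %reg" and "leaq foo(%rip), %reg" differ only in the opcode byte (0x8B vs 0x8D) while keeping the same ModRM and displacement layout, so the GOT load can be relaxed in place. Pulled out as a standalone sketch (the function name is made up):

#include "llvm/Support/Endian.h"
#include <cassert>
#include <cstdint>

// Illustrative sketch of the MOVQ -> LEA relaxation done inline above.
static void relaxGotLoadToLea(uint8_t *loc, uint64_t fixupAddress,
                              uint64_t targetAddress, int64_t addend) {
  assert(loc[-2] == 0x8B && "expected a MOVQ opcode before the displacement");
  loc[-2] = 0x8D; // MOVQ r64, r/m64  ->  LEA r64, m
  llvm::support::endian::write32le(
      loc, uint32_t(targetAddress - (fixupAddress + 4) + addend));
}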