void AMDGPUTargetELFStreamer::EmitAmdhsaKernelDescriptor(
    const MCSubtargetInfo &STI, StringRef KernelName,
    const amdhsa::kernel_descriptor_t &KernelDescriptor, uint64_t NextVGPR,
    uint64_t NextSGPR, bool ReserveVCC, bool ReserveFlatScr,
    bool ReserveXNACK) {
  auto &Streamer = getStreamer();
  auto &Context = Streamer.getContext();

  MCSymbolELF *KernelCodeSymbol = cast<MCSymbolELF>(
      Context.getOrCreateSymbol(Twine(KernelName)));
  MCSymbolELF *KernelDescriptorSymbol = cast<MCSymbolELF>(
      Context.getOrCreateSymbol(Twine(KernelName) + Twine(".kd")));

  // Copy kernel descriptor symbol's binding, other and visibility from the
  // kernel code symbol.
  KernelDescriptorSymbol->setBinding(KernelCodeSymbol->getBinding());
  KernelDescriptorSymbol->setOther(KernelCodeSymbol->getOther());
  KernelDescriptorSymbol->setVisibility(KernelCodeSymbol->getVisibility());
  // Kernel descriptor symbol's type and size are fixed.
  KernelDescriptorSymbol->setType(ELF::STT_OBJECT);
  KernelDescriptorSymbol->setSize(
      MCConstantExpr::create(sizeof(KernelDescriptor), Context));

  // The visibility of the kernel code symbol must be protected or less to
  // allow static relocations from the kernel descriptor to be used.
  if (KernelCodeSymbol->getVisibility() == ELF::STV_DEFAULT)
    KernelCodeSymbol->setVisibility(ELF::STV_PROTECTED);

  Streamer.EmitLabel(KernelDescriptorSymbol);
  Streamer.EmitBytes(StringRef(
      (const char*)&(KernelDescriptor),
      offsetof(amdhsa::kernel_descriptor_t, kernel_code_entry_byte_offset)));
  // FIXME: Remove the use of VK_AMDGPU_REL64 in the expression below. The
  // expression being created is:
  //   (start of kernel code) - (start of kernel descriptor)
  // It implies R_AMDGPU_REL64, but ends up being R_AMDGPU_ABS64.
  Streamer.EmitValue(MCBinaryExpr::createSub(
      MCSymbolRefExpr::create(
          KernelCodeSymbol, MCSymbolRefExpr::VK_AMDGPU_REL64, Context),
      MCSymbolRefExpr::create(
          KernelDescriptorSymbol, MCSymbolRefExpr::VK_None, Context),
      Context),
      sizeof(KernelDescriptor.kernel_code_entry_byte_offset));
  Streamer.EmitBytes(StringRef(
      (const char*)&(KernelDescriptor) +
          offsetof(amdhsa::kernel_descriptor_t, kernel_code_entry_byte_offset) +
          sizeof(KernelDescriptor.kernel_code_entry_byte_offset),
      sizeof(KernelDescriptor) -
          offsetof(amdhsa::kernel_descriptor_t, kernel_code_entry_byte_offset) -
          sizeof(KernelDescriptor.kernel_code_entry_byte_offset)));
}
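The sketch below is a minimal, standalone illustration (not LLVM code) of the offsetof/sizeof arithmetic used above to split the descriptor into three parts: the bytes before kernel_code_entry_byte_offset, the 8-byte offset field itself (which is emitted as a relocatable expression), and the remaining bytes. The struct is a hypothetical stand-in; its field names, sizes, and layout are assumptions for illustration only, not the real amdhsa::kernel_descriptor_t definition.

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for amdhsa::kernel_descriptor_t; layout is illustrative only.
struct FakeKernelDescriptor {
  uint32_t group_segment_fixed_size;
  uint32_t private_segment_fixed_size;
  uint8_t  reserved0[8];
  int64_t  kernel_code_entry_byte_offset;
  uint8_t  reserved1[40];
};

int main() {
  // Bytes emitted verbatim before the entry-offset field.
  const size_t Prefix = offsetof(FakeKernelDescriptor, kernel_code_entry_byte_offset);
  // The entry-offset field itself, emitted as (kernel code) - (kernel descriptor).
  const size_t Field  = sizeof(FakeKernelDescriptor::kernel_code_entry_byte_offset);
  // Bytes emitted verbatim after the entry-offset field.
  const size_t Suffix = sizeof(FakeKernelDescriptor) - Prefix - Field;
  // The three parts must cover the whole descriptor with no gaps or overlap.
  std::printf("prefix=%zu field=%zu suffix=%zu total=%zu\n",
              Prefix, Field, Suffix, Prefix + Field + Suffix);
  return 0;
}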
// True if the assembler knows nothing about the final value of the symbol.
// This doesn't cover the comdat issues, since in those cases the assembler
// can at least know that all symbols in the section will move together.
static bool isWeak(const MCSymbolELF &Sym) {
  if (Sym.getType() == ELF::STT_GNU_IFUNC)
    return true;

  switch (Sym.getBinding()) {
  default:
    llvm_unreachable("Unknown binding");
  case ELF::STB_LOCAL:
    return false;
  case ELF::STB_GLOBAL:
    return false;
  case ELF::STB_WEAK:
  case ELF::STB_GNU_UNIQUE:
    return true;
  }
}