/* Copy all literals referenced from a code block to newspace.
   A block is scanned at most once per collection of a given generation:
   last_scan records the next generation whose collection requires a
   rescan of this block. */
void collect_literals_step(F_COMPILED *compiled, CELL code_start, CELL literals_start)
{
	/* Already scanned for this (and every younger) generation? */
	if(collecting_gen < compiled->last_scan)
		return;

	/* An accumulating generation keeps receiving objects, so this block
	must be rescanned the next time that same generation is collected;
	otherwise the next rescan is only needed for an older generation. */
	compiled->last_scan = collecting_accumulation_gen_p()
		? collecting_gen
		: collecting_gen + 1;

	CELL here;
	CELL literals_end = literals_start + compiled->literals_length;

	/* Forward every literal slot of the code block. */
	for(here = literals_start; here < literals_end; here += CELLS)
		copy_handle((CELL*)here);

	if(compiled->relocation != F)
	{
		/* The relocation table itself is a heap object; forward it first. */
		copy_handle(&compiled->relocation);

		F_BYTE_ARRAY *relocation = untag_object(compiled->relocation);

		F_REL *rel = (F_REL *)(relocation + 1);
		F_REL *rel_end = (F_REL *)((char *)rel + byte_array_capacity(relocation));

		/* Immediate-literal relocations embed a literal's address in the
		machine code; re-apply them since the literal may have moved. */
		while(rel < rel_end)
		{
			if(REL_TYPE(rel) == RT_IMMEDIATE)
			{
				CELL offset = rel->offset + code_start;
				F_FIXNUM absolute_value = get(CREF(literals_start,REL_ARGUMENT(rel)));
				apply_relocation(REL_CLASS(rel),offset,absolute_value);
			}

			rel++;
		}
	}

	/* Code bytes may have been patched above; keep the I-cache coherent. */
	flush_icache(code_start,literals_start - code_start);
}
/* Human-readable names for ELF relocation types (used for diagnostics).
   Indexed directly by relocation type number via C99 designated
   initializers: REL_TYPE(X) expands to `[X] = "X"`.
   NOTE(review): this table continues beyond the visible chunk — the
   #else branch and closing brace are not shown here. */
static const char *type_name[] = {
#define REL_TYPE(X) [X] = #X
#if ELF_BITS == 64
	/* x86-64 relocation types */
	REL_TYPE(R_X86_64_NONE),
	REL_TYPE(R_X86_64_64),
	REL_TYPE(R_X86_64_PC32),
	REL_TYPE(R_X86_64_GOT32),
	REL_TYPE(R_X86_64_PLT32),
	REL_TYPE(R_X86_64_COPY),
	REL_TYPE(R_X86_64_GLOB_DAT),
	REL_TYPE(R_X86_64_JUMP_SLOT),
	REL_TYPE(R_X86_64_RELATIVE),
	REL_TYPE(R_X86_64_GOTPCREL),
	REL_TYPE(R_X86_64_32),
	REL_TYPE(R_X86_64_32S),
	REL_TYPE(R_X86_64_16),
	REL_TYPE(R_X86_64_PC16),
	REL_TYPE(R_X86_64_8),
	REL_TYPE(R_X86_64_PC8),
#else
	/* i386 relocation types */
	REL_TYPE(R_386_NONE),
	REL_TYPE(R_386_32),
	REL_TYPE(R_386_PC32),
	REL_TYPE(R_386_GOT32),
	REL_TYPE(R_386_PLT32),
	REL_TYPE(R_386_COPY),
	REL_TYPE(R_386_GLOB_DAT),
	REL_TYPE(R_386_JMP_SLOT),
	REL_TYPE(R_386_RELATIVE),
	REL_TYPE(R_386_GOTOFF),
	REL_TYPE(R_386_GOTPC),
// Apply the ELF relocation table found at `offset` (of `size` bytes in the
// file) to the loaded segment `relSegment`. Returns false on I/O failure,
// allocation failure, or an unsupported relocation type.
bool ARMDLObject::relocate(Elf32_Off offset, Elf32_Word size, byte *relSegment) {
	// Grab a buffer for the relocation table.
	Elf32_Rel *table = (Elf32_Rel *)malloc(size);
	if (!table) {
		warning("elfloader: Out of memory.");
		return false;
	}

	// Pull the table in from the object file.
	if (!_file->seek(offset, SEEK_SET) || _file->read(table, size) != size) {
		warning("elfloader: Relocation table load failed.");
		free(table);
		return false;
	}

	uint32 count = size / sizeof(*table);
	debug(2, "elfloader: Loaded relocation table. %d entries. base address=%p", count, relSegment);

	int32 addend = 0;
	uint32 patched = 0;

	// Walk every entry and patch the word it points at.
	for (uint32 idx = 0; idx < count; idx++) {
		// Symbol this relocation entry refers to.
		Elf32_Sym *sym = _symtab + (REL_INDEX(table[idx].r_info));

		// Word in the loaded segment to be patched. TODO: respect _segmentVMA
		uint32 *target = (uint32 *)((byte *)relSegment + table[idx].r_offset);
		uint32 origTarget = *target;	// Saved for debugging only.

		// Dispatch on the relocation type.
		switch (REL_TYPE(table[idx].r_info)) {
		case R_ARM_ABS32:
			if (sym->st_shndx < SHN_LOPROC) {	// Only shift for plugin section.
				addend = *target;	// Get full 32 bits of addend
				patched = addend + Elf32_Addr(_segment);	// Shift by main offset
				*target = patched;
				debug(8, "elfloader: R_ARM_ABS32: i=%d, a=%x, origTarget=%x, target=%x", idx, addend, origTarget, *target);
			}
			break;

		case R_ARM_THM_CALL:
			debug(8, "elfloader: R_ARM_THM_CALL: PC-relative jump, ld takes care of necessary relocation work for us.");
			break;

		case R_ARM_CALL:
			debug(8, "elfloader: R_ARM_CALL: PC-relative jump, ld takes care of necessary relocation work for us.");
			break;

		case R_ARM_JUMP24:
			debug(8, "elfloader: R_ARM_JUMP24: PC-relative jump, ld takes care of all relocation work for us.");
			break;

		case R_ARM_V4BX:
			debug(8, "elfloader: R_ARM_V4BX: No relocation calculation necessary.");
			break;

		default:
			warning("elfloader: Unknown relocation type %d.", REL_TYPE(table[idx].r_info));
			free(table);
			return false;
		}
	}

	free(table);
	return true;
}