static SCM load_thunk_from_memory (char *data, size_t len) #define FUNC_NAME "load-thunk-from-memory" { Elf_Ehdr header; Elf_Phdr *ph; const char *err_msg = 0; char *base = 0; size_t n, memsz = 0, alignment = 8; int i; int first_loadable = -1; int start_segment = -1; int prev_segment = -1; int dynamic_segment = -1; SCM init = SCM_BOOL_F, entry = SCM_BOOL_F; if (len < sizeof header) ABORT ("object file too small"); memcpy (&header, data, sizeof header); if ((err_msg = check_elf_header (&header))) goto cleanup; n = header.e_phnum; if (len < header.e_phoff + n * sizeof (Elf_Phdr)) goto cleanup; ph = (Elf_Phdr*) (data + header.e_phoff); for (i = 0; i < n; i++) { if (!ph[i].p_memsz) continue; if (ph[i].p_filesz != ph[i].p_memsz) ABORT ("expected p_filesz == p_memsz"); if (!ph[i].p_flags) ABORT ("expected nonzero segment flags"); if (ph[i].p_align < alignment) { if (ph[i].p_align % alignment) ABORT ("expected new alignment to be multiple of old"); alignment = ph[i].p_align; } if (ph[i].p_type == PT_DYNAMIC) { if (dynamic_segment >= 0) ABORT ("expected only one PT_DYNAMIC segment"); dynamic_segment = i; } if (first_loadable < 0) { if (ph[i].p_vaddr) ABORT ("first loadable vaddr is not 0"); first_loadable = i; } if (ph[i].p_vaddr < memsz) ABORT ("overlapping segments"); if (ph[i].p_offset + ph[i].p_filesz > len) ABORT ("segment beyond end of byte array"); memsz = ph[i].p_vaddr + ph[i].p_memsz; } if (first_loadable < 0) ABORT ("no loadable segments"); if (dynamic_segment < 0) ABORT ("no PT_DYNAMIC segment"); /* Now copy segments. */ /* We leak this memory, as we leak the memory mappings in load_thunk_from_fd_using_mmap. If the file is has an alignment of 8, use the standard malloc. (FIXME to ensure alignment on non-GNU malloc.) Otherwise use posix_memalign. We only use mprotect if the aligment is 4096. 
*/ if (alignment == 8) { base = malloc (memsz); if (!base) goto cleanup; } else if ((errno = posix_memalign ((void **) &base, alignment, memsz))) goto cleanup; memset (base, 0, memsz); for (i = 0; i < n; i++) { if (!ph[i].p_memsz) continue; memcpy (base + ph[i].p_vaddr, data + ph[i].p_offset, ph[i].p_memsz); if (start_segment < 0) { start_segment = prev_segment = i; continue; } if (ph[i].p_flags == ph[start_segment].p_flags) { prev_segment = i; continue; } if (alignment == 4096) if (mprotect_segments (base, &ph[start_segment], &ph[prev_segment])) goto cleanup; /* Open a new set of segments. */ start_segment = prev_segment = i; } /* Mprotect the last segments. */ if (alignment == 4096) if (mprotect_segments (base, &ph[start_segment], &ph[prev_segment])) goto cleanup; if ((err_msg = process_dynamic_segment (base, &ph[dynamic_segment], &init, &entry))) goto cleanup; if (scm_is_true (init)) scm_call_0 (init); /* Finally! Return the thunk. */ return entry; cleanup: { if (errno) SCM_SYSERROR; scm_misc_error (FUNC_NAME, err_msg ? err_msg : "error loading ELF file", SCM_EOL); } }
/* Load the ELF image DATA of LEN bytes in place (no copy): validate its
   segment table, make writable pages writable if the backing mapping is
   read-only, run its init thunk (if any), register the image for
   introspection, and return the entry thunk.  DATA must already be laid
   out with segment vaddrs as offsets from DATA, and must be aligned to
   the image's strictest segment alignment.  Raises via SCM_SYSERROR or
   scm_misc_error on failure.  */
static SCM
load_thunk_from_memory (char *data, size_t len, int is_read_only)
#define FUNC_NAME "load-thunk-from-memory"
{
  Elf_Ehdr *header;
  Elf_Phdr *ph;
  const char *err_msg = 0;
  /* ALIGNMENT starts at the minimum (8) and grows to the strictest
     segment alignment seen in the table.  */
  size_t n, alignment = 8;
  int i;
  int dynamic_segment = -1;
  SCM init = SCM_BOOL_F, entry = SCM_BOOL_F;
  char *frame_maps = 0;

  if (len < sizeof *header)
    ABORT ("object file too small");

  header = (Elf_Ehdr*) data;

  if ((err_msg = check_elf_header (header)))
    goto cleanup;

  if (header->e_phnum == 0)
    ABORT ("no loadable segments");

  n = header->e_phnum;
  if (len < header->e_phoff + n * sizeof (Elf_Phdr))
    ABORT ("object file too small");
  ph = (Elf_Phdr*) (data + header->e_phoff);

  /* Check that the segment table is sane.  */
  for (i = 0; i < n; i++)
    {
      if (ph[i].p_filesz != ph[i].p_memsz)
        ABORT ("expected p_filesz == p_memsz");

      if (!ph[i].p_flags)
        ABORT ("expected nonzero segment flags");

      /* Adopt a stricter (larger) alignment when a segment requires
         one.  (Fixed: the comparison was `<', which could never raise
         ALIGNMENT above 8, so the IS_ALIGNED check below only ever
         verified 8-byte alignment of DATA.)  */
      if (ph[i].p_align > alignment)
        {
          if (ph[i].p_align % alignment)
            ABORT ("expected new alignment to be multiple of old");
          alignment = ph[i].p_align;
        }

      if (ph[i].p_type == PT_DYNAMIC)
        {
          if (dynamic_segment >= 0)
            ABORT ("expected only one PT_DYNAMIC segment");
          dynamic_segment = i;
          continue;
        }

      if (ph[i].p_type != PT_LOAD)
        ABORT ("unknown segment type");

      if (i == 0)
        {
          if (ph[i].p_vaddr != 0)
            ABORT ("first loadable vaddr is not 0");
        }
      else
        {
          /* Segments must be sorted and non-overlapping, and must lie
             entirely within the byte array.  */
          if (ph[i].p_vaddr < ph[i-1].p_vaddr + ph[i-1].p_memsz)
            ABORT ("overlapping segments");
          if (ph[i].p_offset + ph[i].p_filesz > len)
            ABORT ("segment beyond end of byte array");
        }
    }

  if (dynamic_segment < 0)
    ABORT ("no PT_DYNAMIC segment");

  if (!IS_ALIGNED ((scm_t_uintptr) data, alignment))
    ABORT ("incorrectly aligned base");

  /* Allow writes to writable pages.  */
  if (is_read_only)
    {
#ifdef HAVE_SYS_MMAN_H
      for (i = 0; i < n; i++)
        {
          if (ph[i].p_type != PT_LOAD)
            continue;
          if (ph[i].p_flags == PF_R)
            continue;                /* Read-only already.  */
          if (ph[i].p_align != 4096)
            continue;                /* Can't mprotect sub-page ranges.  */

          if (mprotect (data + ph[i].p_vaddr,
                        ph[i].p_memsz,
                        segment_flags_to_prot (ph[i].p_flags)))
            goto cleanup;            /* mprotect set errno.  */
        }
#else
      ABORT ("expected writable pages");
#endif
    }

  if ((err_msg = process_dynamic_segment (data, &ph[dynamic_segment],
                                          &init, &entry, &frame_maps)))
    goto cleanup;

  if (scm_is_true (init))
    scm_call_0 (init);

  register_elf (data, len, frame_maps);

  /* Finally!  Return the thunk.  */
  return entry;

 cleanup:
  {
    /* A set errno means a system call failed; otherwise report the
       validation message (or a generic one).  Both calls raise.  */
    if (errno)
      SCM_SYSERROR;
    scm_misc_error (FUNC_NAME, err_msg ? err_msg : "error loading ELF file",
                    SCM_EOL);
  }
}
/* Load an ELF image from the file FD by mmapping runs of segments that
   share the same protection flags, run its init thunk (if any), and
   return the entry thunk.  On error, FD is closed and an error is
   raised via SCM_SYSERROR (when errno is set) or scm_misc_error.
   NOTE(review): on the success path FD is not closed here — presumably
   the caller (or the mapping semantics) owns it; confirm at call
   sites.  */
static SCM
load_thunk_from_fd_using_mmap (int fd)
#define FUNC_NAME "load-thunk-from-disk"
{
  Elf_Ehdr header;
  Elf_Phdr *ph;
  const char *err_msg = 0;
  char *base = 0;
  size_t n;
  int i;
  /* [start_segment, prev_segment] delimits the current run of segments
     with identical p_flags, coalesced into a single map_segments call.  */
  int start_segment = -1;
  int prev_segment = -1;
  int dynamic_segment = -1;
  SCM init = SCM_BOOL_F, entry = SCM_BOOL_F;

  if (full_read (fd, &header, sizeof header) != sizeof header)
    ABORT ("object file too small");

  if ((err_msg = check_elf_header (&header)))
    goto cleanup;

  if (lseek (fd, header.e_phoff, SEEK_SET) == (off_t) -1)
    goto cleanup;  /* lseek set errno; SCM_SYSERROR fires in cleanup.  */

  /* Read the whole program header table into GC-managed memory.  */
  n = header.e_phnum;
  ph = scm_gc_malloc_pointerless (n * sizeof (Elf_Phdr), "segment headers");

  if (full_read (fd, ph, n * sizeof (Elf_Phdr)) != n * sizeof (Elf_Phdr))
    ABORT ("failed to read program headers");

  for (i = 0; i < n; i++)
    {
      /* Skip zero-sized segments.  */
      if (!ph[i].p_memsz)
        continue;

      if (ph[i].p_filesz != ph[i].p_memsz)
        ABORT ("expected p_filesz == p_memsz");

      if (!ph[i].p_flags)
        ABORT ("expected nonzero segment flags");

      if (ph[i].p_type == PT_DYNAMIC)
        {
          if (dynamic_segment >= 0)
            ABORT ("expected only one PT_DYNAMIC segment");
          dynamic_segment = i;
        }

      if (start_segment < 0)
        {
          /* First segment of a run.  BASE is still null before the very
             first mapping, in which case the segment must load at
             vaddr 0.  */
          if (!base && ph[i].p_vaddr)
            ABORT ("first loadable vaddr is not 0");
          start_segment = prev_segment = i;
          continue;
        }

      if (ph[i].p_flags == ph[start_segment].p_flags)
        {
          /* Same protection flags: extend the current run, but only if
             the file layout matches the memory layout so one mapping
             can cover both.  */
          if (ph[i].p_vaddr - ph[prev_segment].p_vaddr
              != ph[i].p_offset - ph[prev_segment].p_offset)
            ABORT ("coalesced segments not contiguous");
          prev_segment = i;
          continue;
        }

      /* Otherwise we have a new kind of segment.  Map previous
         segments.  */
      if (map_segments (fd, &base, &ph[start_segment], &ph[prev_segment]))
        goto cleanup;  /* map_segments set errno.  */

      /* Open a new set of segments.  */
      start_segment = prev_segment = i;
    }

  /* Map last segments.  */
  if (start_segment < 0)
    ABORT ("no loadable segments");

  if (map_segments (fd, &base, &ph[start_segment], &ph[prev_segment]))
    goto cleanup;

  if (dynamic_segment < 0)
    ABORT ("no PT_DYNAMIC segment");

  if ((err_msg = process_dynamic_segment (base, &ph[dynamic_segment],
                                          &init, &entry)))
    goto cleanup;

  if (scm_is_true (init))
    scm_call_0 (init);

  /* Finally!  Return the thunk.  */
  return entry;

  /* FIXME: munmap on error?  */
 cleanup:
  {
    /* Preserve the failing call's errno across close(), which may
       clobber it.  */
    int errno_save = errno;
    (void) close (fd);
    errno = errno_save;
    if (errno)
      SCM_SYSERROR;
    scm_misc_error (FUNC_NAME, err_msg ? err_msg : "error loading ELF file",
                    SCM_EOL);
  }
}