/* Build bin->mlist from the ELF64 program headers: every PT_LOAD
 * segment is registered with its virtual address, a pointer to its
 * file data inside the mapping, its file size and its R/W/X flags.
 * Bounds of e_phoff/p_offset/p_filesz are validated by elf64_check(). */
static void elf64_load_mlist(r_binfmt_s *bin) {
  Elf64_Ehdr *ehdr = (Elf64_Ehdr*)bin->mapped;
  Elf64_Phdr *phdr;
  uint64_t mem_flags;
  uint64_t p_vaddr, p_offset, p_filesz;
  uint32_t p_type, p_flags;
  uint16_t e_phnum;
  int i;

  bin->mlist = r_binfmt_mlist_new();

  phdr = (Elf64_Phdr*)(bin->mapped + r_binfmt_get_int64((byte_t*)&ehdr->e_phoff, bin->endian));
  e_phnum = r_binfmt_get_int16((byte_t*)&ehdr->e_phnum, bin->endian);

  for(i = 0; i < e_phnum; i++) {
    p_type = r_binfmt_get_int32((byte_t*)&phdr[i].p_type, bin->endian);

    /* Only loadable segments end up in the memory list */
    if(p_type != PT_LOAD)
      continue;

    p_flags  = r_binfmt_get_int32((byte_t*)&phdr[i].p_flags,  bin->endian);
    p_vaddr  = r_binfmt_get_int64((byte_t*)&phdr[i].p_vaddr,  bin->endian);
    p_offset = r_binfmt_get_int64((byte_t*)&phdr[i].p_offset, bin->endian);
    p_filesz = r_binfmt_get_int64((byte_t*)&phdr[i].p_filesz, bin->endian);

    /* Translate ELF PF_* permission bits to the generic mem flags */
    mem_flags = 0;
    if(p_flags & PF_R)
      mem_flags |= R_BINFMT_MEM_FLAG_PROT_R;
    if(p_flags & PF_W)
      mem_flags |= R_BINFMT_MEM_FLAG_PROT_W;
    if(p_flags & PF_X)
      mem_flags |= R_BINFMT_MEM_FLAG_PROT_X;

    r_binfmt_mlist_add(bin->mlist, p_vaddr, bin->mapped + p_offset, p_filesz, mem_flags);
  }
}
/* Check some ELF structure fields.
 *
 * Validates that the program header table and every segment's file data
 * lie entirely inside the mapped file, with overflow-checked arithmetic.
 * Returns 1 when consistent, 0 otherwise.
 *
 * BUG FIX: the previous version computed
 *     r2 = e_phoff + e_phnum*sizeof(Elf64_Phdr)   (raw, unchecked multiply)
 * and then tested  r1 + r2 >= mapped_size, where r1 was already
 * e_phnum*sizeof(Elf64_Phdr) — counting the table size twice (rejecting
 * valid binaries) while the unchecked multiply and the final addition
 * could still wrap around (missing malformed ones).
 * Also uses '>' instead of '>=' so a table/segment ending exactly at
 * EOF is accepted, consistent with r_binfmt_macho64_check_segment(). */
static int elf64_check(r_binfmt_s *bin) {
  Elf64_Ehdr *ehdr = (Elf64_Ehdr*)bin->mapped;
  Elf64_Phdr *phdr;
  int i;
  uint64_t table_size, table_end, seg_end;
  uint64_t e_phoff, p_offset, p_filesz;
  uint16_t e_phnum;

  e_phoff = r_binfmt_get_int64((byte_t*)&ehdr->e_phoff, bin->endian);
  e_phnum = r_binfmt_get_int16((byte_t*)&ehdr->e_phnum, bin->endian);

  /* The program header table must start inside the file */
  if(e_phoff >= bin->mapped_size)
    return 0;

  /* e_phnum * sizeof(Elf64_Phdr) must not overflow... */
  if(!r_utils_mul64(&table_size, e_phnum, sizeof(Elf64_Phdr)))
    return 0;

  /* ...nor may e_phoff + table_size... */
  if(!r_utils_add64(&table_end, e_phoff, table_size))
    return 0;

  /* ...and the whole table must fit in the mapped file */
  if(table_end > bin->mapped_size)
    return 0;

  /* Check some phdr fields: each segment's file data must fit */
  phdr = (Elf64_Phdr*)(bin->mapped + e_phoff);

  for(i = 0; i < e_phnum; i++) {
    p_offset = r_binfmt_get_int64((byte_t*)&phdr[i].p_offset, bin->endian);
    p_filesz = r_binfmt_get_int64((byte_t*)&phdr[i].p_filesz, bin->endian);

    if(!r_utils_add64(&seg_end, p_offset, p_filesz))
      return 0;
    if(seg_end > bin->mapped_size)
      return 0;
  }

  return 1;
}
/* Register one Mach-O 64 segment in bin->mlist, translating the
 * segment's initial protection bits into the generic mem flags. */
static void r_binfmt_macho64_load_segment(r_binfmt_s *bin, r_binfmt_macho64_segment_s *seg) {
  u64 vaddr, filesz, fileoff;
  u32 flags, prot;

  vaddr   = r_binfmt_get_int64((byte_t*)&seg->vm_addr,   bin->endian);
  filesz  = r_binfmt_get_int64((byte_t*)&seg->file_size, bin->endian);
  fileoff = r_binfmt_get_int64((byte_t*)&seg->file_off,  bin->endian);
  prot    = r_binfmt_get_int32((byte_t*)&seg->init_prot, bin->endian);

  /* Map Mach-O VM protections onto the generic R/W/X flags */
  flags  = (prot & R_BINFMT_MACHO_PROT_R) ? R_BINFMT_MEM_FLAG_PROT_R : 0;
  flags |= (prot & R_BINFMT_MACHO_PROT_W) ? R_BINFMT_MEM_FLAG_PROT_W : 0;
  flags |= (prot & R_BINFMT_MACHO_PROT_X) ? R_BINFMT_MEM_FLAG_PROT_X : 0;

  r_binfmt_mlist_add(bin->mlist, vaddr, bin->mapped + fileoff, filesz, flags);
}
/* Check the fields of the macho64 segment: both the segment header
 * itself and the file range it references (file_off .. file_off +
 * file_size) must lie inside the mapped file.  All additions are
 * overflow-checked.  Returns 1 if valid, 0 otherwise. */
static int r_binfmt_macho64_check_segment(r_binfmt_s *bin, r_binfmt_macho64_segment_s *seg) {
  u64 filesz, fileoff, end;
  u64 hdr_off;

  /* Offset of the segment header inside the mapping */
  hdr_off = ((byte_t*)seg) - bin->mapped;

  /* Header must fit entirely in the file (NULL result: check only) */
  if(!r_utils_add64(NULL, hdr_off, sizeof(*seg)))
    return 0;
  if(hdr_off + sizeof(*seg) > bin->mapped_size)
    return 0;

  filesz  = r_binfmt_get_int64((byte_t*)&seg->file_size, bin->endian);
  fileoff = r_binfmt_get_int64((byte_t*)&seg->file_off,  bin->endian);

  /* Referenced file data must fit as well */
  if(!r_utils_add64(&end, fileoff, filesz))
    return 0;
  if(end > bin->mapped_size)
    return 0;

  return 1;
}
/* Check if NX bit is enabled (non-executable stack).
 *
 * The stack's executability is described by the PT_GNU_STACK program
 * header: its p_flags carry the stack permissions.
 *
 * BUG FIX: the previous version returned R_BINFMT_NX_ENABLED as soon as
 * a PT_GNU_STACK header was *present*, without reading p_flags.  A
 * binary linked with `-z execstack` carries PT_GNU_STACK with PF_X set
 * (executable stack) and was wrongly reported as NX-enabled. */
static r_binfmt_nx_e r_binfmt_elf64_check_nx(r_binfmt_s *bin) {
  Elf64_Ehdr *ehdr = (Elf64_Ehdr*)(bin->mapped);
  Elf64_Phdr *phdr;
  u32 i, p_type, p_flags;
  u16 e_phnum;

  phdr = (Elf64_Phdr*)(bin->mapped + r_binfmt_get_int64((byte_t*)&ehdr->e_phoff, bin->endian));
  e_phnum = r_binfmt_get_int16((byte_t*)&ehdr->e_phnum, bin->endian);

  for(i = 0; i < e_phnum; i++) {
    p_type = r_binfmt_get_int32((byte_t*)&phdr[i].p_type, bin->endian);

    if(p_type == PT_GNU_STACK) {
      p_flags = r_binfmt_get_int32((byte_t*)&phdr[i].p_flags, bin->endian);
      /* Stack marked executable => NX is off */
      return (p_flags & PF_X) ? R_BINFMT_NX_DISABLED : R_BINFMT_NX_ENABLED;
    }
  }

  /* No PT_GNU_STACK header: the loader defaults to an executable stack */
  return R_BINFMT_NX_DISABLED;
}
/* Return the entry point (e_entry) of the ELF64 binary,
 * decoded with the binary's endianness. */
static addr_t r_binfmt_elf64_getentry(r_binfmt_s *bin) {
  Elf64_Ehdr *ehdr;

  ehdr = (Elf64_Ehdr*)bin->mapped;
  return r_binfmt_get_int64((byte_t*)&ehdr->e_entry, bin->endian);
}