/**
 * Translate a direct jmp instruction (opcode 0xE9 rel32, or the short
 * form with an 8-bit displacement) at ts->cur_instr.
 *
 * Computes the absolute jump target (EIP-relative: addr + length + offset),
 * optionally resolves/inlines jumps that go through the PLT, then either:
 *  - returns OPEN if the target is not yet translated (translation simply
 *    continues at the target, inlining it into the current code block), or
 *  - emits a jmp rel32 to the already-translated target and returns CLOSE.
 *
 * @param ts translation state; reads cur_instr/next_instr, advances
 *           transl_instr and may redirect next_instr to the jump target.
 * @return OPEN when translation must continue at the target, CLOSE when a
 *         jmp to translated code was emitted and the block is finished.
 */
enum translation_state action_jmp(struct translate *ts) {
  unsigned char *addr = ts->cur_instr;
  unsigned char *original_addr = addr;
  /* NOTE(review): %x with pointer arguments — %p would be the matching
     conversion; verify PRINT_DEBUG's format handling. */
  PRINT_DEBUG("original_addr=%x / addr=%x\n", original_addr, addr);
  unsigned char* transl_addr = ts->transl_instr;
  /* length of the original instruction in bytes */
  int length = ts->next_instr - ts->cur_instr;
  PRINT_DEBUG_FUNCTION_START("action_jmp(*addr=%p, *transl_addr=%p, length=%i)",
                             addr, transl_addr, length);
#if defined(FBT_STATISTIC)
  fbt_nr_translated_jmp++;
#endif
  /* read the jmp argument (either 8bit or 32bit offset) and add EIP
     (EIP = addr + length) to the argument
     --> absolute target address = addr + length + offset */
#if defined(ASSERTIONS)
  assert(!HAS_PREFIX(*addr)); /* no prefixes allowed */
#endif
  int32_t jump_target=0;
  if (*addr == 0xE9) {
    /* 32bit offset */
    jump_target = *((int32_t*)(addr + 1)) + (int32_t)original_addr + length;
    PRINT_DEBUG("jump_target = %x + %x + %x", *(int32_t*)(addr + 1),
                original_addr, length);
  } else {
    /* our argument is only an 8bit offset (sign-extended via char) */
    jump_target = (int32_t)(*((char*)(addr + 1)) + original_addr + length);
  }
  PRINT_DEBUG("original jmp_target: %p", (void*)jump_target);
  /* Some binaries (e.g. /bin/ls) use direct jmps into the PLT instead of
     direct calls; these jmps must be detected and inlined correctly. */
#if defined(INLINE_PLT_CALLS)
  /* Check if the jmp targets the plt section and resolve the real
     destination address. */
  unsigned long resolved_addr = sl_resolve_plt_call(jump_target,
                                                    ts->tld->dso_objects);
  /* NOTE(review): unsigned long compared against int32_t jump_target —
     relies on matching 32-bit widths; confirm for this target ABI. */
  if (resolved_addr != jump_target) {
    /* PLT call, check the transfer and inline it */
#if defined(VERIFY_CFTX)
#if defined(ENABLE_TRANSLATION_TIME_SYMBOL_LOOKUP)
    struct sh_symbol* symbol = fbt_find_symbol(ts->tld, addr);
    fbt_check_transfer(ts->tld, original_addr, (unsigned char*)&resolved_addr,
                       CFTX_CALL_IND, symbol); /* treat like an indirect call */
#else
    fbt_check_transfer(ts->tld, original_addr, (unsigned char*)&resolved_addr,
                       CFTX_CALL_IND); /* treat like an indirect call */
#endif
#endif /* VERIFY_CFTX */
    jump_target = resolved_addr;
  }
#endif /* End - PLT Jmps inlining! */
  /* check if the target is already translated; if it is not, do so now */
  void *transl_target = fbt_ccache_find(ts->tld, (void*)jump_target);
  if (transl_target == NULL) {
    /* we still have to translate the jump target */
    PRINT_DEBUG_FUNCTION_END("-> open, transl_length=0");
    /* no need to actually jump: simply change the next-instr pointer to the
       first instr of the target; this places the target's body directly as
       the next instruction in the translated code (inlining) */
    ts->next_instr = (unsigned char*)jump_target;
    /* put the target into the tcache so later jumps can use the
       translated code */
#ifndef TRACK_BASIC_BLOCKS
    //fbt_ccache_add_entry(ts->tld, (void*)jump_target, ts->transl_instr);
#endif
    return OPEN;
  }
  PRINT_DEBUG("translated jmp_target: %p", transl_target);
  /* write: jmp rel32 to the translated target */
#if defined(TRACK_CFTX)
  /* location = transl_addr + 1 skips the 1-byte jmp opcode so it points at
     the rel32 operand */
  struct control_flow_transfer cft = { .location = transl_addr + 1,
                                       .original = addr };
  fbt_store_cftx(ts->tld, &cft);
#endif /* TRACK_CFTX */
  JMP_REL32(transl_addr, (int32_t)transl_target);
  PRINT_DEBUG_FUNCTION_END("-> close, transl_length=%i",
                           transl_addr - ts->transl_instr);
  ts->transl_instr = transl_addr;
  return CLOSE;
}

enum translation_state action_jmp_indirect(struct translate *ts) { unsigned char *addr =
ts->cur_instr; unsigned char* transl_addr = ts->transl_instr; unsigned char *first_byte_after_opcode = ts->first_byte_after_opcode; int length = ts->next_instr - ts->cur_instr; PRINT_DEBUG_FUNCTION_START("action_jmp_indirect(*addr=%p, *transl_addr=%p, " \ "length=%i)", addr, transl_addr, length); #if defined(FBT_STATISTIC) fbt_nr_translated_jmp_ind++; #endif #if defined(SECURITY_METRICS_AND_STATS) && defined(VERIFY_CFTX) struct dso_chain *cur = ts->tld->dso_objects; long ret; while (cur != 0) { /* is the pointer in the current dso? */ if (PTR_IN_REGION(addr, cur->baseaddr, cur->endaddr-cur->baseaddr)) { FBT_UNPROT_DATA(cur, sizeof(struct dso_chain), ret, "action_jmp_indirect: unprotect dso failed."); if(cur->nr_ijmps >= ICF_TABLES_MAX_ENTRIES) { llprintf("ERROR: ijmps table out of space (fbt_actions.c), nr of entries: %d\n", cur->nr_ijmps); fbt_exit_suicide(45); } int i = 0; int found = 0; /* let's see if we already have this ICF in the list */ for(i = 0; i < cur->nr_ijmps; i++) { if(addr == cur->ijmps[i]) { found = 1; break; } } if(!found) { cur->ijmps[cur->nr_ijmps] = addr; cur->nr_ijmps++; } /* TODO: make this thread safe! */ //FBT_PROT_DATA(cur, sizeof(struct dso_chain), ret, "action_jmp_indirect: protect dso failed."); break; } cur = cur->next; } #endif /* defined(SECURITY_METRICS_AND_STATS) && defined(VERIFY_CFTX) */ if (ts->num_prefixes != 0) { /* no prefixes allowed */ fbt_suicide_str("No prefixes handled in action_jmp_indirect! 
" \ "(fbt_actions.c)\n"); } /* this is a fast version of the ind jmp - handoptimized assembler code * which does a fast lookup in the hashtable and dispatches if it hits * otherwise it recovers to an indirect jump */ /** * pushl $target * jmpl tld->ind_jump_trampoline */ /* write: push indirect target */ *transl_addr++ = 0xFF; /* * 0xFF expects a ModR/M byte following the opcode * The bits 3-5 are part of the opcode (opcode extension into ModR/M byte), * so we copy the ModR/M byte, but modify the opcode extension to 110 */ *transl_addr++ = (*first_byte_after_opcode & 0xC7) | 0x30; //llprintf("action_jmp_indirect: %x\n", addr); /* if there follows a displacement copy this to the ccf */ if (length > 2) { fbt_memcpy(transl_addr, (addr + 2), length - 2); //llprintf("action_jmp_indirect length > 2: %x\n", addr + 2); transl_addr += length - 2; } #if defined(VERIFY_CFTX) #if defined(VERIFY_CFTX_ENABLE_IJMP_LSYMOPT) || defined(ENABLE_TRANSLATION_TIME_SYMBOL_LOOKUP) struct sh_symbol* symbol = fbt_find_symbol(ts->tld, addr); #endif #if defined(VERIFY_CFTX_ENABLE_IJMP_LSYMOPT) if(symbol && symbol->start && symbol->size) { BEGIN_ASM(transl_addr) movl %esp, {ts->tld->stack-2} movl ${ts->tld->stack-2}, %esp
/**
 * Load an ELF32 shared object (or executable) and all of its dependencies.
 *
 * Maps the file, parses program/section headers and the dynamic section,
 * recursively loads DT_NEEDED libraries, builds the breadth-first local
 * symbol-search scope, initializes the GOT and (under VERIFY_CFTX)
 * registers potential callback targets for control-flow checks.
 *
 * @param loader    DSO that requested this load (NULL for the main object)
 * @param name      library name (ownership: caller; stored in so->name)
 * @param path      file path (stored in so->path)
 * @param fd        open file descriptor of the ELF file (closed before return)
 * @param rt_load   nonzero if loading at runtime -> errors longjmp via
 *                  signal_error instead of exiting
 * @param rtld_mode 1 = use program headers from the auxiliary vector
 * @param auxv      auxiliary vector (only used when rtld_mode == 1)
 * @return the newly created dso; exits (or signals) on any error.
 */
dso *load_elf(dso *loader, const char *name, const char *path, long fd,
              unsigned char rt_load, unsigned int rtld_mode,
              Elf32_auxv_t *auxv) {
#ifdef D_LOAD
  sl_printf("\nLoading elf file: %s (%s)\n", path, name);
#endif

#ifdef SL_STATISTIC
  /* Some statistics */
  loaded_dsos++;
  curr_loaded_dsos++;
  max_loaded_dsos = MAX(max_loaded_dsos, curr_loaded_dsos);
#endif

  /* Get file information */
  struct kernel_stat file_info;
  if(sl_fstat(fd, &file_info) == -1) {
    /* Close file */
    sl_close(fd);
    /* Signal error (longjmp) if we load at runtime */
    if(rt_load)
      signal_error(0, name, 0, "fstat failed");
    /* Not at runtime -> fail */
    sl_printf("Error load_elf: fstat failed while loading %s.\n", name);
    sl_exit(1);
  }

  /* Map entire file in memory (read-only; unmapped before return) */
  void *file_map = sl_mmap(0, file_info.st_size, PROT_READ, MAP_PRIVATE,
                           fd, 0);
  if ((long)file_map == -1) {
    /* Close file */
    sl_close(fd);
    /* Signal error (longjmp) if we load at runtime */
    if(rt_load)
      signal_error(0, name, 0, "mmap failed");
    /* Not at runtime -> fail */
    sl_printf("Error load_elf: mmap of file %s failed.\n", name);
    sl_exit(1);
  }

  /* Get ELF header and check file */
  Elf32_Ehdr *elf_hdr = (Elf32_Ehdr *) file_map;
  long valid = check_elf(elf_hdr);
  if (valid != 0) {
    /* Invalid elf file */
    sl_close(fd);
    /* Signal error (longjmp) if we load at runtime */
    if(rt_load)
      signal_error(0, name, 0, "invalid ELF file");
    /* Not at runtime -> fail
       NOTE(review): valid is long but printed with %d — verify sl_printf. */
    sl_printf("Error load_elf: %s is not a valid ELF file (error: %d).\n",
              name, valid);
    sl_exit(1);
  }

  /* Get program and section header */
  Elf32_Phdr *program_hdr = (Elf32_Phdr *) (file_map + elf_hdr->e_phoff);
  Elf32_Shdr *shdr = (Elf32_Shdr *) (file_map + elf_hdr->e_shoff);

  /* Segments (text and data) which we have to map in memory */
  Elf32_Phdr *load_segments[2];
  long num_load = 0;

  /* Create new shared object (zero-initialized) */
  dso *so = sl_calloc(sizeof(dso), 1);

  /* Iterate over program headers */
  unsigned long i = 0;
  for(i = 0; i < elf_hdr->e_phnum; ++i) {
    switch (program_hdr[i].p_type) {
      case PT_DYNAMIC: /* Dynamic header; p_vaddr is unrelocated here and
                          adjusted by BYTE_STEP after mapping below */
        so->dynamic_section = (Elf32_Dyn *)(program_hdr[i].p_vaddr);
        break;
      case PT_LOAD: /* Segment must be mapped in memory */
        if (num_load >= 2) {
          sl_printf("Error load_elf: more than two PT_LOAD segments!");
          sl_exit(1);
        }
        load_segments[num_load++] = program_hdr+i;
        break;
      case PT_TLS: /* Thread Local Storage information */
        if (program_hdr[i].p_memsz == 0)
          break;
        /* Initialize TLS information */
        so->tls_blocksize = program_hdr[i].p_memsz;
        so->tls_align = program_hdr[i].p_align;
        so->tls_initimage_size = program_hdr[i].p_filesz;
        /* TLS image (addr later adjusted) */
        so->tls_initimage = (void *) program_hdr[i].p_vaddr;
        /* Assign next module ID */
        so->tls_modid = ++GL(_dl_tls_max_dtv_idx);
        break;
      /*
      case PT_GNU_STACK:
        if (program_hdr[i].p_flags & PF_X) {
          sl_printf("Warning: executable stack\n");
          sl_exit(1);
        }
        break;
      */
      case PT_GNU_RELRO: /* Sections to set readonly after relocation */
        /* Address is later adjusted */
        so->relro = program_hdr[i].p_vaddr;
        so->relro_size = program_hdr[i].p_memsz;
        break;
    }
  }

  /* Map segments into memory and initialize the dso struct */
  if(rtld_mode == 1 && auxv != NULL) {
    /* RTLD mode: take the already-mapped program headers from the
       auxiliary vector instead of the file */
    Elf32_Phdr *program_hdr_auxv;
    Elf32_Phdr *load_segments_auxv[2];
    program_hdr_auxv = (Elf32_Phdr *)get_aux_value(auxv, AT_PHDR);
    uint32_t auxv_e_phnum = (uint32_t)get_aux_value(auxv, AT_PHNUM);
    unsigned long j = 0;
    unsigned int nr_load = 0;
    for(j = 0; j < auxv_e_phnum; j++) {
      switch (program_hdr_auxv[j].p_type) {
        case PT_LOAD: /* Segment must be mapped in memory */
          if (nr_load >= 2) {
            sl_exit(1);
          }
          load_segments_auxv[nr_load++] = program_hdr_auxv+j;
          break;
      }
    }
    map_segments_RTLD(fd, load_segments, elf_hdr->e_type, so,
                      load_segments_auxv);
  } else {
    map_segments(fd, load_segments, elf_hdr->e_type, so);
  }

  /* Basic bookkeeping; base_addr/text_addr were set by map_segments* */
  so->ref_count = 1;
  so->deps_count = 0;
  so->name = name;
  so->path = path;
  so->type = elf_hdr->e_type;
  so->entry = (void*) elf_hdr->e_entry;
  so->loader = loader;
  /* Relocate the dynamic-section pointer recorded during the header scan */
  so->dynamic_section = (Elf32_Dyn *) BYTE_STEP(so->dynamic_section,
                                                so->base_addr);
  so->program_header = (Elf32_Phdr *) BYTE_STEP(elf_hdr->e_phoff,
                                                so->text_addr);
  so->program_header_num = elf_hdr->e_phnum;
  so->l_real = so;

  /* Adjust address of TLS init image and relro address */
  if (so->tls_initimage) {
    so->tls_initimage = (char *)so->tls_initimage + (long)so->base_addr;
  }
  if (so->relro) {
    so->relro = (Elf32_Addr) BYTE_STEP(so->relro, so->base_addr);
  }

  /* Iterate over section headers and remember .got/.plt/.got.plt
     (length includes the NUL so comparisons are exact matches) */
  char *strtab = (char *)file_map + shdr[elf_hdr->e_shstrndx].sh_offset;
  char *sname=0;
  for (i=0; i<elf_hdr->e_shnum; ++i) {
    sname = strtab + shdr[i].sh_name;
    /* Save important sections */
    if (sl_strncmp(sname, ".got", 5) == 0) {
      so->got = (char *) (so->base_addr + shdr[i].sh_addr);
      so->got_size = shdr[i].sh_size;
    }
    if (sl_strncmp(sname, ".plt", 5) == 0) {
      so->plt = (char *) (so->base_addr + shdr[i].sh_addr);
      so->plt_size = shdr[i].sh_size;
    }
    if (sl_strncmp(sname, ".got.plt", 9) == 0) {
      so->gotplt = (char *) (so->base_addr + shdr[i].sh_addr);
      so->gotplt_size = shdr[i].sh_size;
    }
  }

  /* First pass over the dynamic section: resolve all entries except
     DT_NEEDED names (the string table pointer is needed first) */
  Elf32_Dyn *dyn;
  long rpath=-1;
  for (dyn = so->dynamic_section; dyn->d_tag != DT_NULL; ++dyn) {
    switch (dyn->d_tag) {
      case DT_INIT: /* Initialization function */
        so->init = (void (*)(int, char**, char**))
          BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
        break;
      case DT_INIT_ARRAY: /* Array of initialization functions */
        so->init_array =
          (Elf32_Addr *)BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
        break;
      case DT_INIT_ARRAYSZ: /* Size of init array */
        so->init_array_sz = (long)dyn->d_un.d_val / sizeof(Elf32_Addr);
        break;
      case DT_FINI: /* Finalization function */
        so->fini = (void (*)()) BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
        break;
      case DT_FINI_ARRAY: /* Array of finalization functions */
        so->fini_array =
          (Elf32_Addr *)BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
        break;
      case DT_FINI_ARRAYSZ: /* Size of fini array */
        so->fini_array_sz = (long)dyn->d_un.d_val / sizeof(Elf32_Addr);
        break;
      case DT_RUNPATH: /* String with library search paths */
        rpath = dyn->d_un.d_val;
        break;
      case DT_RPATH: /* String with library search paths
                        (DT_RUNPATH takes precedence if both are present) */
        if (rpath == -1)
          rpath = dyn->d_un.d_val;
        break;
      case DT_PLTGOT: /* Plt part of the global offset table */
        so->gotplt = (char *) BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
        break;
      case DT_REL: /* Relocation table */
        so->rel = (Elf32_Rel *) BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
        break;
      case DT_RELSZ: /* Size of the relocation table (in entries) */
        so->relsz = (long)dyn->d_un.d_val / sizeof(Elf32_Rel);
        break;
      case DT_JMPREL: /* Plt relocations part of relocation table */
        so->pltrel = (Elf32_Rel *) BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
        break;
      case DT_PLTRELSZ: /* Size of plt relocations part of relocation table */
        so->pltrelsz = (long)dyn->d_un.d_val / sizeof(Elf32_Rel);
        break;
      case DT_HASH: /* ELF hash table */
        so->hash_table =
          (Elf32_Word *) BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
        break;
      case DT_GNU_HASH: /* GNU hash table */
        so->gnu_hash_table =
          (Elf32_Word *) BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
        break;
      case DT_SYMTAB: /* Dynamic symbol table */
        so->symbol_table =
          (Elf32_Sym *) BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
        break;
      case DT_VERDEF: /* Versions defined in this DSO */
        so->verdef =
          (Elf32_Verdef *) BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
        break;
      case DT_VERDEFNUM: /* Number of versions defined in this DSO */
        so->verdef_num = (unsigned long) dyn->d_un.d_val;
        break;
      case DT_VERNEED: /* Versions needed by this DSO */
        so->verneed =
          (Elf32_Verneed *) BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
        break;
      case DT_VERNEEDNUM: /* Number of versions needed by this DSO */
        so->verneed_num = (unsigned long) dyn->d_un.d_val;
        break;
      case DT_VERSYM: /* Version symbol table */
        so->versym = (Elf32_Half *) BYTE_STEP(dyn->d_un.d_ptr, so->base_addr);
        break;
      case DT_STRTAB: /* Dynamic string table */
        so->string_table = (char *) so->base_addr + dyn->d_un.d_ptr;
        break;
      case DT_NEEDED: /* Dependencies on other DSOs */
        /* Count the number of direct dependencies */
        so->deps_count++;
        break;
      case DT_FLAGS: /* Flags */
        so->flags = dyn->d_un.d_val;
        if ((so->flags & DF_SYMBOLIC) ||
            (so->flags & DF_TEXTREL)) {
          sl_printf("Error load_elf: not supported flag 0x%x in %s.\n",
                    so->flags, so->name);
          sl_exit(1);
        }
        break;
      case DT_FLAGS_1: /* Flags */
        so->flags_1 = dyn->d_un.d_val;
        if ((so->flags_1 & DF_1_GROUP)
            || (so->flags_1 & DF_1_LOADFLTR)
            || (so->flags_1 & DF_1_DIRECT)
            || (so->flags_1 & DF_1_INTERPOSE)
            || (so->flags_1 & DF_1_NODEFLIB)
            // || (so->flags_1 & DF_1_NODUMP)
            || (so->flags_1 & DF_1_CONFALT)
            || (so->flags_1 & DF_1_ENDFILTEE)
            || (so->flags_1 & DF_1_DISPRELDNE)
            || (so->flags_1 & DF_1_DISPRELPND)) {
          sl_printf("Error load_elf: not supported flag_1 0x%x in %s.\n",
                    so->flags_1, so->name);
          sl_exit(1);
        }
        break;
    }
  }

  /* Initialize the versioning data */
  init_versions(so);

  /* Set library search paths */
  if (rpath != -1)
    so->search_path = decompose_path(so->string_table+rpath,so->name,
                                     "RPATH");

  /* Allocate memory for deps */
  if (so->deps_count != 0)
    so->deps = sl_malloc(so->deps_count * sizeof(dso *));

  /* Add shared object to chain (must happen before recursing so that
     dependencies can find it via chain_search) */
  chain_add(so);

  /* Now that we have the stringtable, iterate a second time over the
     dynamic section to get the names of the needed libraries. */
  char *lib_name = 0;
  long num = 0;
  for (dyn = so->dynamic_section; dyn->d_tag != DT_NULL; dyn++) {
    switch (dyn->d_tag) {
      case DT_NEEDED:
        /* Get name of needed lib */
        lib_name = (char *)so->string_table + dyn->d_un.d_val;
#ifdef D_LOAD
        sl_printf("Found dependency in %s: %s\n", so->name, lib_name);
#endif
        /* Do not load the linux dynamic loader, because we replace it;
           'continue' skips to the next dynamic entry */
        if (sl_strncmp(lib_name, LINUX_LOADER,
                       sl_strnlen(LINUX_LOADER, MAX_LIB_NAME))==0) {
          so->deps_count--;
          continue;
        }
        /* Check if we already loaded it */
        dso *so_search = chain_search(lib_name);
        if (so_search == 0) {
          /* Not already loaded, search for it.
             NOTE(review): this 'fd' shadows the function parameter 'fd' —
             intentional here (the new lib's fd is consumed by the recursive
             load), but easy to misread. */
          char *lib_path;
          long fd = search_lib(so, lib_name, &lib_path);
          if (fd == -1) {
            /* Not found, signal error (longjmp) if we load at runtime */
            if(rt_load)
              signal_error(0, lib_name, 0,
                           "cannot open shared object file");
            /* Not at runtime -> fail */
            sl_printf("Error load_elf: lib %s not found.\n", lib_name);
            sl_exit(1);
          }
          /* Copy name */
          char *lname = sl_malloc(MAX_LIB_NAME);
          sl_strncpy(lname, lib_name, MAX_LIB_NAME);
          PROT_DATA(lname, MAX_LIB_NAME);
          /* Load it (recursively) */
          dso *so_loaded = load_elf(so, lname, lib_path, fd, rt_load, 0,
                                    NULL);
          /* Increment local scope counter and add to direct deps */
          so->lscope_num += so_loaded->lscope_num;
          so->deps[num] = so_loaded;
        } else {
          /* Increment reference counter */
          UNPROT(so_search);
          so_search->ref_count++;
          PROT(so_search);
          /* Increment local scope counter and add to direct deps */
          so->lscope_num += so_search->lscope_num;
          so->deps[num] = so_search;
        }
        num++;
        so->lscope_num++;
        break;
    }
  }
  /* +1 for the object itself */
  so->lscope_num++;

  /* Create local scope list. This has to be done in breadth-first order! */
  so->lscope = sl_malloc(so->lscope_num * sizeof(dso *));
  long j,k,l;
  i = 0;
  /* Add object itself */
  so->lscope[i++] = so;
  /* First add direct dependencies */
  for (l=0; l<so->deps_count; ++l) {
    so->lscope[i] = so->deps[l];
    ++i;
  }
  /* Now add deps recursively (deduplicated linear scan) */
  for (l=0; l<so->deps_count; ++l) {
    for (k=0; k<so->deps[l]->lscope_num; ++k) {
      dso *dep = so->deps[l]->lscope[k];
      /* Check if already added */
      long found = 0;
      for (j=0; j<i; ++j) {
        if (so->lscope[j] == dep)
          found = 1;
      }
      if (found || !dep)
        continue;
      so->lscope[i] = dep;
      ++i;
    }
  }

  /* Initialize Global Offset Table */
  init_got(so);

#if defined(VERIFY_CFTX)
  /* Add object to dso chain if libdetox wants to check the control flow
     transfers */
  add_dso(so, (char *)file_map);

#if defined(CALLBACK_MAIN_DETECTION) || defined(CALLBACK_DATA_SECTION_SEARCH)
  /* Right after loading the dso we need to detect the libc callbacks to
     main/__libc_csu_init */
  if(so->loader == NULL && dso_chain->next != 0) {
    /* This is the main executable. Get the immediate values pushed on the
       stack right before __libc_start_main is called. */
#if defined(CALLBACK_MAIN_DETECTION)
    /* This is the main function callback detection hack!
     * - it tries to find the callback pointers passed to libc
     * - and it adds them to the callback table so CFTX checks will pass */
    long count = 0;
    unsigned char *iptr;
    unsigned long ptr;
    if(so->type == ET_EXEC)
      iptr = (unsigned char *)so->entry;
    else /* PIE executable */
      iptr = (unsigned char *)((unsigned long)so->base_addr
                               + (unsigned long)so->entry);
    /* TODO: 48? Remove hardcoded value.
       NOTE(review): scans at most 48 bytes of the entry stub for
       'push imm32' (0x68) — assumes the standard _start layout. */
    while(count<48) {
      /* is it a push instruction with a 32bit immediate value? */
      if(*iptr == 0x68) {
        /* add the immediate value pushed to the stack */
        ptr = *((unsigned long*)(++iptr));
        fbt_add_callback(dso_chain->next, (void*)ptr);
        iptr+=4;
        count+=5;
      } else {
        iptr++;
        count++;
      }
      /* in case we reached NOPs we can just stop looking for the pushes */
      if(*iptr == 0x90)
        break;
    }
#endif /* CALLBACK_MAIN_DETECTION */
#if defined(CALLBACK_DATA_SECTION_SEARCH)
    /* Some x* applications have global function pointers in their .data
     * section. These are probably widget class objects and their members.
     * It seems that there is no other way to detect these potential
     * callbacks than scanning through the .data section. This has only to
     * be done for prelinked executables as we will detect the other
     * callbacks during relocation. */
    unsigned long *dptr = so->data_addr;
    if(dptr) {
      while((unsigned long)dptr < ((unsigned long)so->data_addr
                                   +(unsigned long)so->data_size)) {
        /* check if the obtained address points to executable memory
           (potential callback target) */
        if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
            || PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
            || PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {
          fbt_add_callback(so->dso, (void*)*dptr);
        }
        dptr++; /* advance by sizeof(unsigned long) bytes */
      }
    }
    /* go through GOT */
    dptr = (unsigned long*)(so->got);
    if(dptr) {
      while((unsigned long)dptr < ((unsigned long)so->got
                                   +(unsigned long)so->got_size)) {
        /* check if the obtained address points to executable memory
           (potential callback target) */
        if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
            || PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
            || PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {
          fbt_add_callback(so->dso, (void*)*dptr);
        }
        dptr++; /* advance by sizeof(unsigned long) bytes */
      }
    }
    /* go through GOT.PLT */
    dptr = (unsigned long*)(so->gotplt);
    if(dptr) {
      while((unsigned long)dptr < ((unsigned long)so->gotplt
                                   +(unsigned long)so->gotplt_size)) {
        /* check if the obtained address points to executable memory
           (potential callback target) */
        if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
            || PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
            || PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {
          fbt_add_callback(so->dso, (void*)*dptr);
        }
        dptr++; /* advance by sizeof(unsigned long) bytes */
      }
    }
    /* go through rodata */
    dptr = (unsigned long*)(so->dso->rodata);
    if(dptr) {
      while((unsigned long)dptr < ((unsigned long)so->dso->rodata
                                   +(unsigned long)so->dso->rodata_size)) {
        /* check if the obtained address points to executable memory
           (potential callback target) */
        if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
            || PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
            || PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {
          fbt_add_callback(so->dso, (void*)*dptr);
        }
        dptr++; /* advance by sizeof(unsigned long) bytes */
      }
    }
    /* go through reldyn */
    dptr = (unsigned long*)(so->dso->reldyn);
    if(dptr) {
      while((unsigned long)dptr < ((unsigned long)so->dso->reldyn
                                   +(unsigned long)so->dso->reldyn_size)) {
        /* check if the obtained address points to executable memory
           (potential callback target) */
        if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
            || PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
            || PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {
          fbt_add_callback(so->dso, (void*)*dptr);
        }
        dptr++; /* advance by sizeof(unsigned long) bytes */
      }
    }
    /* go through relplt */
    dptr = (unsigned long*)(so->dso->relplt);
    if(dptr) {
      while((unsigned long)dptr < ((unsigned long)so->dso->relplt
                                   +(unsigned long)so->dso->relplt_size)) {
        /* check if the obtained address points to executable memory
           (potential callback target) */
        if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
            || PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
            || PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {
          fbt_add_callback(so->dso, (void*)*dptr);
        }
        dptr++; /* advance by sizeof(unsigned long) bytes */
      }
    }
#endif /* CALLBACK_DATA_SECTION_SEARCH */
  }
#endif /* defined(CALLBACK_MAIN_DETECTION) || defined(CALLBACK_DATA_SECTION_SEARCH) */

#if defined(CALLBACK_LIBRARIES_DATA_SECTION_SEARCH)
  if(so->loader != NULL) {
    /* it's a library! Same scan as above, for library objects. */
    unsigned long *dptr = so->data_addr;
    if(dptr) {
      while((unsigned long)dptr < ((unsigned long)so->data_addr
                                   +(unsigned long)so->data_size)) {
        /* check if the obtained address points to executable memory
           (potential callback target) */
        if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
            || PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
            || PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {
          fbt_add_callback(so->dso, (void*)*dptr);
        }
        dptr++; /* advance by sizeof(unsigned long) bytes */
      }
    }
    /* go through GOT */
    dptr = (unsigned long*)(so->got);
    if(dptr) {
      while((unsigned long)dptr < ((unsigned long)so->got
                                   +(unsigned long)so->got_size)) {
        /* check if the obtained address points to executable memory
           (potential callback target) */
        if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
            || PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
            || PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {
          fbt_add_callback(so->dso, (void*)*dptr);
        }
        dptr++; /* advance by sizeof(unsigned long) bytes */
      }
    }
    /* go through GOT.PLT */
    dptr = (unsigned long*)(so->gotplt);
    if(dptr) {
      while((unsigned long)dptr < ((unsigned long)so->gotplt
                                   +(unsigned long)so->gotplt_size)) {
        /* check if the obtained address points to executable memory
           (potential callback target) */
        if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
            || PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
            || PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {
          fbt_add_callback(so->dso, (void*)*dptr);
        }
        dptr++; /* advance by sizeof(unsigned long) bytes */
      }
    }
    /* go through rodata */
    dptr = (unsigned long*)(so->dso->rodata);
    if(dptr) {
      while((unsigned long)dptr < ((unsigned long)so->dso->rodata
                                   +(unsigned long)so->dso->rodata_size)) {
        /* check if the obtained address points to executable memory
           (potential callback target) */
        if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
            || PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
            || PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {
          fbt_add_callback(so->dso, (void*)*dptr);
        }
        dptr++; /* advance by sizeof(unsigned long) bytes */
      }
    }
    /* go through reldyn */
    dptr = (unsigned long*)(so->dso->reldyn);
    if(dptr) {
      while((unsigned long)dptr < ((unsigned long)so->dso->reldyn
                                   +(unsigned long)so->dso->reldyn_size)) {
        /* check if the obtained address points to executable memory
           (potential callback target) */
        if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
            || PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
            || PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {
          fbt_add_callback(so->dso, (void*)*dptr);
        }
        dptr++; /* advance by sizeof(unsigned long) bytes */
      }
    }
    /* go through relplt */
    dptr = (unsigned long*)(so->dso->relplt);
    if(dptr) {
      while((unsigned long)dptr < ((unsigned long)so->dso->relplt
                                   +(unsigned long)so->dso->relplt_size)) {
        /* check if the obtained address points to executable memory
           (potential callback target) */
        if (PTR_IN_REGION(*dptr, so->text_addr, so->text_size)
            || PTR_IN_REGION(*dptr, so->dso->init, so->dso->init_size)
            || PTR_IN_REGION(*dptr, so->dso->fini, so->dso->fini_size)) {
          fbt_add_callback(so->dso, (void*)*dptr);
        }
        dptr++; /* advance by sizeof(unsigned long) bytes */
      }
    }
  }
#endif /* CALLBACK_LIBRARIES_DATA_SECTION_SEARCH */
#endif /* VERIFY_CFTX */

  /* All necessary information in memory -> unmap file */
  sl_munmap(file_map, file_info.st_size);
  sl_close(fd);

  /* Protect dependencies and local search scope */
  PROT_DATA(so->deps, so->deps_count*sizeof(dso *));
  PROT_DATA(so->lscope, so->lscope_num*sizeof(dso *));

  return so;
}